author     Michaël Zasso <targos@protonmail.com>    2021-02-11 19:03:35 +0100
committer  Michaël Zasso <targos@protonmail.com>    2021-02-11 19:09:18 +0100
commit     c7b329225126ad3b9eeb2408e0f0801f1aea5eb1 (patch)
tree       193c193111d5f302031ad345bc94d17a3f67bf66 /deps
parent     6ea9af9906cd74ed07ca05cf6aa44382025a6044 (diff)
download   node-new-c7b329225126ad3b9eeb2408e0f0801f1aea5eb1.tar.gz
deps: update V8 to 8.8.278.17
PR-URL: https://github.com/nodejs/node/pull/36139
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Reviewed-By: Shelley Vohr <codebytere@gmail.com>
Diffstat (limited to 'deps')
-rw-r--r--  deps/v8/.gitignore | 1
-rw-r--r--  deps/v8/AUTHORS | 4
-rw-r--r--  deps/v8/BUILD.gn | 211
-rw-r--r--  deps/v8/DEPS | 58
-rw-r--r--  deps/v8/DIR_METADATA | 12
-rw-r--r--  deps/v8/OWNERS | 3
-rw-r--r--  deps/v8/PPC_OWNERS | 1
-rw-r--r--  deps/v8/PRESUBMIT.py | 10
-rw-r--r--  deps/v8/S390_OWNERS | 1
-rw-r--r--  deps/v8/base/trace_event/common/trace_event_common.h | 1
-rw-r--r--  deps/v8/gni/proto_library.gni | 11
-rw-r--r--  deps/v8/include/DIR_METADATA | 11
-rw-r--r--  deps/v8/include/OWNERS | 2
-rw-r--r--  deps/v8/include/cppgc/DEPS | 1
-rw-r--r--  deps/v8/include/cppgc/allocation.h | 48
-rw-r--r--  deps/v8/include/cppgc/cross-thread-persistent.h | 311
-rw-r--r--  deps/v8/include/cppgc/custom-space.h | 31
-rw-r--r--  deps/v8/include/cppgc/default-platform.h | 87
-rw-r--r--  deps/v8/include/cppgc/ephemeron-pair.h | 25
-rw-r--r--  deps/v8/include/cppgc/garbage-collected.h | 4
-rw-r--r--  deps/v8/include/cppgc/heap.h | 14
-rw-r--r--  deps/v8/include/cppgc/internal/gc-info.h | 6
-rw-r--r--  deps/v8/include/cppgc/internal/name-trait.h | 111
-rw-r--r--  deps/v8/include/cppgc/internal/persistent-node.h | 10
-rw-r--r--  deps/v8/include/cppgc/internal/pointer-policies.h | 18
-rw-r--r--  deps/v8/include/cppgc/liveness-broker.h | 2
-rw-r--r--  deps/v8/include/cppgc/member.h | 6
-rw-r--r--  deps/v8/include/cppgc/name-provider.h | 65
-rw-r--r--  deps/v8/include/cppgc/platform.h | 52
-rw-r--r--  deps/v8/include/cppgc/source-location.h | 2
-rw-r--r--  deps/v8/include/cppgc/trace-trait.h | 9
-rw-r--r--  deps/v8/include/cppgc/visitor.h | 122
-rw-r--r--  deps/v8/include/js_protocol.pdl | 12
-rw-r--r--  deps/v8/include/v8-cppgc.h | 194
-rw-r--r--  deps/v8/include/v8-fast-api-calls.h | 62
-rw-r--r--  deps/v8/include/v8-inspector.h | 4
-rw-r--r--  deps/v8/include/v8-internal.h | 66
-rw-r--r--  deps/v8/include/v8-metrics.h | 5
-rw-r--r--  deps/v8/include/v8-platform.h | 29
-rw-r--r--  deps/v8/include/v8-profiler.h | 31
-rw-r--r--  deps/v8/include/v8-unwinder-state.h | 30
-rw-r--r--  deps/v8/include/v8-version.h | 6
-rw-r--r--  deps/v8/include/v8.h | 417
-rw-r--r--  deps/v8/include/v8config.h | 9
-rw-r--r--  deps/v8/infra/mb/mb_config.pyl | 18
-rw-r--r--  deps/v8/infra/testing/PRESUBMIT.py | 1
-rw-r--r--  deps/v8/infra/testing/builders.pyl | 133
-rw-r--r--  deps/v8/samples/cppgc/cppgc-for-v8-embedders.cc | 40
-rw-r--r--  deps/v8/src/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/OWNERS | 2
-rw-r--r--  deps/v8/src/api/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/api/OWNERS | 2
-rw-r--r--  deps/v8/src/api/api-natives.cc | 3
-rw-r--r--  deps/v8/src/api/api.cc | 226
-rw-r--r--  deps/v8/src/asmjs/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/asmjs/OWNERS | 2
-rw-r--r--  deps/v8/src/ast/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/ast/OWNERS | 2
-rw-r--r--  deps/v8/src/ast/ast-function-literal-id-reindexer.cc | 9
-rw-r--r--  deps/v8/src/ast/ast-source-ranges.h | 19
-rw-r--r--  deps/v8/src/ast/ast-value-factory.cc | 67
-rw-r--r--  deps/v8/src/ast/ast-value-factory.h | 37
-rw-r--r--  deps/v8/src/ast/ast.cc | 8
-rw-r--r--  deps/v8/src/ast/ast.h | 12
-rw-r--r--  deps/v8/src/ast/modules.cc | 93
-rw-r--r--  deps/v8/src/ast/modules.h | 63
-rw-r--r--  deps/v8/src/ast/prettyprinter.cc | 2
-rw-r--r--  deps/v8/src/ast/scopes.h | 2
-rw-r--r--  deps/v8/src/base/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/base/OWNERS | 2
-rw-r--r--  deps/v8/src/base/bounded-page-allocator.h | 4
-rw-r--r--  deps/v8/src/base/build_config.h | 4
-rw-r--r--  deps/v8/src/base/debug/stack_trace_posix.cc | 11
-rw-r--r--  deps/v8/src/base/hashmap-entry.h | 52
-rw-r--r--  deps/v8/src/base/hashmap.h | 46
-rw-r--r--  deps/v8/src/base/lazy-instance.h | 5
-rw-r--r--  deps/v8/src/base/macros.h | 4
-rw-r--r--  deps/v8/src/base/platform/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/base/platform/OWNERS | 2
-rw-r--r--  deps/v8/src/base/platform/condition-variable.h | 4
-rw-r--r--  deps/v8/src/base/platform/mutex.h | 20
-rw-r--r--  deps/v8/src/base/platform/platform-aix.cc | 2
-rw-r--r--  deps/v8/src/base/platform/platform-freebsd.cc | 2
-rw-r--r--  deps/v8/src/base/platform/platform-fuchsia.cc | 12
-rw-r--r--  deps/v8/src/base/platform/platform-macos.cc | 2
-rw-r--r--  deps/v8/src/base/platform/platform-posix.cc | 16
-rw-r--r--  deps/v8/src/base/platform/platform-win32.cc | 4
-rw-r--r--  deps/v8/src/base/platform/platform.h | 50
-rw-r--r--  deps/v8/src/base/platform/semaphore.h | 4
-rw-r--r--  deps/v8/src/base/platform/wrappers.h | 31
-rw-r--r--  deps/v8/src/base/platform/wrappers_starboard.cc | 31
-rw-r--r--  deps/v8/src/base/platform/wrappers_std.cc | 34
-rw-r--r--  deps/v8/src/base/region-allocator.h | 4
-rw-r--r--  deps/v8/src/base/ring-buffer.h | 5
-rw-r--r--  deps/v8/src/base/safe_conversions.h | 372
-rw-r--r--  deps/v8/src/base/safe_conversions_arm_impl.h | 60
-rw-r--r--  deps/v8/src/base/safe_conversions_impl.h | 822
-rw-r--r--  deps/v8/src/base/threaded-list.h | 4
-rw-r--r--  deps/v8/src/builtins/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/builtins/OWNERS | 2
-rw-r--r--  deps/v8/src/builtins/accessors.cc | 11
-rw-r--r--  deps/v8/src/builtins/accessors.h | 4
-rw-r--r--  deps/v8/src/builtins/aggregate-error.tq | 4
-rw-r--r--  deps/v8/src/builtins/arm/builtins-arm.cc | 1032
-rw-r--r--  deps/v8/src/builtins/arm64/builtins-arm64.cc | 1364
-rw-r--r--  deps/v8/src/builtins/base.tq | 28
-rw-r--r--  deps/v8/src/builtins/builtins-api.cc | 16
-rw-r--r--  deps/v8/src/builtins/builtins-array-gen.cc | 195
-rw-r--r--  deps/v8/src/builtins/builtins-array.cc | 4
-rw-r--r--  deps/v8/src/builtins/builtins-arraybuffer.cc | 10
-rw-r--r--  deps/v8/src/builtins/builtins-async-function-gen.cc | 44
-rw-r--r--  deps/v8/src/builtins/builtins-async-gen.cc | 6
-rw-r--r--  deps/v8/src/builtins/builtins-async-generator-gen.cc | 86
-rw-r--r--  deps/v8/src/builtins/builtins-async-iterator-gen.cc | 12
-rw-r--r--  deps/v8/src/builtins/builtins-bigint-gen.cc | 15
-rw-r--r--  deps/v8/src/builtins/builtins-bigint.tq (renamed from deps/v8/src/builtins/bigint.tq) | 18
-rw-r--r--  deps/v8/src/builtins/builtins-call-gen.cc | 111
-rw-r--r--  deps/v8/src/builtins/builtins-callsite.cc | 16
-rw-r--r--  deps/v8/src/builtins/builtins-collections-gen.cc | 192
-rw-r--r--  deps/v8/src/builtins/builtins-constructor-gen.cc | 82
-rw-r--r--  deps/v8/src/builtins/builtins-conversion-gen.cc | 14
-rw-r--r--  deps/v8/src/builtins/builtins-dataview.cc | 2
-rw-r--r--  deps/v8/src/builtins/builtins-date-gen.cc | 82
-rw-r--r--  deps/v8/src/builtins/builtins-definitions.h | 11
-rw-r--r--  deps/v8/src/builtins/builtins-function.cc | 10
-rw-r--r--  deps/v8/src/builtins/builtins-generator-gen.cc | 20
-rw-r--r--  deps/v8/src/builtins/builtins-global-gen.cc | 8
-rw-r--r--  deps/v8/src/builtins/builtins-handler-gen.cc | 104
-rw-r--r--  deps/v8/src/builtins/builtins-internal-gen.cc | 177
-rw-r--r--  deps/v8/src/builtins/builtins-intl-gen.cc | 14
-rw-r--r--  deps/v8/src/builtins/builtins-iterator-gen.cc | 46
-rw-r--r--  deps/v8/src/builtins/builtins-lazy-gen.cc | 109
-rw-r--r--  deps/v8/src/builtins/builtins-lazy-gen.h | 2
-rw-r--r--  deps/v8/src/builtins/builtins-microtask-queue-gen.cc | 13
-rw-r--r--  deps/v8/src/builtins/builtins-number-gen.cc | 75
-rw-r--r--  deps/v8/src/builtins/builtins-object-gen.cc | 104
-rw-r--r--  deps/v8/src/builtins/builtins-object.cc | 4
-rw-r--r--  deps/v8/src/builtins/builtins-proxy-gen.cc | 16
-rw-r--r--  deps/v8/src/builtins/builtins-reflect.cc | 6
-rw-r--r--  deps/v8/src/builtins/builtins-regexp-gen.cc | 102
-rw-r--r--  deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc | 164
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.cc | 282
-rw-r--r--  deps/v8/src/builtins/builtins-string-gen.h | 28
-rw-r--r--  deps/v8/src/builtins/builtins-string.cc | 2
-rw-r--r--  deps/v8/src/builtins/builtins-typed-array-gen.cc | 49
-rw-r--r--  deps/v8/src/builtins/builtins-typed-array-gen.h | 1
-rw-r--r--  deps/v8/src/builtins/builtins-utils-gen.h | 51
-rw-r--r--  deps/v8/src/builtins/builtins-utils-inl.h | 8
-rw-r--r--  deps/v8/src/builtins/builtins-utils.h | 5
-rw-r--r--  deps/v8/src/builtins/builtins-wasm-gen.cc | 61
-rw-r--r--  deps/v8/src/builtins/builtins.cc | 5
-rw-r--r--  deps/v8/src/builtins/cast.tq | 17
-rw-r--r--  deps/v8/src/builtins/constants-table-builder.cc | 15
-rw-r--r--  deps/v8/src/builtins/convert.tq | 5
-rw-r--r--  deps/v8/src/builtins/generate-bytecodes-builtins-list.cc | 34
-rw-r--r--  deps/v8/src/builtins/ia32/builtins-ia32.cc | 1049
-rw-r--r--  deps/v8/src/builtins/ic-dynamic-map-checks.tq | 155
-rw-r--r--  deps/v8/src/builtins/ic.tq | 4
-rw-r--r--  deps/v8/src/builtins/internal.tq | 43
-rw-r--r--  deps/v8/src/builtins/mips/builtins-mips.cc | 777
-rw-r--r--  deps/v8/src/builtins/mips64/builtins-mips64.cc | 769
-rw-r--r--  deps/v8/src/builtins/ppc/builtins-ppc.cc | 755
-rw-r--r--  deps/v8/src/builtins/regexp.tq | 10
-rw-r--r--  deps/v8/src/builtins/s390/builtins-s390.cc | 760
-rw-r--r--  deps/v8/src/builtins/setup-builtins-internal.cc | 3
-rw-r--r--  deps/v8/src/builtins/string-trim.tq | 168
-rw-r--r--  deps/v8/src/builtins/torque-internal.tq | 5
-rw-r--r--  deps/v8/src/builtins/typed-array-createtypedarray.tq | 1
-rw-r--r--  deps/v8/src/builtins/typed-array-sort.tq | 8
-rw-r--r--  deps/v8/src/builtins/typed-array.tq | 4
-rw-r--r--  deps/v8/src/builtins/wasm.tq | 35
-rw-r--r--  deps/v8/src/builtins/x64/builtins-x64.cc | 1117
-rw-r--r--  deps/v8/src/codegen/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/codegen/OWNERS | 2
-rw-r--r--  deps/v8/src/codegen/arm/assembler-arm.cc | 53
-rw-r--r--  deps/v8/src/codegen/arm/assembler-arm.h | 4
-rw-r--r--  deps/v8/src/codegen/arm/interface-descriptors-arm.cc | 48
-rw-r--r--  deps/v8/src/codegen/arm/macro-assembler-arm.cc | 180
-rw-r--r--  deps/v8/src/codegen/arm/macro-assembler-arm.h | 27
-rw-r--r--  deps/v8/src/codegen/arm64/assembler-arm64.cc | 15
-rw-r--r--  deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc | 48
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.cc | 193
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.h | 20
-rw-r--r--  deps/v8/src/codegen/arm64/register-arm64.h | 2
-rw-r--r--  deps/v8/src/codegen/assembler.cc | 13
-rw-r--r--  deps/v8/src/codegen/assembler.h | 11
-rw-r--r--  deps/v8/src/codegen/bailout-reason.h | 2
-rw-r--r--  deps/v8/src/codegen/code-desc.h | 34
-rw-r--r--  deps/v8/src/codegen/code-stub-assembler.cc | 922
-rw-r--r--  deps/v8/src/codegen/code-stub-assembler.h | 532
-rw-r--r--  deps/v8/src/codegen/compilation-cache.cc | 12
-rw-r--r--  deps/v8/src/codegen/compilation-cache.h | 2
-rw-r--r--  deps/v8/src/codegen/compiler.cc | 222
-rw-r--r--  deps/v8/src/codegen/compiler.h | 30
-rw-r--r--  deps/v8/src/codegen/external-reference.cc | 17
-rw-r--r--  deps/v8/src/codegen/external-reference.h | 25
-rw-r--r--  deps/v8/src/codegen/handler-table.cc | 2
-rw-r--r--  deps/v8/src/codegen/ia32/assembler-ia32.cc | 16
-rw-r--r--  deps/v8/src/codegen/ia32/assembler-ia32.h | 1
-rw-r--r--  deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc | 48
-rw-r--r--  deps/v8/src/codegen/ia32/macro-assembler-ia32.cc | 169
-rw-r--r--  deps/v8/src/codegen/ia32/macro-assembler-ia32.h | 26
-rw-r--r--  deps/v8/src/codegen/interface-descriptors.cc | 40
-rw-r--r--  deps/v8/src/codegen/interface-descriptors.h | 33
-rw-r--r--  deps/v8/src/codegen/mips/assembler-mips.cc | 10
-rw-r--r--  deps/v8/src/codegen/mips/interface-descriptors-mips.cc | 48
-rw-r--r--  deps/v8/src/codegen/mips/macro-assembler-mips.cc | 139
-rw-r--r--  deps/v8/src/codegen/mips/macro-assembler-mips.h | 28
-rw-r--r--  deps/v8/src/codegen/mips64/assembler-mips64.cc | 14
-rw-r--r--  deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc | 48
-rw-r--r--  deps/v8/src/codegen/mips64/macro-assembler-mips64.cc | 145
-rw-r--r--  deps/v8/src/codegen/mips64/macro-assembler-mips64.h | 28
-rw-r--r--  deps/v8/src/codegen/optimized-compilation-info.cc | 7
-rw-r--r--  deps/v8/src/codegen/optimized-compilation-info.h | 9
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc.cc | 20
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc.h | 2
-rw-r--r--  deps/v8/src/codegen/ppc/constants-ppc.h | 48
-rw-r--r--  deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc | 48
-rw-r--r--  deps/v8/src/codegen/ppc/macro-assembler-ppc.cc | 45
-rw-r--r--  deps/v8/src/codegen/ppc/macro-assembler-ppc.h | 15
-rw-r--r--  deps/v8/src/codegen/ppc/register-ppc.h | 6
-rw-r--r--  deps/v8/src/codegen/register-configuration.cc | 4
-rw-r--r--  deps/v8/src/codegen/register-configuration.h | 5
-rw-r--r--  deps/v8/src/codegen/reloc-info.cc | 2
-rw-r--r--  deps/v8/src/codegen/s390/assembler-s390.cc | 9
-rw-r--r--  deps/v8/src/codegen/s390/interface-descriptors-s390.cc | 48
-rw-r--r--  deps/v8/src/codegen/s390/macro-assembler-s390.cc | 43
-rw-r--r--  deps/v8/src/codegen/s390/macro-assembler-s390.h | 15
-rw-r--r--  deps/v8/src/codegen/s390/register-s390.h | 6
-rw-r--r--  deps/v8/src/codegen/safepoint-table.cc | 2
-rw-r--r--  deps/v8/src/codegen/tnode.h | 19
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64.cc | 117
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64.h | 101
-rw-r--r--  deps/v8/src/codegen/x64/interface-descriptors-x64.cc | 75
-rw-r--r--  deps/v8/src/codegen/x64/macro-assembler-x64.cc | 360
-rw-r--r--  deps/v8/src/codegen/x64/macro-assembler-x64.h | 62
-rw-r--r--  deps/v8/src/common/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/common/OWNERS | 2
-rw-r--r--  deps/v8/src/common/assert-scope.cc | 2
-rw-r--r--  deps/v8/src/common/assert-scope.h | 24
-rw-r--r--  deps/v8/src/common/external-pointer-inl.h | 93
-rw-r--r--  deps/v8/src/common/external-pointer.h | 43
-rw-r--r--  deps/v8/src/common/globals.h | 194
-rw-r--r--  deps/v8/src/common/message-template.h | 13
-rw-r--r--  deps/v8/src/common/ptr-compr-inl.h | 43
-rw-r--r--  deps/v8/src/common/ptr-compr.h | 4
-rw-r--r--  deps/v8/src/compiler-dispatcher/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/compiler-dispatcher/OWNERS | 2
-rw-r--r--  deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc | 2
-rw-r--r--  deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc | 15
-rw-r--r--  deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h | 7
-rw-r--r--  deps/v8/src/compiler/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/compiler/OWNERS | 3
-rw-r--r--  deps/v8/src/compiler/access-builder.cc | 84
-rw-r--r--  deps/v8/src/compiler/access-builder.h | 3
-rw-r--r--  deps/v8/src/compiler/access-info.cc | 67
-rw-r--r--  deps/v8/src/compiler/access-info.h | 26
-rw-r--r--  deps/v8/src/compiler/add-type-assertions-reducer.h | 5
-rw-r--r--  deps/v8/src/compiler/allocation-builder-inl.h | 3
-rw-r--r--  deps/v8/src/compiler/allocation-builder.h | 2
-rw-r--r--  deps/v8/src/compiler/backend/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/compiler/backend/OWNERS | 2
-rw-r--r--  deps/v8/src/compiler/backend/arm/code-generator-arm.cc | 131
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-codes-arm.h | 38
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc | 38
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc | 132
-rw-r--r--  deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc | 361
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h | 68
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc | 64
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc | 410
-rw-r--r--  deps/v8/src/compiler/backend/code-generator-impl.h | 11
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.cc | 20
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.h | 10
-rw-r--r--  deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc | 210
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h | 52
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc | 52
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc | 136
-rw-r--r--  deps/v8/src/compiler/backend/instruction-codes.h | 200
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.cc | 244
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.h | 6
-rw-r--r--  deps/v8/src/compiler/backend/instruction.h | 36
-rw-r--r--  deps/v8/src/compiler/backend/mid-tier-register-allocator.cc | 843
-rw-r--r--  deps/v8/src/compiler/backend/mid-tier-register-allocator.h | 9
-rw-r--r--  deps/v8/src/compiler/backend/mips/code-generator-mips.cc | 124
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-codes-mips.h | 37
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc | 37
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc | 132
-rw-r--r--  deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc | 181
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h | 39
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc | 39
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc | 190
-rw-r--r--  deps/v8/src/compiler/backend/move-optimizer.h | 5
-rw-r--r--  deps/v8/src/compiler/backend/ppc/OWNERS | 1
-rw-r--r--  deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc | 304
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h | 20
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc | 20
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc | 109
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator-verifier.h | 25
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator.cc | 41
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator.h | 75
-rw-r--r--  deps/v8/src/compiler/backend/s390/code-generator-s390.cc | 181
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-codes-s390.h | 27
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc | 27
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc | 74
-rw-r--r--  deps/v8/src/compiler/backend/spill-placer.h | 5
-rw-r--r--  deps/v8/src/compiler/backend/x64/code-generator-x64.cc | 793
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-codes-x64.h | 69
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc | 69
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc | 641
-rw-r--r--  deps/v8/src/compiler/bytecode-analysis.h | 4
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.cc | 230
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.h | 2
-rw-r--r--  deps/v8/src/compiler/bytecode-liveness-map.h | 4
-rw-r--r--  deps/v8/src/compiler/c-linkage.cc | 3
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc | 65
-rw-r--r--  deps/v8/src/compiler/code-assembler.h | 137
-rw-r--r--  deps/v8/src/compiler/common-node-cache.h | 5
-rw-r--r--  deps/v8/src/compiler/common-operator-reducer.cc | 19
-rw-r--r--  deps/v8/src/compiler/common-operator.cc | 2
-rw-r--r--  deps/v8/src/compiler/common-operator.h | 4
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.cc | 12
-rw-r--r--  deps/v8/src/compiler/compiler-source-position-table.h | 7
-rw-r--r--  deps/v8/src/compiler/constant-folding-reducer.h | 4
-rw-r--r--  deps/v8/src/compiler/control-flow-optimizer.cc | 8
-rw-r--r--  deps/v8/src/compiler/control-flow-optimizer.h | 4
-rw-r--r--  deps/v8/src/compiler/csa-load-elimination.cc | 6
-rw-r--r--  deps/v8/src/compiler/csa-load-elimination.h | 4
-rw-r--r--  deps/v8/src/compiler/dead-code-elimination.h | 4
-rw-r--r--  deps/v8/src/compiler/decompression-optimizer.h | 4
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc | 384
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.h | 4
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.cc | 33
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.h | 4
-rw-r--r--  deps/v8/src/compiler/escape-analysis.cc | 15
-rw-r--r--  deps/v8/src/compiler/feedback-source.cc | 3
-rw-r--r--  deps/v8/src/compiler/feedback-source.h | 1
-rw-r--r--  deps/v8/src/compiler/frame.h | 4
-rw-r--r--  deps/v8/src/compiler/globals.h | 9
-rw-r--r--  deps/v8/src/compiler/graph-assembler.cc | 16
-rw-r--r--  deps/v8/src/compiler/graph-assembler.h | 6
-rw-r--r--  deps/v8/src/compiler/graph-reducer.h | 5
-rw-r--r--  deps/v8/src/compiler/graph-trimmer.h | 4
-rw-r--r--  deps/v8/src/compiler/graph-visualizer.cc | 12
-rw-r--r--  deps/v8/src/compiler/graph.h | 8
-rw-r--r--  deps/v8/src/compiler/heap-refs.h | 18
-rw-r--r--  deps/v8/src/compiler/int64-lowering.cc | 4
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc | 110
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.h | 4
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.cc | 3
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc | 125
-rw-r--r--  deps/v8/src/compiler/js-graph.h | 5
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.cc | 335
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.h | 70
-rw-r--r--  deps/v8/src/compiler/js-heap-copy-reducer.cc | 6
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.cc | 4
-rw-r--r--  deps/v8/src/compiler/js-inlining.cc | 21
-rw-r--r--  deps/v8/src/compiler/js-inlining.h | 2
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.cc | 2
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc | 490
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.h | 58
-rw-r--r--  deps/v8/src/compiler/js-operator.cc | 67
-rw-r--r--  deps/v8/src/compiler/js-operator.h | 80
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.cc | 10
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.h | 9
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc | 120
-rw-r--r--  deps/v8/src/compiler/linkage.cc | 4
-rw-r--r--  deps/v8/src/compiler/linkage.h | 13
-rw-r--r--  deps/v8/src/compiler/load-elimination.h | 4
-rw-r--r--  deps/v8/src/compiler/machine-graph.h | 4
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc | 494
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc | 290
-rw-r--r--  deps/v8/src/compiler/machine-operator.h | 124
-rw-r--r--  deps/v8/src/compiler/map-inference.cc | 6
-rw-r--r--  deps/v8/src/compiler/memory-lowering.cc | 43
-rw-r--r--  deps/v8/src/compiler/memory-lowering.h | 7
-rw-r--r--  deps/v8/src/compiler/node-cache.h | 4
-rw-r--r--  deps/v8/src/compiler/node-marker.h | 4
-rw-r--r--  deps/v8/src/compiler/node-matchers.h | 206
-rw-r--r--  deps/v8/src/compiler/node-origin-table.h | 12
-rw-r--r--  deps/v8/src/compiler/node-properties.cc | 28
-rw-r--r--  deps/v8/src/compiler/node-properties.h | 31
-rw-r--r--  deps/v8/src/compiler/node.cc | 4
-rw-r--r--  deps/v8/src/compiler/node.h | 4
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 61
-rw-r--r--  deps/v8/src/compiler/operator-properties.cc | 11
-rw-r--r--  deps/v8/src/compiler/operator-properties.h | 6
-rw-r--r--  deps/v8/src/compiler/operator.h | 4
-rw-r--r--  deps/v8/src/compiler/pipeline-statistics.h | 13
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 156
-rw-r--r--  deps/v8/src/compiler/processed-feedback.h | 8
-rw-r--r--  deps/v8/src/compiler/property-access-builder.cc | 74
-rw-r--r--  deps/v8/src/compiler/property-access-builder.h | 20
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.cc | 15
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h | 17
-rw-r--r--  deps/v8/src/compiler/redundancy-elimination.h | 4
-rw-r--r--  deps/v8/src/compiler/representation-change.cc | 6
-rw-r--r--  deps/v8/src/compiler/schedule.h | 8
-rw-r--r--  deps/v8/src/compiler/serializer-for-background-compilation.cc | 161
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.cc | 653
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.h | 2
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 60
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.h | 4
-rw-r--r--  deps/v8/src/compiler/simplified-operator-reducer.cc | 52
-rw-r--r--  deps/v8/src/compiler/simplified-operator-reducer.h | 5
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc | 13
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 44
-rw-r--r--  deps/v8/src/compiler/type-narrowing-reducer.h | 4
-rw-r--r--  deps/v8/src/compiler/typed-optimization.cc | 2
-rw-r--r--  deps/v8/src/compiler/typed-optimization.h | 4
-rw-r--r--  deps/v8/src/compiler/typer.cc | 6
-rw-r--r--  deps/v8/src/compiler/typer.h | 4
-rw-r--r--  deps/v8/src/compiler/types.cc | 47
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 2
-rw-r--r--  deps/v8/src/compiler/verifier.h | 4
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc | 1187
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h | 50
-rw-r--r--  deps/v8/src/compiler/zone-stats.h | 12
-rw-r--r--  deps/v8/src/d8/d8.cc | 48
-rw-r--r--  deps/v8/src/d8/d8.h | 3
-rw-r--r--  deps/v8/src/date/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/date/OWNERS | 2
-rw-r--r--  deps/v8/src/debug/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/debug/OWNERS | 2
-rw-r--r--  deps/v8/src/debug/debug-evaluate.cc | 66
-rw-r--r--  deps/v8/src/debug/debug-evaluate.h | 5
-rw-r--r--  deps/v8/src/debug/debug-frames.cc | 10
-rw-r--r--  deps/v8/src/debug/debug-frames.h | 8
-rw-r--r--  deps/v8/src/debug/debug-interface.h | 10
-rw-r--r--  deps/v8/src/debug/debug-stack-trace-iterator.cc | 24
-rw-r--r--  deps/v8/src/debug/debug.cc | 12
-rw-r--r--  deps/v8/src/debug/liveedit.cc | 2
-rw-r--r--  deps/v8/src/debug/ppc/OWNERS | 1
-rw-r--r--  deps/v8/src/debug/wasm/gdb-server/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/debug/wasm/gdb-server/OWNERS | 2
-rw-r--r--  deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc | 4
-rw-r--r--  deps/v8/src/deoptimizer/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/deoptimizer/OWNERS | 2
-rw-r--r--  deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc | 240
-rw-r--r--  deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc | 287
-rw-r--r--  deps/v8/src/deoptimizer/deoptimizer.cc | 253
-rw-r--r--  deps/v8/src/deoptimizer/deoptimizer.h | 83
-rw-r--r--  deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc | 195
-rw-r--r--  deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc | 209
-rw-r--r--  deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc | 209
-rw-r--r--  deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc | 236
-rw-r--r--  deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc | 234
-rw-r--r--  deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc | 211
-rw-r--r--  deps/v8/src/diagnostics/arm/disasm-arm.cc | 1343
-rw-r--r--  deps/v8/src/diagnostics/arm/unwinder-arm.cc | 37
-rw-r--r--  deps/v8/src/diagnostics/arm64/unwinder-arm64.cc | 12
-rw-r--r--  deps/v8/src/diagnostics/basic-block-profiler.cc | 2
-rw-r--r--  deps/v8/src/diagnostics/basic-block-profiler.h | 4
-rw-r--r--  deps/v8/src/diagnostics/disassembler.cc | 5
-rw-r--r--  deps/v8/src/diagnostics/ia32/disasm-ia32.cc | 38
-rw-r--r--  deps/v8/src/diagnostics/ia32/unwinder-ia32.cc | 12
-rw-r--r--  deps/v8/src/diagnostics/mips/unwinder-mips.cc | 12
-rw-r--r--  deps/v8/src/diagnostics/mips64/unwinder-mips64.cc | 12
-rw-r--r--  deps/v8/src/diagnostics/objects-debug.cc | 79
-rw-r--r--  deps/v8/src/diagnostics/objects-printer.cc | 138
-rw-r--r--  deps/v8/src/diagnostics/perf-jit.cc | 11
-rw-r--r--  deps/v8/src/diagnostics/ppc/unwinder-ppc.cc | 8
-rw-r--r--  deps/v8/src/diagnostics/s390/unwinder-s390.cc | 8
-rw-r--r--  deps/v8/src/diagnostics/unwinder.cc | 28
-rw-r--r--  deps/v8/src/diagnostics/unwinder.h | 17
-rw-r--r--  deps/v8/src/diagnostics/unwinding-info-win64.cc | 31
-rw-r--r--  deps/v8/src/diagnostics/x64/disasm-x64.cc | 906
-rw-r--r--  deps/v8/src/diagnostics/x64/unwinder-x64.cc | 12
-rw-r--r--  deps/v8/src/execution/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/execution/OWNERS | 2
-rw-r--r--  deps/v8/src/execution/arguments.h | 6
-rw-r--r--  deps/v8/src/execution/arm/frame-constants-arm.h | 8
-rw-r--r--  deps/v8/src/execution/arm/simulator-arm.cc | 2571
-rw-r--r--  deps/v8/src/execution/arm/simulator-arm.h | 7
-rw-r--r--  deps/v8/src/execution/arm64/frame-constants-arm64.h | 2
-rw-r--r--  deps/v8/src/execution/arm64/pointer-auth-arm64.cc | 3
-rw-r--r--  deps/v8/src/execution/execution.cc | 16
-rw-r--r--  deps/v8/src/execution/external-pointer-table.cc | 22
-rw-r--r--  deps/v8/src/execution/external-pointer-table.h | 80
-rw-r--r--  deps/v8/src/execution/frame-constants.h | 36
-rw-r--r--  deps/v8/src/execution/frames-inl.h | 98
-rw-r--r--  deps/v8/src/execution/frames.cc | 164
-rw-r--r--  deps/v8/src/execution/frames.h | 492
-rw-r--r--  deps/v8/src/execution/isolate-data.h | 22
-rw-r--r--  deps/v8/src/execution/isolate-inl.h | 4
-rw-r--r--  deps/v8/src/execution/isolate-utils-inl.h | 31
-rw-r--r--  deps/v8/src/execution/isolate-utils.h | 2
-rw-r--r--  deps/v8/src/execution/isolate.cc | 281
-rw-r--r--  deps/v8/src/execution/isolate.h | 138
-rw-r--r--  deps/v8/src/execution/local-isolate-inl.h | 4
-rw-r--r--  deps/v8/src/execution/local-isolate.cc | 16
-rw-r--r--  deps/v8/src/execution/local-isolate.h | 22
-rw-r--r--  deps/v8/src/execution/messages.cc | 89
-rw-r--r--  deps/v8/src/execution/messages.h | 27
-rw-r--r--  deps/v8/src/execution/ppc/frame-constants-ppc.h | 4
-rw-r--r--  deps/v8/src/execution/runtime-profiler.cc | 101
-rw-r--r--  deps/v8/src/execution/runtime-profiler.h | 11
-rw-r--r--  deps/v8/src/execution/s390/simulator-s390.cc | 25
-rw-r--r--  deps/v8/src/extensions/gc-extension.cc | 2
-rw-r--r--  deps/v8/src/flags/flag-definitions.h | 100
-rw-r--r--  deps/v8/src/handles/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/handles/OWNERS | 2
-rw-r--r--  deps/v8/src/handles/global-handles.cc | 89
-rw-r--r--  deps/v8/src/handles/global-handles.h | 52
-rw-r--r--  deps/v8/src/handles/handles-inl.h | 10
-rw-r--r--  deps/v8/src/handles/handles.cc | 9
-rw-r--r--  deps/v8/src/handles/handles.h | 9
-rw-r--r--  deps/v8/src/handles/maybe-handles-inl.h | 34
-rw-r--r--  deps/v8/src/handles/maybe-handles.h | 13
-rw-r--r--  deps/v8/src/heap/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/heap/OWNERS | 2
-rw-r--r--  deps/v8/src/heap/array-buffer-sweeper.cc | 137
-rw-r--r--  deps/v8/src/heap/array-buffer-sweeper.h | 45
-rw-r--r--  deps/v8/src/heap/base/stack.cc | 16
-rw-r--r--  deps/v8/src/heap/base/worklist.h | 14
-rw-r--r--  deps/v8/src/heap/basic-memory-chunk.h | 2
-rw-r--r--  deps/v8/src/heap/code-object-registry.cc | 70
-rw-r--r--  deps/v8/src/heap/code-object-registry.h | 6
-rw-r--r--  deps/v8/src/heap/code-stats.cc | 7
-rw-r--r--  deps/v8/src/heap/collection-barrier.cc | 100
-rw-r--r--  deps/v8/src/heap/collection-barrier.h | 93
-rw-r--r--  deps/v8/src/heap/concurrent-allocator.cc | 4
-rw-r--r--  deps/v8/src/heap/concurrent-marking.cc | 209
-rw-r--r--  deps/v8/src/heap/concurrent-marking.h | 58
-rw-r--r--  deps/v8/src/heap/cppgc-js/cpp-heap.cc | 91
-rw-r--r--  deps/v8/src/heap/cppgc-js/cpp-heap.h | 8
-rw-r--r--  deps/v8/src/heap/cppgc-js/cpp-snapshot.cc | 713
-rw-r--r--  deps/v8/src/heap/cppgc-js/cpp-snapshot.h | 29
-rw-r--r--  deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h | 13
-rw-r--r--  deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc | 70
-rw-r--r--  deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h | 30
-rw-r--r--  deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc | 92
-rw-r--r--  deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h | 66
-rw-r--r--  deps/v8/src/heap/cppgc/compaction-worklists.cc | 14
-rw-r--r--  deps/v8/src/heap/cppgc/compaction-worklists.h | 35
-rw-r--r--  deps/v8/src/heap/cppgc/compactor.cc | 505
-rw-r--r--  deps/v8/src/heap/cppgc/compactor.h | 56
-rw-r--r--  deps/v8/src/heap/cppgc/concurrent-marker.cc | 246
-rw-r--r--  deps/v8/src/heap/cppgc/concurrent-marker.h | 76
-rw-r--r--  deps/v8/src/heap/cppgc/default-job.h | 186
-rw-r--r--  deps/v8/src/heap/cppgc/default-platform.cc | 143
-rw-r--r--  deps/v8/src/heap/cppgc/garbage-collector.h | 2
-rw-r--r--  deps/v8/src/heap/cppgc/gc-info-table.h | 3
-rw-r--r--  deps/v8/src/heap/cppgc/gc-info.cc | 5
-rw-r--r--  deps/v8/src/heap/cppgc/globals.h | 3
-rw-r--r--  deps/v8/src/heap/cppgc/heap-base.cc | 11
-rw-r--r--  deps/v8/src/heap/cppgc/heap-base.h | 23
-rw-r--r--  deps/v8/src/heap/cppgc/heap-object-header.cc | 10
-rw-r--r--  deps/v8/src/heap/cppgc/heap-object-header.h | 42
-rw-r--r--  deps/v8/src/heap/cppgc/heap-page.cc | 2
-rw-r--r--  deps/v8/src/heap/cppgc/heap-page.h | 43
-rw-r--r--  deps/v8/src/heap/cppgc/heap-space.cc | 14
-rw-r--r--  deps/v8/src/heap/cppgc/heap-space.h | 8
-rw-r--r--  deps/v8/src/heap/cppgc/heap.cc | 18
-rw-r--r--  deps/v8/src/heap/cppgc/incremental-marking-schedule.cc | 25
-rw-r--r--  deps/v8/src/heap/cppgc/incremental-marking-schedule.h | 10
-rw-r--r--  deps/v8/src/heap/cppgc/marker.cc | 201
-rw-r--r--  deps/v8/src/heap/cppgc/marker.h | 23
-rw-r--r--  deps/v8/src/heap/cppgc/marking-state.cc | 22
-rw-r--r--  deps/v8/src/heap/cppgc/marking-state.h | 359
-rw-r--r--  deps/v8/src/heap/cppgc/marking-verifier.cc | 96
-rw-r--r--  deps/v8/src/heap/cppgc/marking-verifier.h | 49
-rw-r--r--  deps/v8/src/heap/cppgc/marking-visitor.cc | 83
-rw-r--r--  deps/v8/src/heap/cppgc/marking-visitor.h | 59
-rw-r--r--  deps/v8/src/heap/cppgc/marking-worklists.cc | 7
-rw-r--r--  deps/v8/src/heap/cppgc/marking-worklists.h | 139
-rw-r--r--  deps/v8/src/heap/cppgc/name-trait.cc | 41
-rw-r--r--  deps/v8/src/heap/cppgc/object-allocator.h | 12
-rw-r--r--  deps/v8/src/heap/cppgc/object-start-bitmap.h | 58
-rw-r--r--  deps/v8/src/heap/cppgc/persistent-node.cc | 9
-rw-r--r--  deps/v8/src/heap/cppgc/pointer-policies.cc | 12
-rw-r--r--  deps/v8/src/heap/cppgc/process-heap.cc | 4
-rw-r--r--  deps/v8/src/heap/cppgc/process-heap.h | 18
-rw-r--r--  deps/v8/src/heap/cppgc/raw-heap.cc | 13
-rw-r--r--  deps/v8/src/heap/cppgc/raw-heap.h | 3
-rw-r--r--  deps/v8/src/heap/cppgc/sanitizers.h | 31
-rw-r--r--  deps/v8/src/heap/cppgc/sweeper.cc | 59
-rw-r--r--  deps/v8/src/heap/cppgc/sweeper.h | 16
-rw-r--r--  deps/v8/src/heap/cppgc/trace-trait.cc | 10
-rw-r--r--  deps/v8/src/heap/cppgc/visitor.cc | 17
-rw-r--r--  deps/v8/src/heap/cppgc/visitor.h | 16
-rw-r--r--  deps/v8/src/heap/cppgc/write-barrier.cc | 9
-rw-r--r--  deps/v8/src/heap/embedder-tracing.cc | 30
-rw-r--r--  deps/v8/src/heap/embedder-tracing.h | 19
-rw-r--r--  deps/v8/src/heap/factory-base.cc | 54
-rw-r--r--  deps/v8/src/heap/factory-base.h | 2
-rw-r--r--  deps/v8/src/heap/factory.cc | 525
-rw-r--r--  deps/v8/src/heap/factory.h | 70
-rw-r--r--  deps/v8/src/heap/free-list.cc | 44
-rw-r--r--  deps/v8/src/heap/free-list.h | 25
-rw-r--r--  deps/v8/src/heap/gc-tracer.cc | 31
-rw-r--r--  deps/v8/src/heap/heap-inl.h | 44
-rw-r--r--  deps/v8/src/heap/heap-write-barrier-inl.h | 1
-rw-r--r--  deps/v8/src/heap/heap.cc | 425
-rw-r--r--  deps/v8/src/heap/heap.h | 106
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc | 108
-rw-r--r--  deps/v8/src/heap/incremental-marking.h | 3
-rw-r--r--  deps/v8/src/heap/local-heap-inl.h | 6
-rw-r--r--  deps/v8/src/heap/local-heap.cc | 58
-rw-r--r--  deps/v8/src/heap/local-heap.h | 19
-rw-r--r--  deps/v8/src/heap/mark-compact-inl.h | 4
-rw-r--r--  deps/v8/src/heap/mark-compact.cc | 712
-rw-r--r--  deps/v8/src/heap/mark-compact.h | 65
-rw-r--r--  deps/v8/src/heap/marking-visitor-inl.h | 85
-rw-r--r--  deps/v8/src/heap/marking-visitor.h | 53
-rw-r--r--  deps/v8/src/heap/memory-allocator.cc | 87
-rw-r--r--  deps/v8/src/heap/memory-allocator.h | 18
-rw-r--r--  deps/v8/src/heap/memory-chunk-layout.cc | 7
-rw-r--r--  deps/v8/src/heap/memory-chunk-layout.h | 2
-rw-r--r--  deps/v8/src/heap/memory-chunk.h | 1
-rw-r--r--  deps/v8/src/heap/memory-measurement-inl.h | 6
-rw-r--r--  deps/v8/src/heap/memory-measurement.cc | 32
-rw-r--r--  deps/v8/src/heap/memory-measurement.h | 5
-rw-r--r--  deps/v8/src/heap/new-spaces.cc | 2
-rw-r--r--  deps/v8/src/heap/object-stats.cc | 11
-rw-r--r--  deps/v8/src/heap/objects-visiting-inl.h | 6
-rw-r--r--  deps/v8/src/heap/objects-visiting.cc | 8
-rw-r--r--  deps/v8/src/heap/objects-visiting.h | 3
-rw-r--r--  deps/v8/src/heap/paged-spaces.cc | 61
-rw-r--r--  deps/v8/src/heap/paged-spaces.h | 6
-rw-r--r--  deps/v8/src/heap/parallel-work-item.h | 32
-rw-r--r--  deps/v8/src/heap/read-only-heap-inl.h | 4
-rw-r--r--  deps/v8/src/heap/read-only-heap.cc | 35
-rw-r--r--  deps/v8/src/heap/read-only-heap.h | 9
-rw-r--r--  deps/v8/src/heap/read-only-spaces.cc | 21
-rw-r--r--  deps/v8/src/heap/read-only-spaces.h | 7
-rw-r--r--  deps/v8/src/heap/safepoint.cc | 21
-rw-r--r--  deps/v8/src/heap/safepoint.h | 32
-rw-r--r--  deps/v8/src/heap/scavenger-inl.h | 18
-rw-r--r--  deps/v8/src/heap/scavenger.cc | 212
-rw-r--r--  deps/v8/src/heap/scavenger.h | 101
-rw-r--r--  deps/v8/src/heap/setup-heap-internal.cc | 43
-rw-r--r--  deps/v8/src/heap/spaces.h | 6
-rw-r--r--  deps/v8/src/heap/third-party/heap-api.h | 5
-rw-r--r--  deps/v8/src/heap/weak-object-worklists.cc | 172
-rw-r--r--  deps/v8/src/heap/weak-object-worklists.h | 90
-rw-r--r--  deps/v8/src/ic/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/ic/OWNERS | 2
-rw-r--r--  deps/v8/src/ic/accessor-assembler.cc | 261
-rw-r--r--  deps/v8/src/ic/call-optimization.cc | 11
-rw-r--r--  deps/v8/src/ic/handler-configuration.cc | 4
-rw-r--r--  deps/v8/src/ic/ic-inl.h | 3
-rw-r--r--  deps/v8/src/ic/ic.cc | 31
-rw-r--r--  deps/v8/src/ic/keyed-store-generic.cc | 18
-rw-r--r--  deps/v8/src/init/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/init/OWNERS | 2
-rw-r--r--  deps/v8/src/init/bootstrapper.cc | 57
-rw-r--r--  deps/v8/src/init/heap-symbols.h | 5
-rw-r--r--  deps/v8/src/init/isolate-allocator.cc | 20
-rw-r--r--  deps/v8/src/init/isolate-allocator.h | 10
-rw-r--r--  deps/v8/src/inspector/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/inspector/OWNERS | 2
-rw-r--r--  deps/v8/src/inspector/injected-script.cc | 31
-rw-r--r--  deps/v8/src/inspector/remote-object-id.cc | 81
-rw-r--r--  deps/v8/src/inspector/remote-object-id.h | 21
-rw-r--r--  deps/v8/src/inspector/string-16.h | 2
-rw-r--r--  deps/v8/src/inspector/v8-console.cc | 1
-rw-r--r--  deps/v8/src/inspector/v8-debugger-agent-impl.cc | 4
-rw-r--r--  deps/v8/src/inspector/v8-debugger.cc | 4
-rw-r--r--  deps/v8/src/inspector/v8-inspector-session-impl.cc | 2
-rw-r--r--  deps/v8/src/inspector/v8-runtime-agent-impl.cc | 80
-rw-r--r--  deps/v8/src/inspector/v8-runtime-agent-impl.h | 9
-rw-r--r--  deps/v8/src/inspector/v8-stack-trace-impl.cc | 12
-rw-r--r--  deps/v8/src/inspector/value-mirror.cc | 23
-rw-r--r--  deps/v8/src/interpreter/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/interpreter/OWNERS | 2
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-accessor.h | 5
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.cc | 9
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-builder.h | 6
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-iterator.h | 6
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-random-iterator.h | 6
-rw-r--r--  deps/v8/src/interpreter/bytecode-array-writer.h | 3
-rw-r--r--  deps/v8/src/interpreter/bytecode-generator.cc | 48
-rw-r--r--  deps/v8/src/interpreter/bytecode-label.h | 4
-rw-r--r--  deps/v8/src/interpreter/bytecode-register-allocator.h | 5
-rw-r--r--  deps/v8/src/interpreter/bytecode-register-optimizer.cc | 8
-rw-r--r--  deps/v8/src/interpreter/bytecode-register-optimizer.h | 10
-rw-r--r--  deps/v8/src/interpreter/bytecode-register.cc | 15
-rw-r--r--  deps/v8/src/interpreter/bytecodes.h | 1
-rw-r--r--  deps/v8/src/interpreter/constant-array-builder.h | 5
-rw-r--r--  deps/v8/src/interpreter/handler-table-builder.h | 4
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.cc | 87
-rw-r--r--  deps/v8/src/interpreter/interpreter-assembler.h | 14
-rw-r--r--  deps/v8/src/interpreter/interpreter-generator.cc | 139
-rw-r--r--  deps/v8/src/interpreter/interpreter-intrinsics-generator.cc | 199
-rw-r--r--  deps/v8/src/interpreter/interpreter.cc | 30
-rw-r--r--  deps/v8/src/interpreter/interpreter.h | 4
-rw-r--r--  deps/v8/src/json/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/json/OWNERS | 2
-rw-r--r--  deps/v8/src/json/json-parser.cc | 18
-rw-r--r--  deps/v8/src/json/json-stringifier.cc | 6
-rw-r--r--  deps/v8/src/libplatform/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/libplatform/OWNERS | 2
-rw-r--r--  deps/v8/src/libplatform/default-job.cc | 27
-rw-r--r--  deps/v8/src/libplatform/default-job.h | 20
-rw-r--r--  deps/v8/src/libsampler/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/libsampler/OWNERS | 2
-rw-r--r--  deps/v8/src/libsampler/sampler.cc | 4
-rw-r--r--  deps/v8/src/logging/counters-definitions.h | 11
-rw-r--r--  deps/v8/src/logging/counters.cc | 17
-rw-r--r--  deps/v8/src/logging/counters.h | 6
-rw-r--r--  deps/v8/src/logging/log.cc | 111
-rw-r--r--  deps/v8/src/logging/log.h | 2
-rw-r--r--  deps/v8/src/logging/metrics.h | 7
-rw-r--r--  deps/v8/src/numbers/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/numbers/OWNERS | 2
-rw-r--r--  deps/v8/src/objects/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/objects/OWNERS | 2
-rw-r--r--  deps/v8/src/objects/all-objects-inl.h | 104
-rw-r--r--  deps/v8/src/objects/allocation-site-inl.h | 2
-rw-r--r--  deps/v8/src/objects/allocation-site.h | 2
-rw-r--r--  deps/v8/src/objects/api-callbacks-inl.h | 3
-rw-r--r--  deps/v8/src/objects/api-callbacks.h | 3
-rw-r--r--  deps/v8/src/objects/arguments-inl.h | 2
-rw-r--r--  deps/v8/src/objects/arguments.h | 2
-rw-r--r--  deps/v8/src/objects/backing-store.cc | 78
-rw-r--r--  deps/v8/src/objects/backing-store.h | 4
-rw-r--r--  deps/v8/src/objects/bigint-inl.h | 24
-rw-r--r--  deps/v8/src/objects/bigint.cc | 14
-rw-r--r--  deps/v8/src/objects/bigint.h | 2
-rw-r--r--  deps/v8/src/objects/bigint.tq | 21
-rw-r--r--  deps/v8/src/objects/cell-inl.h | 2
-rw-r--r--  deps/v8/src/objects/cell.h | 3
-rw-r--r--  deps/v8/src/objects/class-definitions-tq-deps-inl.h | 44
-rw-r--r--  deps/v8/src/objects/code-inl.h | 218
-rw-r--r--  deps/v8/src/objects/code-kind.h | 93
-rw-r--r--  deps/v8/src/objects/code.cc | 76
-rw-r--r--  deps/v8/src/objects/code.h | 340
-rw-r--r--  deps/v8/src/objects/compilation-cache-table-inl.h (renamed from deps/v8/src/objects/compilation-cache-inl.h) | 9
-rw-r--r--  deps/v8/src/objects/compilation-cache-table.cc | 447
-rw-r--r--  deps/v8/src/objects/compilation-cache-table.h (renamed from deps/v8/src/objects/compilation-cache.h) | 62
-rw-r--r--  deps/v8/src/objects/compressed-slots-inl.h | 18
-rw-r--r--  deps/v8/src/objects/compressed-slots.h | 16
-rw-r--r--  deps/v8/src/objects/contexts-inl.h | 24
-rw-r--r--  deps/v8/src/objects/contexts.h | 7
-rw-r--r--  deps/v8/src/objects/data-handler-inl.h | 2
-rw-r--r--  deps/v8/src/objects/data-handler.h | 3
-rw-r--r--  deps/v8/src/objects/debug-objects-inl.h | 2
-rw-r--r--  deps/v8/src/objects/debug-objects.h | 2
-rw-r--r--  deps/v8/src/objects/descriptor-array-inl.h | 23
-rw-r--r--  deps/v8/src/objects/descriptor-array.h | 16
-rw-r--r--  deps/v8/src/objects/descriptor-array.tq | 8
-rw-r--r--  deps/v8/src/objects/dictionary-inl.h | 20
-rw-r--r--  deps/v8/src/objects/dictionary.h | 40
-rw-r--r--  deps/v8/src/objects/elements.cc | 21
-rw-r--r--  deps/v8/src/objects/elements.h | 4
-rw-r--r--  deps/v8/src/objects/embedder-data-array-inl.h | 5
-rw-r--r--  deps/v8/src/objects/embedder-data-array.h | 3
-rw-r--r--  deps/v8/src/objects/embedder-data-slot-inl.h | 90
-rw-r--r--  deps/v8/src/objects/embedder-data-slot.h | 28
-rw-r--r--  deps/v8/src/objects/feedback-cell-inl.h | 13
-rw-r--r--  deps/v8/src/objects/feedback-cell.h | 7
-rw-r--r--  deps/v8/src/objects/feedback-vector-inl.h | 202
-rw-r--r--  deps/v8/src/objects/feedback-vector.cc | 381
-rw-r--r--  deps/v8/src/objects/feedback-vector.h | 217
-rw-r--r--  deps/v8/src/objects/feedback-vector.tq | 18
-rw-r--r--  deps/v8/src/objects/field-index-inl.h | 8
-rw-r--r--  deps/v8/src/objects/field-index.h | 2
-rw-r--r--  deps/v8/src/objects/fixed-array-inl.h | 32
-rw-r--r--  deps/v8/src/objects/fixed-array.h | 31
-rw-r--r--  deps/v8/src/objects/foreign-inl.h | 16
-rw-r--r--  deps/v8/src/objects/foreign.h | 5
-rw-r--r--  deps/v8/src/objects/foreign.tq | 3
-rw-r--r--  deps/v8/src/objects/free-space-inl.h | 2
-rw-r--r--  deps/v8/src/objects/free-space.h | 3
-rw-r--r--  deps/v8/src/objects/hash-table-inl.h | 17
-rw-r--r--  deps/v8/src/objects/hash-table.h | 18
-rw-r--r--  deps/v8/src/objects/heap-number-inl.h | 2
-rw-r--r--  deps/v8/src/objects/heap-number.h | 2
-rw-r--r--  deps/v8/src/objects/heap-object.h | 16
-rw-r--r--  deps/v8/src/objects/internal-index.h | 8
-rw-r--r--  deps/v8/src/objects/intl-objects.cc | 69
-rw-r--r--  deps/v8/src/objects/intl-objects.tq | 153
-rw-r--r--  deps/v8/src/objects/js-array-buffer-inl.h | 85
-rw-r--r--  deps/v8/src/objects/js-array-buffer.cc | 1
-rw-r--r--  deps/v8/src/objects/js-array-buffer.h | 24
-rw-r--r--  deps/v8/src/objects/js-array-buffer.tq | 5
-rw-r--r--  deps/v8/src/objects/js-break-iterator-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-break-iterator.h | 2
-rw-r--r--  deps/v8/src/objects/js-break-iterator.tq | 17
-rw-r--r--  deps/v8/src/objects/js-collator-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-collator.h | 2
-rw-r--r--  deps/v8/src/objects/js-collator.tq | 12
-rw-r--r--  deps/v8/src/objects/js-collection-inl.h | 14
-rw-r--r--  deps/v8/src/objects/js-collection-iterator-inl.h | 26
-rw-r--r--  deps/v8/src/objects/js-collection-iterator.h | 2
-rw-r--r--  deps/v8/src/objects/js-collection.h | 2
-rw-r--r--  deps/v8/src/objects/js-date-time-format-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-date-time-format.h | 2
-rw-r--r--  deps/v8/src/objects/js-date-time-format.tq | 23
-rw-r--r--  deps/v8/src/objects/js-display-names-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-display-names.h | 2
-rw-r--r--  deps/v8/src/objects/js-display-names.tq | 19
-rw-r--r--  deps/v8/src/objects/js-function-inl.h | 23
-rw-r--r--  deps/v8/src/objects/js-function.cc | 52
-rw-r--r--  deps/v8/src/objects/js-function.h | 8
-rw-r--r--  deps/v8/src/objects/js-function.tq | 34
-rw-r--r--  deps/v8/src/objects/js-generator-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-generator.h | 2
-rw-r--r--  deps/v8/src/objects/js-list-format-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-list-format.cc | 55
-rw-r--r--  deps/v8/src/objects/js-list-format.h | 2
-rw-r--r--  deps/v8/src/objects/js-list-format.tq | 19
-rw-r--r--  deps/v8/src/objects/js-locale-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-locale.h | 2
-rw-r--r--  deps/v8/src/objects/js-locale.tq | 10
-rw-r--r--  deps/v8/src/objects/js-number-format-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-number-format.cc | 80
-rw-r--r--  deps/v8/src/objects/js-number-format.h | 2
-rw-r--r--  deps/v8/src/objects/js-number-format.tq | 13
-rw-r--r--  deps/v8/src/objects/js-objects-inl.h | 46
-rw-r--r--  deps/v8/src/objects/js-objects.cc | 303
-rw-r--r--  deps/v8/src/objects/js-objects.h | 23
-rw-r--r--  deps/v8/src/objects/js-objects.tq | 31
-rw-r--r--  deps/v8/src/objects/js-plural-rules-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-plural-rules.h | 2
-rw-r--r--  deps/v8/src/objects/js-plural-rules.tq | 19
-rw-r--r--  deps/v8/src/objects/js-promise-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-promise.h | 2
-rw-r--r--  deps/v8/src/objects/js-proxy-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-proxy.h | 2
-rw-r--r--  deps/v8/src/objects/js-regexp-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-regexp-string-iterator-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-regexp-string-iterator.h | 2
-rw-r--r--  deps/v8/src/objects/js-regexp.cc | 7
-rw-r--r--  deps/v8/src/objects/js-regexp.h | 9
-rw-r--r--  deps/v8/src/objects/js-regexp.tq | 1
-rw-r--r--  deps/v8/src/objects/js-relative-time-format-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-relative-time-format.cc | 9
-rw-r--r--  deps/v8/src/objects/js-relative-time-format.h | 2
-rw-r--r--  deps/v8/src/objects/js-relative-time-format.tq | 19
-rw-r--r--  deps/v8/src/objects/js-segment-iterator-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-segment-iterator.h | 2
-rw-r--r--  deps/v8/src/objects/js-segment-iterator.tq | 16
-rw-r--r--  deps/v8/src/objects/js-segmenter-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-segmenter.h | 2
-rw-r--r--  deps/v8/src/objects/js-segmenter.tq | 18
-rw-r--r--  deps/v8/src/objects/js-segments-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-segments.h | 2
-rw-r--r--  deps/v8/src/objects/js-segments.tq | 16
-rw-r--r--  deps/v8/src/objects/js-weak-refs-inl.h | 2
-rw-r--r--  deps/v8/src/objects/js-weak-refs.h | 2
-rw-r--r--  deps/v8/src/objects/keys.cc | 283
-rw-r--r--  deps/v8/src/objects/keys.h | 89
-rw-r--r--  deps/v8/src/objects/layout-descriptor-inl.h | 7
-rw-r--r--  deps/v8/src/objects/layout-descriptor.cc | 16
-rw-r--r--  deps/v8/src/objects/literal-objects-inl.h | 10
-rw-r--r--  deps/v8/src/objects/literal-objects.cc | 5
-rw-r--r--  deps/v8/src/objects/literal-objects.h | 6
-rw-r--r--  deps/v8/src/objects/lookup-cache.h | 3
-rw-r--r--  deps/v8/src/objects/lookup.cc | 119
-rw-r--r--  deps/v8/src/objects/map-inl.h | 57
-rw-r--r--  deps/v8/src/objects/map-updater.cc | 26
-rw-r--r--  deps/v8/src/objects/map.cc | 155
-rw-r--r--  deps/v8/src/objects/map.h | 21
-rw-r--r--  deps/v8/src/objects/maybe-object-inl.h | 11
-rw-r--r--  deps/v8/src/objects/maybe-object.h | 6
-rw-r--r--  deps/v8/src/objects/microtask-inl.h | 2
-rw-r--r--  deps/v8/src/objects/microtask.h | 2
-rw-r--r--  deps/v8/src/objects/module-inl.h | 18
-rw-r--r--  deps/v8/src/objects/module.cc | 38
-rw-r--r--  deps/v8/src/objects/module.h | 8
-rw-r--r--  deps/v8/src/objects/name-inl.h | 2
-rw-r--r--  deps/v8/src/objects/name.h | 2
-rw-r--r--  deps/v8/src/objects/object-list-macros.h | 2
-rw-r--r--  deps/v8/src/objects/object-macros-undef.h | 6
-rw-r--r--  deps/v8/src/objects/object-macros.h | 94
-rw-r--r--  deps/v8/src/objects/objects-body-descriptors-inl.h | 10
-rw-r--r--  deps/v8/src/objects/objects-definitions.h | 3
-rw-r--r--  deps/v8/src/objects/objects-inl.h | 61
-rw-r--r--  deps/v8/src/objects/objects.cc | 677
-rw-r--r--  deps/v8/src/objects/objects.h | 22
-rw-r--r--  deps/v8/src/objects/oddball-inl.h | 2
-rw-r--r--  deps/v8/src/objects/oddball.h | 7
-rw-r--r--  deps/v8/src/objects/oddball.tq | 6
-rw-r--r--  deps/v8/src/objects/ordered-hash-table-inl.h | 64
-rw-r--r--  deps/v8/src/objects/ordered-hash-table.cc | 415
-rw-r--r--  deps/v8/src/objects/ordered-hash-table.h | 187
-rw-r--r--  deps/v8/src/objects/ordered-hash-table.tq | 3
-rw-r--r--  deps/v8/src/objects/primitive-heap-object-inl.h | 3
-rw-r--r--  deps/v8/src/objects/primitive-heap-object.h | 3
-rw-r--r--  deps/v8/src/objects/promise-inl.h | 2
-rw-r--r--  deps/v8/src/objects/promise.h | 2
-rw-r--r--  deps/v8/src/objects/property-array-inl.h | 4
-rw-r--r--  deps/v8/src/objects/property-array.h | 2
-rw-r--r--  deps/v8/src/objects/property-descriptor-object-inl.h | 2
-rw-r--r--  deps/v8/src/objects/property-descriptor-object.h | 2
-rw-r--r--  deps/v8/src/objects/property-descriptor.cc | 2
-rw-r--r--  deps/v8/src/objects/property.cc | 2
-rw-r--r--  deps/v8/src/objects/prototype-info-inl.h | 2
-rw-r--r--  deps/v8/src/objects/prototype-info.h | 2
-rw-r--r--  deps/v8/src/objects/prototype.h | 4
-rw-r--r--  deps/v8/src/objects/regexp-match-info.h | 3
-rw-r--r--  deps/v8/src/objects/scope-info.cc | 22
-rw-r--r--  deps/v8/src/objects/script-inl.h | 2
-rw-r--r--  deps/v8/src/objects/script.h | 5
-rw-r--r--  deps/v8/src/objects/shared-function-info-inl.h | 149
-rw-r--r--  deps/v8/src/objects/shared-function-info.cc | 26
-rw-r--r--  deps/v8/src/objects/shared-function-info.h | 31
-rw-r--r--  deps/v8/src/objects/shared-function-info.tq | 1
-rw-r--r--  deps/v8/src/objects/slots-inl.h | 12
-rw-r--r--  deps/v8/src/objects/slots.h | 12
-rw-r--r--  deps/v8/src/objects/source-text-module-inl.h | 29
-rw-r--r--  deps/v8/src/objects/source-text-module.cc | 33
-rw-r--r--  deps/v8/src/objects/source-text-module.h | 18
-rw-r--r--  deps/v8/src/objects/source-text-module.tq | 10
-rw-r--r--  deps/v8/src/objects/stack-frame-info-inl.h | 2
-rw-r--r--  deps/v8/src/objects/stack-frame-info.cc | 3
-rw-r--r--  deps/v8/src/objects/stack-frame-info.h | 2
-rw-r--r--  deps/v8/src/objects/string-comparator.cc | 2
-rw-r--r--  deps/v8/src/objects/string-comparator.h | 9
-rw-r--r--  deps/v8/src/objects/string-inl.h | 164
-rw-r--r--  deps/v8/src/objects/string-table.cc | 45
-rw-r--r--  deps/v8/src/objects/string-table.h | 4
-rw-r--r--  deps/v8/src/objects/string.cc | 83
-rw-r--r--  deps/v8/src/objects/string.h | 73
-rw-r--r--  deps/v8/src/objects/string.tq | 5
-rw-r--r--  deps/v8/src/objects/struct-inl.h | 3
-rw-r--r--  deps/v8/src/objects/struct.h | 3
-rw-r--r--  deps/v8/src/objects/synthetic-module-inl.h | 27
-rw-r--r--  deps/v8/src/objects/synthetic-module.cc | 3
-rw-r--r--  deps/v8/src/objects/synthetic-module.h | 2
-rw-r--r--  deps/v8/src/objects/tagged-field-inl.h | 12
-rw-r--r--  deps/v8/src/objects/tagged-field.h | 12
-rw-r--r--  deps/v8/src/objects/template-objects-inl.h | 2
-rw-r--r--  deps/v8/src/objects/template-objects.h | 2
-rw-r--r--  deps/v8/src/objects/templates-inl.h | 13
-rw-r--r--  deps/v8/src/objects/templates.h | 5
-rw-r--r--  deps/v8/src/objects/templates.tq (renamed from deps/v8/src/objects/template.tq) | 3
-rw-r--r--  deps/v8/src/objects/torque-defined-classes-inl.h | 23
-rw-r--r--  deps/v8/src/objects/torque-defined-classes.h | 25
-rw-r--r--  deps/v8/src/objects/torque-defined-classes.tq | 17
-rw-r--r--  deps/v8/src/objects/transitions-inl.h | 16
-rw-r--r--  deps/v8/src/objects/transitions.cc | 7
-rw-r--r--  deps/v8/src/objects/value-serializer.cc | 64
-rw-r--r--  deps/v8/src/objects/value-serializer.h | 8
-rw-r--r--  deps/v8/src/parsing/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/parsing/OWNERS | 2
-rw-r--r--  deps/v8/src/parsing/parse-info.cc | 2
-rw-r--r--  deps/v8/src/parsing/parse-info.h | 2
-rw-r--r--  deps/v8/src/parsing/parser-base.h | 51
-rw-r--r--  deps/v8/src/parsing/parser.cc | 236
-rw-r--r--  deps/v8/src/parsing/parser.h | 16
-rw-r--r--  deps/v8/src/parsing/rewriter.cc | 49
-rw-r--r--  deps/v8/src/parsing/scanner-character-streams.cc | 4
-rw-r--r--  deps/v8/src/profiler/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/profiler/OWNERS | 2
-rw-r--r--  deps/v8/src/profiler/cpu-profiler.cc | 69
-rw-r--r--  deps/v8/src/profiler/cpu-profiler.h | 26
-rw-r--r--  deps/v8/src/profiler/heap-profiler.cc | 13
-rw-r--r--  deps/v8/src/profiler/heap-profiler.h | 10
-rw-r--r--  deps/v8/src/profiler/heap-snapshot-generator.cc | 83
-rw-r--r--  deps/v8/src/profiler/heap-snapshot-generator.h | 15
-rw-r--r--  deps/v8/src/profiler/profile-generator-inl.h | 7
-rw-r--r--  deps/v8/src/profiler/profile-generator.cc | 198
-rw-r--r--  deps/v8/src/profiler/profile-generator.h | 34
-rw-r--r--  deps/v8/src/profiler/symbolizer.cc | 190
-rw-r--r--  deps/v8/src/profiler/symbolizer.h | 44
-rw-r--r--  deps/v8/src/regexp/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/regexp/OWNERS | 2
-rw-r--r--  deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc | 17
-rw-r--r--  deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h | 1
-rw-r--r--  deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc | 17
-rw-r--r--  deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h | 1
-rw-r--r--  deps/v8/src/regexp/experimental/experimental-bytecode.h | 8
-rw-r--r--  deps/v8/src/regexp/experimental/experimental-compiler.cc | 239
-rw-r--r--  deps/v8/src/regexp/experimental/experimental-interpreter.cc | 176
-rw-r--r--  deps/v8/src/regexp/experimental/experimental-interpreter.h | 18
-rw-r--r--  deps/v8/src/regexp/experimental/experimental.cc | 220
-rw-r--r--  deps/v8/src/regexp/experimental/experimental.h | 14
-rw-r--r--  deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc | 16
-rw-r--r--  deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h | 1
-rw-r--r--  deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc | 16
-rw-r--r--  deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h | 1
-rw-r--r--  deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc | 16
-rw-r--r--  deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h | 1
-rw-r--r--  deps/v8/src/regexp/ppc/OWNERS | 1
-rw-r--r--  deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc | 20
-rw-r--r--  deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h | 1
-rw-r--r--  deps/v8/src/regexp/regexp-bytecode-generator.cc | 8
-rw-r--r--  deps/v8/src/regexp/regexp-compiler.cc | 6
-rw-r--r--  deps/v8/src/regexp/regexp-error.h | 1
-rw-r--r--  deps/v8/src/regexp/regexp-interpreter.cc | 8
-rw-r--r--  deps/v8/src/regexp/regexp-interpreter.h | 1
-rw-r--r--  deps/v8/src/regexp/regexp-macro-assembler.cc | 2
-rw-r--r--  deps/v8/src/regexp/regexp-macro-assembler.h | 22
-rw-r--r--  deps/v8/src/regexp/regexp-parser.cc | 9
-rw-r--r--  deps/v8/src/regexp/regexp-parser.h | 4
-rw-r--r--  deps/v8/src/regexp/regexp-stack.cc | 17
-rw-r--r--  deps/v8/src/regexp/regexp-stack.h | 15
-rw-r--r--  deps/v8/src/regexp/regexp-utils.cc | 7
-rw-r--r--  deps/v8/src/regexp/regexp.cc | 83
-rw-r--r--  deps/v8/src/regexp/regexp.h | 11
-rw-r--r--  deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc | 16
-rw-r--r--  deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h | 1
-rw-r--r--  deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc | 16
-rw-r--r--  deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h | 1
-rw-r--r--  deps/v8/src/roots/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/roots/OWNERS | 2
-rw-r--r--  deps/v8/src/roots/roots.h | 7
-rw-r--r--  deps/v8/src/runtime/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/runtime/OWNERS | 2
-rw-r--r--  deps/v8/src/runtime/runtime-array.cc | 5
-rw-r--r--  deps/v8/src/runtime/runtime-classes.cc | 10
-rw-r--r--  deps/v8/src/runtime/runtime-compiler.cc | 53
-rw-r--r--  deps/v8/src/runtime/runtime-debug.cc | 2
-rw-r--r--  deps/v8/src/runtime/runtime-literals.cc | 9
-rw-r--r--  deps/v8/src/runtime/runtime-numbers.cc | 9
-rw-r--r--  deps/v8/src/runtime/runtime-object.cc | 13
-rw-r--r--  deps/v8/src/runtime/runtime-regexp.cc | 17
-rw-r--r--  deps/v8/src/runtime/runtime-scopes.cc | 3
-rw-r--r--  deps/v8/src/runtime/runtime-test.cc | 29
-rw-r--r--  deps/v8/src/runtime/runtime-wasm.cc | 94
-rw-r--r--  deps/v8/src/runtime/runtime.h | 9
-rw-r--r--  deps/v8/src/snapshot/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/snapshot/OWNERS | 2
-rw-r--r--  deps/v8/src/snapshot/code-serializer.cc | 136
-rw-r--r--  deps/v8/src/snapshot/code-serializer.h | 26
-rw-r--r--  deps/v8/src/snapshot/context-deserializer.cc | 37
-rw-r--r--  deps/v8/src/snapshot/context-deserializer.h | 8
-rw-r--r--  deps/v8/src/snapshot/context-serializer.cc | 75
-rw-r--r--  deps/v8/src/snapshot/context-serializer.h | 7
-rw-r--r--  deps/v8/src/snapshot/deserializer-allocator.cc | 217
-rw-r--r--  deps/v8/src/snapshot/deserializer-allocator.h | 104
-rw-r--r--  deps/v8/src/snapshot/deserializer.cc | 961
-rw-r--r--  deps/v8/src/snapshot/deserializer.h | 178
-rw-r--r--  deps/v8/src/snapshot/embedded/embedded-data.cc | 240
-rw-r--r--  deps/v8/src/snapshot/embedded/embedded-data.h | 157
-rw-r--r--  deps/v8/src/snapshot/embedded/embedded-empty.cc | 16
-rw-r--r--  deps/v8/src/snapshot/embedded/embedded-file-writer.cc | 120
-rw-r--r--  deps/v8/src/snapshot/embedded/embedded-file-writer.h | 42
-rw-r--r--  deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc | 4
-rw-r--r--  deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc | 3
-rw-r--r--  deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc | 4
-rw-r--r--  deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc | 4
-rw-r--r--  deps/v8/src/snapshot/object-deserializer.cc | 44
-rw-r--r--  deps/v8/src/snapshot/object-deserializer.h | 4
-rw-r--r--  deps/v8/src/snapshot/read-only-deserializer.cc | 23
-rw-r--r--  deps/v8/src/snapshot/read-only-deserializer.h | 9
-rw-r--r--  deps/v8/src/snapshot/read-only-serializer.cc | 53
-rw-r--r--  deps/v8/src/snapshot/read-only-serializer.h | 11
-rw-r--r--  deps/v8/src/snapshot/references.h | 177
-rw-r--r--  deps/v8/src/snapshot/roots-serializer.cc | 3
-rw-r--r--  deps/v8/src/snapshot/roots-serializer.h | 6
-rw-r--r--  deps/v8/src/snapshot/serializer-allocator.cc | 167
-rw-r--r--  deps/v8/src/snapshot/serializer-allocator.h | 78
-rw-r--r--  deps/v8/src/snapshot/serializer-deserializer.cc | 31
-rw-r--r--  deps/v8/src/snapshot/serializer-deserializer.h | 99
-rw-r--r--  deps/v8/src/snapshot/serializer.cc | 720
-rw-r--r--  deps/v8/src/snapshot/serializer.h | 176
-rw-r--r--  deps/v8/src/snapshot/snapshot-data.cc | 31
-rw-r--r--  deps/v8/src/snapshot/snapshot-data.h | 30
-rw-r--r--  deps/v8/src/snapshot/snapshot-source-sink.h | 30
-rw-r--r--  deps/v8/src/snapshot/snapshot-utils.cc | 15
-rw-r--r--  deps/v8/src/snapshot/snapshot-utils.h | 2
-rw-r--r--  deps/v8/src/snapshot/snapshot.cc | 63
-rw-r--r--  deps/v8/src/snapshot/startup-deserializer.cc | 65
-rw-r--r--  deps/v8/src/snapshot/startup-deserializer.h | 10
-rw-r--r--  deps/v8/src/snapshot/startup-serializer.cc | 76
-rw-r--r--  deps/v8/src/snapshot/startup-serializer.h | 16
-rw-r--r--  deps/v8/src/strings/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/strings/OWNERS | 2
-rw-r--r--  deps/v8/src/strings/char-predicates-inl.h | 96
-rw-r--r--  deps/v8/src/strings/string-stream.cc | 2
-rw-r--r--  deps/v8/src/strings/unicode-inl.h | 19
-rw-r--r--  deps/v8/src/strings/unicode.h | 2
-rw-r--r--  deps/v8/src/torque/ast.h | 82
-rw-r--r--  deps/v8/src/torque/cc-generator.cc | 460
-rw-r--r--  deps/v8/src/torque/cc-generator.h | 46
-rw-r--r--  deps/v8/src/torque/constants.h | 14
-rw-r--r--  deps/v8/src/torque/csa-generator.cc | 83
-rw-r--r--  deps/v8/src/torque/csa-generator.h | 76
-rw-r--r--  deps/v8/src/torque/declarable.h | 46
-rw-r--r--  deps/v8/src/torque/declarations.cc | 1
-rw-r--r--  deps/v8/src/torque/global-context.h | 14
-rw-r--r--  deps/v8/src/torque/implementation-visitor.cc | 773
-rw-r--r--  deps/v8/src/torque/implementation-visitor.h | 48
-rw-r--r--  deps/v8/src/torque/instance-type-generator.cc | 2
-rw-r--r--  deps/v8/src/torque/instructions.h | 56
-rw-r--r--  deps/v8/src/torque/runtime-macro-shims.h | 36
-rw-r--r--  deps/v8/src/torque/torque-code-generator.cc | 60
-rw-r--r--  deps/v8/src/torque/torque-code-generator.h | 93
-rw-r--r--  deps/v8/src/torque/torque-compiler.cc | 6
-rw-r--r--  deps/v8/src/torque/torque-parser.cc | 64
-rw-r--r--  deps/v8/src/torque/type-visitor.cc | 32
-rw-r--r--  deps/v8/src/torque/types.cc | 252
-rw-r--r--  deps/v8/src/torque/types.h | 47
-rw-r--r--  deps/v8/src/tracing/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/tracing/OWNERS | 2
-rw-r--r--  deps/v8/src/tracing/trace-categories.h | 1
-rw-r--r--  deps/v8/src/trap-handler/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/trap-handler/OWNERS | 2
-rw-r--r--  deps/v8/src/trap-handler/handler-outside.cc | 12
-rw-r--r--  deps/v8/src/trap-handler/trap-handler.h | 21
-rw-r--r--  deps/v8/src/utils/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/utils/OWNERS | 2
-rw-r--r--  deps/v8/src/utils/bit-vector.cc | 2
-rw-r--r--  deps/v8/src/utils/bit-vector.h | 2
-rw-r--r--  deps/v8/src/utils/identity-map.cc | 144
-rw-r--r--  deps/v8/src/utils/identity-map.h | 71
-rw-r--r--  deps/v8/src/utils/locked-queue-inl.h | 6
-rw-r--r--  deps/v8/src/utils/locked-queue.h | 2
-rw-r--r--  deps/v8/src/utils/utils.h | 73
-rw-r--r--  deps/v8/src/wasm/DIR_METADATA | 11
-rw-r--r--  deps/v8/src/wasm/OWNERS | 2
-rw-r--r--  deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h | 108
-rw-r--r--  deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 123
-rw-r--r--  deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 122
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.cc | 23
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.h | 59
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.cc | 242
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-register.h | 4
-rw-r--r--  deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 76
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 101
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/OWNERS | 1
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 60
-rw-r--r--  deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h | 60
-rw-r--r--  deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 127
-rw-r--r--  deps/v8/src/wasm/c-api.cc | 12
-rw-r--r--  deps/v8/src/wasm/decoder.h | 189
-rw-r--r--  deps/v8/src/wasm/function-body-decoder-impl.h | 1221
-rw-r--r--  deps/v8/src/wasm/function-body-decoder.cc | 44
-rw-r--r--  deps/v8/src/wasm/function-body-decoder.h | 4
-rw-r--r--  deps/v8/src/wasm/function-compiler.cc | 22
-rw-r--r--  deps/v8/src/wasm/function-compiler.h | 18
-rw-r--r--  deps/v8/src/wasm/graph-builder-interface.cc | 53
-rw-r--r--  deps/v8/src/wasm/memory-tracing.cc | 41
-rw-r--r--  deps/v8/src/wasm/memory-tracing.h | 8
-rw-r--r--  deps/v8/src/wasm/module-compiler.cc | 901
-rw-r--r--  deps/v8/src/wasm/module-compiler.h | 3
-rw-r--r--  deps/v8/src/wasm/module-decoder.cc | 81
-rw-r--r--  deps/v8/src/wasm/module-instantiate.cc | 62
-rw-r--r--  deps/v8/src/wasm/streaming-decoder.cc | 13
-rw-r--r--  deps/v8/src/wasm/value-type.h | 133
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.cc | 50
-rw-r--r--  deps/v8/src/wasm/wasm-code-manager.h | 25
-rw-r--r--  deps/v8/src/wasm/wasm-constants.h | 5
-rw-r--r--  deps/v8/src/wasm/wasm-debug-evaluate.cc | 8
-rw-r--r--  deps/v8/src/wasm/wasm-debug-evaluate.h | 6
-rw-r--r--  deps/v8/src/wasm/wasm-debug.cc | 81
-rw-r--r--  deps/v8/src/wasm/wasm-debug.h | 4
-rw-r--r--  deps/v8/src/wasm/wasm-engine.cc | 5
-rw-r--r--  deps/v8/src/wasm/wasm-engine.h | 4
-rw-r--r--  deps/v8/src/wasm/wasm-external-refs.cc | 60
-rw-r--r--  deps/v8/src/wasm/wasm-js.cc | 785
-rw-r--r--  deps/v8/src/wasm/wasm-js.h | 4
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.cc | 2
-rw-r--r--  deps/v8/src/wasm/wasm-module-builder.h | 4
-rw-r--r--  deps/v8/src/wasm/wasm-module.cc | 11
-rw-r--r--  deps/v8/src/wasm/wasm-module.h | 47
-rw-r--r--  deps/v8/src/wasm/wasm-objects-inl.h | 25
-rw-r--r--  deps/v8/src/wasm/wasm-objects.cc | 73
-rw-r--r--  deps/v8/src/wasm/wasm-objects.h | 32
-rw-r--r--  deps/v8/src/wasm/wasm-objects.tq | 13
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes-inl.h | 49
-rw-r--r--  deps/v8/src/wasm/wasm-opcodes.h | 126
-rw-r--r--  deps/v8/src/wasm/wasm-result.h | 9
-rw-r--r--  deps/v8/src/wasm/wasm-serialization.cc | 14
-rw-r--r--  deps/v8/src/wasm/wasm-value.h | 11
-rw-r--r--  deps/v8/src/zone/zone-containers.h | 4
-rw-r--r--deps/v8/test/BUILD.gn1
-rw-r--r--deps/v8/test/cctest/BUILD.gn5
-rw-r--r--deps/v8/test/cctest/assembler-helper-arm.cc2
-rw-r--r--deps/v8/test/cctest/cctest-utils.h60
-rw-r--r--deps/v8/test/cctest/cctest.cc2
-rw-r--r--deps/v8/test/cctest/cctest.h5
-rw-r--r--deps/v8/test/cctest/cctest.status42
-rw-r--r--deps/v8/test/cctest/compiler/code-assembler-tester.h20
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.h2
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-code-assembler.cc21
-rw-r--r--deps/v8/test/cctest/compiler/test-code-generator.cc18
-rw-r--r--deps/v8/test/cctest/compiler/test-js-context-specialization.cc14
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc13
-rw-r--r--deps/v8/test/cctest/compiler/test-multiple-return.cc23
-rw-r--r--deps/v8/test/cctest/compiler/test-representation-change.cc24
-rw-r--r--deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc6
-rw-r--r--deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc20
-rw-r--r--deps/v8/test/cctest/compiler/test-run-native-calls.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-retpoline.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-run-tail-calls.cc4
-rw-r--r--deps/v8/test/cctest/compiler/value-helper.h2
-rw-r--r--deps/v8/test/cctest/heap/heap-tester.h2
-rw-r--r--deps/v8/test/cctest/heap/test-alloc.cc7
-rw-r--r--deps/v8/test/cctest/heap/test-array-buffer-tracker.cc2
-rw-r--r--deps/v8/test/cctest/heap/test-compaction.cc31
-rw-r--r--deps/v8/test/cctest/heap/test-concurrent-allocation.cc81
-rw-r--r--deps/v8/test/cctest/heap/test-concurrent-marking.cc27
-rw-r--r--deps/v8/test/cctest/heap/test-embedder-tracing.cc118
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc198
-rw-r--r--deps/v8/test/cctest/heap/test-memory-measurement.cc59
-rw-r--r--deps/v8/test/cctest/heap/test-spaces.cc20
-rw-r--r--deps/v8/test/cctest/heap/test-unmapper.cc22
-rw-r--r--deps/v8/test/cctest/heap/test-weak-references.cc2
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden6
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden4
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden9
-rw-r--r--deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc4
-rw-r--r--deps/v8/test/cctest/interpreter/interpreter-tester.h7
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc4
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc7
-rw-r--r--deps/v8/test/cctest/libplatform/test-tracing.cc14
-rw-r--r--deps/v8/test/cctest/test-accessor-assembler.cc13
-rw-r--r--deps/v8/test/cctest/test-accessors.cc24
-rw-r--r--deps/v8/test/cctest/test-api-icu.cc2
-rw-r--r--deps/v8/test/cctest/test-api.cc710
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc86
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc43
-rw-r--r--deps/v8/test/cctest/test-assembler-ia32.cc71
-rw-r--r--deps/v8/test/cctest/test-assembler-mips.cc166
-rw-r--r--deps/v8/test/cctest/test-assembler-mips64.cc186
-rw-r--r--deps/v8/test/cctest/test-assembler-ppc.cc24
-rw-r--r--deps/v8/test/cctest/test-assembler-s390.cc38
-rw-r--r--deps/v8/test/cctest/test-assembler-x64.cc34
-rw-r--r--deps/v8/test/cctest/test-code-layout.cc37
-rw-r--r--deps/v8/test/cctest/test-code-pages.cc6
-rw-r--r--deps/v8/test/cctest/test-code-stub-assembler.cc584
-rw-r--r--deps/v8/test/cctest/test-compiler.cc29
-rw-r--r--deps/v8/test/cctest/test-concurrent-descriptor-array.cc9
-rw-r--r--deps/v8/test/cctest/test-concurrent-feedback-vector.cc268
-rw-r--r--deps/v8/test/cctest/test-concurrent-prototype.cc7
-rw-r--r--deps/v8/test/cctest/test-concurrent-script-context-table.cc18
-rw-r--r--deps/v8/test/cctest/test-concurrent-transition-array.cc11
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc248
-rw-r--r--deps/v8/test/cctest/test-descriptor-array.cc26
-rw-r--r--deps/v8/test/cctest/test-disasm-arm.cc14
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc2
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc18
-rw-r--r--deps/v8/test/cctest/test-factory.cc3
-rw-r--r--deps/v8/test/cctest/test-feedback-vector.cc4
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc47
-rw-r--r--deps/v8/test/cctest/test-global-handles.cc12
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc21
-rw-r--r--deps/v8/test/cctest/test-identity-map.cc153
-rw-r--r--deps/v8/test/cctest/test-local-handles.cc9
-rw-r--r--deps/v8/test/cctest/test-lockers.cc4
-rw-r--r--deps/v8/test/cctest/test-log.cc9
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-arm.cc28
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-arm64.cc37
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips.cc26
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips64.cc32
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc29
-rw-r--r--deps/v8/test/cctest/test-modules.cc133
-rw-r--r--deps/v8/test/cctest/test-object.cc90
-rw-r--r--deps/v8/test/cctest/test-orderedhashtable.cc495
-rw-r--r--deps/v8/test/cctest/test-parsing.cc558
-rw-r--r--deps/v8/test/cctest/test-persistent-handles.cc21
-rw-r--r--deps/v8/test/cctest/test-platform.cc35
-rw-r--r--deps/v8/test/cctest/test-pointer-auth-arm64.cc3
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc120
-rw-r--r--deps/v8/test/cctest/test-regexp.cc56
-rw-r--r--deps/v8/test/cctest/test-serialize.cc103
-rw-r--r--deps/v8/test/cctest/test-stack-unwinding-win64.cc2
-rw-r--r--deps/v8/test/cctest/test-strings.cc8
-rw-r--r--deps/v8/test/cctest/test-sync-primitives-arm64.cc4
-rw-r--r--deps/v8/test/cctest/test-thread-termination.cc6
-rw-r--r--deps/v8/test/cctest/test-trace-event.cc4
-rw-r--r--deps/v8/test/cctest/test-unboxed-doubles.cc66
-rw-r--r--deps/v8/test/cctest/test-unwinder-code-pages.cc251
-rw-r--r--deps/v8/test/cctest/test-verifiers.cc187
-rw-r--r--deps/v8/test/cctest/torque/test-torque.cc41
-rw-r--r--deps/v8/test/cctest/wasm/DIR_METADATA11
-rw-r--r--deps/v8/test/cctest/wasm/OWNERS2
-rw-r--r--deps/v8/test/cctest/wasm/test-c-wasm-entry.cc7
-rw-r--r--deps/v8/test/cctest/wasm/test-gc.cc19
-rw-r--r--deps/v8/test/cctest/wasm/test-jump-table-assembler.cc10
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-64.cc16
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc53
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd.cc977
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc184
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc23
-rw-r--r--deps/v8/test/cctest/wasm/test-streaming-compilation.cc165
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-debug-evaluate.cc120
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-metrics.cc79
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.cc5
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h7
-rw-r--r--deps/v8/test/common/flag-utils.h38
-rw-r--r--deps/v8/test/common/wasm/flag-utils.h23
-rw-r--r--deps/v8/test/common/wasm/wasm-interpreter.cc616
-rw-r--r--deps/v8/test/common/wasm/wasm-interpreter.h5
-rw-r--r--deps/v8/test/common/wasm/wasm-macro-gen.h6
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.cc8
-rw-r--r--deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js2
-rw-r--r--deps/v8/test/debugger/test-api.js2
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/DIR_METADATA11
-rw-r--r--deps/v8/test/debugging/wasm/gdb-server/OWNERS2
-rw-r--r--deps/v8/test/fuzzer/BUILD.gn3
-rw-r--r--deps/v8/test/fuzzer/inspector-fuzzer.cc616
-rw-r--r--deps/v8/test/fuzzer/inspector/empty0
-rw-r--r--deps/v8/test/fuzzer/inspector/invalid1
-rw-r--r--deps/v8/test/fuzzer/multi-return.cc5
-rw-r--r--deps/v8/test/fuzzer/regexp-builtins.cc7
-rw-r--r--deps/v8/test/fuzzer/testcfg.py3
-rw-r--r--deps/v8/test/fuzzer/wasm-async.cc18
-rw-r--r--deps/v8/test/fuzzer/wasm-compile.cc29
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.cc18
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.h6
-rw-r--r--deps/v8/test/fuzzer/wasm.cc11
-rw-r--r--deps/v8/test/fuzzilli/README.md9
-rw-r--r--deps/v8/test/fuzzilli/main.cc64
-rw-r--r--deps/v8/test/inspector/BUILD.gn30
-rw-r--r--deps/v8/test/inspector/DEPS8
-rw-r--r--deps/v8/test/inspector/DIR_METADATA11
-rw-r--r--deps/v8/test/inspector/OWNERS2
-rw-r--r--deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js14
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-static-nested-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-static-nested.js2
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt20
-rw-r--r--deps/v8/test/inspector/debugger/class-private-methods-unused.js10
-rw-r--r--deps/v8/test/inspector/debugger/destroy-in-break-program-expected.txt (renamed from deps/v8/test/inspector/debugger/destory-in-break-program-expected.txt)0
-rw-r--r--deps/v8/test/inspector/debugger/destroy-in-break-program.js (renamed from deps/v8/test/inspector/debugger/destory-in-break-program.js)0
-rw-r--r--deps/v8/test/inspector/debugger/destroy-in-break-program2-expected.txt3
-rw-r--r--deps/v8/test/inspector/debugger/destroy-in-break-program2.js49
-rw-r--r--deps/v8/test/inspector/debugger/pause-on-oom-expected.txt1
-rw-r--r--deps/v8/test/inspector/debugger/pause-on-oom-extrawide-expected.txt1
-rw-r--r--deps/v8/test/inspector/debugger/pause-on-oom-wide-expected.txt1
-rw-r--r--deps/v8/test/inspector/debugger/wasm-breakpoint-reset-on-debugger-restart-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-debug-command-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-imports-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-remove-breakpoint-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt278
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info-liftoff-expected.txt79
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info-liftoff.js1
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info.js4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff-expected.txt58
-rw-r--r--deps/v8/test/inspector/debugger/wasm-step-after-trap-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position-expected.txt12
-rw-r--r--deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position.js54
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets-expected.txt317
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets.js106
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-expected.txt69
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-in-from-js-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-liftoff-expected.txt69
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-to-js-expected.txt10
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-with-skiplist-expected.txt120
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt46
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping.js (renamed from deps/v8/test/inspector/debugger/wasm-stepping-liftoff.js)0
-rw-r--r--deps/v8/test/inspector/frontend-channel.h79
-rw-r--r--deps/v8/test/inspector/inspector-test.cc559
-rw-r--r--deps/v8/test/inspector/inspector.status39
-rw-r--r--deps/v8/test/inspector/isolate-data.cc104
-rw-r--r--deps/v8/test/inspector/isolate-data.h54
-rw-r--r--deps/v8/test/inspector/protocol-test.js15
-rw-r--r--deps/v8/test/inspector/runtime/add-binding-expected.txt107
-rw-r--r--deps/v8/test/inspector/runtime/add-binding.js68
-rw-r--r--deps/v8/test/inspector/runtime/custom-preview-expected.txt98
-rw-r--r--deps/v8/test/inspector/runtime/custom-preview.js2
-rw-r--r--deps/v8/test/inspector/runtime/regression-1140845-expected.txt38
-rw-r--r--deps/v8/test/inspector/runtime/regression-1140845.js36
-rw-r--r--deps/v8/test/inspector/task-runner.cc32
-rw-r--r--deps/v8/test/inspector/task-runner.h33
-rw-r--r--deps/v8/test/inspector/tasks.cc53
-rw-r--r--deps/v8/test/inspector/tasks.h187
-rw-r--r--deps/v8/test/inspector/utils.cc82
-rw-r--r--deps/v8/test/inspector/utils.h36
-rw-r--r--deps/v8/test/inspector/wasm-inspector-test.js1
-rw-r--r--deps/v8/test/intl/DIR_METADATA11
-rw-r--r--deps/v8/test/intl/OWNERS2
-rwxr-xr-xdeps/v8/test/intl/number-format/check-minimum-fraction-digits.js9
-rw-r--r--deps/v8/test/intl/regress-1074578.js8
-rw-r--r--deps/v8/test/intl/regress-10960.js38
-rw-r--r--deps/v8/test/intl/regress-1107661.js9
-rw-r--r--deps/v8/test/memory/Memory.json4
-rw-r--r--deps/v8/test/message/fail/dynamic-import-missing-specifier.js2
-rw-r--r--deps/v8/test/message/fail/dynamic-import-missing-specifier.out2
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export5.mjs2
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export5.out2
-rw-r--r--deps/v8/test/message/wasm-trace-memory-liftoff.out28
-rw-r--r--deps/v8/test/message/wasm-trace-memory.out28
-rw-r--r--deps/v8/test/mjsunit/BUILD.gn11
-rw-r--r--deps/v8/test/mjsunit/array-concat.js26
-rw-r--r--deps/v8/test/mjsunit/code-coverage-block.js55
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1125145.js20
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1146652.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1150649.js24
-rw-r--r--deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps-polymorphic.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps.js17
-rw-r--r--deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps2.js44
-rw-r--r--deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-poly-mono.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler1.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/test-dynamic-map-checks.js2
-rw-r--r--deps/v8/test/mjsunit/es6/object-assign.js42
-rw-r--r--deps/v8/test/mjsunit/es6/super-ic-opt-dynamic-map-checks.js42
-rw-r--r--deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js51
-rw-r--r--deps/v8/test/mjsunit/es6/super-ic-opt.js608
-rw-r--r--deps/v8/test/mjsunit/es6/super-ic.js10
-rw-r--r--deps/v8/test/mjsunit/harmony/import-from-instantiation-errored.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/promise-all-settled.js2
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status47
-rw-r--r--deps/v8/test/mjsunit/promise-perform-all-settled-resolve-lookup.js2
-rw-r--r--deps/v8/test/mjsunit/regexp-backtrack-limit.js1
-rw-r--r--deps/v8/test/mjsunit/regexp-experimental.js2
-rw-r--r--deps/v8/test/mjsunit/regexp-fallback-large-default.js20
-rw-r--r--deps/v8/test/mjsunit/regexp-fallback.js37
-rw-r--r--deps/v8/test/mjsunit/regexp-linear-flag.js35
-rw-r--r--deps/v8/test/mjsunit/regexp-no-linear-flag.js22
-rw-r--r--deps/v8/test/mjsunit/regress-1146106.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-10908.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-10931.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1112155.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1125871.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1132111.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1137979.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1138075.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1138611.js34
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1139782.js37
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1141502.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1142158.js37
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1144672.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1161357.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-542823.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6248.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1038178.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1130213.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1137586.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1137594.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1151890.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1171954.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-696622.js8
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1065599.js16
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-11024.js22
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1132461.js27
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1137582.js10
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1146861.js56
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1153442.js61
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1161654.js56
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-9447.js6
-rw-r--r--deps/v8/test/mjsunit/runtime-callstats-helpers.js21
-rw-r--r--deps/v8/test/mjsunit/smi-mul-const.js20
-rw-r--r--deps/v8/test/mjsunit/smi-mul.js21
-rw-r--r--deps/v8/test/mjsunit/stack-traces.js20
-rw-r--r--deps/v8/test/mjsunit/stackoverflow-underapplication.js54
-rw-r--r--deps/v8/test/mjsunit/tools/codemap.mjs47
-rw-r--r--deps/v8/test/mjsunit/tools/log_two_byte.js3
-rw-r--r--deps/v8/test/mjsunit/tools/processor.mjs56
-rw-r--r--deps/v8/test/mjsunit/tools/timeline.mjs24
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-stack.js15
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics-non-shared.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics.js17
-rw-r--r--deps/v8/test/mjsunit/wasm/call-ref.js62
-rw-r--r--deps/v8/test/mjsunit/wasm/externref-globals-liftoff.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/externref-liftoff.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/generic-wrapper.js17
-rw-r--r--deps/v8/test/mjsunit/wasm/imported-function-types.js44
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-calls.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-tables.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/many-memories-no-trap-handler.js22
-rw-r--r--deps/v8/test/mjsunit/wasm/many-memories.js24
-rw-r--r--deps/v8/test/mjsunit/wasm/return-calls.js5
-rw-r--r--deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/table-grow.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/trap-location.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js485
-rw-r--r--deps/v8/test/test262/local-tests/test/intl402/NumberFormat/default-currency-maximum-fraction-digits.js17
-rw-r--r--deps/v8/test/test262/test262.status87
-rw-r--r--deps/v8/test/test262/testcfg.py2
-rw-r--r--deps/v8/test/unittests/BUILD.gn12
-rw-r--r--deps/v8/test/unittests/api/isolate-unittest.cc4
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc2
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc2
-rw-r--r--deps/v8/test/unittests/base/functional-unittest.cc4
-rw-r--r--deps/v8/test/unittests/base/platform/platform-unittest.cc9
-rw-r--r--deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc20
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc11
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h4
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc56
-rw-r--r--deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc50
-rw-r--r--deps/v8/test/unittests/compiler/ppc/OWNERS1
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/mid-tier-register-allocator-unittest.cc154
-rw-r--r--deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc201
-rw-r--r--deps/v8/test/unittests/execution/microtask-queue-unittest.cc5
-rw-r--r--deps/v8/test/unittests/heap/base/worklist-unittest.cc34
-rw-r--r--deps/v8/test/unittests/heap/cppgc/compactor-unittest.cc250
-rw-r--r--deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc147
-rw-r--r--deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc25
-rw-r--r--deps/v8/test/unittests/heap/cppgc/cross-thread-persistent-unittest.cc101
-rw-r--r--deps/v8/test/unittests/heap/cppgc/custom-spaces-unittest.cc107
-rw-r--r--deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc112
-rw-r--r--deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc16
-rw-r--r--deps/v8/test/unittests/heap/cppgc/gc-invoker-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc16
-rw-r--r--deps/v8/test/unittests/heap/cppgc/heap-unittest.cc36
-rw-r--r--deps/v8/test/unittests/heap/cppgc/marker-unittest.cc22
-rw-r--r--deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc45
-rw-r--r--deps/v8/test/unittests/heap/cppgc/marking-visitor-unittest.cc54
-rw-r--r--deps/v8/test/unittests/heap/cppgc/name-trait-unittest.cc133
-rw-r--r--deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc11
-rw-r--r--deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc (renamed from deps/v8/test/unittests/heap/cppgc/persistent-unittest.cc)184
-rw-r--r--deps/v8/test/unittests/heap/cppgc/stack-unittest.cc3
-rw-r--r--deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc5
-rw-r--r--deps/v8/test/unittests/heap/cppgc/test-platform.cc123
-rw-r--r--deps/v8/test/unittests/heap/cppgc/test-platform.h57
-rw-r--r--deps/v8/test/unittests/heap/cppgc/tests.h5
-rw-r--r--deps/v8/test/unittests/heap/cppgc/weak-container-unittest.cc184
-rw-r--r--deps/v8/test/unittests/heap/cppgc/write-barrier-unittest.cc10
-rw-r--r--deps/v8/test/unittests/heap/heap-unittest.cc9
-rw-r--r--deps/v8/test/unittests/heap/heap-utils.h5
-rw-r--r--deps/v8/test/unittests/heap/item-parallel-job-unittest.cc3
-rw-r--r--deps/v8/test/unittests/heap/js-member-unittest.cc164
-rw-r--r--deps/v8/test/unittests/heap/local-factory-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/local-heap-unittest.cc10
-rw-r--r--deps/v8/test/unittests/heap/safepoint-unittest.cc9
-rw-r--r--deps/v8/test/unittests/heap/traced-reference-unittest.cc204
-rw-r--r--deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc491
-rw-r--r--deps/v8/test/unittests/heap/unified-heap-unittest.cc64
-rw-r--r--deps/v8/test/unittests/heap/unified-heap-utils.cc81
-rw-r--r--deps/v8/test/unittests/heap/unified-heap-utils.h50
-rw-r--r--deps/v8/test/unittests/heap/unmapper-unittest.cc46
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc7
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc16
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc2
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h6
-rw-r--r--deps/v8/test/unittests/objects/value-serializer-unittest.cc27
-rw-r--r--deps/v8/test/unittests/parser/preparser-unittest.cc5
-rw-r--r--deps/v8/test/unittests/tasks/background-compile-task-unittest.cc10
-rw-r--r--deps/v8/test/unittests/test-helpers.h4
-rw-r--r--deps/v8/test/unittests/test-utils.cc11
-rw-r--r--deps/v8/test/unittests/test-utils.h54
-rw-r--r--deps/v8/test/unittests/unittests.status3
-rw-r--r--deps/v8/test/unittests/wasm/DIR_METADATA11
-rw-r--r--deps/v8/test/unittests/wasm/OWNERS2
-rw-r--r--deps/v8/test/unittests/wasm/decoder-unittest.cc127
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc230
-rw-r--r--deps/v8/test/unittests/wasm/leb-helper-unittest.cc27
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc44
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc17
-rw-r--r--deps/v8/test/wasm-api-tests/traps.cc26
-rw-r--r--deps/v8/test/wasm-api-tests/wasm-api-test.h6
-rw-r--r--deps/v8/test/wasm-js/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-spec-tests/DIR_METADATA11
-rw-r--r--deps/v8/test/wasm-spec-tests/OWNERS2
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.status13
-rw-r--r--deps/v8/testing/gtest/BUILD.gn18
-rw-r--r--deps/v8/third_party/markupsafe/DIR_METADATA3
-rw-r--r--deps/v8/third_party/markupsafe/OWNERS2
-rw-r--r--deps/v8/third_party/zlib/deflate.c3
-rw-r--r--deps/v8/third_party/zlib/google/compression_utils.cc37
-rw-r--r--deps/v8/third_party/zlib/google/compression_utils.h28
-rw-r--r--deps/v8/third_party/zlib/google/compression_utils_unittest.cc8
-rw-r--r--deps/v8/third_party/zlib/patches/0007-zero-init-deflate-window.patch40
-rwxr-xr-xdeps/v8/tools/android-sync.sh19
-rw-r--r--deps/v8/tools/arguments.mjs22
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/DIR_METADATA11
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/OWNERS2
-rw-r--r--deps/v8/tools/codemap.mjs433
-rw-r--r--deps/v8/tools/consarray.mjs2
-rw-r--r--deps/v8/tools/csvparser.mjs6
-rw-r--r--deps/v8/tools/debug_helper/get-object-properties.cc7
-rw-r--r--deps/v8/tools/dumpcpp-driver.mjs14
-rw-r--r--deps/v8/tools/dumpcpp.mjs97
-rw-r--r--deps/v8/tools/gcmole/gcmole-test.cc13
-rw-r--r--deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha12
-rw-r--r--deps/v8/tools/gcmole/gcmole.cc37
-rw-r--r--deps/v8/tools/gcmole/test-expectations.txt5
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py25
-rw-r--r--deps/v8/tools/ic-explorer.html389
-rw-r--r--deps/v8/tools/ic-processor-driver.mjs74
-rw-r--r--deps/v8/tools/ic-processor.mjs197
-rw-r--r--deps/v8/tools/index.html17
-rwxr-xr-xdeps/v8/tools/linux-tick-processor2
-rw-r--r--deps/v8/tools/logreader.mjs34
-rwxr-xr-xdeps/v8/tools/map-processor37
-rw-r--r--deps/v8/tools/map-processor-driver.mjs38
-rw-r--r--deps/v8/tools/map-processor.html1315
-rw-r--r--deps/v8/tools/map-processor.mjs783
-rwxr-xr-xdeps/v8/tools/mb/mb_unittest.py8
-rw-r--r--deps/v8/tools/parse-processor-driver.mjs12
-rw-r--r--deps/v8/tools/parse-processor.mjs26
-rw-r--r--deps/v8/tools/profile.mjs1434
-rw-r--r--deps/v8/tools/profile_view.mjs32
-rw-r--r--deps/v8/tools/sourcemap.mjs126
-rw-r--r--deps/v8/tools/splaytree.mjs30
-rw-r--r--deps/v8/tools/system-analyzer/app-model.mjs128
-rw-r--r--deps/v8/tools/system-analyzer/events.mjs33
-rw-r--r--deps/v8/tools/system-analyzer/helper.mjs148
-rw-r--r--deps/v8/tools/system-analyzer/ic-model.mjs13
-rw-r--r--deps/v8/tools/system-analyzer/ic-panel-template.html74
-rw-r--r--deps/v8/tools/system-analyzer/ic-panel.mjs405
-rw-r--r--deps/v8/tools/system-analyzer/index.css95
-rw-r--r--deps/v8/tools/system-analyzer/index.html261
-rw-r--r--deps/v8/tools/system-analyzer/index.mjs273
-rw-r--r--deps/v8/tools/system-analyzer/log-file-reader-template.html24
-rw-r--r--deps/v8/tools/system-analyzer/log-file-reader.mjs138
-rw-r--r--deps/v8/tools/system-analyzer/log/deopt.mjs10
-rw-r--r--deps/v8/tools/system-analyzer/log/ic.mjs19
-rw-r--r--deps/v8/tools/system-analyzer/log/log.mjs25
-rw-r--r--deps/v8/tools/system-analyzer/log/map.mjs45
-rw-r--r--deps/v8/tools/system-analyzer/map-panel-template.html1
-rw-r--r--deps/v8/tools/system-analyzer/map-panel.mjs130
-rw-r--r--deps/v8/tools/system-analyzer/map-panel/map-details.mjs61
-rw-r--r--deps/v8/tools/system-analyzer/map-panel/map-transitions-template.html2
-rw-r--r--deps/v8/tools/system-analyzer/map-panel/map-transitions.mjs366
-rw-r--r--deps/v8/tools/system-analyzer/processor.mjs185
-rw-r--r--deps/v8/tools/system-analyzer/source-panel-template.html36
-rw-r--r--deps/v8/tools/system-analyzer/source-panel.mjs300
-rw-r--r--deps/v8/tools/system-analyzer/stats-panel-template.html47
-rw-r--r--deps/v8/tools/system-analyzer/stats-panel.mjs152
-rw-r--r--deps/v8/tools/system-analyzer/timeline-panel-template.html45
-rw-r--r--deps/v8/tools/system-analyzer/timeline-panel.mjs118
-rw-r--r--deps/v8/tools/system-analyzer/timeline.mjs106
-rw-r--r--deps/v8/tools/system-analyzer/timeline/timeline-track-template.html90
-rw-r--r--deps/v8/tools/system-analyzer/timeline/timeline-track.mjs904
-rw-r--r--deps/v8/tools/testrunner/base_runner.py6
-rw-r--r--deps/v8/tools/testrunner/local/junit_output.py49
-rw-r--r--deps/v8/tools/testrunner/local/variants.py10
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py39
-rw-r--r--deps/v8/tools/testrunner/outproc/base.py3
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py40
-rw-r--r--deps/v8/tools/tickprocessor-driver.mjs14
-rw-r--r--deps/v8/tools/tickprocessor.mjs225
-rwxr-xr-xdeps/v8/tools/v8_presubmit.py63
-rw-r--r--deps/v8/tools/v8heapconst.py343
-rw-r--r--deps/v8/tools/whitespace.txt5
1623 files changed, 61459 insertions, 43675 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 6d2cf1077a..11c7ae61dd 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -29,6 +29,7 @@
.cproject
.gclient_entries
.gdb_history
+.jslint-cache
.landmines
.project
.pydevproject
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 36abcfba77..72e450430c 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -72,6 +72,7 @@ Bert Belder <bertbelder@gmail.com>
Burcu Dogan <burcujdogan@gmail.com>
Caitlin Potter <caitpotter88@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
+Charles Kerr <charles@charleskerr.com>
Chengzhong Wu <legendecas@gmail.com>
Choongwoo Han <cwhan.tunz@gmail.com>
Chris Nardi <hichris123@gmail.com>
@@ -117,6 +118,7 @@ Janusz Majnert <jmajnert@gmail.com>
Jay Freeman <saurik@saurik.com>
James Pike <g00gle@chilon.net>
James M Snell <jasnell@gmail.com>
+Javad Amiri <javad.amiri@anu.edu.au>
Jianghua Yang <jianghua.yjh@alibaba-inc.com>
Jiawen Geng <technicalcute@gmail.com>
Jiaxun Yang <jiaxun.yang@flygoat.com>
@@ -185,6 +187,7 @@ Rob Wu <rob@robwu.nl>
Robert Meijer <robert.s.meijer@gmail.com>
Robert Mustacchi <rm@fingolfin.org>
Robert Nagy <robert.nagy@gmail.com>
+Robert O'Callahan <rocallahan@gmail.com>
Rong Wang <wangrong089@gmail.com>
Ross Kirsling <rkirsling@gmail.com>
Ruben Bridgewater <ruben@bridgewater.de>
@@ -225,3 +228,4 @@ Zhao Jiazhong <kyslie3100@gmail.com>
Zhongping Wang <kewpie.w.zp@gmail.com>
柳荣一 <admin@web-tinker.com>
Tianping Yang <yangtianping@oppo.com>
+Takeshi Yoneda <takeshi@tetrate.io>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 94b598bc9a..1dc6c7d5ac 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -110,10 +110,10 @@ declare_args() {
v8_enable_31bit_smis_on_64bit_arch = false
# Disable arguments adaptor frame (sets -dV8_NO_ARGUMENTS_ADAPTOR).
- v8_disable_arguments_adaptor = false
-
- # Reverse JS arguments order in the stack (sets -dV8_REVERSE_JSARGS).
- v8_enable_reverse_jsargs = true
+ v8_disable_arguments_adaptor =
+ v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
+ v8_current_cpu == "arm" || v8_current_cpu == "arm64" ||
+ v8_current_cpu == "mipsel" || v8_current_cpu == "mips64el"
# Sets -dOBJECT_PRINT.
v8_enable_object_print = ""
@@ -130,7 +130,15 @@ declare_args() {
# Sets -dV8_TRACE_FEEDBACK_UPDATES.
v8_enable_trace_feedback_updates = false
- # Sets -dV8_CONCURRENT_MARKING
+ # Sets -dV8_ATOMIC_OBJECT_FIELD_WRITES and turns all field write operations
+ # into relaxed atomic operations.
+ v8_enable_atomic_object_field_writes = ""
+
+ # Sets -dV8_ATOMIC_MARKING_STATE
+ v8_enable_atomic_marking_state = ""
+
+ # Controls the default values of v8_enable_atomic_object_field_writes and
+  # v8_enable_atomic_marking_state. See the default setting code below.
v8_enable_concurrent_marking = true
# Runs mksnapshot with --turbo-profiling. After building in this
@@ -273,6 +281,10 @@ declare_args() {
# Experimental feature for collecting per-class zone memory stats.
# Requires use_rtti = true
v8_enable_precise_zone_stats = false
+
+ # Experimental feature for always keeping prototypes in dict/"slow" mode
+ # Sets -DV8_DICT_MODE_PROTOTYPES
+ v8_dict_mode_prototypes = false
}
# Derived defaults.
@@ -317,6 +329,16 @@ if (v8_enable_heap_sandbox == "") {
if (v8_enable_single_generation == "") {
v8_enable_single_generation = v8_disable_write_barriers
}
+if (v8_enable_atomic_object_field_writes == "") {
+ v8_enable_atomic_object_field_writes = v8_enable_concurrent_marking
+}
+if (v8_enable_atomic_marking_state == "") {
+ v8_enable_atomic_marking_state = v8_enable_concurrent_marking
+}
+assert(!v8_enable_concurrent_marking || v8_enable_atomic_object_field_writes,
+ "Concurrent marking requires atomic object field writes.")
+assert(!v8_enable_concurrent_marking || v8_enable_atomic_marking_state,
+ "Concurrent marking requires atomic marking state.")
# Toggle pointer compression for correctness fuzzing when building the
# clang_x64_pointer_compression toolchain. We'll correctness-compare the
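For readers tracking the new marking flags above: v8_enable_atomic_object_field_writes and v8_enable_atomic_marking_state default to the value of v8_enable_concurrent_marking, and the two asserts only allow relaxing them when concurrent marking is off. A minimal args.gn sketch (hypothetical local build; flag names taken from the hunk above):

    # Valid: with concurrent marking off, the atomic behaviors may be relaxed.
    v8_enable_concurrent_marking = false
    v8_enable_atomic_object_field_writes = false
    v8_enable_atomic_marking_state = false
    # Invalid: v8_enable_concurrent_marking = true with either flag set to
    # false trips the "Concurrent marking requires ..." asserts at gn gen time.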
@@ -348,17 +370,9 @@ if (v8_enable_shared_ro_heap && v8_enable_pointer_compression) {
assert(!v8_use_multi_snapshots || !v8_control_flow_integrity,
"Control-flow integrity does not support multisnapshots")
-assert(
- !v8_enable_pointer_compression || !v8_enable_shared_ro_heap,
- "Pointer compression is not supported with shared read-only heap enabled")
-
assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression,
"V8 Heap Sandbox requires pointer compression")
-assert(
- !v8_disable_arguments_adaptor || v8_enable_reverse_jsargs,
- "Disabling the arguments adaptor frame requires reversing the JS arguments stack")
-
assert(!v8_enable_unconditional_write_barriers || !v8_disable_write_barriers,
"Write barriers can't be both enabled and disabled")
@@ -517,9 +531,6 @@ config("v8_header_features") {
if (v8_disable_arguments_adaptor) {
defines += [ "V8_NO_ARGUMENTS_ADAPTOR" ]
}
- if (v8_enable_reverse_jsargs) {
- defines += [ "V8_REVERSE_JSARGS" ]
- }
}
# Put defines here that are only used in our internal files and NEVER in
@@ -614,8 +625,11 @@ config("features") {
if (v8_use_external_startup_data) {
defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ]
}
- if (v8_enable_concurrent_marking) {
- defines += [ "V8_CONCURRENT_MARKING" ]
+ if (v8_enable_atomic_object_field_writes) {
+ defines += [ "V8_ATOMIC_OBJECT_FIELD_WRITES" ]
+ }
+ if (v8_enable_atomic_marking_state) {
+ defines += [ "V8_ATOMIC_MARKING_STATE" ]
}
if (v8_enable_lazy_source_positions) {
defines += [ "V8_ENABLE_LAZY_SOURCE_POSITIONS" ]
@@ -656,6 +670,9 @@ config("features") {
if (v8_fuzzilli) {
defines += [ "V8_FUZZILLI" ]
}
+ if (v8_dict_mode_prototypes) {
+ defines += [ "V8_DICT_MODE_PROTOTYPES" ]
+ }
}
config("toolchain") {
@@ -941,6 +958,10 @@ config("toolchain") {
# GCC assumes that control can get past an exhaustive switch and then
# warns if there's no return there (see https://crbug.com/v8/7658).
"-Wno-return-type",
+
+      # Disable GCC warnings about using an enum constant in a boolean context.
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=97266
+ "-Wno-int-in-bool-context",
]
}
@@ -1134,8 +1155,8 @@ torque_files = [
"src/builtins/array.tq",
"src/builtins/arraybuffer.tq",
"src/builtins/base.tq",
- "src/builtins/bigint.tq",
"src/builtins/boolean.tq",
+ "src/builtins/builtins-bigint.tq",
"src/builtins/builtins-string.tq",
"src/builtins/cast.tq",
"src/builtins/collections.tq",
@@ -1150,6 +1171,7 @@ torque_files = [
"src/builtins/function.tq",
"src/builtins/growable-fixed-array.tq",
"src/builtins/ic-callable.tq",
+ "src/builtins/ic-dynamic-map-checks.tq",
"src/builtins/ic.tq",
"src/builtins/internal-coverage.tq",
"src/builtins/internal.tq",
@@ -1200,8 +1222,9 @@ torque_files = [
"src/builtins/string-replaceall.tq",
"src/builtins/string-slice.tq",
"src/builtins/string-startswith.tq",
- "src/builtins/string-substring.tq",
"src/builtins/string-substr.tq",
+ "src/builtins/string-substring.tq",
+ "src/builtins/string-trim.tq",
"src/builtins/symbol.tq",
"src/builtins/torque-internal.tq",
"src/builtins/typed-array-createtypedarray.tq",
@@ -1229,6 +1252,7 @@ torque_files = [
"src/objects/allocation-site.tq",
"src/objects/api-callbacks.tq",
"src/objects/arguments.tq",
+ "src/objects/bigint.tq",
"src/objects/cell.tq",
"src/objects/code.tq",
"src/objects/contexts.tq",
@@ -1243,11 +1267,11 @@ torque_files = [
"src/objects/free-space.tq",
"src/objects/heap-number.tq",
"src/objects/heap-object.tq",
- "src/objects/intl-objects.tq",
"src/objects/js-array-buffer.tq",
"src/objects/js-array.tq",
"src/objects/js-collection-iterator.tq",
"src/objects/js-collection.tq",
+ "src/objects/js-function.tq",
"src/objects/js-generator.tq",
"src/objects/js-objects.tq",
"src/objects/js-promise.tq",
@@ -1278,14 +1302,29 @@ torque_files = [
"src/objects/struct.tq",
"src/objects/synthetic-module.tq",
"src/objects/template-objects.tq",
- "src/objects/template.tq",
+ "src/objects/templates.tq",
+ "src/objects/torque-defined-classes.tq",
"src/wasm/wasm-objects.tq",
"test/torque/test-torque.tq",
"third_party/v8/builtins/array-sort.tq",
]
-if (!v8_enable_i18n_support) {
- torque_files -= [ "src/objects/intl-objects.tq" ]
+if (v8_enable_i18n_support) {
+ torque_files += [
+ "src/objects/intl-objects.tq",
+ "src/objects/js-break-iterator.tq",
+ "src/objects/js-collator.tq",
+ "src/objects/js-date-time-format.tq",
+ "src/objects/js-display-names.tq",
+ "src/objects/js-list-format.tq",
+ "src/objects/js-locale.tq",
+ "src/objects/js-number-format.tq",
+ "src/objects/js-plural-rules.tq",
+ "src/objects/js-relative-time-format.tq",
+ "src/objects/js-segment-iterator.tq",
+ "src/objects/js-segmenter.tq",
+ "src/objects/js-segments.tq",
+ ]
}
# Template for running torque
@@ -1328,19 +1367,15 @@ template("run_torque") {
"$target_gen_dir/torque-generated/enum-verifiers.cc",
"$target_gen_dir/torque-generated/objects-printer.cc",
"$target_gen_dir/torque-generated/objects-body-descriptors-inl.inc",
- "$target_gen_dir/torque-generated/class-definitions.cc",
- "$target_gen_dir/torque-generated/class-definitions-inl.h",
- "$target_gen_dir/torque-generated/class-definitions.h",
"$target_gen_dir/torque-generated/class-debug-readers.cc",
"$target_gen_dir/torque-generated/class-debug-readers.h",
"$target_gen_dir/torque-generated/exported-macros-assembler.cc",
"$target_gen_dir/torque-generated/exported-macros-assembler.h",
"$target_gen_dir/torque-generated/csa-types.h",
"$target_gen_dir/torque-generated/instance-types.h",
- "$target_gen_dir/torque-generated/internal-class-definitions.h",
- "$target_gen_dir/torque-generated/internal-class-definitions-inl.h",
- "$target_gen_dir/torque-generated/exported-class-definitions.h",
- "$target_gen_dir/torque-generated/exported-class-definitions-inl.h",
+ "$target_gen_dir/torque-generated/runtime-macros.cc",
+ "$target_gen_dir/torque-generated/runtime-macros.h",
+ "$target_gen_dir/torque-generated/class-forward-declarations.h",
]
outputs = []
@@ -1351,10 +1386,13 @@ template("run_torque") {
}
foreach(file, torque_files) {
- filetq = string_replace(file, ".tq", "-tq-csa")
+ filetq = string_replace(file, ".tq", "-tq")
outputs += [
- "$destination_folder/$filetq.cc",
- "$destination_folder/$filetq.h",
+ "$target_gen_dir/torque-generated/$filetq-csa.cc",
+ "$target_gen_dir/torque-generated/$filetq-csa.h",
+ "$target_gen_dir/torque-generated/$filetq-inl.inc",
+ "$target_gen_dir/torque-generated/$filetq.cc",
+ "$target_gen_dir/torque-generated/$filetq.inc",
]
}
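To make the renamed Torque outputs concrete: the template now strips only the ".tq" suffix and fans out five generated files per source instead of two. An illustrative GN trace for one existing torque_files entry:

    file = "src/builtins/array.tq"
    filetq = string_replace(file, ".tq", "-tq")  # "src/builtins/array-tq"
    # Emitted under $target_gen_dir/torque-generated/src/builtins/:
    #   array-tq-csa.cc, array-tq-csa.h, array-tq-inl.inc,
    #   array-tq.cc, array-tq.inc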
@@ -1430,10 +1468,10 @@ v8_source_set("torque_generated_initializers") {
"src/torque/runtime-support.h",
]
foreach(file, torque_files) {
- filetq = string_replace(file, ".tq", "-tq-csa")
+ filetq = string_replace(file, ".tq", "-tq")
sources += [
- "$target_gen_dir/torque-generated/$filetq.cc",
- "$target_gen_dir/torque-generated/$filetq.h",
+ "$target_gen_dir/torque-generated/$filetq-csa.cc",
+ "$target_gen_dir/torque-generated/$filetq-csa.h",
]
}
@@ -1452,12 +1490,21 @@ v8_source_set("torque_generated_definitions") {
public_deps = [ ":v8_maybe_icu" ]
sources = [
- "$target_gen_dir/torque-generated/class-definitions.cc",
+ "$target_gen_dir/torque-generated/class-forward-declarations.h",
"$target_gen_dir/torque-generated/class-verifiers.cc",
"$target_gen_dir/torque-generated/class-verifiers.h",
"$target_gen_dir/torque-generated/factory.cc",
"$target_gen_dir/torque-generated/objects-printer.cc",
+ "$target_gen_dir/torque-generated/runtime-macros.cc",
]
+ foreach(file, torque_files) {
+ filetq = string_replace(file, ".tq", "-tq")
+ sources += [
+ "$target_gen_dir/torque-generated/$filetq-inl.inc",
+ "$target_gen_dir/torque-generated/$filetq.cc",
+ "$target_gen_dir/torque-generated/$filetq.inc",
+ ]
+ }
configs = [ ":internal_config" ]
}
@@ -1535,9 +1582,10 @@ template("run_mksnapshot") {
args += [ "--turbo-profiling-verbose" ]
}
if (v8_builtins_profiling_log_file != "") {
+ sources += [ v8_builtins_profiling_log_file ]
args += [
"--turbo-profiling-log-file",
- v8_builtins_profiling_log_file,
+ rebase_path(v8_builtins_profiling_log_file, root_build_dir),
]
}
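The mksnapshot change above does two things at once: the profiling log is added to sources so GN tracks it as an input, and rebase_path() converts the GN-style path into one resolvable from the build directory, where the action runs. A hedged sketch of the effect (the log path is invented for illustration):

    v8_builtins_profiling_log_file = "//v8/builtins.profile"  # hypothetical
    args += [
      "--turbo-profiling-log-file",
      # e.g. "../../v8/builtins.profile" relative to root_build_dir
      rebase_path(v8_builtins_profiling_log_file, root_build_dir),
    ]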
@@ -1665,6 +1713,9 @@ action("v8_dump_build_config") {
"is_ubsan_vptr=$is_ubsan_vptr",
"target_cpu=\"$target_cpu\"",
"v8_current_cpu=\"$v8_current_cpu\"",
+ "v8_enable_atomic_marking_state=$v8_enable_atomic_marking_state",
+ "v8_enable_atomic_object_field_writes=" +
+ "$v8_enable_atomic_object_field_writes",
"v8_enable_concurrent_marking=$v8_enable_concurrent_marking",
"v8_enable_i18n_support=$v8_enable_i18n_support",
"v8_enable_verify_predictable=$v8_enable_verify_predictable",
@@ -2263,6 +2314,7 @@ v8_source_set("v8_base_without_compiler") {
"include/v8-metrics.h",
"include/v8-platform.h",
"include/v8-profiler.h",
+ "include/v8-unwinder-state.h",
"include/v8-util.h",
"include/v8-wasm-trap-handler-posix.h",
"include/v8.h",
@@ -2469,11 +2521,14 @@ v8_source_set("v8_base_without_compiler") {
"src/diagnostics/perf-jit.cc",
"src/diagnostics/perf-jit.h",
"src/diagnostics/unwinder.cc",
+ "src/diagnostics/unwinder.h",
"src/execution/arguments-inl.h",
"src/execution/arguments.cc",
"src/execution/arguments.h",
"src/execution/execution.cc",
"src/execution/execution.h",
+ "src/execution/external-pointer-table.cc",
+ "src/execution/external-pointer-table.h",
"src/execution/frame-constants.h",
"src/execution/frames-inl.h",
"src/execution/frames.cc",
@@ -2554,6 +2609,8 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/code-object-registry.h",
"src/heap/code-stats.cc",
"src/heap/code-stats.h",
+ "src/heap/collection-barrier.cc",
+ "src/heap/collection-barrier.h",
"src/heap/combined-heap.cc",
"src/heap/combined-heap.h",
"src/heap/concurrent-allocator-inl.h",
@@ -2563,7 +2620,11 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/concurrent-marking.h",
"src/heap/cppgc-js/cpp-heap.cc",
"src/heap/cppgc-js/cpp-heap.h",
+ "src/heap/cppgc-js/cpp-snapshot.cc",
+ "src/heap/cppgc-js/cpp-snapshot.h",
"src/heap/cppgc-js/unified-heap-marking-state.h",
+ "src/heap/cppgc-js/unified-heap-marking-verifier.cc",
+ "src/heap/cppgc-js/unified-heap-marking-verifier.h",
"src/heap/cppgc-js/unified-heap-marking-visitor.cc",
"src/heap/cppgc-js/unified-heap-marking-visitor.h",
"src/heap/embedder-tracing.cc",
@@ -2647,6 +2708,7 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/paged-spaces-inl.h",
"src/heap/paged-spaces.cc",
"src/heap/paged-spaces.h",
+ "src/heap/parallel-work-item.h",
"src/heap/read-only-heap-inl.h",
"src/heap/read-only-heap.cc",
"src/heap/read-only-heap.h",
@@ -2672,6 +2734,8 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/stress-scavenge-observer.h",
"src/heap/sweeper.cc",
"src/heap/sweeper.h",
+ "src/heap/weak-object-worklists.cc",
+ "src/heap/weak-object-worklists.h",
"src/heap/worklist.h",
"src/ic/call-optimization.cc",
"src/ic/call-optimization.h",
@@ -2785,6 +2849,7 @@ v8_source_set("v8_base_without_compiler") {
"src/numbers/math-random.h",
"src/numbers/strtod.cc",
"src/numbers/strtod.h",
+ "src/objects/all-objects-inl.h",
"src/objects/allocation-site-inl.h",
"src/objects/allocation-site-scopes-inl.h",
"src/objects/allocation-site-scopes.h",
@@ -2795,6 +2860,7 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/arguments.h",
"src/objects/backing-store.cc",
"src/objects/backing-store.h",
+ "src/objects/bigint-inl.h",
"src/objects/bigint.cc",
"src/objects/bigint.h",
"src/objects/cell-inl.h",
@@ -2804,13 +2870,15 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/code-kind.h",
"src/objects/code.cc",
"src/objects/code.h",
- "src/objects/compilation-cache-inl.h",
- "src/objects/compilation-cache.h",
+ "src/objects/compilation-cache-table-inl.h",
+ "src/objects/compilation-cache-table.cc",
+ "src/objects/compilation-cache-table.h",
"src/objects/compressed-slots-inl.h",
"src/objects/compressed-slots.h",
"src/objects/contexts-inl.h",
"src/objects/contexts.cc",
"src/objects/contexts.h",
+ "src/objects/data-handler-inl.h",
"src/objects/data-handler.h",
"src/objects/debug-objects-inl.h",
"src/objects/debug-objects.cc",
@@ -2840,8 +2908,12 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/field-type.h",
"src/objects/fixed-array-inl.h",
"src/objects/fixed-array.h",
+ "src/objects/foreign-inl.h",
+ "src/objects/foreign.h",
"src/objects/frame-array-inl.h",
"src/objects/frame-array.h",
+ "src/objects/free-space-inl.h",
+ "src/objects/free-space.h",
"src/objects/function-kind.h",
"src/objects/hash-table-inl.h",
"src/objects/hash-table.h",
@@ -3009,6 +3081,7 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/string.h",
"src/objects/struct-inl.h",
"src/objects/struct.h",
+ "src/objects/synthetic-module-inl.h",
"src/objects/synthetic-module.cc",
"src/objects/synthetic-module.h",
"src/objects/tagged-field-inl.h",
@@ -3024,6 +3097,8 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/template-objects.h",
"src/objects/templates-inl.h",
"src/objects/templates.h",
+ "src/objects/torque-defined-classes-inl.h",
+ "src/objects/torque-defined-classes.h",
"src/objects/transitions-inl.h",
"src/objects/transitions.cc",
"src/objects/transitions.h",
@@ -3084,6 +3159,8 @@ v8_source_set("v8_base_without_compiler") {
"src/profiler/sampling-heap-profiler.h",
"src/profiler/strings-storage.cc",
"src/profiler/strings-storage.h",
+ "src/profiler/symbolizer.cc",
+ "src/profiler/symbolizer.h",
"src/profiler/tick-sample.cc",
"src/profiler/tick-sample.h",
"src/profiler/tracing-cpu-profiler.cc",
@@ -3178,8 +3255,6 @@ v8_source_set("v8_base_without_compiler") {
"src/snapshot/context-deserializer.h",
"src/snapshot/context-serializer.cc",
"src/snapshot/context-serializer.h",
- "src/snapshot/deserializer-allocator.cc",
- "src/snapshot/deserializer-allocator.h",
"src/snapshot/deserializer.cc",
"src/snapshot/deserializer.h",
"src/snapshot/embedded/embedded-data.cc",
@@ -3193,8 +3268,6 @@ v8_source_set("v8_base_without_compiler") {
"src/snapshot/references.h",
"src/snapshot/roots-serializer.cc",
"src/snapshot/roots-serializer.h",
- "src/snapshot/serializer-allocator.cc",
- "src/snapshot/serializer-allocator.h",
"src/snapshot/serializer-deserializer.cc",
"src/snapshot/serializer-deserializer.h",
"src/snapshot/serializer.cc",
@@ -3439,6 +3512,7 @@ v8_source_set("v8_base_without_compiler") {
"src/debug/ia32/debug-ia32.cc",
"src/deoptimizer/ia32/deoptimizer-ia32.cc",
"src/diagnostics/ia32/disasm-ia32.cc",
+ "src/diagnostics/ia32/unwinder-ia32.cc",
"src/execution/ia32/frame-constants-ia32.cc",
"src/execution/ia32/frame-constants-ia32.h",
"src/regexp/ia32/regexp-macro-assembler-ia32.cc",
@@ -3468,6 +3542,7 @@ v8_source_set("v8_base_without_compiler") {
"src/deoptimizer/x64/deoptimizer-x64.cc",
"src/diagnostics/x64/disasm-x64.cc",
"src/diagnostics/x64/eh-frame-x64.cc",
+ "src/diagnostics/x64/unwinder-x64.cc",
"src/execution/x64/frame-constants-x64.cc",
"src/execution/x64/frame-constants-x64.h",
"src/regexp/x64/regexp-macro-assembler-x64.cc",
@@ -3516,6 +3591,7 @@ v8_source_set("v8_base_without_compiler") {
"src/deoptimizer/arm/deoptimizer-arm.cc",
"src/diagnostics/arm/disasm-arm.cc",
"src/diagnostics/arm/eh-frame-arm.cc",
+ "src/diagnostics/arm/unwinder-arm.cc",
"src/execution/arm/frame-constants-arm.cc",
"src/execution/arm/frame-constants-arm.h",
"src/execution/arm/simulator-arm.cc",
@@ -3556,6 +3632,7 @@ v8_source_set("v8_base_without_compiler") {
"src/diagnostics/arm64/disasm-arm64.cc",
"src/diagnostics/arm64/disasm-arm64.h",
"src/diagnostics/arm64/eh-frame-arm64.cc",
+ "src/diagnostics/arm64/unwinder-arm64.cc",
"src/execution/arm64/frame-constants-arm64.cc",
"src/execution/arm64/frame-constants-arm64.h",
"src/execution/arm64/pointer-auth-arm64.cc",
@@ -3594,6 +3671,7 @@ v8_source_set("v8_base_without_compiler") {
"src/debug/mips/debug-mips.cc",
"src/deoptimizer/mips/deoptimizer-mips.cc",
"src/diagnostics/mips/disasm-mips.cc",
+ "src/diagnostics/mips/unwinder-mips.cc",
"src/execution/mips/frame-constants-mips.cc",
"src/execution/mips/frame-constants-mips.h",
"src/execution/mips/simulator-mips.cc",
@@ -3621,6 +3699,7 @@ v8_source_set("v8_base_without_compiler") {
"src/debug/mips64/debug-mips64.cc",
"src/deoptimizer/mips64/deoptimizer-mips64.cc",
"src/diagnostics/mips64/disasm-mips64.cc",
+ "src/diagnostics/mips64/unwinder-mips64.cc",
"src/execution/mips64/frame-constants-mips64.cc",
"src/execution/mips64/frame-constants-mips64.h",
"src/execution/mips64/simulator-mips64.cc",
@@ -3651,6 +3730,7 @@ v8_source_set("v8_base_without_compiler") {
"src/deoptimizer/ppc/deoptimizer-ppc.cc",
"src/diagnostics/ppc/disasm-ppc.cc",
"src/diagnostics/ppc/eh-frame-ppc.cc",
+ "src/diagnostics/ppc/unwinder-ppc.cc",
"src/execution/ppc/frame-constants-ppc.cc",
"src/execution/ppc/frame-constants-ppc.h",
"src/execution/ppc/simulator-ppc.cc",
@@ -3681,6 +3761,7 @@ v8_source_set("v8_base_without_compiler") {
"src/deoptimizer/ppc/deoptimizer-ppc.cc",
"src/diagnostics/ppc/disasm-ppc.cc",
"src/diagnostics/ppc/eh-frame-ppc.cc",
+ "src/diagnostics/ppc/unwinder-ppc.cc",
"src/execution/ppc/frame-constants-ppc.cc",
"src/execution/ppc/frame-constants-ppc.h",
"src/execution/ppc/simulator-ppc.cc",
@@ -3711,6 +3792,7 @@ v8_source_set("v8_base_without_compiler") {
"src/deoptimizer/s390/deoptimizer-s390.cc",
"src/diagnostics/s390/disasm-s390.cc",
"src/diagnostics/s390/eh-frame-s390.cc",
+ "src/diagnostics/s390/unwinder-s390.cc",
"src/execution/s390/frame-constants-s390.cc",
"src/execution/s390/frame-constants-s390.h",
"src/execution/s390/simulator-s390.cc",
@@ -3852,6 +3934,8 @@ v8_source_set("torque_base") {
sources = [
"src/torque/ast.h",
+ "src/torque/cc-generator.cc",
+ "src/torque/cc-generator.h",
"src/torque/cfg.cc",
"src/torque/cfg.h",
"src/torque/class-debug-reader-generator.cc",
@@ -3879,6 +3963,8 @@ v8_source_set("torque_base") {
"src/torque/server-data.h",
"src/torque/source-positions.cc",
"src/torque/source-positions.h",
+ "src/torque/torque-code-generator.cc",
+ "src/torque/torque-code-generator.h",
"src/torque/torque-compiler.cc",
"src/torque/torque-compiler.h",
"src/torque/torque-parser.cc",
@@ -4020,10 +4106,13 @@ v8_component("v8_libbase") {
"src/base/platform/semaphore.h",
"src/base/platform/time.cc",
"src/base/platform/time.h",
+ "src/base/platform/wrappers.h",
+ "src/base/platform/wrappers_std.cc",
"src/base/region-allocator.cc",
"src/base/region-allocator.h",
"src/base/ring-buffer.h",
"src/base/safe_conversions.h",
+ "src/base/safe_conversions_arm_impl.h",
"src/base/safe_conversions_impl.h",
"src/base/small-vector.h",
"src/base/sys-info.cc",
@@ -4304,6 +4393,7 @@ v8_source_set("cppgc_base") {
"include/cppgc/common.h",
"include/cppgc/custom-space.h",
"include/cppgc/default-platform.h",
+ "include/cppgc/ephemeron-pair.h",
"include/cppgc/garbage-collected.h",
"include/cppgc/heap.h",
"include/cppgc/internal/api-constants.h",
@@ -4311,6 +4401,7 @@ v8_source_set("cppgc_base") {
"include/cppgc/internal/compiler-specific.h",
"include/cppgc/internal/finalizer-trait.h",
"include/cppgc/internal/gc-info.h",
+ "include/cppgc/internal/name-trait.h",
"include/cppgc/internal/persistent-node.h",
"include/cppgc/internal/pointer-policies.h",
"include/cppgc/internal/prefinalizer-handler.h",
@@ -4319,6 +4410,7 @@ v8_source_set("cppgc_base") {
"include/cppgc/liveness-broker.h",
"include/cppgc/macros.h",
"include/cppgc/member.h",
+ "include/cppgc/name-provider.h",
"include/cppgc/persistent.h",
"include/cppgc/platform.h",
"include/cppgc/prefinalizer.h",
@@ -4328,8 +4420,12 @@ v8_source_set("cppgc_base") {
"include/cppgc/visitor.h",
"include/v8config.h",
"src/heap/cppgc/allocation.cc",
- "src/heap/cppgc/default-job.h",
- "src/heap/cppgc/default-platform.cc",
+ "src/heap/cppgc/compaction-worklists.cc",
+ "src/heap/cppgc/compaction-worklists.h",
+ "src/heap/cppgc/compactor.cc",
+ "src/heap/cppgc/compactor.h",
+ "src/heap/cppgc/concurrent-marker.cc",
+ "src/heap/cppgc/concurrent-marker.h",
"src/heap/cppgc/free-list.cc",
"src/heap/cppgc/free-list.h",
"src/heap/cppgc/garbage-collector.h",
@@ -4366,6 +4462,7 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/marking-visitor.h",
"src/heap/cppgc/marking-worklists.cc",
"src/heap/cppgc/marking-worklists.h",
+ "src/heap/cppgc/name-trait.cc",
"src/heap/cppgc/object-allocator.cc",
"src/heap/cppgc/object-allocator.h",
"src/heap/cppgc/object-start-bitmap.h",
@@ -4377,6 +4474,7 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/prefinalizer-handler.cc",
"src/heap/cppgc/prefinalizer-handler.h",
"src/heap/cppgc/process-heap.cc",
+ "src/heap/cppgc/process-heap.h",
"src/heap/cppgc/raw-heap.cc",
"src/heap/cppgc/raw-heap.h",
"src/heap/cppgc/sanitizers.h",
@@ -4410,6 +4508,7 @@ v8_source_set("cppgc_base") {
public_deps = [
":v8_cppgc_shared",
":v8_libbase",
+ ":v8_libplatform",
]
}
@@ -4711,6 +4810,7 @@ if (is_fuchsia && !build_with_chromium) {
group("v8_fuzzers") {
testonly = true
data_deps = [
+ ":v8_simple_inspector_fuzzer",
":v8_simple_json_fuzzer",
":v8_simple_multi_return_fuzzer",
":v8_simple_parser_fuzzer",
@@ -5175,6 +5275,23 @@ v8_source_set("wasm_compile_fuzzer") {
v8_fuzzer("wasm_compile_fuzzer") {
}
+v8_source_set("inspector_fuzzer") {
+ sources = [ "test/fuzzer/inspector-fuzzer.cc" ]
+
+ deps = [
+ ":fuzzer_support",
+ "test/inspector:inspector_test",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+}
+
+v8_fuzzer("inspector_fuzzer") {
+}
+
# Target to build all generated .cc files.
group("v8_generated_cc_files") {
testonly = true
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 6bddd2cc9f..3e3fed387d 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -6,6 +6,9 @@ use_relative_paths = True
gclient_gn_args_file = 'build/config/gclient_args.gni'
gclient_gn_args = [
+ # TODO(https://crbug.com/1137662, https://crbug.com/1080854)
+ # Remove when migration is complete.
+ 'checkout_fuchsia_for_arm64_host',
'checkout_google_benchmark',
'mac_xcode_version',
]
@@ -26,6 +29,12 @@ vars = {
# Wildcards are supported (e.g. "qemu.*").
'checkout_fuchsia_boot_images': "qemu.x64,qemu.arm64",
+ # TODO(https://crbug.com/1137662, https://crbug.com/1080854)
+ # Remove when migration is complete.
+ # By default, do not check out files required to run fuchsia tests in
+ # qemu on linux-arm64 machines.
+ 'checkout_fuchsia_for_arm64_host': False,
+
'checkout_instrumented_libraries': False,
'checkout_ittapi': False,
# Fetch clang-tidy into the same bin/ directory as our clang binary.
@@ -41,10 +50,10 @@ vars = {
'mac_xcode_version': 'default',
# GN CIPD package version.
- 'gn_version': 'git_revision:e002e68a48d1c82648eadde2f6aafa20d08c36f2',
+ 'gn_version': 'git_revision:53d92014bf94c3893886470a1c7c1289f8818db0',
# luci-go CIPD package version.
- 'luci_go': 'git_revision:83c3df996b224edf5061840744395707a0e513e7',
+ 'luci_go': 'git_revision:1a022d3a4c50be4207ee93451255d71896416596',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
@@ -77,20 +86,20 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_tools-lint_version
# and whatever else without interference from each other.
- 'android_sdk_cmdline-tools_version': 'ijpIFSitwBfaEdO9VXBGPqDHUVzPimXy_whw3aHTN9oC',
+ 'android_sdk_cmdline-tools_version': 'V__2Ycej-H2-6AcXX5A3gi7sIk74SuN44PBm2uC_N1sC',
}
deps = {
'build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + '38a49c12ded01dd8c4628b432cb7eebfb29e77f1',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + '2101eff1ac4bfd25f2dfa71ad632a600a38c1ed9',
'third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '1099c11d5d12255458303c1ba4e5584cfde90477',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '982b2a757087c2519e42b82b93cbfe5adf43cdd5',
'third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'aef20f06d47ba76fdf13abcdb033e2a408b5a94d',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'c2a4cae149aae7fd30c4cbe3cf1b30df03b386f1',
'third_party/instrumented_libraries':
- Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '3c52ccdd3b9edf8fb7b3bd8ba945cce47d887ea8',
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '6ba978ccb754d270b6cd12da58c8269b617e4f6e',
'buildtools':
- Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '3ff4f5027b4b81a6c9c36d64d71444f2709a4896',
+ Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '6302c1175607a436e18947a5abe9df2209e845fc',
'buildtools/clang_format/script':
Var('chromium_url') + '/chromium/llvm-project/cfe/tools/clang-format.git' + '@' + '96636aa0e9f047f17447f2d45a094d0b59ed7917',
'buildtools/linux64': {
@@ -130,13 +139,13 @@ deps = {
'condition': 'host_os == "win"',
},
'base/trace_event/common':
- Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '23ef5333a357fc7314630ef88b44c3a545881dee',
+ Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'eb94f1c7aa96207f469008f29989a43feb2718f8',
'third_party/android_ndk': {
'url': Var('chromium_url') + '/android_ndk.git' + '@' + '27c0a8d090c666a50e40fceb4ee5b40b1a2d3f87',
'condition': 'checkout_android',
},
'third_party/android_platform': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'fc6c6840eeb254ac4fd199c548c54178ce3545bb',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'ef64306e7772dea22df5f98102e6288da3510843',
'condition': 'checkout_android',
},
'third_party/android_sdk/public': {
@@ -178,7 +187,7 @@ deps = {
'dep_type': 'cipd',
},
'third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + '18d69fb4e7b2225974dfc306ca0c11a58fe4e917',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + '434681c2378b686117c2b003a58c54d78f22185f',
'condition': 'checkout_android',
},
'third_party/colorama/src': {
@@ -186,7 +195,7 @@ deps = {
'condition': 'checkout_android',
},
'third_party/fuchsia-sdk': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '6a38b0e1f1f4a6255959b259a681e46ee72dee58',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + 'f8df9ff79b878d1998970cc04a197061069e48ce',
'condition': 'checkout_fuchsia',
},
'third_party/googletest/src':
@@ -198,15 +207,15 @@ deps = {
'third_party/jinja2':
Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'a82a4944a7f2496639f34a89c9923be5908b80aa',
'third_party/markupsafe':
- Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + 'f2fb0f21ef1e1d4ffd43be8c63fc3d4928dea7ab',
+ Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '0944e71f4b2cb9a871bcbe353f95e889b64a611a',
'tools/swarming_client':
- Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '44c13d73156581ea09b9389001e58c23a4b8d70a',
+ Var('chromium_url') + '/infra/luci/client-py.git' + '@' + 'd46ea7635f2911208268170512cb611412488fd8',
'test/benchmarks/data':
Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f',
'test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '63976020376c8c2b0ebabf37c364f25288d4b93b',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '0e7319c015fe935594f8bcafaedb0c94f7fec1df',
'test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '4555345a943d0c99a9461182705543fb171dda4b',
'third_party/qemu-linux-x64': {
@@ -233,7 +242,7 @@ deps = {
'packages': [
{
'package': 'fuchsia/third_party/aemu/linux-amd64',
- 'version': 'FfxmX7LQ9OID3pVAmcemr6u9lK3xjXzAXxvqzEcclMwC'
+ 'version': 'xP4TXh9wWGTG0qr4y6eFcUO_0HOBmt3vorgtVmpwBJsC'
},
],
'condition': 'host_os == "linux" and checkout_fuchsia',
@@ -250,7 +259,7 @@ deps = {
'dep_type': 'cipd',
},
'tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '3017edade60658a699be776d9e282509a902ffe9',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'a37c0858a645506c8e9d3bebab1ed5a5b1f9df61',
'tools/luci-go': {
'packages': [
{
@@ -280,11 +289,11 @@ deps = {
'dep_type': 'cipd',
},
'third_party/perfetto':
- Var('android_url') + '/platform/external/perfetto.git' + '@' + 'ff70e0d273ed10995866c803f23e11250eb3dc52',
+ Var('android_url') + '/platform/external/perfetto.git' + '@' + '7cdc44f903d3bcfd1d0f67188bfa797a24756868',
'third_party/protobuf':
Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + 'b68a347f56137b4b1a746e8c7438495a6ac1bd91',
'third_party/zlib':
- Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '4668feaaa47973a6f9d9f9caeb14cd03731854f1',
+ Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'e84c9a3fd75fdc39055b7ae27d6ec508e50bd39e',
'third_party/jsoncpp/source':
Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '9059f5cad030ba11d37818847443a53918c327b1',
'third_party/ittapi': {
@@ -519,12 +528,21 @@ hooks = [
'-o', 'build/util/LASTCHANGE'],
},
{
- 'name': 'fuchsia_sdk',
+ 'name': 'Download Fuchsia SDK',
'pattern': '.',
'condition': 'checkout_fuchsia',
'action': [
'python',
'build/fuchsia/update_sdk.py',
+ ],
+ },
+ {
+ 'name': 'Download Fuchsia system images',
+ 'pattern': '.',
+ 'condition': 'checkout_fuchsia',
+ 'action': [
+ 'python',
+ 'build/fuchsia/update_images.py',
'--boot-images={checkout_fuchsia_boot_images}',
],
},
diff --git a/deps/v8/DIR_METADATA b/deps/v8/DIR_METADATA
new file mode 100644
index 0000000000..72c04a4d91
--- /dev/null
+++ b/deps/v8/DIR_METADATA
@@ -0,0 +1,12 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript"
+}
+team_email: "v8-dev@googlegroups.com" \ No newline at end of file
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index e096d3c950..3698d14dd9 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -27,6 +27,3 @@ per-file *-mips*=file:MIPS_OWNERS
per-file *-mips64*=file:MIPS_OWNERS
per-file *-ppc*=file:PPC_OWNERS
per-file *-s390*=file:S390_OWNERS
-
-# TEAM: v8-dev@googlegroups.com
-# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/PPC_OWNERS b/deps/v8/PPC_OWNERS
index 6edd45a6ef..02c2cd757c 100644
--- a/deps/v8/PPC_OWNERS
+++ b/deps/v8/PPC_OWNERS
@@ -2,3 +2,4 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
+vasili.skurydzin@ibm.com
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index eba4158d81..113ed2fd61 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -80,6 +80,7 @@ def _V8PresubmitChecks(input_api, output_api):
sys.path.append(input_api.os_path.join(
input_api.PresubmitLocalPath(), 'tools'))
from v8_presubmit import CppLintProcessor
+ from v8_presubmit import JSLintProcessor
from v8_presubmit import TorqueLintProcessor
from v8_presubmit import SourceProcessor
from v8_presubmit import StatusFilesProcessor
@@ -95,6 +96,11 @@ def _V8PresubmitChecks(input_api, output_api):
affected_file,
files_to_check=(r'.+\.tq'))
+ def FilterJSFile(affected_file):
+ return input_api.FilterSourceFile(
+ affected_file,
+ files_to_check=(r'.+\.m?js'))
+
results = []
if not CppLintProcessor().RunOnFiles(
input_api.AffectedFiles(file_filter=FilterFile, include_deletes=False)):
@@ -103,6 +109,10 @@ def _V8PresubmitChecks(input_api, output_api):
input_api.AffectedFiles(file_filter=FilterTorqueFile,
include_deletes=False)):
results.append(output_api.PresubmitError("Torque format check failed"))
+ if not JSLintProcessor().RunOnFiles(
+ input_api.AffectedFiles(file_filter=FilterJSFile,
+ include_deletes=False)):
+ results.append(output_api.PresubmitError("JS format check failed"))
if not SourceProcessor().RunOnFiles(
input_api.AffectedFiles(include_deletes=False)):
results.append(output_api.PresubmitError(
diff --git a/deps/v8/S390_OWNERS b/deps/v8/S390_OWNERS
index 6edd45a6ef..02c2cd757c 100644
--- a/deps/v8/S390_OWNERS
+++ b/deps/v8/S390_OWNERS
@@ -2,3 +2,4 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
+vasili.skurydzin@ibm.com
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index 28b7275345..120481f30f 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -969,6 +969,7 @@
#define TRACE_TASK_EXECUTION(run_function, task) \
INTERNAL_TRACE_TASK_EXECUTION(run_function, task)
+// Special trace event macro to trace log messages.
#define TRACE_LOG_MESSAGE(file, message, line) \
INTERNAL_TRACE_LOG_MESSAGE(file, message, line)
diff --git a/deps/v8/gni/proto_library.gni b/deps/v8/gni/proto_library.gni
index eca3ffb84e..0b72d7b8a4 100644
--- a/deps/v8/gni/proto_library.gni
+++ b/deps/v8/gni/proto_library.gni
@@ -11,8 +11,6 @@ template("proto_library") {
assert(defined(invoker.sources))
proto_sources = invoker.sources
- set_sources_assignment_filter([])
-
if (host_os == "win") {
host_executable_suffix = ".exe"
} else {
@@ -141,6 +139,12 @@ template("proto_library") {
]
}
+ if (defined(invoker.import_dirs)) {
+ foreach(path, invoker.import_dirs) {
+ args += [ "--import-dir=" + rebase_path(path, root_build_dir) ]
+ }
+ }
+
if (generate_with_plugin) {
plugin_path_rebased = rebase_path(plugin_path, root_build_dir)
plugin_out_args = ""
@@ -187,10 +191,7 @@ template("proto_library") {
"visibility",
])
- # Exclude the config.descriptor file which is an output for some reason.
- set_sources_assignment_filter([ "*.descriptor" ])
sources = get_target_outputs(":$action_name")
- set_sources_assignment_filter(sources_assignment_filter)
# configs -= [ "//gn/standalone:extra_warnings" ]
if (defined(invoker.extra_configs)) {
diff --git a/deps/v8/include/DIR_METADATA b/deps/v8/include/DIR_METADATA
new file mode 100644
index 0000000000..a27ea1b53a
--- /dev/null
+++ b/deps/v8/include/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>API"
+} \ No newline at end of file
diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS
index 9bb043db7c..19c21fbf52 100644
--- a/deps/v8/include/OWNERS
+++ b/deps/v8/include/OWNERS
@@ -21,5 +21,3 @@ per-file js_protocol.pdl=pfeldman@chromium.org
per-file v8-version.h=file:../INFRA_OWNERS
per-file v8-version.h=hablich@chromium.org
per-file v8-version.h=vahl@chromium.org
-
-# COMPONENT: Blink>JavaScript>API
diff --git a/deps/v8/include/cppgc/DEPS b/deps/v8/include/cppgc/DEPS
index 04c343de27..861d1187ba 100644
--- a/deps/v8/include/cppgc/DEPS
+++ b/deps/v8/include/cppgc/DEPS
@@ -4,4 +4,5 @@ include_rules = [
"+v8-platform.h",
"+cppgc",
"-src",
+ "+libplatform/libplatform.h",
]
diff --git a/deps/v8/include/cppgc/allocation.h b/deps/v8/include/cppgc/allocation.h
index ac5062ad01..556f313a4a 100644
--- a/deps/v8/include/cppgc/allocation.h
+++ b/deps/v8/include/cppgc/allocation.h
@@ -113,14 +113,23 @@ class MakeGarbageCollectedTraitBase
};
/**
+ * Struct that can be passed to `MakeGarbageCollected()` to specify how many
+ * bytes should be appended to the allocated object.
+ */
+struct AdditionalBytes {
+ explicit AdditionalBytes(size_t bytes) : value(bytes) {}
+ const size_t value;
+};
+
+/**
* Default trait class that specifies how to construct an object of type T.
* Advanced users may override how an object is constructed using the utilities
* that are provided through MakeGarbageCollectedTraitBase.
*
* Any trait overriding construction must
- * - allocate through MakeGarbageCollectedTraitBase<T>::Allocate;
+ * - allocate through `MakeGarbageCollectedTraitBase<T>::Allocate`;
* - mark the object as fully constructed using
- * MakeGarbageCollectedTraitBase<T>::MarkObjectAsFullyConstructed;
+ * `MakeGarbageCollectedTraitBase<T>::MarkObjectAsFullyConstructed`;
*/
template <typename T>
class MakeGarbageCollectedTrait : public MakeGarbageCollectedTraitBase<T> {
@@ -139,6 +148,22 @@ class MakeGarbageCollectedTrait : public MakeGarbageCollectedTraitBase<T> {
MakeGarbageCollectedTraitBase<T>::MarkObjectAsFullyConstructed(object);
return object;
}
+
+ template <typename... Args>
+ static T* Call(AllocationHandle& handle, AdditionalBytes additional_bytes,
+ Args&&... args) {
+ static_assert(internal::IsGarbageCollectedType<T>::value,
+ "T needs to be a garbage collected object");
+ static_assert(
+ !internal::IsGarbageCollectedMixinType<T>::value ||
+ sizeof(T) <= internal::api_constants::kLargeObjectSizeThreshold,
+ "GarbageCollectedMixin may not be a large object");
+ void* memory = MakeGarbageCollectedTraitBase<T>::Allocate(
+ handle, sizeof(T) + additional_bytes.value);
+ T* object = ::new (memory) T(std::forward<Args>(args)...);
+ MakeGarbageCollectedTraitBase<T>::MarkObjectAsFullyConstructed(object);
+ return object;
+ }
};
/**
@@ -168,6 +193,25 @@ T* MakeGarbageCollected(AllocationHandle& handle, Args&&... args) {
return object;
}
+/**
+ * Constructs a managed object of type T where T transitively inherits from
+ * GarbageCollected. Created objects will have additional bytes appended to
+ * them; the allocated memory suffices for `sizeof(T) + additional_bytes`.
+ *
+ * \param additional_bytes Denotes how many bytes to append to T.
+ * \param args List of arguments with which an instance of T will be
+ * constructed.
+ * \returns an instance of type T.
+ */
+template <typename T, typename... Args>
+T* MakeGarbageCollected(AllocationHandle& handle,
+ AdditionalBytes additional_bytes, Args&&... args) {
+ T* object = MakeGarbageCollectedTrait<T>::Call(handle, additional_bytes,
+ std::forward<Args>(args)...);
+ PostConstructionCallbackTrait<T>::Call(object);
+ return object;
+}
+
} // namespace cppgc
#endif // INCLUDE_CPPGC_ALLOCATION_H_
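
For illustration, a minimal sketch of the trailing-bytes API added above.
`InlineString` and `MakeInlineString` are hypothetical names, and the sketch
assumes the appended payload needs no stricter alignment than the object
itself.

#include <cstddef>

#include "cppgc/allocation.h"
#include "cppgc/garbage-collected.h"
#include "cppgc/heap.h"

class InlineString final : public cppgc::GarbageCollected<InlineString> {
 public:
  explicit InlineString(size_t length) : length_(length) {}
  void Trace(cppgc::Visitor*) const {}

  // The payload lives in the AdditionalBytes appended right after the object.
  char* data() { return reinterpret_cast<char*>(this + 1); }

 private:
  const size_t length_;
};

// One allocation covers sizeof(InlineString) + length bytes.
InlineString* MakeInlineString(cppgc::Heap* heap, size_t length) {
  return cppgc::MakeGarbageCollected<InlineString>(
      heap->GetAllocationHandle(), cppgc::AdditionalBytes(length), length);
}
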
diff --git a/deps/v8/include/cppgc/cross-thread-persistent.h b/deps/v8/include/cppgc/cross-thread-persistent.h
new file mode 100644
index 0000000000..3d49d557c2
--- /dev/null
+++ b/deps/v8/include/cppgc/cross-thread-persistent.h
@@ -0,0 +1,311 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_CROSS_THREAD_PERSISTENT_H_
+#define INCLUDE_CPPGC_CROSS_THREAD_PERSISTENT_H_
+
+#include <atomic>
+
+#include "cppgc/internal/persistent-node.h"
+#include "cppgc/internal/pointer-policies.h"
+#include "cppgc/persistent.h"
+#include "cppgc/visitor.h"
+
+namespace cppgc {
+
+namespace internal {
+
+template <typename T, typename WeaknessPolicy, typename LocationPolicy,
+ typename CheckingPolicy>
+class BasicCrossThreadPersistent final : public PersistentBase,
+ public LocationPolicy,
+ private WeaknessPolicy,
+ private CheckingPolicy {
+ public:
+ using typename WeaknessPolicy::IsStrongPersistent;
+ using PointeeType = T;
+
+ ~BasicCrossThreadPersistent() { Clear(); }
+
+ BasicCrossThreadPersistent( // NOLINT
+ const SourceLocation& loc = SourceLocation::Current())
+ : LocationPolicy(loc) {}
+
+ BasicCrossThreadPersistent( // NOLINT
+ std::nullptr_t, const SourceLocation& loc = SourceLocation::Current())
+ : LocationPolicy(loc) {}
+
+ BasicCrossThreadPersistent( // NOLINT
+ SentinelPointer s, const SourceLocation& loc = SourceLocation::Current())
+ : PersistentBase(s), LocationPolicy(loc) {}
+
+ BasicCrossThreadPersistent( // NOLINT
+ T* raw, const SourceLocation& loc = SourceLocation::Current())
+ : PersistentBase(raw), LocationPolicy(loc) {
+ if (!IsValid(raw)) return;
+ PersistentRegionLock guard;
+ PersistentRegion& region = this->GetPersistentRegion(raw);
+ SetNode(region.AllocateNode(this, &Trace));
+ this->CheckPointer(raw);
+ }
+
+ BasicCrossThreadPersistent( // NOLINT
+ T& raw, const SourceLocation& loc = SourceLocation::Current())
+ : BasicCrossThreadPersistent(&raw, loc) {}
+
+ template <typename U, typename MemberBarrierPolicy,
+ typename MemberWeaknessTag, typename MemberCheckingPolicy,
+ typename = std::enable_if_t<std::is_base_of<T, U>::value>>
+ BasicCrossThreadPersistent( // NOLINT
+ internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
+ MemberCheckingPolicy>
+ member,
+ const SourceLocation& loc = SourceLocation::Current())
+ : BasicCrossThreadPersistent(member.Get(), loc) {}
+
+ BasicCrossThreadPersistent(
+ const BasicCrossThreadPersistent& other,
+ const SourceLocation& loc = SourceLocation::Current())
+ : BasicCrossThreadPersistent(loc) {
+ // Invoke operator=.
+ *this = other;
+ }
+
+ // Heterogeneous ctor.
+ template <typename U, typename OtherWeaknessPolicy,
+ typename OtherLocationPolicy, typename OtherCheckingPolicy,
+ typename = std::enable_if_t<std::is_base_of<T, U>::value>>
+ BasicCrossThreadPersistent( // NOLINT
+ const BasicCrossThreadPersistent<U, OtherWeaknessPolicy,
+ OtherLocationPolicy,
+ OtherCheckingPolicy>& other,
+ const SourceLocation& loc = SourceLocation::Current())
+ : BasicCrossThreadPersistent(loc) {
+ *this = other;
+ }
+
+ BasicCrossThreadPersistent(
+ BasicCrossThreadPersistent&& other,
+ const SourceLocation& loc = SourceLocation::Current()) noexcept {
+ // Invoke operator=.
+ *this = std::move(other);
+ }
+
+ BasicCrossThreadPersistent& operator=(
+ const BasicCrossThreadPersistent& other) {
+ PersistentRegionLock guard;
+ AssignUnsafe(other.Get());
+ return *this;
+ }
+
+ template <typename U, typename OtherWeaknessPolicy,
+ typename OtherLocationPolicy, typename OtherCheckingPolicy,
+ typename = std::enable_if_t<std::is_base_of<T, U>::value>>
+ BasicCrossThreadPersistent& operator=(
+ const BasicCrossThreadPersistent<U, OtherWeaknessPolicy,
+ OtherLocationPolicy,
+ OtherCheckingPolicy>& other) {
+ PersistentRegionLock guard;
+ AssignUnsafe(other.Get());
+ return *this;
+ }
+
+ BasicCrossThreadPersistent& operator=(BasicCrossThreadPersistent&& other) {
+ if (this == &other) return *this;
+ Clear();
+ PersistentRegionLock guard;
+ PersistentBase::operator=(std::move(other));
+ LocationPolicy::operator=(std::move(other));
+ if (!IsValid(GetValue())) return *this;
+ GetNode()->UpdateOwner(this);
+ other.SetValue(nullptr);
+ other.SetNode(nullptr);
+ this->CheckPointer(GetValue());
+ return *this;
+ }
+
+ BasicCrossThreadPersistent& operator=(T* other) {
+ Assign(other);
+ return *this;
+ }
+
+ // Assignment from member.
+ template <typename U, typename MemberBarrierPolicy,
+ typename MemberWeaknessTag, typename MemberCheckingPolicy,
+ typename = std::enable_if_t<std::is_base_of<T, U>::value>>
+ BasicCrossThreadPersistent& operator=(
+ internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
+ MemberCheckingPolicy>
+ member) {
+ return operator=(member.Get());
+ }
+
+ BasicCrossThreadPersistent& operator=(std::nullptr_t) {
+ Clear();
+ return *this;
+ }
+
+ BasicCrossThreadPersistent& operator=(SentinelPointer s) {
+ Assign(s);
+ return *this;
+ }
+
+ /**
+ * Returns a pointer to the stored object.
+ *
+ * Note: **Not thread-safe.**
+ *
+ * \returns a pointer to the stored object.
+ */
+ // CFI cast exemption to allow passing SentinelPointer through T* and support
+ // heterogeneous assignments between different Member and Persistent handles
+ // based on their actual types.
+ V8_CLANG_NO_SANITIZE("cfi-unrelated-cast") T* Get() const {
+ return static_cast<T*>(GetValue());
+ }
+
+ /**
+ * Clears the stored object.
+ */
+ void Clear() { Assign(nullptr); }
+
+ /**
+ * Returns a pointer to the stored object and releases it.
+ *
+ * Note: **Not thread-safe.**
+ *
+ * \returns a pointer to the stored object.
+ */
+ T* Release() {
+ T* result = Get();
+ Clear();
+ return result;
+ }
+
+ /**
+ * Conversion to boolean.
+ *
+ * Note: **Not thread-safe.**
+ *
+ * \returns true if an actual object has been stored and false otherwise.
+ */
+ explicit operator bool() const { return Get(); }
+
+ /**
+ * Conversion to object of type T.
+ *
+ * Note: **Not thread-safe.**
+ *
+ * \returns the object.
+ */
+ operator T*() const { return Get(); } // NOLINT
+
+ /**
+ * Dereferences the stored object.
+ *
+ * Note: **Not thread-safe.**
+ */
+ T* operator->() const { return Get(); }
+ T& operator*() const { return *Get(); }
+
+ private:
+ static bool IsValid(void* ptr) { return ptr && ptr != kSentinelPointer; }
+
+ static void Trace(Visitor* v, const void* ptr) {
+ const auto* handle = static_cast<const BasicCrossThreadPersistent*>(ptr);
+ v->TraceRoot(*handle, handle->Location());
+ }
+
+ void Assign(T* ptr) {
+ void* old_value = GetValue();
+ if (IsValid(old_value)) {
+ PersistentRegionLock guard;
+ PersistentRegion& region = this->GetPersistentRegion(old_value);
+ if (IsValid(ptr) && (&region == &this->GetPersistentRegion(ptr))) {
+ SetValue(ptr);
+ this->CheckPointer(ptr);
+ return;
+ }
+ region.FreeNode(GetNode());
+ SetNode(nullptr);
+ }
+ SetValue(ptr);
+ if (!IsValid(ptr)) return;
+ PersistentRegionLock guard;
+ SetNode(this->GetPersistentRegion(ptr).AllocateNode(this, &Trace));
+ this->CheckPointer(ptr);
+ }
+
+ void AssignUnsafe(T* ptr) {
+ void* old_value = GetValue();
+ if (IsValid(old_value)) {
+ PersistentRegion& region = this->GetPersistentRegion(old_value);
+ if (IsValid(ptr) && (&region == &this->GetPersistentRegion(ptr))) {
+ SetValue(ptr);
+ this->CheckPointer(ptr);
+ return;
+ }
+ region.FreeNode(GetNode());
+ SetNode(nullptr);
+ }
+ SetValue(ptr);
+ if (!IsValid(ptr)) return;
+ SetNode(this->GetPersistentRegion(ptr).AllocateNode(this, &Trace));
+ this->CheckPointer(ptr);
+ }
+
+ void ClearFromGC() const {
+ if (IsValid(GetValue())) {
+ WeaknessPolicy::GetPersistentRegion(GetValue()).FreeNode(GetNode());
+ PersistentBase::ClearFromGC();
+ }
+ }
+
+ friend class cppgc::Visitor;
+};
+
+template <typename T, typename LocationPolicy, typename CheckingPolicy>
+struct IsWeak<
+ BasicCrossThreadPersistent<T, internal::WeakCrossThreadPersistentPolicy,
+ LocationPolicy, CheckingPolicy>>
+ : std::true_type {};
+
+} // namespace internal
+
+namespace subtle {
+
+/**
+ * **DO NOT USE: Has known caveats, see below.**
+ *
+ * CrossThreadPersistent allows retaining objects from threads other than the
+ * thread the owning heap is operating on.
+ *
+ * Known caveats:
+ * - Does not protect the heap owning an object from terminating.
+ * - Reaching transitively through the graph is unsupported as objects may be
+ * moved concurrently on the thread owning the object.
+ */
+template <typename T>
+using CrossThreadPersistent = internal::BasicCrossThreadPersistent<
+ T, internal::StrongCrossThreadPersistentPolicy>;
+
+/**
+ * **DO NOT USE: Has known caveats, see below.**
+ *
+ * WeakCrossThreadPersistent allows weakly retaining objects from threads
+ * other than the thread the owning heap is operating on.
+ *
+ * Known caveats:
+ * - Does not protect the heap owning an object from terminating.
+ * - Reaching transitively through the graph is unsupported as objects may be
+ * moved concurrently on the thread owning the object.
+ */
+template <typename T>
+using WeakCrossThreadPersistent = internal::BasicCrossThreadPersistent<
+ T, internal::WeakCrossThreadPersistentPolicy>;
+
+} // namespace subtle
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_CROSS_THREAD_PERSISTENT_H_
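
Even with the DO-NOT-USE warning, a shape sketch may help. `GCed` is a
hypothetical type, and every caveat listed above (no protection against heap
termination, no transitive reachability) still applies.

#include "cppgc/cross-thread-persistent.h"
#include "cppgc/garbage-collected.h"

class GCed final : public cppgc::GarbageCollected<GCed> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

// The strong variant keeps |obj| alive across threads until the handle is
// cleared or destroyed; Get() itself is not thread-safe.
void RetainAcrossThreads(GCed* obj) {
  cppgc::subtle::CrossThreadPersistent<GCed> handle(obj);
  // ... hand |handle| off to another thread ...
  handle.Clear();
}
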
diff --git a/deps/v8/include/cppgc/custom-space.h b/deps/v8/include/cppgc/custom-space.h
index 2597a5bdef..9a8cd876c3 100644
--- a/deps/v8/include/cppgc/custom-space.h
+++ b/deps/v8/include/cppgc/custom-space.h
@@ -22,11 +22,12 @@ class CustomSpaceBase {
public:
virtual ~CustomSpaceBase() = default;
virtual CustomSpaceIndex GetCustomSpaceIndex() const = 0;
+ virtual bool IsCompactable() const = 0;
};
/**
* Base class custom spaces should directly inherit from. The class inheriting
- * from CustomSpace must define kSpaceIndex as unique space index. These
+ * from `CustomSpace` must define `kSpaceIndex` as a unique space index. These
 * indices need to form a sequence starting at 0.
*
* Example:
@@ -47,6 +48,12 @@ class CustomSpace : public CustomSpaceBase {
CustomSpaceIndex GetCustomSpaceIndex() const final {
return ConcreteCustomSpace::kSpaceIndex;
}
+ bool IsCompactable() const final {
+ return ConcreteCustomSpace::kSupportsCompaction;
+ }
+
+ protected:
+ static constexpr bool kSupportsCompaction = false;
};
/**
@@ -57,6 +64,28 @@ struct SpaceTrait {
using Space = void;
};
+namespace internal {
+
+template <typename CustomSpace>
+struct IsAllocatedOnCompactableSpaceImpl {
+ static constexpr bool value = CustomSpace::kSupportsCompaction;
+};
+
+template <>
+struct IsAllocatedOnCompactableSpaceImpl<void> {
+ // Non-custom spaces are by default not compactable.
+ static constexpr bool value = false;
+};
+
+template <typename T>
+struct IsAllocatedOnCompactableSpace {
+ public:
+ static constexpr bool value =
+ IsAllocatedOnCompactableSpaceImpl<typename SpaceTrait<T>::Space>::value;
+};
+
+} // namespace internal
+
} // namespace cppgc
#endif // INCLUDE_CPPGC_CUSTOM_SPACE_H_
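
A hypothetical sketch of opting into compaction with the new
`kSupportsCompaction`/`IsCompactable()` machinery; `CompactableSpace` and
`Node` are illustration-only names.

#include "cppgc/custom-space.h"

class CompactableSpace final : public cppgc::CustomSpace<CompactableSpace> {
 public:
  static constexpr cppgc::CustomSpaceIndex kSpaceIndex = 0;
  // Opt in; IsCompactable() now reports true for this space.
  static constexpr bool kSupportsCompaction = true;
};

class Node;  // Garbage-collected type to be placed on CompactableSpace.

namespace cppgc {
template <>
struct SpaceTrait<Node> {
  using Space = CompactableSpace;
};
}  // namespace cppgc

// The space instance must also be registered via HeapOptions::custom_spaces
// when the heap is created.
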
diff --git a/deps/v8/include/cppgc/default-platform.h b/deps/v8/include/cppgc/default-platform.h
index da8129a81b..28990da92e 100644
--- a/deps/v8/include/cppgc/default-platform.h
+++ b/deps/v8/include/cppgc/default-platform.h
@@ -9,74 +9,49 @@
#include <vector>
#include "cppgc/platform.h"
+#include "libplatform/libplatform.h"
#include "v8config.h" // NOLINT(build/include_directory)
namespace cppgc {
-namespace internal {
-class DefaultJob;
-} // namespace internal
-
-/**
- * Default task runner implementation. Keep posted tasks in a list that can be
- * processed by calling RunSingleTask() or RunUntilIdle().
- */
-class V8_EXPORT DefaultTaskRunner final : public cppgc::TaskRunner {
- public:
- DefaultTaskRunner() = default;
-
- DefaultTaskRunner(const DefaultTaskRunner&) = delete;
- DefaultTaskRunner& operator=(const DefaultTaskRunner&) = delete;
-
- void PostTask(std::unique_ptr<cppgc::Task> task) override;
- void PostDelayedTask(std::unique_ptr<cppgc::Task> task, double) override;
-
- bool NonNestableTasksEnabled() const final { return false; }
- bool NonNestableDelayedTasksEnabled() const final { return false; }
- void PostNonNestableTask(std::unique_ptr<cppgc::Task> task) override;
- void PostNonNestableDelayedTask(std::unique_ptr<cppgc::Task> task,
- double) override;
-
- void PostIdleTask(std::unique_ptr<cppgc::IdleTask> task) override;
- bool IdleTasksEnabled() override { return true; }
-
- bool RunSingleTask();
- bool RunSingleIdleTask(double duration_in_seconds);
-
- void RunUntilIdle();
-
- private:
- std::vector<std::unique_ptr<cppgc::Task>> tasks_;
- std::vector<std::unique_ptr<cppgc::IdleTask>> idle_tasks_;
-};
-
/**
- * Default platform implementation that uses std::thread for spawning job tasks.
+ * Platform provided by cppgc. Uses V8's DefaultPlatform provided by
+ * libplatform internally. Exception: `GetForegroundTaskRunner()`, see below.
*/
-class V8_EXPORT DefaultPlatform final : public Platform {
+class V8_EXPORT DefaultPlatform : public Platform {
public:
- DefaultPlatform();
- ~DefaultPlatform() noexcept override;
-
- cppgc::PageAllocator* GetPageAllocator() final;
-
- double MonotonicallyIncreasingTime() final;
-
- std::shared_ptr<cppgc::TaskRunner> GetForegroundTaskRunner() final;
+ using IdleTaskSupport = v8::platform::IdleTaskSupport;
+ explicit DefaultPlatform(
+ int thread_pool_size = 0,
+ IdleTaskSupport idle_task_support = IdleTaskSupport::kDisabled)
+ : v8_platform_(v8::platform::NewDefaultPlatform(thread_pool_size,
+ idle_task_support)) {}
+
+ cppgc::PageAllocator* GetPageAllocator() override {
+ return v8_platform_->GetPageAllocator();
+ }
+
+ double MonotonicallyIncreasingTime() override {
+ return v8_platform_->MonotonicallyIncreasingTime();
+ }
+
+ std::shared_ptr<cppgc::TaskRunner> GetForegroundTaskRunner() override {
+ // V8's default platform creates a new task runner when passed the
+ // `v8::Isolate` pointer the first time. For non-default platforms this will
+ // require getting the appropriate task runner.
+ return v8_platform_->GetForegroundTaskRunner(kNoIsolate);
+ }
- // DefaultPlatform does not support job priorities. All jobs would be
- // assigned the same priority regardless of the cppgc::TaskPriority parameter.
std::unique_ptr<cppgc::JobHandle> PostJob(
cppgc::TaskPriority priority,
- std::unique_ptr<cppgc::JobTask> job_task) final;
+ std::unique_ptr<cppgc::JobTask> job_task) override {
+ return v8_platform_->PostJob(priority, std::move(job_task));
+ }
- void WaitAllForegroundTasks();
- void WaitAllBackgroundTasks();
+ protected:
+ static constexpr v8::Isolate* kNoIsolate = nullptr;
- private:
- std::unique_ptr<PageAllocator> page_allocator_;
- std::shared_ptr<DefaultTaskRunner> foreground_task_runner_;
- std::vector<std::shared_ptr<internal::DefaultJob>> jobs_;
+ std::unique_ptr<v8::Platform> v8_platform_;
};
} // namespace cppgc
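
A minimal setup sketch under the new delegating platform. Whether
`cppgc::InitializeProcess()` must run first depends on the cppgc revision, so
that call appears only as a comment.

#include <memory>

#include "cppgc/default-platform.h"
#include "cppgc/heap.h"

int main() {
  // Delegates to v8::platform::NewDefaultPlatform(); thread_pool_size == 0
  // lets libplatform pick a default.
  auto platform = std::make_shared<cppgc::DefaultPlatform>(
      /*thread_pool_size=*/0,
      cppgc::DefaultPlatform::IdleTaskSupport::kEnabled);
  // Possibly required first, depending on revision:
  // cppgc::InitializeProcess(platform->GetPageAllocator());
  std::unique_ptr<cppgc::Heap> heap = cppgc::Heap::Create(platform);
  // Allocate with cppgc::MakeGarbageCollected(heap->GetAllocationHandle(), ...).
  return 0;
}
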
diff --git a/deps/v8/include/cppgc/ephemeron-pair.h b/deps/v8/include/cppgc/ephemeron-pair.h
new file mode 100644
index 0000000000..47163d3071
--- /dev/null
+++ b/deps/v8/include/cppgc/ephemeron-pair.h
@@ -0,0 +1,25 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_EPHEMERON_PAIR_H_
+#define INCLUDE_CPPGC_EPHEMERON_PAIR_H_
+
+#include "cppgc/member.h"
+
+namespace cppgc {
+
+/**
+ * An ephemeron pair is used to conditionally retain an object.
+ * The `value` will be kept alive only if the `key` is alive.
+ */
+template <typename K, typename V>
+struct EphemeronPair {
+ EphemeronPair(K* k, V* v) : key(k), value(v) {}
+ WeakMember<K> key;
+ Member<V> value;
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_EPHEMERON_PAIR_H_
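
A sketch of how an `EphemeronPair` is typically embedded and traced. `Key`,
`Value`, and `Holder` are hypothetical, and tracing relies on the
`Visitor::Trace(const EphemeronPair<K, V>&)` overload added further down in
this patch.

#include "cppgc/ephemeron-pair.h"
#include "cppgc/garbage-collected.h"
#include "cppgc/visitor.h"

class Key final : public cppgc::GarbageCollected<Key> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

class Value final : public cppgc::GarbageCollected<Value> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

class Holder final : public cppgc::GarbageCollected<Holder> {
 public:
  Holder(Key* k, Value* v) : pair_(k, v) {}

  void Trace(cppgc::Visitor* visitor) const {
    // value is kept alive only as long as key is alive.
    visitor->Trace(pair_);
  }

 private:
  cppgc::EphemeronPair<Key, Value> pair_;
};
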
diff --git a/deps/v8/include/cppgc/garbage-collected.h b/deps/v8/include/cppgc/garbage-collected.h
index 3c800ef61b..d28a39074a 100644
--- a/deps/v8/include/cppgc/garbage-collected.h
+++ b/deps/v8/include/cppgc/garbage-collected.h
@@ -39,8 +39,8 @@ class GarbageCollectedBase {
} // namespace internal
/**
- * Base class for managed objects. Only descendent types of GarbageCollected
- * can be constructed using MakeGarbageCollected. Must be inherited from as
+ * Base class for managed objects. Only descendent types of `GarbageCollected`
+ * can be constructed using `MakeGarbageCollected()`. Must be inherited from as
* left-most base class.
*
* Types inheriting from GarbageCollected must provide a method of
diff --git a/deps/v8/include/cppgc/heap.h b/deps/v8/include/cppgc/heap.h
index 029158f4a5..04a55598bd 100644
--- a/deps/v8/include/cppgc/heap.h
+++ b/deps/v8/include/cppgc/heap.h
@@ -66,20 +66,20 @@ class V8_EXPORT Heap {
/**
* Options specifying Heap properties (e.g. custom spaces) when initializing a
- * heap through Heap::Create().
+ * heap through `Heap::Create()`.
*/
struct HeapOptions {
/**
* Creates reasonable defaults for instantiating a Heap.
*
- * \returns the HeapOptions that can be passed to Heap::Create().
+ * \returns the HeapOptions that can be passed to `Heap::Create()`.
*/
static HeapOptions Default() { return {}; }
/**
* Custom spaces added to heap are required to have indices forming a
- * numbered sequence starting at 0, i.e., their kSpaceIndex must correspond
- * to the index they reside in the vector.
+ * numbered sequence starting at 0, i.e., their `kSpaceIndex` must
+ * correspond to the index they reside in the vector.
*/
std::vector<std::unique_ptr<CustomSpaceBase>> custom_spaces;
@@ -89,7 +89,7 @@ class V8_EXPORT Heap {
* garbage collections using non-nestable task, which are guaranteed to have
* no interesting stack, through the provided Platform. If such tasks are
* not supported by the Platform, the embedder must take care of invoking
- * the GC through ForceGarbageCollectionSlow().
+ * the GC through `ForceGarbageCollectionSlow()`.
*/
StackSupport stack_support = StackSupport::kSupportsConservativeStackScan;
@@ -126,6 +126,10 @@ class V8_EXPORT Heap {
const char* source, const char* reason,
StackState stack_state = StackState::kMayContainHeapPointers);
+ /**
+ * \returns the opaque handle for allocating objects using
+ * `MakeGarbageCollected()`.
+ */
AllocationHandle& GetAllocationHandle();
private:
diff --git a/deps/v8/include/cppgc/internal/gc-info.h b/deps/v8/include/cppgc/internal/gc-info.h
index 3d361e6d71..9c26d6aa5b 100644
--- a/deps/v8/include/cppgc/internal/gc-info.h
+++ b/deps/v8/include/cppgc/internal/gc-info.h
@@ -8,6 +8,7 @@
#include <stdint.h>
#include "cppgc/internal/finalizer-trait.h"
+#include "cppgc/internal/name-trait.h"
#include "cppgc/trace-trait.h"
#include "v8config.h" // NOLINT(build/include_directory)
@@ -19,7 +20,8 @@ using GCInfoIndex = uint16_t;
class V8_EXPORT RegisteredGCInfoIndex final {
public:
RegisteredGCInfoIndex(FinalizationCallback finalization_callback,
- TraceCallback trace_callback, bool has_v_table);
+ TraceCallback trace_callback,
+ NameCallback name_callback, bool has_v_table);
GCInfoIndex GetIndex() const { return index_; }
private:
@@ -34,7 +36,7 @@ struct GCInfoTrait {
static_assert(sizeof(T), "T must be fully defined");
static const RegisteredGCInfoIndex registered_index(
FinalizerTrait<T>::kCallback, TraceTrait<T>::Trace,
- std::is_polymorphic<T>::value);
+ NameTrait<T>::GetName, std::is_polymorphic<T>::value);
return registered_index.GetIndex();
}
};
diff --git a/deps/v8/include/cppgc/internal/name-trait.h b/deps/v8/include/cppgc/internal/name-trait.h
new file mode 100644
index 0000000000..ae99d41c0d
--- /dev/null
+++ b/deps/v8/include/cppgc/internal/name-trait.h
@@ -0,0 +1,111 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_INTERNAL_NAME_TRAIT_H_
+#define INCLUDE_CPPGC_INTERNAL_NAME_TRAIT_H_
+
+#include <cstddef>
+
+#include "cppgc/name-provider.h"
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+namespace internal {
+
+#if CPPGC_SUPPORTS_OBJECT_NAMES && defined(__clang__)
+#define CPPGC_SUPPORTS_COMPILE_TIME_TYPENAME 1
+
+// Provides constexpr c-string storage for a name of fixed |Size| characters.
+// Automatically appends terminating 0 byte.
+template <size_t Size>
+struct NameBuffer {
+ char name[Size + 1]{};
+
+ static constexpr NameBuffer FromCString(const char* str) {
+ NameBuffer result;
+ for (size_t i = 0; i < Size; ++i) result.name[i] = str[i];
+ result.name[Size] = 0;
+ return result;
+ }
+};
+
+template <typename T>
+const char* GetTypename() {
+ static constexpr char kSelfPrefix[] =
+ "const char *cppgc::internal::GetTypename() [T =";
+ static_assert(__builtin_strncmp(__PRETTY_FUNCTION__, kSelfPrefix,
+ sizeof(kSelfPrefix) - 1) == 0,
+ "The prefix must match");
+ static constexpr const char* kTypenameStart =
+ __PRETTY_FUNCTION__ + sizeof(kSelfPrefix);
+ static constexpr size_t kTypenameSize =
+ __builtin_strlen(__PRETTY_FUNCTION__) - sizeof(kSelfPrefix) - 1;
+ // NameBuffer is an indirection that is needed to make sure that only a
+ // substring of __PRETTY_FUNCTION__ gets materialized in the binary.
+ static constexpr auto buffer =
+ NameBuffer<kTypenameSize>::FromCString(kTypenameStart);
+ return buffer.name;
+}
+
+#else
+#define CPPGC_SUPPORTS_COMPILE_TIME_TYPENAME 0
+#endif
+
+struct HeapObjectName {
+ const char* value;
+ bool name_was_hidden;
+};
+
+class V8_EXPORT NameTraitBase {
+ protected:
+ static HeapObjectName GetNameFromTypeSignature(const char*);
+};
+
+// Trait that specifies how the garbage collector retrieves the name for a
+// given object.
+template <typename T>
+class NameTrait final : public NameTraitBase {
+ public:
+ static HeapObjectName GetName(const void* obj) {
+ return GetNameFor(static_cast<const T*>(obj));
+ }
+
+ private:
+ static HeapObjectName GetNameFor(const NameProvider* name_provider) {
+ return {name_provider->GetName(), false};
+ }
+
+ static HeapObjectName GetNameFor(...) {
+#if CPPGC_SUPPORTS_COMPILE_TIME_TYPENAME
+ return {GetTypename<T>(), false};
+#elif CPPGC_SUPPORTS_OBJECT_NAMES
+
+#if defined(V8_CC_GNU)
+#define PRETTY_FUNCTION_VALUE __PRETTY_FUNCTION__
+#elif defined(V8_CC_MSVC)
+#define PRETTY_FUNCTION_VALUE __FUNCSIG__
+#else
+#define PRETTY_FUNCTION_VALUE nullptr
+#endif
+
+ static const HeapObjectName leaky_name =
+ GetNameFromTypeSignature(PRETTY_FUNCTION_VALUE);
+ return leaky_name;
+
+#undef PRETTY_FUNCTION_VALUE
+
+#else // !CPPGC_SUPPORTS_OBJECT_NAMES
+ return {NameProvider::kHiddenName, true};
+#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
+ }
+};
+
+using NameCallback = HeapObjectName (*)(const void*);
+
+} // namespace internal
+} // namespace cppgc
+
+#undef CPPGC_SUPPORTS_COMPILE_TIME_TYPENAME
+
+#endif // INCLUDE_CPPGC_INTERNAL_NAME_TRAIT_H_
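
To make the Clang-only path concrete, a hypothetical illustration of what
`GetTypename<T>()` yields when object names are supported; the exact
`__PRETTY_FUNCTION__` spelling in the comment is an assumption.

// Compiles only when CPPGC_SUPPORTS_OBJECT_NAMES is set and Clang is used.
#include "cppgc/internal/name-trait.h"

namespace my_app {
class Widget {};
}  // namespace my_app

void Example() {
  // Inside GetTypename<my_app::Widget>(), __PRETTY_FUNCTION__ reads roughly:
  //   "const char *cppgc::internal::GetTypename() [T = my_app::Widget]"
  // NameBuffer copies only the "my_app::Widget" substring into the binary.
  const char* name = cppgc::internal::GetTypename<my_app::Widget>();
  (void)name;  // name == "my_app::Widget"
}
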
diff --git a/deps/v8/include/cppgc/internal/persistent-node.h b/deps/v8/include/cppgc/internal/persistent-node.h
index e05efe3621..685d8a2d6a 100644
--- a/deps/v8/include/cppgc/internal/persistent-node.h
+++ b/deps/v8/include/cppgc/internal/persistent-node.h
@@ -19,7 +19,7 @@ class Visitor;
namespace internal {
-// PersistentNode represesents a variant of two states:
+// PersistentNode represents a variant of two states:
// 1) traceable node with a back pointer to the Persistent object;
// 2) freelist entry.
class PersistentNode final {
@@ -109,6 +109,14 @@ class V8_EXPORT PersistentRegion final {
PersistentNode* free_list_head_ = nullptr;
};
+// CrossThreadPersistent uses PersistentRegion but protects it using this lock
+// when needed.
+class V8_EXPORT PersistentRegionLock final {
+ public:
+ PersistentRegionLock();
+ ~PersistentRegionLock();
+};
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/include/cppgc/internal/pointer-policies.h b/deps/v8/include/cppgc/internal/pointer-policies.h
index a6cd4e8586..50c5d5768f 100644
--- a/deps/v8/include/cppgc/internal/pointer-policies.h
+++ b/deps/v8/include/cppgc/internal/pointer-policies.h
@@ -62,6 +62,7 @@ class KeepLocationPolicy {
constexpr const SourceLocation& Location() const { return location_; }
protected:
+ constexpr KeepLocationPolicy() = default;
constexpr explicit KeepLocationPolicy(const SourceLocation& location)
: location_(location) {}
@@ -82,6 +83,7 @@ class IgnoreLocationPolicy {
constexpr SourceLocation Location() const { return {}; }
protected:
+ constexpr IgnoreLocationPolicy() = default;
constexpr explicit IgnoreLocationPolicy(const SourceLocation&) {}
};
@@ -93,17 +95,29 @@ using DefaultLocationPolicy = IgnoreLocationPolicy;
struct StrongPersistentPolicy {
using IsStrongPersistent = std::true_type;
-
static V8_EXPORT PersistentRegion& GetPersistentRegion(void* object);
};
struct WeakPersistentPolicy {
using IsStrongPersistent = std::false_type;
+ static V8_EXPORT PersistentRegion& GetPersistentRegion(void* object);
+};
+
+struct StrongCrossThreadPersistentPolicy {
+ using IsStrongPersistent = std::true_type;
+ static V8_EXPORT PersistentRegion& GetPersistentRegion(void* object);
+};
+struct WeakCrossThreadPersistentPolicy {
+ using IsStrongPersistent = std::false_type;
static V8_EXPORT PersistentRegion& GetPersistentRegion(void* object);
};
-// Persistent/Member forward declarations.
+// Forward declarations setting up the default policies.
+template <typename T, typename WeaknessPolicy,
+ typename LocationPolicy = DefaultLocationPolicy,
+ typename CheckingPolicy = DisabledCheckingPolicy>
+class BasicCrossThreadPersistent;
template <typename T, typename WeaknessPolicy,
typename LocationPolicy = DefaultLocationPolicy,
typename CheckingPolicy = DefaultCheckingPolicy>
diff --git a/deps/v8/include/cppgc/liveness-broker.h b/deps/v8/include/cppgc/liveness-broker.h
index 883be46240..b69a69535b 100644
--- a/deps/v8/include/cppgc/liveness-broker.h
+++ b/deps/v8/include/cppgc/liveness-broker.h
@@ -19,7 +19,7 @@ class LivenessBrokerFactory;
/**
* The broker is passed to weak callbacks to allow (temporarily) querying
* the liveness state of an object. References to non-live objects must be
- * cleared when IsHeapObjectAlive() returns false.
+ * cleared when `IsHeapObjectAlive()` returns false.
*
* \code
* class GCedWithCustomWeakCallback final
diff --git a/deps/v8/include/cppgc/member.h b/deps/v8/include/cppgc/member.h
index 116a2c7e54..84e81251c2 100644
--- a/deps/v8/include/cppgc/member.h
+++ b/deps/v8/include/cppgc/member.h
@@ -24,7 +24,7 @@ class MemberBase {
MemberBase() = default;
explicit MemberBase(void* value) : raw_(value) {}
- void* const* GetRawSlot() const { return &raw_; }
+ void** GetRawSlot() const { return &raw_; }
void* GetRaw() const { return raw_; }
void SetRaw(void* value) { raw_ = value; }
@@ -178,6 +178,10 @@ class BasicMember final : private MemberBase, private CheckingPolicy {
return result;
}
+ const T** GetSlotForTesting() const {
+ return reinterpret_cast<const T**>(const_cast<const void**>(GetRawSlot()));
+ }
+
private:
T* GetRawAtomic() const {
return static_cast<T*>(MemberBase::GetRawAtomic());
diff --git a/deps/v8/include/cppgc/name-provider.h b/deps/v8/include/cppgc/name-provider.h
new file mode 100644
index 0000000000..8b70b8ea5e
--- /dev/null
+++ b/deps/v8/include/cppgc/name-provider.h
@@ -0,0 +1,65 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_CPPGC_NAME_PROVIDER_H_
+#define INCLUDE_CPPGC_NAME_PROVIDER_H_
+
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace cppgc {
+
+/**
+ * NameProvider allows attaching a human-readable name to garbage-collected
+ * objects.
+ *
+ * There are two cases of names to distinguish:
+ * a. Names explicitly specified through NameProvider. Such names are always
+ *    preserved in the system.
+ * b. Internal names that Oilpan infers from a C++ type on the class hierarchy
+ *    of the object. This is not necessarily the type of the actually
+ *    instantiated object.
+ *
+ * Depending on the build configuration, Oilpan may hide names of case b.,
+ * i.e., represent them with kHiddenName, to avoid exposing internal details.
+ */
+class V8_EXPORT NameProvider {
+ public:
+ /**
+ * Name that is used when hiding internals.
+ */
+ static constexpr const char kHiddenName[] = "InternalNode";
+
+ /**
+ * Name that is used in case compiler support is missing for composing a name
+ * from C++ types.
+ */
+ static constexpr const char kNoNameDeducible[] = "<No name>";
+
+ /**
+ * Indicates whether internal names are hidden or not.
+ *
+ * @returns true if C++ names should be hidden and represented by kHiddenName.
+ */
+ static constexpr bool HideInternalNames() {
+#if CPPGC_SUPPORTS_OBJECT_NAMES
+ return false;
+#else // !CPPGC_SUPPORTS_OBJECT_NAMES
+ return true;
+#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
+ }
+
+ virtual ~NameProvider() = default;
+
+ /**
+ * Specifies a name for the garbage-collected object. Such names will never
+ * be hidden, as they are explicitly specified by the user of this API.
+ *
+ * @returns a human-readable name for the object.
+ */
+ virtual const char* GetName() const = 0;
+};
+
+} // namespace cppgc
+
+#endif // INCLUDE_CPPGC_NAME_PROVIDER_H_
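
A sketch of case a. above: a hypothetical garbage-collected type that inherits
from `NameProvider` so its name is never hidden.

#include "cppgc/garbage-collected.h"
#include "cppgc/name-provider.h"

class NamedNode final : public cppgc::GarbageCollected<NamedNode>,
                        public cppgc::NameProvider {
 public:
  void Trace(cppgc::Visitor*) const {}

  // Reported as-is in heap dumps, regardless of build configuration.
  const char* GetName() const final { return "NamedNode"; }
};
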
diff --git a/deps/v8/include/cppgc/platform.h b/deps/v8/include/cppgc/platform.h
index b6c21bdec0..fb0b6b2082 100644
--- a/deps/v8/include/cppgc/platform.h
+++ b/deps/v8/include/cppgc/platform.h
@@ -51,22 +51,23 @@ class V8_EXPORT Platform {
}
/**
- * Posts |job_task| to run in parallel. Returns a JobHandle associated with
- * the Job, which can be joined or canceled.
+ * Posts `job_task` to run in parallel. Returns a `JobHandle` associated with
+ * the `Job`, which can be joined or canceled.
* This avoids degenerate cases:
- * - Calling CallOnWorkerThread() for each work item, causing significant
+ * - Calling `CallOnWorkerThread()` for each work item, causing significant
* overhead.
- * - Fixed number of CallOnWorkerThread() calls that split the work and might
- * run for a long time. This is problematic when many components post
+ * - Fixed number of `CallOnWorkerThread()` calls that split the work and
+ * might run for a long time. This is problematic when many components post
* "num cores" tasks and all expect to use all the cores. In these cases,
* the scheduler lacks context to be fair to multiple same-priority requests
* and/or ability to request lower priority work to yield when high priority
* work comes in.
- * A canonical implementation of |job_task| looks like:
+ * A canonical implementation of `job_task` looks like:
+ * \code
* class MyJobTask : public JobTask {
* public:
* MyJobTask(...) : worker_queue_(...) {}
- * // JobTask:
+ * // JobTask implementation.
* void Run(JobDelegate* delegate) override {
* while (!delegate->ShouldYield()) {
* // Smallest unit of work.
@@ -80,28 +81,33 @@ class V8_EXPORT Platform {
* return worker_queue_.GetSize(); // Thread safe.
* }
* };
+ *
+ * // ...
* auto handle = PostJob(TaskPriority::kUserVisible,
* std::make_unique<MyJobTask>(...));
* handle->Join();
+ * \endcode
*
- * PostJob() and methods of the returned JobHandle/JobDelegate, must never be
- * called while holding a lock that could be acquired by JobTask::Run or
- * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is
- * because [1] JobTask::GetMaxConcurrency may be invoked while holding
- * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock (B)
- * if that lock is *never* held while calling back into JobHandle from any
- * thread (A=>B/B=>A deadlock) and [2] JobTask::Run or
- * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle
- * (B=>JobHandle::foo=>B deadlock).
+ * `PostJob()` and methods of the returned JobHandle/JobDelegate, must never
+ * be called while holding a lock that could be acquired by `JobTask::Run()`
+ * or `JobTask::GetMaxConcurrency()` -- that could result in a deadlock. This
+ * is because (1) `JobTask::GetMaxConcurrency()` may be invoked while holding
+ * internal lock (A), hence `JobTask::GetMaxConcurrency()` can only use a lock
+ * (B) if that lock is *never* held while calling back into `JobHandle` from
+ * any thread (A=>B/B=>A deadlock) and (2) `JobTask::Run()` or
+ * `JobTask::GetMaxConcurrency()` may be invoked synchronously from
+ * `JobHandle` (B=>JobHandle::foo=>B deadlock).
*
- * A sufficient PostJob() implementation that uses the default Job provided in
- * libplatform looks like:
- * std::unique_ptr<JobHandle> PostJob(
- * TaskPriority priority, std::unique_ptr<JobTask> job_task) override {
- * return std::make_unique<DefaultJobHandle>(
- * std::make_shared<DefaultJobState>(
- * this, std::move(job_task), kNumThreads));
+ * A sufficient `PostJob()` implementation that uses the default Job provided
+ * in libplatform looks like:
+ * \code
+ * std::unique_ptr<JobHandle> PostJob(
+ * TaskPriority priority, std::unique_ptr<JobTask> job_task) override {
+ * return std::make_unique<DefaultJobHandle>(
+ * std::make_shared<DefaultJobState>(
+ * this, std::move(job_task), kNumThreads));
* }
+ * \endcode
*/
virtual std::unique_ptr<JobHandle> PostJob(
TaskPriority priority, std::unique_ptr<JobTask> job_task) {
diff --git a/deps/v8/include/cppgc/source-location.h b/deps/v8/include/cppgc/source-location.h
index 139c9d86c0..29d69b0a13 100644
--- a/deps/v8/include/cppgc/source-location.h
+++ b/deps/v8/include/cppgc/source-location.h
@@ -25,7 +25,7 @@ namespace cppgc {
/**
* Encapsulates source location information. Mimics C++20's
- * std::source_location.
+ * `std::source_location`.
*/
class V8_EXPORT SourceLocation final {
public:
diff --git a/deps/v8/include/cppgc/trace-trait.h b/deps/v8/include/cppgc/trace-trait.h
index b0a7c7235c..e33d3ad36b 100644
--- a/deps/v8/include/cppgc/trace-trait.h
+++ b/deps/v8/include/cppgc/trace-trait.h
@@ -55,8 +55,6 @@ struct V8_EXPORT TraceTraitFromInnerAddressImpl {
static TraceDescriptor GetTraceDescriptor(const void* address);
};
-} // namespace internal
-
/**
* Trait specifying how the garbage collector processes an object of type T.
*
@@ -64,7 +62,7 @@ struct V8_EXPORT TraceTraitFromInnerAddressImpl {
* type.
*/
template <typename T>
-struct TraceTrait {
+struct TraceTraitBase {
static_assert(internal::IsTraceableV<T>, "T must have a Trace() method");
/**
@@ -89,6 +87,11 @@ struct TraceTrait {
}
};
+} // namespace internal
+
+template <typename T>
+struct TraceTrait : public internal::TraceTraitBase<T> {};
+
namespace internal {
template <typename T>
diff --git a/deps/v8/include/cppgc/visitor.h b/deps/v8/include/cppgc/visitor.h
index c671c55e05..01f5f20e52 100644
--- a/deps/v8/include/cppgc/visitor.h
+++ b/deps/v8/include/cppgc/visitor.h
@@ -5,6 +5,8 @@
#ifndef INCLUDE_CPPGC_VISITOR_H_
#define INCLUDE_CPPGC_VISITOR_H_
+#include "cppgc/custom-space.h"
+#include "cppgc/ephemeron-pair.h"
#include "cppgc/garbage-collected.h"
#include "cppgc/internal/logging.h"
#include "cppgc/internal/pointer-policies.h"
@@ -12,17 +14,20 @@
#include "cppgc/member.h"
#include "cppgc/source-location.h"
#include "cppgc/trace-trait.h"
+#include "cppgc/type-traits.h"
namespace cppgc {
namespace internal {
template <typename T, typename WeaknessPolicy, typename LocationPolicy,
typename CheckingPolicy>
+class BasicCrossThreadPersistent;
+template <typename T, typename WeaknessPolicy, typename LocationPolicy,
+ typename CheckingPolicy>
class BasicPersistent;
class ConservativeTracingVisitor;
class VisitorBase;
class VisitorFactory;
-
} // namespace internal
using WeakCallback = void (*)(const LivenessBroker&, const void*);
@@ -44,7 +49,7 @@ using WeakCallback = void (*)(const LivenessBroker&, const void*);
* };
* \endcode
*/
-class Visitor {
+class V8_EXPORT Visitor {
public:
class Key {
private:
@@ -78,6 +83,8 @@ class Visitor {
static_assert(sizeof(T), "Pointee type must be fully defined.");
static_assert(internal::IsGarbageCollectedType<T>::value,
"T must be GarbageCollected or GarbageCollectedMixin type");
+ static_assert(!internal::IsAllocatedOnCompactableSpace<T>::value,
+ "Weak references to compactable objects are not allowed");
const T* value = weak_member.GetRawAtomic();
@@ -86,8 +93,7 @@ class Visitor {
return;
}
- // TODO(chromium:1056170): DCHECK (or similar) for deleted values as they
- // should come in at a different path.
+ CPPGC_DCHECK(value != kSentinelPointer);
VisitWeak(value, TraceTrait<T>::GetTraceDescriptor(value),
&HandleWeak<WeakMember<T>>, &weak_member);
}
@@ -122,6 +128,74 @@ class Visitor {
}
/**
+ * Trace method for EphemeronPair.
+ *
+ * \param ephemeron_pair EphemeronPair reference weakly retaining a key object
+ * and strongly retaining a value object in case the key object is alive.
+ */
+ template <typename K, typename V>
+ void Trace(const EphemeronPair<K, V>& ephemeron_pair) {
+ TraceEphemeron(ephemeron_pair.key, ephemeron_pair.value.GetRawAtomic());
+ }
+
+ /**
+ * Trace method for ephemerons. Used for tracing a raw ephemeron in which the
+ * key and value are kept separately.
+ *
+ * \param key WeakMember reference weakly retaining a key object.
+ * \param value Raw pointer to a value object; the value is kept alive only
+ * while the key object is alive.
+ */
+ template <typename K, typename V>
+ void TraceEphemeron(const WeakMember<K>& key, const V* value) {
+ TraceDescriptor value_desc = TraceTrait<V>::GetTraceDescriptor(value);
+ VisitEphemeron(key, value_desc);
+ }
+
+ /**
+ * Trace method that strongifies a WeakMember.
+ *
+ * \param weak_member WeakMember reference retaining an object.
+ */
+ template <typename T>
+ void TraceStrongly(const WeakMember<T>& weak_member) {
+ const T* value = weak_member.GetRawAtomic();
+ CPPGC_DCHECK(value != kSentinelPointer);
+ Trace(value);
+ }
+
+ /**
+ * Trace method for weak containers.
+ *
+ * \param object reference to the weak container.
+ * \param callback to be invoked.
+ * \param data custom data that is passed to the callback.
+ */
+ template <typename T>
+ void TraceWeakContainer(const T* object, WeakCallback callback,
+ const void* data) {
+ if (!object) return;
+ VisitWeakContainer(object, TraceTrait<T>::GetTraceDescriptor(object),
+ TraceTrait<T>::GetWeakTraceDescriptor(object), callback,
+ data);
+ }
+
+ /**
+ * Registers a slot containing a reference to an object allocated on a
+ * compactable space. Such references may be arbitrarily moved by the GC.
+ *
+ * \param slot location of reference to object that might be moved by the GC.
+ */
+ template <typename T>
+ void RegisterMovableReference(const T** slot) {
+ static_assert(internal::IsAllocatedOnCompactableSpace<T>::value,
+ "Only references to objects allocated on compactable spaces "
+ "should be registered as movable slots.");
+ static_assert(!internal::IsGarbageCollectedMixinTypeV<T>,
+ "Mixin types do not support compaction.");
+ HandleMovableReference(reinterpret_cast<const void**>(slot));
+ }
+
+ /**
* Registers a weak callback that is invoked during garbage collection.
*
* \param callback to be invoked.
@@ -129,13 +203,37 @@ class Visitor {
*/
virtual void RegisterWeakCallback(WeakCallback callback, const void* data) {}
+ /**
+ * Defers tracing an object from a concurrent thread to the mutator thread.
+ * Should be called by Trace methods of types that are not safe to trace
+ * concurrently.
+ *
+ * \param parameter tells the trace callback which object was deferred.
+ * \param callback to be invoked for tracing on the mutator thread.
+ * \param deferred_size size of deferred object.
+ *
+ * \returns false if the object does not need to be deferred (i.e. currently
+ * traced on the mutator thread) and true otherwise (i.e. currently traced on
+ * a concurrent thread).
+ */
+ virtual V8_WARN_UNUSED_RESULT bool DeferTraceToMutatorThreadIfConcurrent(
+ const void* parameter, TraceCallback callback, size_t deferred_size) {
+ // By default tracing is not deferred.
+ return false;
+ }
+
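A minimal sketch of the deferral protocol this hook enables, assuming a hypothetical UnsafeNode type whose Trace is not safe to run concurrently:

    #include "cppgc/garbage-collected.h"
    #include "cppgc/member.h"
    #include "cppgc/visitor.h"

    class UnsafeNode final : public cppgc::GarbageCollected<UnsafeNode> {
     public:
      void Trace(cppgc::Visitor* visitor) const {
        if (visitor->DeferTraceToMutatorThreadIfConcurrent(
                this,
                [](cppgc::Visitor* v, const void* self) {
                  static_cast<const UnsafeNode*>(self)->Trace(v);
                },
                sizeof(UnsafeNode))) {
          return;  // Deferred: the mutator thread will re-run Trace.
        }
        visitor->Trace(next_);  // Reached only on the mutator thread.
      }

     private:
      cppgc::Member<UnsafeNode> next_;
    };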
protected:
virtual void Visit(const void* self, TraceDescriptor) {}
virtual void VisitWeak(const void* self, TraceDescriptor, WeakCallback,
const void* weak_member) {}
- virtual void VisitRoot(const void*, TraceDescriptor) {}
+ virtual void VisitRoot(const void*, TraceDescriptor, const SourceLocation&) {}
virtual void VisitWeakRoot(const void* self, TraceDescriptor, WeakCallback,
- const void* weak_root) {}
+ const void* weak_root, const SourceLocation&) {}
+ virtual void VisitEphemeron(const void* key, TraceDescriptor value_desc) {}
+ virtual void VisitWeakContainer(const void* self, TraceDescriptor strong_desc,
+ TraceDescriptor weak_desc,
+ WeakCallback callback, const void* data) {}
+ virtual void HandleMovableReference(const void**) {}
private:
template <typename T, void (T::*method)(const LivenessBroker&)>
@@ -169,7 +267,8 @@ class Visitor {
if (!p.Get()) {
return;
}
- VisitRoot(p.Get(), TraceTrait<PointeeType>::GetTraceDescriptor(p.Get()));
+ VisitRoot(p.Get(), TraceTrait<PointeeType>::GetTraceDescriptor(p.Get()),
+ loc);
}
template <
@@ -182,8 +281,10 @@ class Visitor {
static_assert(internal::IsGarbageCollectedType<PointeeType>::value,
"Persistent's pointee type must be GarbageCollected or "
"GarbageCollectedMixin");
+ static_assert(!internal::IsAllocatedOnCompactableSpace<PointeeType>::value,
+ "Weak references to compactable objects are not allowed");
VisitWeakRoot(p.Get(), TraceTrait<PointeeType>::GetTraceDescriptor(p.Get()),
- &HandleWeak<WeakPersistent>, &p);
+ &HandleWeak<WeakPersistent>, &p, loc);
}
template <typename T>
@@ -198,11 +299,14 @@ class Visitor {
}
#if V8_ENABLE_CHECKS
- V8_EXPORT void CheckObjectNotInConstruction(const void* address);
+ void CheckObjectNotInConstruction(const void* address);
#endif // V8_ENABLE_CHECKS
template <typename T, typename WeaknessPolicy, typename LocationPolicy,
typename CheckingPolicy>
+ friend class internal::BasicCrossThreadPersistent;
+ template <typename T, typename WeaknessPolicy, typename LocationPolicy,
+ typename CheckingPolicy>
friend class internal::BasicPersistent;
friend class internal::ConservativeTracingVisitor;
friend class internal::VisitorBase;
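To illustrate the new ephemeron tracing entry points, a sketch of an embedder class (names hypothetical; EphemeronPair comes from the cppgc/ephemeron-pair.h added in this update):

    #include "cppgc/ephemeron-pair.h"
    #include "cppgc/garbage-collected.h"
    #include "cppgc/member.h"
    #include "cppgc/visitor.h"

    class Payload final : public cppgc::GarbageCollected<Payload> {
     public:
      void Trace(cppgc::Visitor*) const {}
    };

    class WeakKeyedCache final
        : public cppgc::GarbageCollected<WeakKeyedCache> {
     public:
      void Trace(cppgc::Visitor* visitor) const {
        visitor->Trace(pair_);                        // EphemeronPair overload
        visitor->TraceEphemeron(key_, value_.Get());  // raw key/value overload
      }

     private:
      cppgc::EphemeronPair<Payload, Payload> pair_{nullptr, nullptr};
      cppgc::WeakMember<Payload> key_;
      cppgc::Member<Payload> value_;
    };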
diff --git a/deps/v8/include/js_protocol.pdl b/deps/v8/include/js_protocol.pdl
index 4c1b567404..6971edd510 100644
--- a/deps/v8/include/js_protocol.pdl
+++ b/deps/v8/include/js_protocol.pdl
@@ -1542,15 +1542,23 @@ domain Runtime
# If executionContextId is empty, adds binding with the given name on the
# global objects of all inspected contexts, including those created later;
# bindings survive reloads.
- # If executionContextId is specified, adds binding only on global object of
- # given execution context.
# The binding function takes exactly one argument; this argument should be a
# string, and for any other input the function throws an exception.
# Each binding function call produces Runtime.bindingCalled notification.
experimental command addBinding
parameters
string name
+ # If specified, the binding is only exposed to the specified
+ # execution context. If omitted and `executionContextName` is not set,
+ # the binding is exposed to all execution contexts of the target.
+ # This parameter is mutually exclusive with `executionContextName`.
optional ExecutionContextId executionContextId
+ # If specified, the binding is exposed to the execution context with the
+ # matching name, even for contexts created after the binding is added.
+ # See also `ExecutionContext.name` and `worldName` parameter to
+ # `Page.addScriptToEvaluateOnNewDocument`.
+ # This parameter is mutually exclusive with `executionContextId`.
+ experimental optional string executionContextName
# This method does not remove binding function from global object but
# unsubscribes current runtime agent from Runtime.bindingCalled notifications.
diff --git a/deps/v8/include/v8-cppgc.h b/deps/v8/include/v8-cppgc.h
index e202293bcf..805eb015e0 100644
--- a/deps/v8/include/v8-cppgc.h
+++ b/deps/v8/include/v8-cppgc.h
@@ -11,203 +11,19 @@
namespace v8 {
-class Isolate;
-template <typename T>
-class JSMember;
-
-namespace internal {
-
-class JSMemberBaseExtractor;
-
-class V8_EXPORT JSMemberBase {
- public:
- /**
- * Returns true if the reference is empty, i.e., has not been assigned
- * object.
- */
- bool IsEmpty() const { return val_ == nullptr; }
-
- /**
- * Clears the reference. IsEmpty() will return true after this call.
- */
- inline void Reset();
-
- private:
- static internal::Address* New(v8::Isolate* isolate,
- internal::Address* object_slot,
- internal::Address** this_slot);
- static void Delete(internal::Address* object);
- static void Copy(const internal::Address* const* from_slot,
- internal::Address** to_slot);
- static void Move(internal::Address** from_slot, internal::Address** to_slot);
-
- JSMemberBase() = default;
-
- JSMemberBase(v8::Isolate* isolate, internal::Address* object_slot)
- : val_(New(isolate, object_slot, &val_)) {}
-
- inline JSMemberBase& CopyImpl(const JSMemberBase& other);
- inline JSMemberBase& MoveImpl(JSMemberBase&& other);
-
- // val_ points to a GlobalHandles node.
- internal::Address* val_ = nullptr;
-
- template <typename T>
- friend class v8::JSMember;
- friend class v8::internal::JSMemberBaseExtractor;
-};
-
-JSMemberBase& JSMemberBase::CopyImpl(const JSMemberBase& other) {
- if (this != &other) {
- Reset();
- if (!other.IsEmpty()) {
- Copy(&other.val_, &val_);
- }
- }
- return *this;
-}
-
-JSMemberBase& JSMemberBase::MoveImpl(JSMemberBase&& other) {
- if (this != &other) {
- // No call to Reset() as Move() will conditionally reset itself when needed,
- // and otherwise reuse the internal meta data.
- Move(&other.val_, &val_);
- }
- return *this;
-}
-
-void JSMemberBase::Reset() {
- if (IsEmpty()) return;
- Delete(val_);
- val_ = nullptr;
-}
-
-} // namespace internal
-
-/**
- * A traced handle without destructor that clears the handle. The handle may
- * only be used in GarbageCollected objects and must be processed in a Trace()
- * method.
- */
-template <typename T>
-class V8_EXPORT JSMember : public internal::JSMemberBase {
- static_assert(std::is_base_of<v8::Value, T>::value,
- "JSMember only supports references to v8::Value");
-
- public:
- JSMember() = default;
-
- template <typename U,
- typename = std::enable_if_t<std::is_base_of<T, U>::value>>
- JSMember(Isolate* isolate, Local<U> that)
- : internal::JSMemberBase(isolate,
- reinterpret_cast<internal::Address*>(*that)) {}
-
- JSMember(const JSMember& other) { CopyImpl(other); }
-
- template <typename U,
- typename = std::enable_if_t<std::is_base_of<T, U>::value>>
- JSMember(const JSMember<U>& other) { // NOLINT
- CopyImpl(other);
- }
-
- JSMember(JSMember&& other) { MoveImpl(std::move(other)); }
-
- template <typename U,
- typename = std::enable_if_t<std::is_base_of<T, U>::value>>
- JSMember(JSMember<U>&& other) { // NOLINT
- MoveImpl(std::move(other));
- }
-
- JSMember& operator=(const JSMember& other) { return CopyImpl(other); }
-
- template <typename U,
- typename = std::enable_if_t<std::is_base_of<T, U>::value>>
- JSMember& operator=(const JSMember<U>& other) {
- return CopyImpl(other);
- }
-
- JSMember& operator=(JSMember&& other) { return MoveImpl(other); }
-
- template <typename U,
- typename = std::enable_if_t<std::is_base_of<T, U>::value>>
- JSMember& operator=(JSMember<U>&& other) {
- return MoveImpl(other);
- }
-
- T* operator->() const { return reinterpret_cast<T*>(val_); }
- T* operator*() const { return reinterpret_cast<T*>(val_); }
-
- using internal::JSMemberBase::Reset;
-
- template <typename U,
- typename = std::enable_if_t<std::is_base_of<T, U>::value>>
- void Set(v8::Isolate* isolate, Local<U> that) {
- Reset();
- val_ = New(isolate, reinterpret_cast<internal::Address*>(*that), &val_);
- }
-};
-
-template <typename T1, typename T2,
- typename = std::enable_if_t<std::is_base_of<T2, T1>::value ||
- std::is_base_of<T1, T2>::value>>
-inline bool operator==(const JSMember<T1>& lhs, const JSMember<T2>& rhs) {
- v8::internal::Address* a = reinterpret_cast<v8::internal::Address*>(*lhs);
- v8::internal::Address* b = reinterpret_cast<v8::internal::Address*>(*rhs);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
-}
-
-template <typename T1, typename T2,
- typename = std::enable_if_t<std::is_base_of<T2, T1>::value ||
- std::is_base_of<T1, T2>::value>>
-inline bool operator!=(const JSMember<T1>& lhs, const JSMember<T2>& rhs) {
- return !(lhs == rhs);
-}
-
-template <typename T1, typename T2,
- typename = std::enable_if_t<std::is_base_of<T2, T1>::value ||
- std::is_base_of<T1, T2>::value>>
-inline bool operator==(const JSMember<T1>& lhs, const Local<T2>& rhs) {
- v8::internal::Address* a = reinterpret_cast<v8::internal::Address*>(*lhs);
- v8::internal::Address* b = reinterpret_cast<v8::internal::Address*>(*rhs);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
-}
-
-template <typename T1, typename T2,
- typename = std::enable_if_t<std::is_base_of<T2, T1>::value ||
- std::is_base_of<T1, T2>::value>>
-inline bool operator==(const Local<T1>& lhs, const JSMember<T2> rhs) {
- return rhs == lhs;
-}
-
-template <typename T1, typename T2>
-inline bool operator!=(const JSMember<T1>& lhs, const T2& rhs) {
- return !(lhs == rhs);
-}
-
-template <typename T1, typename T2>
-inline bool operator!=(const T1& lhs, const JSMember<T2>& rhs) {
- return !(lhs == rhs);
-}
-
class JSVisitor : public cppgc::Visitor {
public:
explicit JSVisitor(cppgc::Visitor::Key key) : cppgc::Visitor(key) {}
- template <typename T>
- void Trace(const JSMember<T>& ref) {
- if (ref.IsEmpty()) return;
+ void Trace(const TracedReferenceBase& ref) {
+ if (ref.IsEmptyThreadSafe()) return;
Visit(ref);
}
protected:
using cppgc::Visitor::Visit;
- virtual void Visit(const internal::JSMemberBase& ref) {}
+ virtual void Visit(const TracedReferenceBase& ref) {}
};
} // namespace v8
@@ -215,8 +31,8 @@ class JSVisitor : public cppgc::Visitor {
namespace cppgc {
template <typename T>
-struct TraceTrait<v8::JSMember<T>> {
- static void Trace(Visitor* visitor, const v8::JSMember<T>* self) {
+struct TraceTrait<v8::TracedReference<T>> {
+ static void Trace(Visitor* visitor, const v8::TracedReference<T>* self) {
static_cast<v8::JSVisitor*>(visitor)->Trace(*self);
}
};
diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h
index 1cac9a6be3..2dea8db271 100644
--- a/deps/v8/include/v8-fast-api-calls.h
+++ b/deps/v8/include/v8-fast-api-calls.h
@@ -18,6 +18,38 @@
* &v8::CFunction::Make(FastMethod));
* \endcode
*
+ * By design, fast calls are limited by the following requirements, which
+ * embedders must enforce themselves:
+ * - they should not allocate on the JS heap;
+ * - they should not trigger JS execution.
+ * To enforce them, the embedder could use the existing
+ * v8::Isolate::DisallowJavascriptExecutionScope and a utility similar to
+ * Blink's NoAllocationScope:
+ * https://source.chromium.org/chromium/chromium/src/+/master:third_party/blink/renderer/platform/heap/thread_state_scopes.h;l=16
+ *
+ * Due to these limitations, it's not directly possible to report errors by
+ * throwing a JS exception or otherwise performing an allocation. There is an
+ * alternative way of creating fast calls that supports falling back to the
+ * slow call and then performing the necessary allocation. When one creates
+ * the fast method with CFunction::MakeWithFallbackSupport instead of
+ * CFunction::Make, the fast callback receives an output variable as its last
+ * parameter, through which it can request falling back to the slow call. One
+ * might then declare the method like:
+ *
+ * \code
+ * void FastMethodWithFallback(int param, FastApiCallbackOptions& options);
+ * \endcode
+ *
+ * If the callback wants to signal an error condition or to perform an
+ * allocation, it must set options.fallback to true and return early from
+ * the fast method. Then V8 checks the value of options.fallback and if it's
+ * true, falls back to executing the SlowCallback, which is capable of reporting
+ * the error (either by throwing a JS exception or logging to the console) or
+ * doing the allocation. It's the embedder's responsibility to ensure that the
+ * fast callback is idempotent up to the point where error and fallback
+ * conditions are checked, because otherwise executing the slow callback might
+ * produce visible side-effects twice.
+ *
* An example for custom embedder type support might employ a way to wrap/
* unwrap various C++ types in JSObject instances, e.g.:
*
@@ -124,13 +156,21 @@
* - uint32_t
* - int64_t
* - uint64_t
+ * - float32_t
+ * - float64_t
+ *
* The 64-bit integer types currently have the IDL (unsigned) long long
* semantics: https://heycam.github.io/webidl/#abstract-opdef-converttoint
* In the future we'll extend the API to also provide conversions from/to
* BigInt to preserve full precision.
+ * The floating point types currently have the IDL (unrestricted) semantics,
+ * which is the only one used by WebGL. We plan to also add support for
+ * restricted floats/doubles, similar to the BigInt conversion policies.
+ * We further deviate from the specific NaN bit pattern that WebIDL prescribes
+ * (https://heycam.github.io/webidl/#es-unrestricted-float) in that Blink
+ * passes NaN values as-is, i.e. doesn't normalize them.
+ *
* To be supported types:
- * - float32_t
- * - float64_t
* - arrays of C types
* - arrays of embedder types
*/
@@ -291,14 +331,14 @@ struct GetCType<T*> : public GetCTypePointerImpl<T> {};
template <typename R, bool RaisesException, typename... Args>
class CFunctionInfoImpl : public CFunctionInfo {
public:
- static constexpr int kHasErrorArgCount = (RaisesException ? 1 : 0);
+ static constexpr int kFallbackArgCount = (RaisesException ? 1 : 0);
static constexpr int kReceiverCount = 1;
CFunctionInfoImpl()
: return_info_(internal::GetCType<R>::Get()),
- arg_count_(sizeof...(Args) - kHasErrorArgCount),
+ arg_count_(sizeof...(Args) - kFallbackArgCount),
arg_info_{internal::GetCType<Args>::Get()...} {
- static_assert(sizeof...(Args) >= kHasErrorArgCount + kReceiverCount,
- "The receiver or the has_error argument is missing.");
+ static_assert(sizeof...(Args) >= kFallbackArgCount + kReceiverCount,
+ "The receiver or the fallback argument is missing.");
static_assert(
internal::GetCType<R>::Get().GetType() == CTypeInfo::Type::kVoid,
"Only void return types are currently supported.");
@@ -342,8 +382,8 @@ class V8_EXPORT CFunction {
}
template <typename F>
- static CFunction MakeWithErrorSupport(F* func) {
- return ArgUnwrap<F*>::MakeWithErrorSupport(func);
+ static CFunction MakeWithFallbackSupport(F* func) {
+ return ArgUnwrap<F*>::MakeWithFallbackSupport(func);
}
template <typename F>
@@ -376,13 +416,17 @@ class V8_EXPORT CFunction {
return CFunction(reinterpret_cast<const void*>(func),
GetCFunctionInfo<R, false, Args...>());
}
- static CFunction MakeWithErrorSupport(R (*func)(Args...)) {
+ static CFunction MakeWithFallbackSupport(R (*func)(Args...)) {
return CFunction(reinterpret_cast<const void*>(func),
GetCFunctionInfo<R, true, Args...>());
}
};
};
+struct FastApiCallbackOptions {
+ bool fallback;
+};
+
} // namespace v8
#endif // INCLUDE_V8_FAST_API_CALLS_H_
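A sketch of the renamed fallback flow, mirroring the FastMethodWithFallback shape documented above; the divide helper, its parameters, and its registration alongside the slow callback are hypothetical:

    #include "v8-fast-api-calls.h"

    void FastDivide(int32_t a, int32_t b, v8::FastApiCallbackOptions& options) {
      if (b == 0) {
        options.fallback = true;  // Let the slow callback throw instead.
        return;
      }
      // ... fast, allocation-free work only ...
    }

    static const v8::CFunction kFastDivide =
        v8::CFunction::MakeWithFallbackSupport(FastDivide);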
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
index 6573940e2f..86fcf51877 100644
--- a/deps/v8/include/v8-inspector.h
+++ b/deps/v8/include/v8-inspector.h
@@ -181,6 +181,10 @@ class V8_EXPORT V8InspectorClient {
virtual std::unique_ptr<StringBuffer> valueSubtype(v8::Local<v8::Value>) {
return nullptr;
}
+ virtual std::unique_ptr<StringBuffer> descriptionForValueSubtype(
+ v8::Local<v8::Context>, v8::Local<v8::Value>) {
+ return nullptr;
+ }
virtual bool formatAccessorsAsProperties(v8::Local<v8::Value>) {
return false;
}
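A sketch of an embedder client overriding the new hook (class name hypothetical):

    #include <memory>
    #include "v8-inspector.h"

    class InspectorClientImpl : public v8_inspector::V8InspectorClient {
      std::unique_ptr<v8_inspector::StringBuffer> descriptionForValueSubtype(
          v8::Local<v8::Context> context, v8::Local<v8::Value> value) override {
        // Returning nullptr keeps the default description for the subtype.
        return nullptr;
      }
    };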
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
index 0d9cce82b4..06846d7005 100644
--- a/deps/v8/include/v8-internal.h
+++ b/deps/v8/include/v8-internal.h
@@ -120,6 +120,23 @@ constexpr bool HeapSandboxIsEnabled() {
using ExternalPointer_t = Address;
+// If the heap sandbox is enabled, these tag values will be XORed with the
+// external pointers in the external pointer table to prevent use of pointers of
+// the wrong type.
+enum ExternalPointerTag : Address {
+ kExternalPointerNullTag = static_cast<Address>(0ULL),
+ kArrayBufferBackingStoreTag = static_cast<Address>(1ULL << 48),
+ kTypedArrayExternalPointerTag = static_cast<Address>(2ULL << 48),
+ kDataViewDataPointerTag = static_cast<Address>(3ULL << 48),
+ kExternalStringResourceTag = static_cast<Address>(4ULL << 48),
+ kExternalStringResourceDataTag = static_cast<Address>(5ULL << 48),
+ kForeignForeignAddressTag = static_cast<Address>(6ULL << 48),
+ kNativeContextMicrotaskQueueTag = static_cast<Address>(7ULL << 48),
+ // TODO(v8:10391, saelo): Currently has to be zero so that raw zero values are
+ // also nullptr
+ kEmbedderDataSlotPayloadTag = static_cast<Address>(0ULL << 48),
+};
+
#ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
using PlatformSmiTagging = SmiTagging<kApiInt32Size>;
#else
@@ -140,6 +157,11 @@ V8_INLINE static constexpr internal::Address IntToSmi(int value) {
kSmiTag;
}
+// Converts encoded external pointer to address.
+V8_EXPORT Address DecodeExternalPointerImpl(const Isolate* isolate,
+ ExternalPointer_t pointer,
+ ExternalPointerTag tag);
+
// {obj} must be the raw tagged pointer representation of a HeapObject
// that's guaranteed to never be in ReadOnlySpace.
V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj);
@@ -168,6 +190,9 @@ class Internals {
static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize;
static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize;
static const int kEmbedderDataSlotSize = kApiSystemPointerSize;
+#ifdef V8_HEAP_SANDBOX
+ static const int kEmbedderDataSlotRawPayloadOffset = kApiTaggedSize;
+#endif
static const int kNativeContextEmbedderDataOffset = 6 * kApiTaggedSize;
static const int kFullStringRepresentationMask = 0x0f;
static const int kStringEncodingMask = 0x8;
@@ -187,6 +212,12 @@ class Internals {
static const int kIsolateRootsOffset =
kIsolateStackGuardOffset + 7 * kApiSystemPointerSize;
+ static const int kExternalPointerTableBufferOffset = 0;
+ static const int kExternalPointerTableLengthOffset =
+ kExternalPointerTableBufferOffset + kApiSystemPointerSize;
+ static const int kExternalPointerTableCapacityOffset =
+ kExternalPointerTableLengthOffset + kApiInt32Size;
+
static const int kUndefinedValueRootIndex = 4;
static const int kTheHoleValueRootIndex = 5;
static const int kNullValueRootIndex = 6;
@@ -352,15 +383,28 @@ class Internals {
#endif
}
+ V8_INLINE static Address DecodeExternalPointer(
+ const Isolate* isolate, ExternalPointer_t encoded_pointer,
+ ExternalPointerTag tag) {
+#ifdef V8_HEAP_SANDBOX
+ return internal::DecodeExternalPointerImpl(isolate, encoded_pointer, tag);
+#else
+ return encoded_pointer;
+#endif
+ }
+
V8_INLINE static internal::Address ReadExternalPointerField(
- internal::Isolate* isolate, internal::Address heap_object_ptr,
- int offset) {
- internal::Address value = ReadRawField<Address>(heap_object_ptr, offset);
+ internal::Isolate* isolate, internal::Address heap_object_ptr, int offset,
+ ExternalPointerTag tag) {
#ifdef V8_HEAP_SANDBOX
+ internal::ExternalPointer_t encoded_value =
+ ReadRawField<uint32_t>(heap_object_ptr, offset);
// We currently have to treat zero as nullptr in embedder slots.
- if (value) value = DecodeExternalPointer(isolate, value);
+ return encoded_value ? DecodeExternalPointer(isolate, encoded_value, tag)
+ : 0;
+#else
+ return ReadRawField<Address>(heap_object_ptr, offset);
#endif
- return value;
}
#ifdef V8_COMPRESS_POINTERS
@@ -368,10 +412,6 @@ class Internals {
static constexpr size_t kPtrComprHeapReservationSize = size_t{1} << 32;
static constexpr size_t kPtrComprIsolateRootAlignment = size_t{1} << 32;
- // See v8:10391 for details about V8 heap sandbox.
- static constexpr uint32_t kExternalPointerSalt =
- 0x7fffffff & ~static_cast<uint32_t>(kHeapObjectTagMask);
-
V8_INLINE static internal::Address GetRootFromOnHeapAddress(
internal::Address addr) {
return addr & -static_cast<intptr_t>(kPtrComprIsolateRootAlignment);
@@ -383,14 +423,6 @@ class Internals {
return root + static_cast<internal::Address>(static_cast<uintptr_t>(value));
}
- V8_INLINE static Address DecodeExternalPointer(
- const Isolate* isolate, ExternalPointer_t encoded_pointer) {
-#ifndef V8_HEAP_SANDBOX
- return encoded_pointer;
-#else
- return encoded_pointer ^ kExternalPointerSalt;
-#endif
- }
#endif // V8_COMPRESS_POINTERS
};
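Conceptually, the tagging scheme behaves like the XOR sketch below; the real implementation additionally routes loads through the external pointer table:

    #include <cstdint>

    using Address = uintptr_t;  // stand-in for v8::internal::Address

    Address TagPointer(Address raw, Address tag) { return raw ^ tag; }
    Address UntagPointer(Address encoded, Address tag) { return encoded ^ tag; }
    // UntagPointer(TagPointer(p, kTagA), kTagB) != p whenever the tags differ,
    // so a load with the wrong type tag yields an unusable pointer.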
diff --git a/deps/v8/include/v8-metrics.h b/deps/v8/include/v8-metrics.h
index 9734ac1b36..69784dcb0f 100644
--- a/deps/v8/include/v8-metrics.h
+++ b/deps/v8/include/v8-metrics.h
@@ -10,6 +10,7 @@
namespace v8 {
namespace metrics {
+// TODO(sartang@microsoft.com): Remove wall_clock_time_in_us.
struct WasmModuleDecoded {
bool async = false;
bool streamed = false;
@@ -17,6 +18,7 @@ struct WasmModuleDecoded {
size_t module_size_in_bytes = 0;
size_t function_count = 0;
int64_t wall_clock_time_in_us = -1;
+ int64_t wall_clock_duration_in_us = -1;
};
struct WasmModuleCompiled {
@@ -29,6 +31,7 @@ struct WasmModuleCompiled {
size_t code_size_in_bytes = 0;
size_t liftoff_bailout_count = 0;
int64_t wall_clock_time_in_us = -1;
+ int64_t wall_clock_duration_in_us = -1;
};
struct WasmModuleInstantiated {
@@ -36,12 +39,14 @@ struct WasmModuleInstantiated {
bool success = false;
size_t imported_function_count = 0;
int64_t wall_clock_time_in_us = -1;
+ int64_t wall_clock_duration_in_us = -1;
};
struct WasmModuleTieredUp {
bool lazy = false;
size_t code_size_in_bytes = 0;
int64_t wall_clock_time_in_us = -1;
+ int64_t wall_clock_duration_in_us = -1;
};
struct WasmModulesPerIsolate {
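A sketch of an event handler migrating to the new field while the deprecated one is still populated (handler name hypothetical):

    #include "v8-metrics.h"

    void OnWasmModuleDecoded(const v8::metrics::WasmModuleDecoded& event) {
      const int64_t duration_us = event.wall_clock_duration_in_us >= 0
                                      ? event.wall_clock_duration_in_us
                                      : event.wall_clock_time_in_us;
      // ... report duration_us ...
    }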
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index 6669ed9ac7..1f1497f6cc 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -216,16 +216,41 @@ class JobHandle {
*/
virtual void Cancel() = 0;
+ /*
+ * Forces all existing workers to yield ASAP but doesn't wait for them.
+ * Warning: this is dangerous if the Job's callback is bound to or has access
+ * to state which may be deleted after this call.
+ * TODO(etiennep): Cleanup once implemented by all embedders.
+ */
+ virtual void CancelAndDetach() { Cancel(); }
+
/**
- * Returns true if there's no work pending and no worker running.
+ * Returns true if there's currently no work pending and no worker running.
+ * TODO(etiennep): Deprecate IsCompleted in favor of IsActive once implemented
+ * by all embedders.
*/
virtual bool IsCompleted() = 0;
+ virtual bool IsActive() { return !IsCompleted(); }
/**
* Returns true if associated with a Job and other methods may be called.
- * Returns false after Join() or Cancel() was called.
+ * Returns false after Join() or Cancel() was called. This may return true
+ * even if no workers are running and IsCompleted() returns true.
+ * TODO(etiennep): Deprecate IsRunning in favor of IsValid once implemented by
+ * all embedders.
*/
virtual bool IsRunning() = 0;
+ virtual bool IsValid() { return IsRunning(); }
+
+ /**
+ * Returns true if job priority can be changed.
+ */
+ virtual bool UpdatePriorityEnabled() const { return false; }
+
+ /**
+ * Update this Job's priority.
+ */
+ virtual void UpdatePriority(TaskPriority new_priority) {}
};
/**
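Since the priority API is optional for embedders, callers would gate on the capability bit; a sketch:

    #include "v8-platform.h"

    void MaybeBoostPriority(v8::JobHandle& handle) {
      if (handle.UpdatePriorityEnabled())
        handle.UpdatePriority(v8::TaskPriority::kUserBlocking);
    }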
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 7ec1993734..74b6df884d 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -249,6 +249,15 @@ enum CpuProfilingLoggingMode {
kEagerLogging,
};
+// Enum for returning profiling status. Once StartProfiling is called,
+// we want to return to clients whether the profiling was able to start
+// correctly, or return a descriptive error.
+enum class CpuProfilingStatus {
+ kStarted,
+ kAlreadyStarted,
+ kErrorTooManyProfilers
+};
+
/**
* Optional profiling attributes.
*/
@@ -337,7 +346,8 @@ class V8_EXPORT CpuProfiler {
* profiles may be collected at once. Attempts to start collecting several
* profiles with the same title are silently ignored.
*/
- void StartProfiling(Local<String> title, CpuProfilingOptions options);
+ CpuProfilingStatus StartProfiling(Local<String> title,
+ CpuProfilingOptions options);
/**
* Starts profiling with the same semantics as above, except with expanded
@@ -350,7 +360,7 @@ class V8_EXPORT CpuProfiler {
* recorded by the profiler. Samples obtained after this limit will be
* discarded.
*/
- void StartProfiling(
+ CpuProfilingStatus StartProfiling(
Local<String> title, CpuProfilingMode mode, bool record_samples = false,
unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);
/**
@@ -358,7 +368,8 @@ class V8_EXPORT CpuProfiler {
* kLeafNodeLineNumbers mode, which was the previous default behavior of the
* profiler.
*/
- void StartProfiling(Local<String> title, bool record_samples = false);
+ CpuProfilingStatus StartProfiling(Local<String> title,
+ bool record_samples = false);
/**
* Stops collecting CPU profile with a given title and returns it.
@@ -806,6 +817,18 @@ class V8_EXPORT HeapProfiler {
v8::EmbedderGraph* graph,
void* data);
+ /**
+ * Callback function invoked during heap snapshot generation to retrieve
+ * the detachedness state of an object referenced by a TracedReference.
+ *
+ * The callback takes Local<Value> as parameter to allow the embedder to
+ * unpack the TracedReference into a Local and reuse that Local for different
+ * purposes.
+ */
+ using GetDetachednessCallback = EmbedderGraph::Node::Detachedness (*)(
+ v8::Isolate* isolate, const v8::Local<v8::Value>& v8_value,
+ uint16_t class_id, void* data);
+
/** Returns the number of snapshots taken. */
int GetSnapshotCount();
@@ -956,6 +979,8 @@ class V8_EXPORT HeapProfiler {
void RemoveBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
void* data);
+ void SetGetDetachednessCallback(GetDetachednessCallback callback, void* data);
+
/**
* Default value of persistent handle class ID. Must not be used to
* define a class. Can be used to reset a class of a persistent
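A sketch of consuming the status now returned by StartProfiling (helper name hypothetical):

    #include "v8-profiler.h"

    v8::CpuProfilingStatus Start(v8::CpuProfiler* profiler,
                                 v8::Local<v8::String> title) {
      v8::CpuProfilingStatus status = profiler->StartProfiling(title);
      if (status == v8::CpuProfilingStatus::kErrorTooManyProfilers) {
        // Too many concurrent profilers on this isolate; back off and retry.
      }
      return status;
    }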
diff --git a/deps/v8/include/v8-unwinder-state.h b/deps/v8/include/v8-unwinder-state.h
new file mode 100644
index 0000000000..ed9988711b
--- /dev/null
+++ b/deps/v8/include/v8-unwinder-state.h
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_UNWINDER_STATE_H_
+#define INCLUDE_V8_UNWINDER_STATE_H_
+
+namespace v8 {
+
+#ifdef V8_TARGET_ARCH_ARM
+struct CalleeSavedRegisters {
+ void* arm_r4;
+ void* arm_r5;
+ void* arm_r6;
+ void* arm_r7;
+ void* arm_r8;
+ void* arm_r9;
+ void* arm_r10;
+};
+#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || \
+ V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390
+struct CalleeSavedRegisters {};
+#else
+#error Target architecture was not detected as supported by v8
+#endif
+
+} // namespace v8
+
+#endif  // INCLUDE_V8_UNWINDER_STATE_H_
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 46810a77ce..8ecd81cb5d 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 8
-#define V8_MINOR_VERSION 7
-#define V8_BUILD_NUMBER 220
-#define V8_PATCH_LEVEL 24
+#define V8_MINOR_VERSION 8
+#define V8_BUILD_NUMBER 278
+#define V8_PATCH_LEVEL 17
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index f51fad14d9..835178204a 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -83,6 +83,7 @@ class String;
class StringObject;
class Symbol;
class SymbolObject;
+class TracedReferenceBase;
class PrimitiveArray;
class Private;
class Uint32;
@@ -104,7 +105,7 @@ class TracedGlobal;
template <class T>
class TracedReference;
template <class T>
-class TracedReferenceBase;
+class BasicTracedReference;
template<class K, class V, class T> class PersistentValueMap;
template <class K, class V, class T>
class PersistentValueMapBase;
@@ -127,6 +128,7 @@ namespace internal {
enum class ArgumentsType;
template <ArgumentsType>
class Arguments;
+class BasicTracedReferenceExtractor;
template <typename T>
class CustomArguments;
class FunctionCallbackArguments;
@@ -301,9 +303,10 @@ class Local {
V8_INLINE static Local<T> New(Isolate* isolate,
const PersistentBase<T>& that);
V8_INLINE static Local<T> New(Isolate* isolate,
- const TracedReferenceBase<T>& that);
+ const BasicTracedReference<T>& that);
private:
+ friend class TracedReferenceBase;
friend class Utils;
template<class F> friend class Eternal;
template<class F> friend class PersistentBase;
@@ -335,7 +338,7 @@ class Local {
template <class F>
friend class TracedGlobal;
template <class F>
- friend class TracedReferenceBase;
+ friend class BasicTracedReference;
template <class F>
friend class TracedReference;
@@ -825,27 +828,11 @@ using UniquePersistent = Global<T>;
template <typename T>
struct TracedGlobalTrait {};
-/**
- * A traced handle with copy and move semantics. The handle is to be used
- * together with |v8::EmbedderHeapTracer| and specifies edges from the embedder
- * into V8's heap.
- *
- * The exact semantics are:
- * - Tracing garbage collections use |v8::EmbedderHeapTracer|.
- * - Non-tracing garbage collections refer to
- * |v8::EmbedderHeapTracer::IsRootForNonTracingGC()| whether the handle should
- * be treated as root or not.
- *
- * Note that the base class cannot be instantiated itself. Choose from
- * - TracedGlobal
- * - TracedReference
- */
-template <typename T>
class TracedReferenceBase {
public:
/**
- * Returns true if this TracedReferenceBase is empty, i.e., has not been
- * assigned an object.
+ * Returns true if the reference is empty, i.e., has not been assigned
+ * an object.
*/
bool IsEmpty() const { return val_ == nullptr; }
@@ -856,36 +843,16 @@ class TracedReferenceBase {
V8_INLINE void Reset();
/**
- * Construct a Local<T> from this handle.
+ * Construct a Local<Value> from this handle.
*/
- Local<T> Get(Isolate* isolate) const { return Local<T>::New(isolate, *this); }
-
- template <class S>
- V8_INLINE bool operator==(const TracedReferenceBase<S>& that) const {
- internal::Address* a = reinterpret_cast<internal::Address*>(val_);
- internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
- }
-
- template <class S>
- V8_INLINE bool operator==(const Local<S>& that) const {
- internal::Address* a = reinterpret_cast<internal::Address*>(val_);
- internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
- }
-
- template <class S>
- V8_INLINE bool operator!=(const TracedReferenceBase<S>& that) const {
- return !operator==(that);
- }
+ V8_INLINE v8::Local<v8::Value> Get(v8::Isolate* isolate) const;
- template <class S>
- V8_INLINE bool operator!=(const Local<S>& that) const {
- return !operator==(that);
+ /**
+ * Returns true if this TracedReference is empty, i.e., has not been
+ * assigned an object. This version of IsEmpty is thread-safe.
+ */
+ bool IsEmptyThreadSafe() const {
+ return this->GetSlotThreadSafe() == nullptr;
}
/**
@@ -899,41 +866,77 @@ class TracedReferenceBase {
*/
V8_INLINE uint16_t WrapperClassId() const;
- template <class S>
- V8_INLINE TracedReferenceBase<S>& As() const {
- return reinterpret_cast<TracedReferenceBase<S>&>(
- const_cast<TracedReferenceBase<T>&>(*this));
- }
-
protected:
/**
- * Update this reference in a thread-safe way
+ * Update this reference in a thread-safe way.
*/
- void SetSlotThreadSafe(T* new_val) {
- reinterpret_cast<std::atomic<T*>*>(&val_)->store(new_val,
- std::memory_order_relaxed);
+ void SetSlotThreadSafe(void* new_val) {
+ reinterpret_cast<std::atomic<void*>*>(&val_)->store(
+ new_val, std::memory_order_relaxed);
}
/**
* Get this reference in a thread-safe way.
*/
- const T* GetSlotThreadSafe() const {
- return reinterpret_cast<std::atomic<const T*> const*>(&val_)->load(
+ const void* GetSlotThreadSafe() const {
+ return reinterpret_cast<std::atomic<const void*> const*>(&val_)->load(
std::memory_order_relaxed);
}
+ // val_ points to a GlobalHandles node.
+ internal::Address* val_ = nullptr;
+
+ friend class internal::BasicTracedReferenceExtractor;
+ template <typename F>
+ friend class Local;
+ template <typename U>
+ friend bool operator==(const TracedReferenceBase&, const Local<U>&);
+ friend bool operator==(const TracedReferenceBase&,
+ const TracedReferenceBase&);
+};
+
+/**
+ * A traced handle with copy and move semantics. The handle is to be used
+ * together with |v8::EmbedderHeapTracer| or as part of GarbageCollected objects
+ * (see v8-cppgc.h) and specifies edges from C++ objects to JavaScript.
+ *
+ * The exact semantics are:
+ * - Tracing garbage collections use |v8::EmbedderHeapTracer| or cppgc.
+ * - Non-tracing garbage collections refer to
+ * |v8::EmbedderHeapTracer::IsRootForNonTracingGC()| whether the handle should
+ * be treated as root or not.
+ *
+ * Note that the base class cannot be instantiated itself. Choose from
+ * - TracedGlobal
+ * - TracedReference
+ */
+template <typename T>
+class BasicTracedReference : public TracedReferenceBase {
+ public:
+ /**
+ * Construct a Local<T> from this handle.
+ */
+ Local<T> Get(Isolate* isolate) const { return Local<T>::New(isolate, *this); }
+
+ template <class S>
+ V8_INLINE BasicTracedReference<S>& As() const {
+ return reinterpret_cast<BasicTracedReference<S>&>(
+ const_cast<BasicTracedReference<T>&>(*this));
+ }
+
+ T* operator->() const { return reinterpret_cast<T*>(val_); }
+ T* operator*() const { return reinterpret_cast<T*>(val_); }
+
private:
enum DestructionMode { kWithDestructor, kWithoutDestructor };
/**
- * An empty TracedReferenceBase without storage cell.
+ * An empty BasicTracedReference without storage cell.
*/
- TracedReferenceBase() = default;
-
- V8_INLINE static T* New(Isolate* isolate, T* that, void* slot,
- DestructionMode destruction_mode);
+ BasicTracedReference() = default;
- T* val_ = nullptr;
+ V8_INLINE static internal::Address* New(Isolate* isolate, T* that, void* slot,
+ DestructionMode destruction_mode);
friend class EmbedderHeapTracer;
template <typename F>
@@ -944,27 +947,29 @@ class TracedReferenceBase {
template <typename F>
friend class TracedReference;
template <typename F>
+ friend class BasicTracedReference;
+ template <typename F>
friend class ReturnValue;
};
/**
* A traced handle with destructor that clears the handle. For more details see
- * TracedReferenceBase.
+ * BasicTracedReference.
*/
template <typename T>
-class TracedGlobal : public TracedReferenceBase<T> {
+class TracedGlobal : public BasicTracedReference<T> {
public:
- using TracedReferenceBase<T>::Reset;
+ using BasicTracedReference<T>::Reset;
/**
* Destructor resetting the handle.
*/
~TracedGlobal() { this->Reset(); }
/**
* An empty TracedGlobal without storage cell.
*/
- TracedGlobal() : TracedReferenceBase<T>() {}
+ TracedGlobal() : BasicTracedReference<T>() {}
/**
* Construct a TracedGlobal from a Local.
@@ -973,9 +978,9 @@ class TracedGlobal : public TracedReferenceBase<T> {
* pointing to the same object.
*/
template <class S>
- TracedGlobal(Isolate* isolate, Local<S> that) : TracedReferenceBase<T>() {
+ TracedGlobal(Isolate* isolate, Local<S> that) : BasicTracedReference<T>() {
this->val_ = this->New(isolate, that.val_, &this->val_,
- TracedReferenceBase<T>::kWithDestructor);
+ BasicTracedReference<T>::kWithDestructor);
static_assert(std::is_base_of<T, S>::value, "type check");
}
@@ -1072,7 +1077,7 @@ class TracedGlobal : public TracedReferenceBase<T> {
* A traced handle without destructor that clears the handle. The embedder needs
* to ensure that the handle is not accessed once the V8 object has been
* reclaimed. This can happen when the handle is not passed through the
- * EmbedderHeapTracer. For more details see TracedReferenceBase.
+ * EmbedderHeapTracer. For more details see BasicTracedReference.
*
* The reference assumes the embedder has precise knowledge about references at
* all times. In case V8 needs to separately handle on-stack references, the
@@ -1080,14 +1085,14 @@ class TracedGlobal : public TracedReferenceBase<T> {
* |EmbedderHeapTracer::SetStackStart|.
*/
template <typename T>
-class TracedReference : public TracedReferenceBase<T> {
+class TracedReference : public BasicTracedReference<T> {
public:
- using TracedReferenceBase<T>::Reset;
+ using BasicTracedReference<T>::Reset;
/**
* An empty TracedReference without storage cell.
*/
- TracedReference() : TracedReferenceBase<T>() {}
+ TracedReference() : BasicTracedReference<T>() {}
/**
* Construct a TracedReference from a Local.
@@ -1096,9 +1101,9 @@ class TracedReference : public TracedReferenceBase<T> {
* pointing to the same object.
*/
template <class S>
- TracedReference(Isolate* isolate, Local<S> that) : TracedReferenceBase<T>() {
+ TracedReference(Isolate* isolate, Local<S> that) : BasicTracedReference<T>() {
this->val_ = this->New(isolate, that.val_, &this->val_,
- TracedReferenceBase<T>::kWithoutDestructor);
+ BasicTracedReference<T>::kWithoutDestructor);
static_assert(std::is_base_of<T, S>::value, "type check");
}
@@ -1174,14 +1179,6 @@ class TracedReference : public TracedReferenceBase<T> {
return reinterpret_cast<TracedReference<S>&>(
const_cast<TracedReference<T>&>(*this));
}
-
- /**
- * Returns true if this TracedReference is empty, i.e., has not been
- * assigned an object. This version of IsEmpty is thread-safe.
- */
- bool IsEmptyThreadSafe() const {
- return this->GetSlotThreadSafe() == nullptr;
- }
};
/**
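With comparisons moved to the type-erased base, helpers no longer need the pointee type; a sketch:

    #include "v8.h"

    bool ReferencesSameObject(const v8::TracedReferenceBase& ref,
                              v8::Local<v8::Value> local) {
      return ref == local;  // free operator== added further down in v8.h
    }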
@@ -1609,6 +1606,14 @@ class V8_EXPORT Module : public Data {
int ScriptId();
/**
+ * Returns whether this module or any of its requested modules is async,
+ * i.e. contains top-level await.
+ *
+ * The module's status must be at least kInstantiated.
+ */
+ bool IsGraphAsync() const;
+
+ /**
* Returns whether the module is a SourceTextModule.
*/
bool IsSourceTextModule() const;
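A sketch of the new query (helper name hypothetical; the module must be at least instantiated):

    #include "v8.h"

    bool NeedsAsyncEvaluation(v8::Local<v8::Module> module) {
      // True if the module or any requested module contains top-level await.
      return module->IsGraphAsync();
    }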
@@ -1834,11 +1839,9 @@ class V8_EXPORT ScriptCompiler {
public:
enum Encoding { ONE_BYTE, TWO_BYTE, UTF8 };
-#if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */
V8_DEPRECATE_SOON(
"This class takes ownership of source_stream, so use the constructor "
"taking a unique_ptr to make these semantics clearer")
-#endif
StreamedSource(ExternalSourceStream* source_stream, Encoding encoding);
StreamedSource(std::unique_ptr<ExternalSourceStream> source_stream,
Encoding encoding);
@@ -1856,7 +1859,7 @@ class V8_EXPORT ScriptCompiler {
/**
* A streaming task which the embedder must run on a background thread to
- * stream scripts into V8. Returned by ScriptCompiler::StartStreamingScript.
+ * stream scripts into V8. Returned by ScriptCompiler::StartStreaming.
*/
class V8_EXPORT ScriptStreamingTask final {
public:
@@ -1943,9 +1946,12 @@ class V8_EXPORT ScriptCompiler {
* This API allows starting the streaming with as little data as possible, and
* the remaining data (for example, the ScriptOrigin) is passed to Compile.
*/
+ V8_DEPRECATE_SOON("Use ScriptCompiler::StartStreamingScript instead.")
static ScriptStreamingTask* StartStreamingScript(
Isolate* isolate, StreamedSource* source,
CompileOptions options = kNoCompileOptions);
+ static ScriptStreamingTask* StartStreaming(Isolate* isolate,
+ StreamedSource* source);
/**
* Compiles a streamed script (bound to current context).
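A sketch of migrating to the new entry point (helper name hypothetical):

    #include "v8.h"

    v8::ScriptCompiler::ScriptStreamingTask* StartBackgroundStreaming(
        v8::Isolate* isolate, v8::ScriptCompiler::StreamedSource* source) {
      // New two-argument form; compile options are no longer passed at start.
      return v8::ScriptCompiler::StartStreaming(isolate, source);
    }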
@@ -2266,14 +2272,25 @@ enum StateTag {
IDLE
};
+// Holds the callee-saved registers needed for the stack unwinder. It is an
+// empty struct if no registers are required. Implemented in
+// include/v8-unwinder-state.h.
+struct CalleeSavedRegisters;
+
// A RegisterState represents the current state of registers used
// by the sampling profiler API.
-struct RegisterState {
- RegisterState() : pc(nullptr), sp(nullptr), fp(nullptr), lr(nullptr) {}
+struct V8_EXPORT RegisterState {
+ RegisterState();
+ ~RegisterState();
+ RegisterState(const RegisterState& other);
+ RegisterState& operator=(const RegisterState& other);
+
void* pc; // Instruction pointer.
void* sp; // Stack pointer.
void* fp; // Frame pointer.
void* lr; // Link register (or nullptr on platforms without a link register).
+ // Callee-saved registers (or null if no callee-saved registers were stored).
+ std::unique_ptr<CalleeSavedRegisters> callee_saved;
};
// The output structure filled up by GetStackSample API function.
@@ -3118,7 +3135,7 @@ class V8_EXPORT String : public Name {
* Returns true if the string is external two-byte.
*
*/
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Use String::IsExternalTwoByte() or String::IsExternalOneByte()")
bool IsExternal() const;
@@ -3975,10 +3992,10 @@ class V8_EXPORT Object : public Value {
return object.val_->InternalFieldCount();
}
- /** Same as above, but works for TracedReferenceBase. */
+ /** Same as above, but works for BasicTracedReference. */
V8_INLINE static int InternalFieldCount(
- const TracedReferenceBase<Object>& object) {
- return object.val_->InternalFieldCount();
+ const BasicTracedReference<Object>& object) {
+ return object->InternalFieldCount();
}
/** Gets the value from an internal field. */
@@ -4002,8 +4019,8 @@ class V8_EXPORT Object : public Value {
/** Same as above, but works for TracedGlobal. */
V8_INLINE static void* GetAlignedPointerFromInternalField(
- const TracedReferenceBase<Object>& object, int index) {
- return object.val_->GetAlignedPointerFromInternalField(index);
+ const BasicTracedReference<Object>& object, int index) {
+ return object->GetAlignedPointerFromInternalField(index);
}
/**
@@ -4186,6 +4203,16 @@ class V8_EXPORT Object : public Value {
V8_INLINE static Object* Cast(Value* obj);
+ /**
+ * Support for TC39 "dynamic code brand checks" proposal.
+ *
+ * This API allows to query whether an object was constructed from a
+ * "code like" ObjectTemplate.
+ *
+ * See also: v8::ObjectTemplate::SetCodeLike
+ */
+ bool IsCodeLike(Isolate* isolate);
+
private:
Object();
static void CheckCast(Value* obj);
@@ -4299,7 +4326,7 @@ class ReturnValue {
template <typename S>
V8_INLINE void Set(const Global<S>& handle);
template <typename S>
- V8_INLINE void Set(const TracedReferenceBase<S>& handle);
+ V8_INLINE void Set(const BasicTracedReference<S>& handle);
template <typename S>
V8_INLINE void Set(const Local<S> handle);
// Fast primitive setters
@@ -4594,6 +4621,15 @@ class V8_EXPORT Function : public Object {
*/
Local<Value> GetBoundFunction() const;
+ /**
+ * Calls builtin Function.prototype.toString on this function.
+ * This is different from Value::ToString(), which may call a user-defined
+ * toString() function, and from Object::ObjectProtoToString(), which
+ * always serializes "[object Function]".
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<String> FunctionProtoToString(
+ Local<Context> context);
+
ScriptOrigin GetScriptOrigin() const;
V8_INLINE static Function* Cast(Value* obj);
static const int kLineOffsetNotFound;
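A sketch (helper name hypothetical):

    #include "v8.h"

    v8::MaybeLocal<v8::String> SourceText(v8::Local<v8::Function> fn,
                                          v8::Local<v8::Context> context) {
      return fn->FunctionProtoToString(context);  // builtin-faithful source
    }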
@@ -5966,6 +6002,10 @@ class V8_EXPORT RegExp : public Object {
/**
* Regular expression flag bits. They can be or'ed to enable a set
* of flags.
+ * The kLinear value ('l') is experimental and can only be used with
+ * --enable-experimental-regexp-engine. RegExps with the kLinear flag are
+ * guaranteed to execute in asymptotically linear time with respect to the
+ * length of the subject string.
*/
enum Flags {
kNone = 0,
@@ -5975,9 +6015,10 @@ class V8_EXPORT RegExp : public Object {
kSticky = 1 << 3,
kUnicode = 1 << 4,
kDotAll = 1 << 5,
+ kLinear = 1 << 6,
};
- static constexpr int kFlagCount = 6;
+ static constexpr int kFlagCount = 7;
/**
* Creates a regular expression from the given pattern string and
@@ -6119,7 +6160,6 @@ class V8_EXPORT Template : public Data {
void SetNativeDataProperty(
Local<String> name, AccessorGetterCallback getter,
AccessorSetterCallback setter = nullptr,
- // TODO(dcarney): gcc can't handle Local below
Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
Local<AccessorSignature> signature = Local<AccessorSignature>(),
AccessControl settings = DEFAULT,
@@ -6128,7 +6168,6 @@ class V8_EXPORT Template : public Data {
void SetNativeDataProperty(
Local<Name> name, AccessorNameGetterCallback getter,
AccessorNameSetterCallback setter = nullptr,
- // TODO(dcarney): gcc can't handle Local below
Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
Local<AccessorSignature> signature = Local<AccessorSignature>(),
AccessControl settings = DEFAULT,
@@ -6974,6 +7013,18 @@ class V8_EXPORT ObjectTemplate : public Template {
*/
void SetImmutableProto();
+ /**
+ * Support for TC39 "dynamic code brand checks" proposal.
+ *
+ * This API allows marking (and querying) objects as "code like", which
+ * causes them to be treated like Strings in the context of eval and the
+ * Function constructor.
+ *
+ * Reference: https://github.com/tc39/proposal-dynamic-code-brand-checks
+ */
+ void SetCodeLike();
+ bool IsCodeLike();
+
V8_INLINE static ObjectTemplate* Cast(Data* data);
private:
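A sketch of creating a code-like template (helper name hypothetical):

    #include "v8.h"

    v8::Local<v8::ObjectTemplate> MakeCodeLikeTemplate(v8::Isolate* isolate) {
      v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
      templ->SetCodeLike();  // instances pass the Object::IsCodeLike check
      return templ;
    }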
@@ -7180,25 +7231,25 @@ class V8_EXPORT ResourceConstraints {
/**
* Deprecated functions. Do not use in new code.
*/
- V8_DEPRECATE_SOON("Use code_range_size_in_bytes.")
+ V8_DEPRECATED("Use code_range_size_in_bytes.")
size_t code_range_size() const { return code_range_size_ / kMB; }
- V8_DEPRECATE_SOON("Use set_code_range_size_in_bytes.")
+ V8_DEPRECATED("Use set_code_range_size_in_bytes.")
void set_code_range_size(size_t limit_in_mb) {
code_range_size_ = limit_in_mb * kMB;
}
- V8_DEPRECATE_SOON("Use max_young_generation_size_in_bytes.")
+ V8_DEPRECATED("Use max_young_generation_size_in_bytes.")
size_t max_semi_space_size_in_kb() const;
- V8_DEPRECATE_SOON("Use set_max_young_generation_size_in_bytes.")
+ V8_DEPRECATED("Use set_max_young_generation_size_in_bytes.")
void set_max_semi_space_size_in_kb(size_t limit_in_kb);
- V8_DEPRECATE_SOON("Use max_old_generation_size_in_bytes.")
+ V8_DEPRECATED("Use max_old_generation_size_in_bytes.")
size_t max_old_space_size() const { return max_old_generation_size_ / kMB; }
- V8_DEPRECATE_SOON("Use set_max_old_generation_size_in_bytes.")
+ V8_DEPRECATED("Use set_max_old_generation_size_in_bytes.")
void set_max_old_space_size(size_t limit_in_mb) {
max_old_generation_size_ = limit_in_mb * kMB;
}
- V8_DEPRECATE_SOON("Zone does not pool memory any more.")
+ V8_DEPRECATED("Zone does not pool memory any more.")
size_t max_zone_pool_size() const { return max_zone_pool_size_; }
- V8_DEPRECATE_SOON("Zone does not pool memory any more.")
+ V8_DEPRECATED("Zone does not pool memory any more.")
void set_max_zone_pool_size(size_t bytes) { max_zone_pool_size_ = bytes; }
private:
@@ -7382,6 +7433,7 @@ class PromiseRejectMessage {
typedef void (*PromiseRejectCallback)(PromiseRejectMessage message);
// --- Microtasks Callbacks ---
+V8_DEPRECATED("Use *WithData version.")
typedef void (*MicrotasksCompletedCallback)(Isolate*);
typedef void (*MicrotasksCompletedCallbackWithData)(Isolate*, void*);
typedef void (*MicrotaskCallback)(void* data);
@@ -7547,11 +7599,15 @@ struct ModifyCodeGenerationFromStringsResult {
/**
* Callback to check if codegen is allowed from a source object, and convert
- * the source to string if necessary.See ModifyCodeGenerationFromStrings.
+ * the source to string if necessary. See: ModifyCodeGenerationFromStrings.
*/
typedef ModifyCodeGenerationFromStringsResult (
*ModifyCodeGenerationFromStringsCallback)(Local<Context> context,
Local<Value> source);
+typedef ModifyCodeGenerationFromStringsResult (
+ *ModifyCodeGenerationFromStringsCallback2)(Local<Context> context,
+ Local<Value> source,
+ bool is_code_like);
// --- WebAssembly compilation callbacks ---
typedef bool (*ExtensionCallback)(const FunctionCallbackInfo<Value>&);
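A sketch of a callback using the new is_code_like bit; the admission policy is illustrative only, and the result's field layout is assumed from the struct defined above this hunk:

    #include "v8.h"

    v8::ModifyCodeGenerationFromStringsResult AllowStringsAndCodeLike(
        v8::Local<v8::Context> context, v8::Local<v8::Value> source,
        bool is_code_like) {
      // Admit plain strings plus objects marked via ObjectTemplate::SetCodeLike;
      // leave the source unmodified.
      return {source->IsString() || is_code_like, {}};
    }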
@@ -7997,7 +8053,7 @@ class V8_EXPORT EmbedderHeapTracer {
virtual void RegisterV8References(
const std::vector<std::pair<void*, void*> >& embedder_fields) = 0;
- void RegisterEmbedderReference(const TracedReferenceBase<v8::Data>& ref);
+ void RegisterEmbedderReference(const BasicTracedReference<v8::Data>& ref);
/**
* Called at the beginning of a GC cycle.
@@ -9135,7 +9191,7 @@ class V8_EXPORT Isolate {
/**
* An alias for PerformMicrotaskCheckpoint.
*/
- V8_DEPRECATE_SOON("Use PerformMicrotaskCheckpoint.")
+ V8_DEPRECATED("Use PerformMicrotaskCheckpoint.")
void RunMicrotasks() { PerformMicrotaskCheckpoint(); }
/**
@@ -9179,7 +9235,7 @@ class V8_EXPORT Isolate {
* Executing scripts inside the callback will not re-trigger microtasks and
* the callback.
*/
- V8_DEPRECATE_SOON("Use *WithData version.")
+ V8_DEPRECATED("Use *WithData version.")
void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
void AddMicrotasksCompletedCallback(
MicrotasksCompletedCallbackWithData callback, void* data = nullptr);
@@ -9187,7 +9243,7 @@ class V8_EXPORT Isolate {
/**
* Removes callback that was installed by AddMicrotasksCompletedCallback.
*/
- V8_DEPRECATE_SOON("Use *WithData version.")
+ V8_DEPRECATED("Use *WithData version.")
void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
void RemoveMicrotasksCompletedCallback(
MicrotasksCompletedCallbackWithData callback, void* data = nullptr);
@@ -9368,6 +9424,12 @@ class V8_EXPORT Isolate {
void GetCodeRange(void** start, size_t* length_in_bytes);
/**
+ * As GetCodeRange, but for embedded builtins (these live in a distinct
+ * memory region from other V8 Code objects).
+ */
+ void GetEmbeddedCodeRange(const void** start, size_t* length_in_bytes);
+
+ /**
* Returns the JSEntryStubs necessary for use with the Unwinder API.
*/
JSEntryStubs GetJSEntryStubs();
@@ -9429,8 +9491,15 @@ class V8_EXPORT Isolate {
"See http://crbug.com/v8/10096.")
void SetAllowCodeGenerationFromStringsCallback(
AllowCodeGenerationFromStringsCallback callback);
+ V8_DEPRECATE_SOON(
+ "Use Isolate::SetModifyCodeGenerationFromStringsCallback with "
+ "ModifyCodeGenerationFromStringsCallback2 instead. See "
+ "http://crbug.com/1096017 and TC39 Dynamic Code Brand Checks proposal "
+ "at https://github.com/tc39/proposal-dynamic-code-brand-checks.")
void SetModifyCodeGenerationFromStringsCallback(
ModifyCodeGenerationFromStringsCallback callback);
+ void SetModifyCodeGenerationFromStringsCallback(
+ ModifyCodeGenerationFromStringsCallback2 callback);
/**
* Set the callback to invoke to check if wasm code generation should
@@ -9818,6 +9887,12 @@ class V8_EXPORT V8 {
*/
static void GetSharedMemoryStatistics(SharedMemoryStatistics* statistics);
+ /**
+ * Notifies V8 that the process is cross-origin-isolated, which enables
+ * defining the SharedArrayBuffer function on the global object of Contexts.
+ */
+ static void SetIsCrossOriginIsolated();
+
private:
V8();
@@ -9867,6 +9942,8 @@ class V8_EXPORT V8 {
static void ToLocalEmpty();
static void InternalFieldOutOfBounds(int index);
template <class T>
+ friend class BasicTracedReference;
+ template <class T>
friend class Global;
template <class T> friend class Local;
template <class T>
@@ -9874,9 +9951,8 @@ class V8_EXPORT V8 {
template <class T>
friend class Maybe;
template <class T>
- friend class TracedReferenceBase;
- template <class T>
friend class TracedGlobal;
+ friend class TracedReferenceBase;
template <class T>
friend class TracedReference;
template <class T>
@@ -10417,12 +10493,9 @@ class V8_EXPORT Context {
*/
void Exit();
- /** Returns the isolate associated with a current context. */
+ /** Returns an isolate associated with a current context. */
Isolate* GetIsolate();
- /** Returns the microtask queue associated with a current context. */
- MicrotaskQueue* GetMicrotaskQueue();
-
/**
* The field at kDebugIdIndex used to be reserved for the inspector.
* It now serves no purpose.
@@ -10777,8 +10850,8 @@ Local<T> Local<T>::New(Isolate* isolate, const PersistentBase<T>& that) {
}
template <class T>
-Local<T> Local<T>::New(Isolate* isolate, const TracedReferenceBase<T>& that) {
- return New(isolate, that.val_);
+Local<T> Local<T>::New(Isolate* isolate, const BasicTracedReference<T>& that) {
+ return New(isolate, *that);
}
template <class T>
@@ -10965,23 +11038,69 @@ Global<T>& Global<T>::operator=(Global<S>&& rhs) {
}
template <class T>
-T* TracedReferenceBase<T>::New(Isolate* isolate, T* that, void* slot,
- DestructionMode destruction_mode) {
+internal::Address* BasicTracedReference<T>::New(
+ Isolate* isolate, T* that, void* slot, DestructionMode destruction_mode) {
if (that == nullptr) return nullptr;
internal::Address* p = reinterpret_cast<internal::Address*>(that);
- return reinterpret_cast<T*>(V8::GlobalizeTracedReference(
+ return V8::GlobalizeTracedReference(
reinterpret_cast<internal::Isolate*>(isolate), p,
reinterpret_cast<internal::Address*>(slot),
- destruction_mode == kWithDestructor));
+ destruction_mode == kWithDestructor);
}
-template <class T>
-void TracedReferenceBase<T>::Reset() {
+void TracedReferenceBase::Reset() {
if (IsEmpty()) return;
V8::DisposeTracedGlobal(reinterpret_cast<internal::Address*>(val_));
SetSlotThreadSafe(nullptr);
}
+v8::Local<v8::Value> TracedReferenceBase::Get(v8::Isolate* isolate) const {
+ if (IsEmpty()) return Local<Value>();
+ return Local<Value>::New(isolate, reinterpret_cast<Value*>(val_));
+}
+
+V8_INLINE bool operator==(const TracedReferenceBase& lhs,
+ const TracedReferenceBase& rhs) {
+ v8::internal::Address* a = reinterpret_cast<v8::internal::Address*>(lhs.val_);
+ v8::internal::Address* b = reinterpret_cast<v8::internal::Address*>(rhs.val_);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+}
+
+template <typename U>
+V8_INLINE bool operator==(const TracedReferenceBase& lhs,
+ const v8::Local<U>& rhs) {
+ v8::internal::Address* a = reinterpret_cast<v8::internal::Address*>(lhs.val_);
+ v8::internal::Address* b = reinterpret_cast<v8::internal::Address*>(*rhs);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+}
+
+template <typename U>
+V8_INLINE bool operator==(const v8::Local<U>& lhs,
+ const TracedReferenceBase& rhs) {
+ return rhs == lhs;
+}
+
+V8_INLINE bool operator!=(const TracedReferenceBase& lhs,
+ const TracedReferenceBase& rhs) {
+ return !(lhs == rhs);
+}
+
+template <typename U>
+V8_INLINE bool operator!=(const TracedReferenceBase& lhs,
+ const v8::Local<U>& rhs) {
+ return !(lhs == rhs);
+}
+
+template <typename U>
+V8_INLINE bool operator!=(const v8::Local<U>& lhs,
+ const TracedReferenceBase& rhs) {
+ return !(rhs == lhs);
+}
+
template <class T>
template <class S>
void TracedGlobal<T>::Reset(Isolate* isolate, const Local<S>& other) {
@@ -10989,7 +11108,7 @@ void TracedGlobal<T>::Reset(Isolate* isolate, const Local<S>& other) {
Reset();
if (other.IsEmpty()) return;
this->val_ = this->New(isolate, other.val_, &this->val_,
- TracedReferenceBase<T>::kWithDestructor);
+ BasicTracedReference<T>::kWithDestructor);
}
template <class T>
@@ -11039,7 +11158,7 @@ void TracedReference<T>::Reset(Isolate* isolate, const Local<S>& other) {
if (other.IsEmpty()) return;
this->SetSlotThreadSafe(
this->New(isolate, other.val_, &this->val_,
- TracedReferenceBase<T>::kWithoutDestructor));
+ BasicTracedReference<T>::kWithoutDestructor));
}
template <class T>
@@ -11082,8 +11201,7 @@ TracedReference<T>& TracedReference<T>::operator=(const TracedReference& rhs) {
return *this;
}
-template <class T>
-void TracedReferenceBase<T>::SetWrapperClassId(uint16_t class_id) {
+void TracedReferenceBase::SetWrapperClassId(uint16_t class_id) {
typedef internal::Internals I;
if (IsEmpty()) return;
internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
@@ -11091,8 +11209,7 @@ void TracedReferenceBase<T>::SetWrapperClassId(uint16_t class_id) {
*reinterpret_cast<uint16_t*>(addr) = class_id;
}
-template <class T>
-uint16_t TracedReferenceBase<T>::WrapperClassId() const {
+uint16_t TracedReferenceBase::WrapperClassId() const {
typedef internal::Internals I;
if (IsEmpty()) return 0;
internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
@@ -11123,7 +11240,7 @@ void ReturnValue<T>::Set(const Global<S>& handle) {
template <typename T>
template <typename S>
-void ReturnValue<T>::Set(const TracedReferenceBase<S>& handle) {
+void ReturnValue<T>::Set(const BasicTracedReference<S>& handle) {
static_assert(std::is_base_of<T, S>::value, "type check");
if (V8_UNLIKELY(handle.IsEmpty())) {
*value_ = GetDefaultValue();
@@ -11243,22 +11360,14 @@ template<typename T>
Local<Value> FunctionCallbackInfo<T>::operator[](int i) const {
// values_ points to the first argument (not the receiver).
if (i < 0 || length_ <= i) return Local<Value>(*Undefined(GetIsolate()));
-#ifdef V8_REVERSE_JSARGS
return Local<Value>(reinterpret_cast<Value*>(values_ + i));
-#else
- return Local<Value>(reinterpret_cast<Value*>(values_ - i));
-#endif
}
template<typename T>
Local<Object> FunctionCallbackInfo<T>::This() const {
// values_ points to the first argument (not the receiver).
-#ifdef V8_REVERSE_JSARGS
return Local<Object>(reinterpret_cast<Object*>(values_ - 1));
-#else
- return Local<Object>(reinterpret_cast<Object*>(values_ + 1));
-#endif
}
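
With the reversed-args layout now unconditional, values_ grows upward from
the first argument and the receiver sits one slot below it. A hedged sketch
of a callback reading arguments through these accessors (the callback name
is illustrative; only the v8 API calls are real):

    void SumCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
      v8::Isolate* isolate = info.GetIsolate();
      double sum = 0;
      for (int i = 0; i < info.Length(); ++i) {
        // operator[] above returns Undefined for out-of-range indices.
        if (info[i]->IsNumber()) sum += info[i].As<v8::Number>()->Value();
      }
      info.GetReturnValue().Set(v8::Number::New(isolate, sum));
    }
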
@@ -11453,8 +11562,12 @@ void* Object::GetAlignedPointerFromInternalField(int index) {
instance_type == I::kJSApiObjectType ||
instance_type == I::kJSSpecialApiObjectType)) {
int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
+#ifdef V8_HEAP_SANDBOX
+ offset += I::kEmbedderDataSlotRawPayloadOffset;
+#endif
internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
- A value = I::ReadExternalPointerField(isolate, obj, offset);
+ A value = I::ReadExternalPointerField(
+ isolate, obj, offset, internal::kEmbedderDataSlotPayloadTag);
return reinterpret_cast<void*>(value);
}
#endif
@@ -11487,7 +11600,8 @@ String::ExternalStringResource* String::GetExternalStringResource() const {
if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) {
internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
A value =
- I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset);
+ I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset,
+ internal::kExternalStringResourceTag);
result = reinterpret_cast<String::ExternalStringResource*>(value);
} else {
result = GetExternalStringResourceSlow();
@@ -11511,7 +11625,8 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBase(
type == I::kExternalTwoByteRepresentationTag) {
internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
A value =
- I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset);
+ I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset,
+ internal::kExternalStringResourceTag);
resource = reinterpret_cast<ExternalStringResourceBase*>(value);
} else {
resource = GetExternalStringResourceBaseSlow(encoding_out);
@@ -12073,9 +12188,13 @@ void* Context::GetAlignedPointerFromEmbedderData(int index) {
I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
int value_offset =
I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
+#ifdef V8_HEAP_SANDBOX
+ value_offset += I::kEmbedderDataSlotRawPayloadOffset;
+#endif
internal::Isolate* isolate = I::GetIsolateForHeapSandbox(ctx);
return reinterpret_cast<void*>(
- I::ReadExternalPointerField(isolate, embedder_data, value_offset));
+ I::ReadExternalPointerField(isolate, embedder_data, value_offset,
+ internal::kEmbedderDataSlotPayloadTag));
#else
return SlowGetAlignedPointerFromEmbedderData(index);
#endif
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index a047874c40..ae89edb2c9 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -482,15 +482,6 @@ V8 shared library set USING_V8_SHARED.
#endif // V8_OS_WIN
-// Support for floating point parameters in calls to C.
-// It's currently enabled only for the platforms listed below. We don't plan
-// to add support for IA32, because it has a totally different approach
-// (using FP stack). As support is added to more platforms, please make sure
-// to list them here in order to enable tests of this functionality.
-#if defined(V8_TARGET_ARCH_X64)
-#define V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
-#endif
-
// clang-format on
#endif // V8CONFIG_H_
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index 7d5cc73026..aab725feec 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -86,7 +86,6 @@
'V8 Linux - arm64 - sim - MSAN': 'release_simulate_arm64_msan',
# Misc.
'V8 Linux gcc': 'release_x86_gcc',
- 'V8 Linux64 gcc - debug': 'debug_x64_gcc',
# FYI.
'V8 iOS - sim': 'release_x64_ios_simulator',
'V8 Linux64 - debug - perfetto - builder': 'debug_x64_perfetto',
@@ -95,10 +94,10 @@
'release_x64_pointer_compression_without_dchecks',
'V8 Linux64 - arm64 - sim - pointer compression - builder':
'release_simulate_arm64_pointer_compression',
+ 'V8 Linux64 gcc - debug': 'debug_x64_gcc',
'V8 Fuchsia - builder': 'release_x64_fuchsia',
'V8 Fuchsia - debug builder': 'debug_x64_fuchsia',
'V8 Linux64 - cfi': 'release_x64_cfi',
- 'V8 Linux64 - reverse jsargs': 'debug_x64_reverse_jsargs',
'V8 Linux64 UBSan': 'release_x64_ubsan',
'V8 Linux - vtunejit': 'debug_x86_vtunejit',
'V8 Linux64 - gcov coverage': 'release_x64_gcc_coverage',
@@ -232,7 +231,6 @@
'v8_linux64_perfetto_dbg_ng': 'debug_x64_perfetto',
'v8_linux64_pointer_compression_rel_ng': 'release_x64_pointer_compression',
'v8_linux64_rel_ng': 'release_x64_test_features_trybot',
- 'v8_linux64_reverse_jsargs_dbg_ng': 'debug_x64_reverse_jsargs',
'v8_linux64_shared_compile_rel': 'release_x64_shared_verify_heap',
'v8_linux64_verify_csa_rel_ng': 'release_x64_verify_csa',
'v8_linux64_asan_rel_ng': 'release_x64_asan_minimal_symbols',
@@ -538,8 +536,6 @@
'debug_bot', 'x64', 'perfetto'],
'debug_x64_trybot': [
'debug_trybot', 'x64'],
- 'debug_x64_reverse_jsargs': [
- 'debug_bot', 'x64', 'reverse_jsargs'],
'debug_x64_trybot_custom': [
'debug_trybot', 'x64', 'v8_snapshot_custom'],
'full_debug_x64': [
@@ -661,7 +657,13 @@
},
'disable_concurrent_marking': {
- 'gn_args': 'v8_enable_concurrent_marking=false',
+ # Disable concurrent marking and atomic object field writes in order to
+ # increase the TSAN coverage for background tasks. We need to keep the
+ # atomic marking state enabled because that is needed for the concurrent
+ # write-barrier used by background compilation.
+ 'gn_args': 'v8_enable_concurrent_marking=false '
+ 'v8_enable_atomic_object_field_writes=false '
+ 'v8_enable_atomic_marking_state=true ',
},
'disable_pgo': {
@@ -752,10 +754,6 @@
'mixins': ['release_bot', 'minimal_symbols', 'dcheck_always_on'],
},
- 'reverse_jsargs': {
- 'gn_args': 'v8_enable_reverse_jsargs=true',
- },
-
'official': {
'gn_args': 'is_official_build=true',
},
diff --git a/deps/v8/infra/testing/PRESUBMIT.py b/deps/v8/infra/testing/PRESUBMIT.py
index 178ba9f707..46ae05163d 100644
--- a/deps/v8/infra/testing/PRESUBMIT.py
+++ b/deps/v8/infra/testing/PRESUBMIT.py
@@ -29,6 +29,7 @@ SUPPORTED_SWARMING_DIMENSIONS = [
'cpu',
'device_os',
'device_type',
+ 'gpu',
'os',
'pool',
]
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index c5ee5e496f..9414e17377 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -81,6 +81,24 @@
{'name': 'test262', 'variant': 'extra', 'shards': 3},
{'name': 'v8testing', 'shards': 3},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
+ # Noavx.
+ {
+ 'name': 'mozilla',
+ 'suffix': 'noavx',
+ 'test_args': ['--extra-flags', '--noenable-avx']
+ },
+ {
+ 'name': 'test262',
+ 'suffix': 'noavx',
+ 'variant': 'default',
+ 'test_args': ['--extra-flags', '--noenable-avx']
+ },
+ {
+ 'name': 'v8testing',
+ 'suffix': 'noavx',
+ 'test_args': ['--extra-flags', '--noenable-avx'],
+ 'shards': 2
+ },
],
},
'v8_linux_gc_stress_dbg_ng_triggered': {
@@ -202,6 +220,24 @@
'test_args': ['--extra-flags', '--noenable-sse4-1 --noenable-avx'],
'shards': 3,
},
+ # Noavx.
+ {
+ 'name': 'mozilla',
+ 'suffix': 'noavx',
+ 'test_args': ['--extra-flags', '--noenable-avx']
+ },
+ {
+ 'name': 'test262',
+ 'suffix': 'noavx',
+ 'variant': 'default',
+ 'test_args': ['--extra-flags', '--noenable-avx']
+ },
+ {
+ 'name': 'v8testing',
+ 'suffix': 'noavx',
+ 'test_args': ['--extra-flags', '--noenable-avx'],
+ 'shards': 2
+ },
],
},
'v8_linux_verify_csa_rel_ng_triggered': {
@@ -292,6 +328,7 @@
{'name': 'v8testing', 'variant': 'minor_mc'},
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
+ {'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
],
},
'v8_linux64_fuzzilli_ng_triggered': {
@@ -301,15 +338,6 @@
# TODO(almuthanna): Add a new test config for the fuzzilli suite.
'tests': [],
},
- 'v8_linux64_reverse_jsargs_dbg_ng_triggered': {
- 'swarming_dimensions' : {
- 'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'v8testing', 'shards': 3},
- ],
- },
'v8_linux64_gc_stress_custom_snapshot_dbg_ng_triggered': {
'swarming_dimensions' : {
'os': 'Ubuntu-16.04',
@@ -423,6 +451,7 @@
{'name': 'v8testing', 'variant': 'extra', 'shards': 3},
{'name': 'v8testing', 'variant': 'no_local_heaps'},
{'name': 'v8testing', 'variant': 'slow_path'},
+ {'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
],
},
'v8_linux64_tsan_no_cm_rel_ng_triggered': {
@@ -610,7 +639,8 @@
'v8_mac64_asan_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.13',
+ 'os': 'Mac-10.15',
+ 'gpu': 'none',
},
'tests': [
{'name': 'v8testing', 'shards': 4},
@@ -619,7 +649,8 @@
'v8_mac64_dbg_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.13',
+ 'os': 'Mac-10.15',
+ 'gpu': 'none',
},
'tests': [
{'name': 'mozilla'},
@@ -631,7 +662,8 @@
'v8_mac64_gc_stress_dbg_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.13',
+ 'os': 'Mac-10.15',
+ 'gpu': 'none',
},
'tests': [
{'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 4},
@@ -640,7 +672,8 @@
'v8_mac64_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.13',
+ 'os': 'Mac-10.15',
+ 'gpu': 'none',
},
'tests': [
{'name': 'mozilla'},
@@ -682,7 +715,8 @@
'v8_mac_arm64_sim_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.13',
+ 'os': 'Mac-10.15',
+ 'gpu': 'none',
},
'tests': [
{'name': 'v8testing', 'shards': 8},
@@ -691,7 +725,8 @@
'v8_mac_arm64_sim_dbg_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.13',
+ 'os': 'Mac-10.15',
+ 'gpu': 'none',
},
'tests': [
{'name': 'v8testing', 'shards': 8},
@@ -700,7 +735,8 @@
'v8_mac_arm64_sim_nodcheck_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.13',
+ 'os': 'Mac-10.15',
+ 'gpu': 'none',
},
'tests': [
{'name': 'v8testing', 'shards': 8},
@@ -764,6 +800,24 @@
'suffix': 'nosse4',
'test_args': ['--extra-flags', '--noenable-sse4-1 --noenable-avx']
},
+ # Noavx.
+ {
+ 'name': 'mozilla',
+ 'suffix': 'noavx',
+ 'test_args': ['--extra-flags', '--noenable-avx']
+ },
+ {
+ 'name': 'test262',
+ 'suffix': 'noavx',
+ 'variant': 'default',
+ 'test_args': ['--extra-flags', '--noenable-avx']
+ },
+ {
+ 'name': 'v8testing',
+ 'suffix': 'noavx',
+ 'test_args': ['--extra-flags', '--noenable-avx'],
+ 'shards': 2
+ },
],
},
'V8 Linux - arm64 - sim - CFI': {
@@ -846,6 +900,24 @@
'test_args': ['--extra-flags', '--noenable-sse4-1 --noenable-avx'],
'shards': 3
},
+ # Noavx.
+ {
+ 'name': 'mozilla',
+ 'suffix': 'noavx',
+ 'test_args': ['--extra-flags', '--noenable-avx']
+ },
+ {
+ 'name': 'test262',
+ 'suffix': 'noavx',
+ 'variant': 'default',
+ 'test_args': ['--extra-flags', '--noenable-avx']
+ },
+ {
+ 'name': 'v8testing',
+ 'suffix': 'noavx',
+ 'test_args': ['--extra-flags', '--noenable-avx'],
+ 'shards': 2
+ },
],
},
'V8 Linux - full debug': {
@@ -1010,6 +1082,7 @@
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'slow_path'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
+ {'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
# Noavx.
{
'name': 'mozilla',
@@ -1105,15 +1178,6 @@
{'name': 'v8testing', 'shards': 2},
],
},
- 'V8 Linux64 - reverse jsargs': {
- 'swarming_dimensions' : {
- 'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'v8testing', 'shards': 3},
- ],
- },
'V8 Linux64 - shared': {
'swarming_dimensions' : {
'os': 'Ubuntu-16.04',
@@ -1168,6 +1232,7 @@
{'name': 'v8testing', 'variant': 'extra', 'shards': 3},
{'name': 'v8testing', 'variant': 'no_local_heaps', 'shards': 1},
{'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
+ {'name': 'v8testing', 'variant': 'stress_concurrent_allocation', 'shards': 1},
],
},
'V8 Linux64 TSAN - stress-incremental-marking': {
@@ -1240,7 +1305,8 @@
'V8 Mac64': {
'swarming_dimensions': {
'cpu': 'x86-64',
- 'os': 'Mac-10.13',
+ 'os': 'Mac-10.15',
+ 'gpu': 'none',
},
'tests': [
{'name': 'mozilla'},
@@ -1252,7 +1318,8 @@
'V8 Mac64 - debug': {
'swarming_dimensions': {
'cpu': 'x86-64',
- 'os': 'Mac-10.13',
+ 'os': 'Mac-10.15',
+ 'gpu': 'none',
},
'tests': [
{'name': 'mozilla'},
@@ -1264,7 +1331,8 @@
'V8 Mac64 ASAN': {
'swarming_dimensions': {
'cpu': 'x86-64',
- 'os': 'Mac-10.13',
+ 'os': 'Mac-10.15',
+ 'gpu': 'none',
},
'tests': [
{'name': 'v8testing', 'shards': 5},
@@ -1273,7 +1341,8 @@
'V8 Mac64 GC Stress': {
'swarming_dimensions': {
'cpu': 'x86-64',
- 'os': 'Mac-10.13',
+ 'os': 'Mac-10.15',
+ 'gpu': 'none',
},
'tests': [
{'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 4},
@@ -1298,7 +1367,8 @@
'V8 Mac - arm64 - sim - debug': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.13',
+ 'os': 'Mac-10.15',
+ 'gpu': 'none',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1312,7 +1382,8 @@
'V8 Mac - arm64 - sim - release': {
'swarming_dimensions' : {
'cpu': 'x86-64',
- 'os': 'Mac-10.13',
+ 'os': 'Mac-10.15',
+ 'gpu': 'none',
},
'swarming_task_attrs': {
'expiration': 14400,
diff --git a/deps/v8/samples/cppgc/cppgc-for-v8-embedders.cc b/deps/v8/samples/cppgc/cppgc-for-v8-embedders.cc
index 8aaa9cd39c..b4d7ed9e4d 100644
--- a/deps/v8/samples/cppgc/cppgc-for-v8-embedders.cc
+++ b/deps/v8/samples/cppgc/cppgc-for-v8-embedders.cc
@@ -3,12 +3,12 @@
// found in the LICENSE file.
#include <include/cppgc/allocation.h>
+#include <include/cppgc/default-platform.h>
#include <include/cppgc/garbage-collected.h>
#include <include/cppgc/heap.h>
#include <include/cppgc/member.h>
#include <include/cppgc/platform.h>
#include <include/cppgc/visitor.h>
-#include <include/libplatform/libplatform.h>
#include <include/v8.h>
#include <iostream>
@@ -22,42 +22,6 @@
*/
/**
- * Platform used by cppgc. Can just redirect to v8::Platform for most calls.
- * Exception: GetForegroundTaskRunner(), see below.
- *
- * This example uses V8's default platform implementation to drive the cppgc
- * platform.
- */
-class Platform final : public cppgc::Platform {
- public:
- Platform() : v8_platform_(v8::platform::NewDefaultPlatform()) {}
-
- cppgc::PageAllocator* GetPageAllocator() final {
- return v8_platform_->GetPageAllocator();
- }
-
- double MonotonicallyIncreasingTime() final {
- return v8_platform_->MonotonicallyIncreasingTime();
- }
-
- std::shared_ptr<cppgc::TaskRunner> GetForegroundTaskRunner() final {
- // V8's default platform creates a new task runner when passed the
- // v8::Isolate pointer the first time. For non-default platforms this will
- // require getting the appropriate task runner.
- return v8_platform_->GetForegroundTaskRunner(nullptr);
- }
-
- std::unique_ptr<cppgc::JobHandle> PostJob(
- cppgc::TaskPriority priority,
- std::unique_ptr<cppgc::JobTask> job_task) final {
- return v8_platform_->PostJob(priority, std::move(job_task));
- }
-
- private:
- std::unique_ptr<v8::Platform> v8_platform_;
-};
-
-/**
* Simple string rope to illustrate allocation and garbage collection below. The
* rope keeps the next parts alive via regular managed reference.
*/
@@ -86,7 +50,7 @@ std::ostream& operator<<(std::ostream& os, const Rope& rope) {
int main(int argc, char* argv[]) {
// Create a platform that is used by cppgc::Heap for execution and backend
// allocation.
- auto cppgc_platform = std::make_shared<Platform>();
+ auto cppgc_platform = std::make_shared<cppgc::DefaultPlatform>();
// Initialize the process. This must happen before any cppgc::Heap::Create()
// calls.
cppgc::InitializeProcess(cppgc_platform->GetPageAllocator());
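
With the hand-rolled Platform gone, the sample's setup reduces to the
library-provided default. A hedged sketch of the minimal flow, assuming the
cppgc::DefaultPlatform added in include/cppgc/default-platform.h above:

    #include <cppgc/allocation.h>
    #include <cppgc/default-platform.h>
    #include <cppgc/heap.h>

    #include <memory>

    int main() {
      auto platform = std::make_shared<cppgc::DefaultPlatform>();
      cppgc::InitializeProcess(platform->GetPageAllocator());
      {
        std::unique_ptr<cppgc::Heap> heap = cppgc::Heap::Create(platform);
        // Allocate managed objects via cppgc::MakeGarbageCollected<T>(...).
      }
      cppgc::ShutdownProcess();
      return 0;
    }
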
diff --git a/deps/v8/src/DIR_METADATA b/deps/v8/src/DIR_METADATA
new file mode 100644
index 0000000000..2f8dbbcf45
--- /dev/null
+++ b/deps/v8/src/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS
index 3e21b6ea36..e5e3de50a3 100644
--- a/deps/v8/src/OWNERS
+++ b/deps/v8/src/OWNERS
@@ -1,5 +1,3 @@
per-file *DEPS=file:../COMMON_OWNERS
per-file intl-*=file:../INTL_OWNERS
per-file *-intl*=file:../INTL_OWNERS
-
-# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/api/DIR_METADATA b/deps/v8/src/api/DIR_METADATA
new file mode 100644
index 0000000000..a27ea1b53a
--- /dev/null
+++ b/deps/v8/src/api/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>API"
+}
\ No newline at end of file
diff --git a/deps/v8/src/api/OWNERS b/deps/v8/src/api/OWNERS
index 4e36be20e8..519588070b 100644
--- a/deps/v8/src/api/OWNERS
+++ b/deps/v8/src/api/OWNERS
@@ -6,5 +6,3 @@ leszeks@chromium.org
mlippautz@chromium.org
mslekova@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>API
diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc
index e21dbd0eee..f8f660ea15 100644
--- a/deps/v8/src/api/api-natives.cc
+++ b/deps/v8/src/api/api-natives.cc
@@ -361,7 +361,8 @@ bool IsSimpleInstantiation(Isolate* isolate, ObjectTemplateInfo info,
if (!new_target.IsJSFunction()) return false;
JSFunction fun = JSFunction::cast(new_target);
- if (fun.shared().function_data() != info.constructor()) return false;
+ if (fun.shared().function_data(kAcquireLoad) != info.constructor())
+ return false;
if (info.immutable_proto()) return false;
return fun.context().native_context() == isolate->raw_native_context();
}
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index 11a9dce9f0..a29747da62 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -14,6 +14,7 @@
#include "include/v8-cppgc.h"
#include "include/v8-fast-api-calls.h"
#include "include/v8-profiler.h"
+#include "include/v8-unwinder-state.h"
#include "include/v8-util.h"
#include "src/api/api-inl.h"
#include "src/api/api-natives.h"
@@ -86,6 +87,7 @@
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/objects/stack-frame-info-inl.h"
+#include "src/objects/synthetic-module-inl.h"
#include "src/objects/templates.h"
#include "src/objects/value-serializer.h"
#include "src/parsing/parse-info.h"
@@ -100,6 +102,7 @@
#include "src/regexp/regexp-utils.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/code-serializer.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/snapshot/startup-serializer.h" // For SerializedHandleChecker.
#include "src/strings/char-predicates-inl.h"
@@ -919,9 +922,9 @@ void ResourceConstraints::ConfigureDefaultsFromHeapSize(
i::Heap::GenerationSizesFromHeapSize(maximum_heap_size_in_bytes,
&young_generation, &old_generation);
set_max_young_generation_size_in_bytes(
- i::Max(young_generation, i::Heap::MinYoungGenerationSize()));
+ std::max(young_generation, i::Heap::MinYoungGenerationSize()));
set_max_old_generation_size_in_bytes(
- i::Max(old_generation, i::Heap::MinOldGenerationSize()));
+ std::max(old_generation, i::Heap::MinOldGenerationSize()));
if (initial_heap_size_in_bytes > 0) {
i::Heap::GenerationSizesFromHeapSize(initial_heap_size_in_bytes,
&young_generation, &old_generation);
@@ -931,7 +934,7 @@ void ResourceConstraints::ConfigureDefaultsFromHeapSize(
}
if (i::kPlatformRequiresCodeRange) {
set_code_range_size_in_bytes(
- i::Min(i::kMaximalCodeRangeSize, maximum_heap_size_in_bytes));
+ std::min(i::kMaximalCodeRangeSize, maximum_heap_size_in_bytes));
}
}
@@ -946,8 +949,8 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
if (virtual_memory_limit > 0 && i::kPlatformRequiresCodeRange) {
set_code_range_size_in_bytes(
- i::Min(i::kMaximalCodeRangeSize,
- static_cast<size_t>(virtual_memory_limit / 8)));
+ std::min(i::kMaximalCodeRangeSize,
+ static_cast<size_t>(virtual_memory_limit / 8)));
}
}
@@ -991,42 +994,6 @@ i::Address* V8::GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj,
return result.location();
}
-// static
-i::Address* i::JSMemberBase::New(v8::Isolate* isolate, i::Address* object_slot,
- i::Address** this_slot) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, JSMemberBase, New);
-#ifdef DEBUG
- Utils::ApiCheck((object_slot != nullptr), "i::JSMemberBase::New",
- "the object must be not null");
-#endif
- i::Handle<i::Object> result = i_isolate->global_handles()->CreateTraced(
- *object_slot, reinterpret_cast<i::Address*>(this_slot),
- false /* no destructor */);
-#ifdef VERIFY_HEAP
- if (i::FLAG_verify_heap) {
- i::Object(*object_slot).ObjectVerify(i_isolate);
- }
-#endif // VERIFY_HEAP
- return result.location();
-}
-
-// static
-void i::JSMemberBase::Delete(i::Address* object) {
- i::GlobalHandles::DestroyTraced(object);
-}
-
-// static
-void i::JSMemberBase::Copy(const i::Address* const* from_slot,
- i::Address** to_slot) {
- i::GlobalHandles::CopyTracedGlobal(from_slot, to_slot);
-}
-
-// static
-void i::JSMemberBase::Move(i::Address** from_slot, i::Address** to_slot) {
- i::GlobalHandles::MoveTracedGlobal(from_slot, to_slot);
-}
-
i::Address* V8::CopyGlobalReference(i::Address* from) {
i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(from);
return result.location();
@@ -1560,7 +1527,7 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
isolate, info,
i::handle(*FromCData(isolate, c_function->GetTypeInfo()), isolate));
}
- info->set_call_code(*obj);
+ info->set_call_code(*obj, kReleaseStore);
}
namespace {
@@ -2038,6 +2005,17 @@ void ObjectTemplate::SetImmutableProto() {
self->set_immutable_proto(true);
}
+bool ObjectTemplate::IsCodeLike() {
+ return Utils::OpenHandle(this)->code_like();
+}
+
+void ObjectTemplate::SetCodeLike() {
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ self->set_code_like(true);
+}
+
// --- S c r i p t s ---
// Internally, UnboundScript is a SharedFunctionInfo, and Script is a
@@ -2271,7 +2249,9 @@ Local<String> Module::GetModuleRequest(int i) const {
i::Handle<i::SourceTextModule>::cast(self)->info().module_requests(),
isolate);
CHECK_LT(i, module_requests->length());
- return ToApiHandle<String>(i::handle(module_requests->get(i), isolate));
+ i::Handle<i::ModuleRequest> module_request(
+ i::ModuleRequest::cast(module_requests->get(i)), isolate);
+ return ToApiHandle<String>(i::handle(module_request->specifier(), isolate));
}
Location Module::GetModuleRequestLocation(int i) const {
@@ -2329,6 +2309,15 @@ int Module::ScriptId() {
return ToApiHandle<UnboundScript>(sfi)->GetId();
}
+bool Module::IsGraphAsync() const {
+ Utils::ApiCheck(
+ GetStatus() >= kInstantiated, "v8::Module::IsGraphAsync",
+ "v8::Module::IsGraphAsync must be used on an instantiated module");
+ i::Handle<i::Module> self = Utils::OpenHandle(this);
+ auto isolate = reinterpret_cast<i::Isolate*>(self->GetIsolate());
+ return self->IsGraphAsync(isolate);
+}
+
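
A hedged sketch of consulting the new predicate from embedder code; the
helper is illustrative, only the v8::Module calls are real API:

    bool ModuleUsesTopLevelAwait(v8::Local<v8::Module> module) {
      // Per the ApiCheck above, the graph must be instantiated first.
      if (module->GetStatus() < v8::Module::kInstantiated) return false;
      return module->IsGraphAsync();
    }
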
bool Module::IsSourceTextModule() const {
return Utils::OpenHandle(this)->IsSourceTextModule();
}
@@ -2646,12 +2635,15 @@ void ScriptCompiler::ScriptStreamingTask::Run() { data_->task->Run(); }
ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
Isolate* v8_isolate, StreamedSource* source, CompileOptions options) {
- if (!i::FLAG_script_streaming) {
- return nullptr;
- }
// We don't support other compile options on streaming background compiles.
// TODO(rmcilroy): remove CompileOptions from the API.
CHECK(options == ScriptCompiler::kNoCompileOptions);
+ return StartStreaming(v8_isolate, source);
+}
+
+ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreaming(
+ Isolate* v8_isolate, StreamedSource* source) {
+ if (!i::FLAG_script_streaming) return nullptr;
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
i::ScriptStreamingData* data = source->impl();
std::unique_ptr<i::BackgroundCompileTask> task =
@@ -3663,6 +3655,12 @@ MaybeLocal<Uint32> Value::ToUint32(Local<Context> context) const {
RETURN_ESCAPED(result);
}
+i::Address i::DecodeExternalPointerImpl(const i::Isolate* isolate,
+ i::ExternalPointer_t encoded_pointer,
+ ExternalPointerTag tag) {
+ return i::DecodeExternalPointer(isolate, encoded_pointer, tag);
+}
+
i::Isolate* i::IsolateFromNeverReadOnlySpaceObject(i::Address obj) {
return i::GetIsolateFromWritableObject(i::HeapObject::cast(i::Object(obj)));
}
@@ -4436,7 +4434,8 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(
accumulator.GetKeys(static_cast<i::GetKeysConversion>(key_conversion));
DCHECK(self->map().EnumLength() == i::kInvalidEnumCacheSentinel ||
self->map().EnumLength() == 0 ||
- self->map().instance_descriptors().enum_cache().keys() != *value);
+ self->map().instance_descriptors(kRelaxedLoad).enum_cache().keys() !=
+ *value);
auto result = isolate->factory()->NewJSArrayWithElements(value);
RETURN_ESCAPED(Utils::ToLocal(result));
}
@@ -4941,7 +4940,8 @@ MaybeLocal<Object> Function::NewInstanceWithSideEffectType(
CHECK(self->IsJSFunction() &&
i::JSFunction::cast(*self).shared().IsApiFunction());
i::Object obj =
- i::JSFunction::cast(*self).shared().get_api_func_data().call_code();
+ i::JSFunction::cast(*self).shared().get_api_func_data().call_code(
+ kAcquireLoad);
if (obj.IsCallHandlerInfo()) {
i::CallHandlerInfo handler_info = i::CallHandlerInfo::cast(obj);
if (!handler_info.IsSideEffectFreeCallHandlerInfo()) {
@@ -4955,7 +4955,8 @@ MaybeLocal<Object> Function::NewInstanceWithSideEffectType(
i::Execution::New(isolate, self, self, argc, args), &result);
if (should_set_has_no_side_effect) {
i::Object obj =
- i::JSFunction::cast(*self).shared().get_api_func_data().call_code();
+ i::JSFunction::cast(*self).shared().get_api_func_data().call_code(
+ kAcquireLoad);
if (obj.IsCallHandlerInfo()) {
i::CallHandlerInfo handler_info = i::CallHandlerInfo::cast(obj);
if (has_pending_exception) {
@@ -5127,6 +5128,18 @@ Local<v8::Value> Function::GetBoundFunction() const {
return v8::Undefined(reinterpret_cast<v8::Isolate*>(self->GetIsolate()));
}
+MaybeLocal<String> v8::Function::FunctionProtoToString(Local<Context> context) {
+ PREPARE_FOR_EXECUTION(context, Function, FunctionProtoToString, String);
+ auto self = Utils::OpenHandle(this);
+ Local<Value> result;
+ has_pending_exception = !ToLocal<Value>(
+ i::Execution::CallBuiltin(isolate, isolate->function_to_string(), self, 0,
+ nullptr),
+ &result);
+ RETURN_ON_FAILED_EXECUTION(String);
+ RETURN_ESCAPED(Local<String>::Cast(result));
+}
+
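
A hedged usage sketch: this entry point always routes through the builtin
Function.prototype.toString, even when the script has overridden toString
(the helper name and printf output are illustrative; assumes <v8.h> and
<cstdio> are included):

    void PrintFunctionSource(v8::Isolate* isolate,
                             v8::Local<v8::Context> context,
                             v8::Local<v8::Function> fn) {
      v8::Local<v8::String> source;
      if (fn->FunctionProtoToString(context).ToLocal(&source)) {
        v8::String::Utf8Value utf8(isolate, source);
        printf("%s\n", *utf8);
      }
    }
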
int Name::GetIdentityHash() {
auto self = Utils::OpenHandle(this);
return static_cast<int>(self->Hash());
@@ -5532,7 +5545,8 @@ String::ExternalStringResource* String::GetExternalStringResourceSlow() const {
if (i::StringShape(str).IsExternalTwoByte()) {
internal::Isolate* isolate = I::GetIsolateForHeapSandbox(str.ptr());
internal::Address value = I::ReadExternalPointerField(
- isolate, str.ptr(), I::kStringResourceOffset);
+ isolate, str.ptr(), I::kStringResourceOffset,
+ internal::kExternalStringResourceTag);
return reinterpret_cast<String::ExternalStringResource*>(value);
}
return nullptr;
@@ -5556,7 +5570,8 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBaseSlow(
i::StringShape(str).IsExternalTwoByte()) {
internal::Isolate* isolate = I::GetIsolateForHeapSandbox(string);
internal::Address value =
- I::ReadExternalPointerField(isolate, string, I::kStringResourceOffset);
+ I::ReadExternalPointerField(isolate, string, I::kStringResourceOffset,
+ internal::kExternalStringResourceTag);
resource = reinterpret_cast<ExternalStringResourceBase*>(value);
}
return resource;
@@ -5876,6 +5891,10 @@ void V8::GetSharedMemoryStatistics(SharedMemoryStatistics* statistics) {
i::ReadOnlyHeap::PopulateReadOnlySpaceStatistics(statistics);
}
+void V8::SetIsCrossOriginIsolated() {
+ i::FLAG_harmony_sharedarraybuffer = true;
+}
+
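
A hedged sketch of the intended call site: an embedder that has established
cross-origin isolation opts SharedArrayBuffer back in before initializing V8
(the ordering is an assumption, since the call flips a process-wide flag):

    v8::V8::SetIsCrossOriginIsolated();  // before isolates are created
    v8::V8::Initialize();
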
template <typename ObjectType>
struct InvokeBootstrapper;
@@ -6118,12 +6137,6 @@ v8::Isolate* Context::GetIsolate() {
return reinterpret_cast<Isolate*>(env->GetIsolate());
}
-v8::MicrotaskQueue* Context::GetMicrotaskQueue() {
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- CHECK(env->IsNativeContext());
- return i::Handle<i::NativeContext>::cast(env)->microtask_queue();
-}
-
v8::Local<v8::Object> Context::Global() {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* isolate = context->GetIsolate();
@@ -6839,6 +6852,7 @@ REGEXP_FLAG_ASSERT_EQ(kIgnoreCase);
REGEXP_FLAG_ASSERT_EQ(kMultiline);
REGEXP_FLAG_ASSERT_EQ(kSticky);
REGEXP_FLAG_ASSERT_EQ(kUnicode);
+REGEXP_FLAG_ASSERT_EQ(kLinear);
#undef REGEXP_FLAG_ASSERT_EQ
v8::RegExp::Flags v8::RegExp::GetFlags() const {
@@ -7015,10 +7029,11 @@ i::Handle<i::JSArray> MapAsArray(i::Isolate* isolate, i::Object table_obj,
i::DisallowHeapAllocation no_gc;
i::Oddball the_hole = i::ReadOnlyRoots(isolate).the_hole_value();
for (int i = offset; i < capacity; ++i) {
- i::Object key = table->KeyAt(i);
+ i::InternalIndex entry(i);
+ i::Object key = table->KeyAt(entry);
if (key == the_hole) continue;
if (collect_keys) result->set(result_index++, key);
- if (collect_values) result->set(result_index++, table->ValueAt(i));
+ if (collect_values) result->set(result_index++, table->ValueAt(entry));
}
}
DCHECK_GE(max_length, result_index);
@@ -7118,7 +7133,8 @@ i::Handle<i::JSArray> SetAsArray(i::Isolate* isolate, i::Object table_obj,
i::DisallowHeapAllocation no_gc;
i::Oddball the_hole = i::ReadOnlyRoots(isolate).the_hole_value();
for (int i = offset; i < capacity; ++i) {
- i::Object key = table->KeyAt(i);
+ i::InternalIndex entry(i);
+ i::Object key = table->KeyAt(entry);
if (key == the_hole) continue;
result->set(result_index++, key);
if (collect_key_values) result->set(result_index++, key);
@@ -7314,6 +7330,7 @@ CompiledWasmModule::CompiledWasmModule(
}
OwnedBuffer CompiledWasmModule::Serialize() {
+ TRACE_EVENT0("v8.wasm", "wasm.SerializeModule");
i::wasm::WasmSerializer wasm_serializer(native_module_.get());
size_t buffer_size = wasm_serializer.GetSerializedNativeModuleSize();
std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
@@ -7665,7 +7682,7 @@ Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) {
i::Handle<i::JSArrayBufferView> self = Utils::OpenHandle(this);
size_t byte_offset = self->byte_offset();
- size_t bytes_to_copy = i::Min(byte_length, self->byte_length());
+ size_t bytes_to_copy = std::min(byte_length, self->byte_length());
if (bytes_to_copy) {
i::DisallowHeapAllocation no_gc;
i::Isolate* isolate = self->GetIsolate();
@@ -8995,6 +9012,14 @@ void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
*length_in_bytes = code_range.size();
}
+void Isolate::GetEmbeddedCodeRange(const void** start,
+ size_t* length_in_bytes) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::EmbeddedData d = i::EmbeddedData::FromBlob(isolate);
+ *start = reinterpret_cast<const void*>(d.code());
+ *length_in_bytes = d.code_size();
+}
+
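
A hedged sketch of an unwinder-style check built on the new accessor
(the helper name is illustrative):

    bool PcInEmbeddedBuiltins(v8::Isolate* isolate, const void* pc) {
      const void* start = nullptr;
      size_t length = 0;
      isolate->GetEmbeddedCodeRange(&start, &length);
      const char* base = static_cast<const char*>(start);
      const char* p = static_cast<const char*>(pc);
      return p >= base && p < base + length;
    }
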
JSEntryStubs Isolate::GetJSEntryStubs() {
JSEntryStubs entry_stubs;
@@ -9050,6 +9075,9 @@ CALLBACK_SETTER(AllowCodeGenerationFromStringsCallback,
CALLBACK_SETTER(ModifyCodeGenerationFromStringsCallback,
ModifyCodeGenerationFromStringsCallback,
modify_code_gen_callback)
+CALLBACK_SETTER(ModifyCodeGenerationFromStringsCallback,
+ ModifyCodeGenerationFromStringsCallback2,
+ modify_code_gen_callback2)
CALLBACK_SETTER(AllowWasmCodeGenerationCallback,
AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback)
@@ -9199,6 +9227,14 @@ void v8::Isolate::LocaleConfigurationChangeNotification() {
#endif // V8_INTL_SUPPORT
}
+bool v8::Object::IsCodeLike(v8::Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, Object, IsCodeLike);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ i::HandleScope scope(i_isolate);
+ return Utils::OpenHandle(this)->IsCodeLike(i_isolate);
+}
+
// static
std::unique_ptr<MicrotaskQueue> MicrotaskQueue::New(Isolate* isolate,
MicrotasksPolicy policy) {
@@ -9827,7 +9863,7 @@ void debug::ForceGarbageCollection(
v8::Isolate* isolate,
v8::EmbedderHeapTracer::EmbedderStackState embedder_stack_state) {
i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
- heap->SetEmbedderStackStateForNextFinalizaton(embedder_stack_state);
+ heap->SetEmbedderStackStateForNextFinalization(embedder_stack_state);
isolate->LowMemoryNotification();
}
@@ -9943,6 +9979,10 @@ int debug::WasmScript::CodeOffset() const {
i::wasm::NativeModule* native_module = script->wasm_native_module();
const i::wasm::WasmModule* module = native_module->module();
+ // If the module contains at least one function, the code offset must have
+ // been initialized, and it cannot be zero.
+ DCHECK_IMPLIES(module->num_declared_functions > 0,
+ module->code.offset() != 0);
return module->code.offset();
}
@@ -10289,6 +10329,12 @@ debug::PostponeInterruptsScope::PostponeInterruptsScope(v8::Isolate* isolate)
debug::PostponeInterruptsScope::~PostponeInterruptsScope() = default;
+debug::DisableBreakScope::DisableBreakScope(v8::Isolate* isolate)
+ : scope_(std::make_unique<i::DisableBreak>(
+ reinterpret_cast<i::Isolate*>(isolate)->debug())) {}
+
+debug::DisableBreakScope::~DisableBreakScope() = default;
+
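
A hedged sketch of the new RAII helper: scripts run inside the scope do not
hit user breakpoints, and breaks are re-enabled on scope exit (assumes
access to the debug-interface headers, which are not part of the stable
public API):

    {
      v8::debug::DisableBreakScope no_break(isolate);
      // Run internal/instrumentation script here.
    }
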
Local<String> CpuProfileNode::GetFunctionName() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
i::Isolate* isolate = node->isolate();
@@ -10695,24 +10741,27 @@ void CpuProfiler::SetUsePreciseSampling(bool use_precise_sampling) {
use_precise_sampling);
}
-void CpuProfiler::StartProfiling(Local<String> title,
- CpuProfilingOptions options) {
- reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
+CpuProfilingStatus CpuProfiler::StartProfiling(Local<String> title,
+ CpuProfilingOptions options) {
+ return reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
*Utils::OpenHandle(*title), options);
}
-void CpuProfiler::StartProfiling(Local<String> title, bool record_samples) {
+CpuProfilingStatus CpuProfiler::StartProfiling(Local<String> title,
+ bool record_samples) {
CpuProfilingOptions options(
kLeafNodeLineNumbers,
record_samples ? CpuProfilingOptions::kNoSampleLimit : 0);
- reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
+ return reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
*Utils::OpenHandle(*title), options);
}
-void CpuProfiler::StartProfiling(Local<String> title, CpuProfilingMode mode,
- bool record_samples, unsigned max_samples) {
+CpuProfilingStatus CpuProfiler::StartProfiling(Local<String> title,
+ CpuProfilingMode mode,
+ bool record_samples,
+ unsigned max_samples) {
CpuProfilingOptions options(mode, record_samples ? max_samples : 0);
- reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
+ return reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
*Utils::OpenHandle(*title), options);
}
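
Since StartProfiling now reports a status, callers can distinguish a fresh
profile from a duplicate title or profiler exhaustion. A hedged sketch (the
enum values are assumed from v8-profiler.h as updated in this patch):

    v8::CpuProfilingStatus status =
        profiler->StartProfiling(title, /*record_samples=*/true);
    if (status != v8::CpuProfilingStatus::kStarted) {
      // kAlreadyStarted or kErrorTooManyProfilers: bail out or reuse.
    }
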
@@ -11004,6 +11053,12 @@ void HeapProfiler::RemoveBuildEmbedderGraphCallback(
callback, data);
}
+void HeapProfiler::SetGetDetachednessCallback(GetDetachednessCallback callback,
+ void* data) {
+ reinterpret_cast<i::HeapProfiler*>(this)->SetGetDetachednessCallback(callback,
+ data);
+}
+
void EmbedderHeapTracer::SetStackStart(void* stack_start) {
CHECK(isolate_);
reinterpret_cast<i::Isolate*>(isolate_)->global_handles()->SetStackStart(
@@ -11032,7 +11087,7 @@ void EmbedderHeapTracer::GarbageCollectionForTesting(
CHECK(isolate_);
CHECK(i::FLAG_expose_gc);
i::Heap* const heap = reinterpret_cast<i::Isolate*>(isolate_)->heap();
- heap->SetEmbedderStackStateForNextFinalizaton(stack_state);
+ heap->SetEmbedderStackStateForNextFinalization(stack_state);
heap->PreciseCollectAllGarbage(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting,
kGCCallbackFlagForced);
@@ -11061,7 +11116,7 @@ void EmbedderHeapTracer::DecreaseAllocatedSize(size_t bytes) {
}
void EmbedderHeapTracer::RegisterEmbedderReference(
- const TracedReferenceBase<v8::Data>& ref) {
+ const BasicTracedReference<v8::Data>& ref) {
if (ref.IsEmpty()) return;
i::Heap* const heap = reinterpret_cast<i::Isolate*>(isolate_)->heap();
@@ -11119,6 +11174,33 @@ CFunction::CFunction(const void* address, const CFunctionInfo* type_info)
}
}
+RegisterState::RegisterState()
+ : pc(nullptr), sp(nullptr), fp(nullptr), lr(nullptr) {}
+RegisterState::~RegisterState() = default;
+
+RegisterState::RegisterState(const RegisterState& other) V8_NOEXCEPT {
+ *this = other;
+}
+
+RegisterState& RegisterState::operator=(const RegisterState& other)
+ V8_NOEXCEPT {
+ if (&other != this) {
+ pc = other.pc;
+ sp = other.sp;
+ fp = other.fp;
+ lr = other.lr;
+ if (other.callee_saved) {
+ // Make a deep copy if {other.callee_saved} is non-null.
+ callee_saved =
+ std::make_unique<CalleeSavedRegisters>(*(other.callee_saved));
+ } else {
+ // Otherwise, set {callee_saved} to null to match {other}.
+ callee_saved.reset();
+ }
+ }
+ return *this;
+}
+
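
The copy operations above exist because callee_saved is held in a
std::unique_ptr; a hedged sketch of the resulting value semantics (assumes
v8-unwinder-state.h defines CalleeSavedRegisters for the target):

    v8::RegisterState a;
    a.callee_saved = std::make_unique<v8::CalleeSavedRegisters>();
    v8::RegisterState b = a;  // deep-copies callee_saved via operator= above
    // b.callee_saved now owns an independent copy, so destroying a leaves
    // b valid.
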
namespace internal {
const size_t HandleScopeImplementer::kEnteredContextsOffset =
diff --git a/deps/v8/src/asmjs/DIR_METADATA b/deps/v8/src/asmjs/DIR_METADATA
new file mode 100644
index 0000000000..3b428d9660
--- /dev/null
+++ b/deps/v8/src/asmjs/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>WebAssembly"
+}
\ No newline at end of file
diff --git a/deps/v8/src/asmjs/OWNERS b/deps/v8/src/asmjs/OWNERS
index 16b08f3b3b..c400f97de0 100644
--- a/deps/v8/src/asmjs/OWNERS
+++ b/deps/v8/src/asmjs/OWNERS
@@ -1,5 +1,3 @@
ahaas@chromium.org
clemensb@chromium.org
titzer@chromium.org
-
-# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/src/ast/DIR_METADATA b/deps/v8/src/ast/DIR_METADATA
new file mode 100644
index 0000000000..165380ae4f
--- /dev/null
+++ b/deps/v8/src/ast/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Parser"
+}
\ No newline at end of file
diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS
index 089db4c252..1da57bd30d 100644
--- a/deps/v8/src/ast/OWNERS
+++ b/deps/v8/src/ast/OWNERS
@@ -6,5 +6,3 @@ littledan@chromium.org
marja@chromium.org
neis@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>Parser
diff --git a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
index b583b5e421..8c9318bfe7 100644
--- a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
+++ b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
@@ -54,10 +54,10 @@ void AstFunctionLiteralIdReindexer::VisitClassLiteral(ClassLiteral* expr) {
// Private fields have their key and value present in
// instance_members_initializer_function, so they will
// already have been visited.
- if (prop->value()->IsFunctionLiteral()) {
- Visit(prop->value());
- } else {
+ if (prop->kind() == ClassLiteralProperty::Kind::FIELD) {
CheckVisited(prop->value());
+ } else {
+ Visit(prop->value());
}
}
ZonePtrList<ClassLiteral::Property>* props = expr->public_members();
@@ -67,7 +67,8 @@ void AstFunctionLiteralIdReindexer::VisitClassLiteral(ClassLiteral* expr) {
// Public fields with computed names have their key
// and value present in instance_members_initializer_function, so they will
// already have been visited.
- if (prop->is_computed_name() && !prop->value()->IsFunctionLiteral()) {
+ if (prop->is_computed_name() &&
+ prop->kind() == ClassLiteralProperty::Kind::FIELD) {
if (!prop->key()->IsLiteral()) {
CheckVisited(prop->key());
}
diff --git a/deps/v8/src/ast/ast-source-ranges.h b/deps/v8/src/ast/ast-source-ranges.h
index 1e96ec4c27..1b42a055dd 100644
--- a/deps/v8/src/ast/ast-source-ranges.h
+++ b/deps/v8/src/ast/ast-source-ranges.h
@@ -47,7 +47,6 @@ struct SourceRange {
V(Block) \
V(CaseClause) \
V(Conditional) \
- V(Expression) \
V(FunctionLiteral) \
V(IfStatement) \
V(IterationStatement) \
@@ -282,24 +281,6 @@ class NaryOperationSourceRanges final : public AstNodeSourceRanges {
ZoneVector<SourceRange> ranges_;
};
-class ExpressionSourceRanges final : public AstNodeSourceRanges {
- public:
- explicit ExpressionSourceRanges(const SourceRange& right_range)
- : right_range_(right_range) {}
-
- SourceRange GetRange(SourceRangeKind kind) override {
- DCHECK(HasRange(kind));
- return right_range_;
- }
-
- bool HasRange(SourceRangeKind kind) override {
- return kind == SourceRangeKind::kRight;
- }
-
- private:
- SourceRange right_range_;
-};
-
class SuspendSourceRanges final : public ContinuationSourceRanges {
public:
explicit SuspendSourceRanges(int32_t continuation_position)
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index 598096ba10..b5a39b22cf 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -27,12 +27,14 @@
#include "src/ast/ast-value-factory.h"
+#include "src/base/hashmap-entry.h"
#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/heap/factory-inl.h"
#include "src/heap/local-factory-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
+#include "src/objects/string.h"
#include "src/strings/char-predicates-inl.h"
#include "src/strings/string-hasher.h"
#include "src/utils/utils-inl.h"
@@ -113,9 +115,7 @@ uint16_t AstRawString::FirstCharacter() const {
return *c;
}
-bool AstRawString::Compare(void* a, void* b) {
- const AstRawString* lhs = static_cast<AstRawString*>(a);
- const AstRawString* rhs = static_cast<AstRawString*>(b);
+bool AstRawString::Compare(const AstRawString* lhs, const AstRawString* rhs) {
DCHECK_EQ(lhs->Hash(), rhs->Hash());
if (lhs->length() != rhs->length()) return false;
@@ -194,14 +194,17 @@ Handle<String> AstConsString::AllocateFlat(LocalIsolate* isolate) const {
->NewRawOneByteString(result_length, AllocationType::kOld)
.ToHandleChecked();
DisallowHeapAllocation no_gc;
- uint8_t* dest = result->GetChars(no_gc) + result_length;
+ uint8_t* dest =
+ result->GetChars(no_gc, SharedStringAccessGuardIfNeeded::NotNeeded()) +
+ result_length;
for (const AstConsString::Segment* current = &segment_; current != nullptr;
current = current->next) {
int length = current->string->length();
dest -= length;
CopyChars(dest, current->string->raw_data(), length);
}
- DCHECK_EQ(dest, result->GetChars(no_gc));
+ DCHECK_EQ(dest, result->GetChars(
+ no_gc, SharedStringAccessGuardIfNeeded::NotNeeded()));
return result;
}
@@ -210,7 +213,9 @@ Handle<String> AstConsString::AllocateFlat(LocalIsolate* isolate) const {
->NewRawTwoByteString(result_length, AllocationType::kOld)
.ToHandleChecked();
DisallowHeapAllocation no_gc;
- uint16_t* dest = result->GetChars(no_gc) + result_length;
+ uint16_t* dest =
+ result->GetChars(no_gc, SharedStringAccessGuardIfNeeded::NotNeeded()) +
+ result_length;
for (const AstConsString::Segment* current = &segment_; current != nullptr;
current = current->next) {
int length = current->string->length();
@@ -223,7 +228,8 @@ Handle<String> AstConsString::AllocateFlat(LocalIsolate* isolate) const {
length);
}
}
- DCHECK_EQ(dest, result->GetChars(no_gc));
+ DCHECK_EQ(dest, result->GetChars(
+ no_gc, SharedStringAccessGuardIfNeeded::NotNeeded()));
return result;
}
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
@@ -248,7 +254,7 @@ std::forward_list<const AstRawString*> AstConsString::ToRawStrings() const {
AstStringConstants::AstStringConstants(Isolate* isolate, uint64_t hash_seed)
: zone_(isolate->allocator(), ZONE_NAME),
- string_table_(AstRawString::Compare),
+ string_table_(),
hash_seed_(hash_seed) {
DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
#define F(name, str) \
@@ -262,16 +268,13 @@ AstStringConstants::AstStringConstants(Isolate* isolate, uint64_t hash_seed)
/* The Handle returned by the factory is located on the roots */ \
/* array, not on the temporary HandleScope, so this is safe. */ \
name##_string_->set_string(isolate->factory()->name##_string()); \
- base::HashMap::Entry* entry = \
- string_table_.InsertNew(name##_string_, name##_string_->Hash()); \
- DCHECK_NULL(entry->value); \
- entry->value = reinterpret_cast<void*>(1); \
+ string_table_.InsertNew(name##_string_, name##_string_->Hash()); \
}
AST_STRING_CONSTANTS(F)
#undef F
}
-AstRawString* AstValueFactory::GetOneByteStringInternal(
+const AstRawString* AstValueFactory::GetOneByteStringInternal(
Vector<const uint8_t> literal) {
if (literal.length() == 1 && literal[0] < kMaxOneCharStringValue) {
int key = literal[0];
@@ -287,7 +290,7 @@ AstRawString* AstValueFactory::GetOneByteStringInternal(
return GetString(hash_field, true, literal);
}
-AstRawString* AstValueFactory::GetTwoByteStringInternal(
+const AstRawString* AstValueFactory::GetTwoByteStringInternal(
Vector<const uint16_t> literal) {
uint32_t hash_field = StringHasher::HashSequentialString<uint16_t>(
literal.begin(), literal.length(), hash_seed_);
@@ -295,7 +298,7 @@ AstRawString* AstValueFactory::GetTwoByteStringInternal(
}
const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
- AstRawString* result = nullptr;
+ const AstRawString* result = nullptr;
DisallowHeapAllocation no_gc;
String::FlatContent content = literal->GetFlatContent(no_gc);
if (content.IsOneByte()) {
@@ -348,27 +351,29 @@ template EXPORT_TEMPLATE_DEFINE(
template EXPORT_TEMPLATE_DEFINE(
V8_EXPORT_PRIVATE) void AstValueFactory::Internalize(LocalIsolate* isolate);
-AstRawString* AstValueFactory::GetString(uint32_t hash_field, bool is_one_byte,
- Vector<const byte> literal_bytes) {
+const AstRawString* AstValueFactory::GetString(
+ uint32_t hash_field, bool is_one_byte, Vector<const byte> literal_bytes) {
// literal_bytes here points to whatever the user passed, and this is OK
// because we use vector_compare (which checks the contents) to compare
// against the AstRawStrings which are in the string_table_. We should not
// return this AstRawString.
AstRawString key(is_one_byte, literal_bytes, hash_field);
- base::HashMap::Entry* entry = string_table_.LookupOrInsert(&key, key.Hash());
- if (entry->value == nullptr) {
- // Copy literal contents for later comparison.
- int length = literal_bytes.length();
- byte* new_literal_bytes = zone()->NewArray<byte>(length);
- memcpy(new_literal_bytes, literal_bytes.begin(), length);
- AstRawString* new_string = zone()->New<AstRawString>(
- is_one_byte, Vector<const byte>(new_literal_bytes, length), hash_field);
- CHECK_NOT_NULL(new_string);
- AddString(new_string);
- entry->key = new_string;
- entry->value = reinterpret_cast<void*>(1);
- }
- return reinterpret_cast<AstRawString*>(entry->key);
+ AstRawStringMap::Entry* entry = string_table_.LookupOrInsert(
+ &key, key.Hash(),
+ [&]() {
+ // Copy literal contents for later comparison.
+ int length = literal_bytes.length();
+ byte* new_literal_bytes = zone()->NewArray<byte>(length);
+ memcpy(new_literal_bytes, literal_bytes.begin(), length);
+ AstRawString* new_string = zone()->New<AstRawString>(
+ is_one_byte, Vector<const byte>(new_literal_bytes, length),
+ hash_field);
+ CHECK_NOT_NULL(new_string);
+ AddString(new_string);
+ return new_string;
+ },
+ [&]() { return base::NoHashMapValue(); });
+ return entry->key;
}
} // namespace internal
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index 1752498123..776b45a670 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -48,6 +48,8 @@ class Isolate;
class AstRawString final : public ZoneObject {
public:
+ static bool Compare(const AstRawString* a, const AstRawString* b);
+
bool IsEmpty() const { return literal_bytes_.length() == 0; }
int length() const {
return is_one_byte() ? literal_bytes_.length()
@@ -85,7 +87,6 @@ class AstRawString final : public ZoneObject {
friend Zone;
// Members accessed only by the AstValueFactory & related classes:
- static bool Compare(void* a, void* b);
AstRawString(bool is_one_byte, const Vector<const byte>& literal_bytes,
uint32_t hash_field)
: next_(nullptr),
@@ -205,12 +206,26 @@ class AstBigInt {
const char* bigint_;
};
+struct AstRawStringMapMatcher {
+ bool operator()(uint32_t hash1, uint32_t hash2,
+ const AstRawString* lookup_key,
+ const AstRawString* entry_key) const {
+ return hash1 == hash2 && AstRawString::Compare(lookup_key, entry_key);
+ }
+};
+
+using AstRawStringMap =
+ base::TemplateHashMapImpl<const AstRawString*, base::NoHashMapValue,
+ AstRawStringMapMatcher,
+ base::DefaultAllocationPolicy>;
+
// For generating constants.
#define AST_STRING_CONSTANTS(F) \
F(anonymous, "anonymous") \
F(anonymous_function, "(anonymous function)") \
F(arguments, "arguments") \
F(as, "as") \
+ F(assert, "assert") \
F(async, "async") \
F(await, "await") \
F(bigint, "bigint") \
@@ -269,13 +284,11 @@ class AstStringConstants final {
#undef F
uint64_t hash_seed() const { return hash_seed_; }
- const base::CustomMatcherHashMap* string_table() const {
- return &string_table_;
- }
+ const AstRawStringMap* string_table() const { return &string_table_; }
private:
Zone zone_;
- base::CustomMatcherHashMap string_table_;
+ AstRawStringMap string_table_;
uint64_t hash_seed_;
#define F(name, str) AstRawString* name##_string_;
@@ -353,14 +366,14 @@ class AstValueFactory {
strings_ = nullptr;
strings_end_ = &strings_;
}
- V8_EXPORT_PRIVATE AstRawString* GetOneByteStringInternal(
+ V8_EXPORT_PRIVATE const AstRawString* GetOneByteStringInternal(
Vector<const uint8_t> literal);
- AstRawString* GetTwoByteStringInternal(Vector<const uint16_t> literal);
- AstRawString* GetString(uint32_t hash, bool is_one_byte,
- Vector<const byte> literal_bytes);
+ const AstRawString* GetTwoByteStringInternal(Vector<const uint16_t> literal);
+ const AstRawString* GetString(uint32_t hash, bool is_one_byte,
+ Vector<const byte> literal_bytes);
- // All strings are copied here, one after another (no zeroes inbetween).
- base::CustomMatcherHashMap string_table_;
+ // All strings are copied here.
+ AstRawStringMap string_table_;
AstRawString* strings_;
AstRawString** strings_end_;
@@ -372,7 +385,7 @@ class AstValueFactory {
// Caches one character lowercase strings (for minified code).
static const int kMaxOneCharStringValue = 128;
- AstRawString* one_character_strings_[kMaxOneCharStringValue];
+ const AstRawString* one_character_strings_[kMaxOneCharStringValue];
Zone* zone_;
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index b40cf83c82..e8c7796abc 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -223,12 +223,6 @@ bool FunctionLiteral::AllowsLazyCompilation() {
return scope()->AllowsLazyCompilation();
}
-bool FunctionLiteral::SafeToSkipArgumentsAdaptor() const {
- return language_mode() == LanguageMode::kStrict &&
- scope()->arguments() == nullptr &&
- scope()->rest_parameter() == nullptr;
-}
-
int FunctionLiteral::start_position() const {
return scope()->start_position();
}
@@ -438,7 +432,7 @@ int ObjectLiteral::InitDepthAndFlags() {
// literal with fast elements will be a waste of space.
uint32_t element_index = 0;
if (key->AsArrayIndex(&element_index)) {
- max_element_index = Max(element_index, max_element_index);
+ max_element_index = std::max(element_index, max_element_index);
elements++;
} else {
DCHECK(key->IsPropertyName());
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 4213c60f24..7b70181e6a 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -2160,18 +2160,6 @@ class FunctionLiteral final : public Expression {
return false;
}
- // We can safely skip the arguments adaptor frame setup even
- // in case of arguments mismatches for strict mode functions,
- // as long as there's
- //
- // 1. no use of the arguments object (either explicitly or
- // potentially implicitly via a direct eval() call), and
- // 2. rest parameters aren't being used in the function.
- //
- // See http://bit.ly/v8-faster-calls-with-arguments-mismatch
- // for the details here (https://crbug.com/v8/8895).
- bool SafeToSkipArgumentsAdaptor() const;
-
// Returns either name or inferred name as a cstring.
std::unique_ptr<char[]> GetDebugName() const;
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index 08fbe76102..3c9a5080ad 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -16,43 +16,78 @@ namespace internal {
bool SourceTextModuleDescriptor::AstRawStringComparer::operator()(
const AstRawString* lhs, const AstRawString* rhs) const {
+ return ThreeWayCompare(lhs, rhs) < 0;
+}
+
+int SourceTextModuleDescriptor::AstRawStringComparer::ThreeWayCompare(
+ const AstRawString* lhs, const AstRawString* rhs) {
// Fast path for equal pointers: a pointer is not strictly less than itself.
if (lhs == rhs) return false;
// Order by contents (ordering by hash is unstable across runs).
if (lhs->is_one_byte() != rhs->is_one_byte()) {
- return lhs->is_one_byte();
+ return lhs->is_one_byte() ? -1 : 1;
}
if (lhs->byte_length() != rhs->byte_length()) {
- return lhs->byte_length() < rhs->byte_length();
+ return lhs->byte_length() - rhs->byte_length();
}
- return memcmp(lhs->raw_data(), rhs->raw_data(), lhs->byte_length()) < 0;
+ return memcmp(lhs->raw_data(), rhs->raw_data(), lhs->byte_length());
+}
+
+bool SourceTextModuleDescriptor::ModuleRequestComparer::operator()(
+ const AstModuleRequest* lhs, const AstModuleRequest* rhs) const {
+ if (int specifier_comparison = AstRawStringComparer::ThreeWayCompare(
+ lhs->specifier(), rhs->specifier()))
+ return specifier_comparison < 0;
+
+ if (lhs->import_assertions()->size() != rhs->import_assertions()->size())
+ return (lhs->import_assertions()->size() <
+ rhs->import_assertions()->size());
+
+ auto lhsIt = lhs->import_assertions()->cbegin();
+ auto rhsIt = rhs->import_assertions()->cbegin();
+ for (; lhsIt != lhs->import_assertions()->cend(); ++lhsIt, ++rhsIt) {
+ if (int assertion_key_comparison =
+ AstRawStringComparer::ThreeWayCompare(lhsIt->first, rhsIt->first))
+ return assertion_key_comparison < 0;
+
+ if (int assertion_value_comparison = AstRawStringComparer::ThreeWayCompare(
+ lhsIt->second.first, rhsIt->second.first))
+ return assertion_value_comparison < 0;
+ }
+
+ return false;
}
void SourceTextModuleDescriptor::AddImport(
const AstRawString* import_name, const AstRawString* local_name,
- const AstRawString* module_request, const Scanner::Location loc,
+ const AstRawString* module_request,
+ const ImportAssertions* import_assertions, const Scanner::Location loc,
const Scanner::Location specifier_loc, Zone* zone) {
Entry* entry = zone->New<Entry>(loc);
entry->local_name = local_name;
entry->import_name = import_name;
- entry->module_request = AddModuleRequest(module_request, specifier_loc);
+ entry->module_request =
+ AddModuleRequest(module_request, import_assertions, specifier_loc, zone);
AddRegularImport(entry);
}
void SourceTextModuleDescriptor::AddStarImport(
const AstRawString* local_name, const AstRawString* module_request,
- const Scanner::Location loc, const Scanner::Location specifier_loc,
- Zone* zone) {
+ const ImportAssertions* import_assertions, const Scanner::Location loc,
+ const Scanner::Location specifier_loc, Zone* zone) {
Entry* entry = zone->New<Entry>(loc);
entry->local_name = local_name;
- entry->module_request = AddModuleRequest(module_request, specifier_loc);
+ entry->module_request =
+ AddModuleRequest(module_request, import_assertions, specifier_loc, zone);
AddNamespaceImport(entry, zone);
}
void SourceTextModuleDescriptor::AddEmptyImport(
- const AstRawString* module_request, const Scanner::Location specifier_loc) {
- AddModuleRequest(module_request, specifier_loc);
+ const AstRawString* module_request,
+ const ImportAssertions* import_assertions,
+ const Scanner::Location specifier_loc, Zone* zone) {
+ AddModuleRequest(module_request, import_assertions, specifier_loc, zone);
}
void SourceTextModuleDescriptor::AddExport(const AstRawString* local_name,
@@ -66,22 +101,26 @@ void SourceTextModuleDescriptor::AddExport(const AstRawString* local_name,
void SourceTextModuleDescriptor::AddExport(
const AstRawString* import_name, const AstRawString* export_name,
- const AstRawString* module_request, const Scanner::Location loc,
+ const AstRawString* module_request,
+ const ImportAssertions* import_assertions, const Scanner::Location loc,
const Scanner::Location specifier_loc, Zone* zone) {
DCHECK_NOT_NULL(import_name);
DCHECK_NOT_NULL(export_name);
Entry* entry = zone->New<Entry>(loc);
entry->export_name = export_name;
entry->import_name = import_name;
- entry->module_request = AddModuleRequest(module_request, specifier_loc);
+ entry->module_request =
+ AddModuleRequest(module_request, import_assertions, specifier_loc, zone);
AddSpecialExport(entry, zone);
}
void SourceTextModuleDescriptor::AddStarExport(
- const AstRawString* module_request, const Scanner::Location loc,
+ const AstRawString* module_request,
+ const ImportAssertions* import_assertions, const Scanner::Location loc,
const Scanner::Location specifier_loc, Zone* zone) {
Entry* entry = zone->New<Entry>(loc);
- entry->module_request = AddModuleRequest(module_request, specifier_loc);
+ entry->module_request =
+ AddModuleRequest(module_request, import_assertions, specifier_loc, zone);
AddSpecialExport(entry, zone);
}
@@ -95,6 +134,32 @@ Handle<PrimitiveHeapObject> ToStringOrUndefined(LocalIsolate* isolate,
} // namespace
template <typename LocalIsolate>
+Handle<ModuleRequest> SourceTextModuleDescriptor::AstModuleRequest::Serialize(
+ LocalIsolate* isolate) const {
+ // The import assertions will be stored in this array in the form:
+ // [key1, value1, location1, key2, value2, location2, ...]
+ Handle<FixedArray> import_assertions_array =
+ isolate->factory()->NewFixedArray(
+ static_cast<int>(import_assertions()->size() * 3));
+
+ int i = 0;
+ for (auto iter = import_assertions()->cbegin();
+ iter != import_assertions()->cend(); ++iter, i += 3) {
+ import_assertions_array->set(i, *iter->first->string());
+ import_assertions_array->set(i + 1, *iter->second.first->string());
+ import_assertions_array->set(i + 2,
+ Smi::FromInt(iter->second.second.beg_pos));
+ }
+ return v8::internal::ModuleRequest::New(isolate, specifier()->string(),
+ import_assertions_array);
+}
+template Handle<ModuleRequest>
+SourceTextModuleDescriptor::AstModuleRequest::Serialize(Isolate* isolate) const;
+template Handle<ModuleRequest>
+SourceTextModuleDescriptor::AstModuleRequest::Serialize(
+ LocalIsolate* isolate) const;
+
+template <typename LocalIsolate>
Handle<SourceTextModuleInfoEntry> SourceTextModuleDescriptor::Entry::Serialize(
LocalIsolate* isolate) const {
CHECK(Smi::IsValid(module_request)); // TODO(neis): Check earlier?
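
The comparer refactor above is the usual move from a boolean less-than to a single three-way compare (negative, zero, positive, as with memcmp) so that operator() and the new composite ModuleRequestComparer can chain field comparisons without duplicating logic. A minimal standalone sketch of the pattern, with hypothetical names rather than code from this diff:

    #include <cstring>
    #include <string>

    // Three-way compare: <0, 0, >0. Orders shorter strings first and breaks
    // ties by content, mirroring the shape of the V8 comparer above.
    int ThreeWayCompare(const std::string& a, const std::string& b) {
      if (a.size() != b.size()) return a.size() < b.size() ? -1 : 1;
      return std::memcmp(a.data(), b.data(), a.size());
    }

    // A strict weak ordering for ordered maps falls out of the three-way
    // result; composite comparers chain several ThreeWayCompare calls.
    struct Less {
      bool operator()(const std::string& a, const std::string& b) const {
        return ThreeWayCompare(a, b) < 0;
      }
    };
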
diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h
index b57387b25f..f156d7a411 100644
--- a/deps/v8/src/ast/modules.h
+++ b/deps/v8/src/ast/modules.h
@@ -13,6 +13,7 @@ namespace internal {
class AstRawString;
+class ModuleRequest;
class SourceTextModuleInfo;
class SourceTextModuleInfoEntry;
class PendingCompilationErrorHandler;
@@ -26,6 +27,10 @@ class SourceTextModuleDescriptor : public ZoneObject {
regular_exports_(zone),
regular_imports_(zone) {}
+ using ImportAssertions =
+ ZoneMap<const AstRawString*,
+ std::pair<const AstRawString*, Scanner::Location>>;
+
// The following Add* methods are high-level convenience functions for use by
// the parser.
@@ -35,12 +40,14 @@ class SourceTextModuleDescriptor : public ZoneObject {
void AddImport(const AstRawString* import_name,
const AstRawString* local_name,
const AstRawString* module_request,
+ const ImportAssertions* import_assertions,
const Scanner::Location loc,
const Scanner::Location specifier_loc, Zone* zone);
// import * as x from "foo.js";
void AddStarImport(const AstRawString* local_name,
const AstRawString* module_request,
+ const ImportAssertions* import_assertions,
const Scanner::Location loc,
const Scanner::Location specifier_loc, Zone* zone);
@@ -48,7 +55,8 @@ class SourceTextModuleDescriptor : public ZoneObject {
// import {} from "foo.js";
// export {} from "foo.js"; (sic!)
void AddEmptyImport(const AstRawString* module_request,
- const Scanner::Location specifier_loc);
+ const ImportAssertions* import_assertions,
+ const Scanner::Location specifier_loc, Zone* zone);
// export {x};
// export {x as y};
@@ -64,11 +72,13 @@ class SourceTextModuleDescriptor : public ZoneObject {
void AddExport(const AstRawString* export_name,
const AstRawString* import_name,
const AstRawString* module_request,
+ const ImportAssertions* import_assertions,
const Scanner::Location loc,
const Scanner::Location specifier_loc, Zone* zone);
// export * from "foo.js";
void AddStarExport(const AstRawString* module_request,
+ const ImportAssertions* import_assertions,
const Scanner::Location loc,
const Scanner::Location specifier_loc, Zone* zone);
@@ -114,20 +124,55 @@ class SourceTextModuleDescriptor : public ZoneObject {
enum CellIndexKind { kInvalid, kExport, kImport };
static CellIndexKind GetCellIndexKind(int cell_index);
- struct ModuleRequest {
+ class AstModuleRequest : public ZoneObject {
+ public:
+    // TODO(v8:10958): Consider storing the module request location here
+    // instead of using a separate ModuleRequestLocation struct.
+ AstModuleRequest(const AstRawString* specifier,
+ const ImportAssertions* import_assertions)
+ : specifier_(specifier), import_assertions_(import_assertions) {}
+
+ template <typename LocalIsolate>
+ Handle<v8::internal::ModuleRequest> Serialize(LocalIsolate* isolate) const;
+
+ const AstRawString* specifier() const { return specifier_; }
+ const ImportAssertions* import_assertions() const {
+ return import_assertions_;
+ }
+
+ private:
+ const AstRawString* specifier_;
+ const ImportAssertions* import_assertions_;
+ };
+
+ struct ModuleRequestLocation {
+ // The index at which we will place the request in SourceTextModuleInfo's
+ // module_requests FixedArray.
int index;
+
+ // The JS source code position of the request, used for reporting errors.
int position;
- ModuleRequest(int index, int position) : index(index), position(position) {}
+
+ ModuleRequestLocation(int index, int position)
+ : index(index), position(position) {}
};
// Custom content-based comparer for the below maps, to keep them stable
// across parses.
struct V8_EXPORT_PRIVATE AstRawStringComparer {
bool operator()(const AstRawString* lhs, const AstRawString* rhs) const;
+ static int ThreeWayCompare(const AstRawString* lhs,
+ const AstRawString* rhs);
+ };
+
+ struct V8_EXPORT_PRIVATE ModuleRequestComparer {
+ bool operator()(const AstModuleRequest* lhs,
+ const AstModuleRequest* rhs) const;
};
using ModuleRequestMap =
- ZoneMap<const AstRawString*, ModuleRequest, AstRawStringComparer>;
+ ZoneMap<const AstModuleRequest*, ModuleRequestLocation,
+ ModuleRequestComparer>;
using RegularExportMap =
ZoneMultimap<const AstRawString*, Entry*, AstRawStringComparer>;
using RegularImportMap =
@@ -224,13 +269,15 @@ class SourceTextModuleDescriptor : public ZoneObject {
void AssignCellIndices();
int AddModuleRequest(const AstRawString* specifier,
- Scanner::Location specifier_loc) {
+ const ImportAssertions* import_assertions,
+ Scanner::Location specifier_loc, Zone* zone) {
DCHECK_NOT_NULL(specifier);
int module_requests_count = static_cast<int>(module_requests_.size());
auto it = module_requests_
- .insert(std::make_pair(specifier,
- ModuleRequest(module_requests_count,
- specifier_loc.beg_pos)))
+ .insert(std::make_pair(
+ zone->New<AstModuleRequest>(specifier, import_assertions),
+ ModuleRequestLocation(module_requests_count,
+ specifier_loc.beg_pos)))
.first;
return it->second.index;
}
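
AddModuleRequest above relies on map insert() semantics: inserting a duplicate key is a no-op that returns the existing entry, so repeated requests for the same specifier (and, after this change, the same import assertions) share one index. The same idiom with standard containers, a rough sketch that folds the assertions into a plain string key:

    #include <map>
    #include <string>

    // Hypothetical stand-in for the ZoneMap of AstModuleRequest* above.
    int AddRequest(std::map<std::string, int>& requests,
                   const std::string& specifier) {
      int next_index = static_cast<int>(requests.size());
      // insert() keeps the first entry for a duplicate key, so a repeated
      // specifier gets back its original index instead of a new one.
      return requests.insert({specifier, next_index}).first->second;
    }
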
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index 20dca56cc4..e53d9c9e6e 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -258,6 +258,7 @@ void CallPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
Print("/");
if (node->flags() & RegExp::kGlobal) Print("g");
if (node->flags() & RegExp::kIgnoreCase) Print("i");
+ if (node->flags() & RegExp::kLinear) Print("l");
if (node->flags() & RegExp::kMultiline) Print("m");
if (node->flags() & RegExp::kUnicode) Print("u");
if (node->flags() & RegExp::kSticky) Print("y");
@@ -1163,6 +1164,7 @@ void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
EmbeddedVector<char, 128> buf;
if (node->flags() & RegExp::kGlobal) buf[i++] = 'g';
if (node->flags() & RegExp::kIgnoreCase) buf[i++] = 'i';
+ if (node->flags() & RegExp::kLinear) buf[i++] = 'l';
if (node->flags() & RegExp::kMultiline) buf[i++] = 'm';
if (node->flags() & RegExp::kUnicode) buf[i++] = 'u';
if (node->flags() & RegExp::kSticky) buf[i++] = 'y';
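
Both printer hunks register the new 'l' flag for linear-time regexps. The underlying technique is simply testing each flag bit in one fixed canonical order so the printed flag string stays stable; in isolation, with a hypothetical flag enum:

    #include <string>

    enum Flags { kGlobal = 1 << 0, kIgnoreCase = 1 << 1, kLinear = 1 << 2 };

    std::string FlagsToString(int flags) {
      std::string out;  // flags appear in a fixed order, e.g. "gil"
      if (flags & kGlobal) out += 'g';
      if (flags & kIgnoreCase) out += 'i';
      if (flags & kLinear) out += 'l';
      return out;
    }
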
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index a5f4523670..e731d4c46a 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -705,8 +705,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
void SetDefaults();
- void set_scope_info(Handle<ScopeInfo> scope_info);
-
friend class DeclarationScope;
friend class ClassScope;
friend class ScopeTestHelper;
diff --git a/deps/v8/src/base/DIR_METADATA b/deps/v8/src/base/DIR_METADATA
new file mode 100644
index 0000000000..2f8dbbcf45
--- /dev/null
+++ b/deps/v8/src/base/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/base/OWNERS b/deps/v8/src/base/OWNERS
index 67dcc1cd98..8fcbc9e047 100644
--- a/deps/v8/src/base/OWNERS
+++ b/deps/v8/src/base/OWNERS
@@ -1,5 +1,3 @@
clemensb@chromium.org
ishell@chromium.org
mlippautz@chromium.org
-
-# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/base/bounded-page-allocator.h b/deps/v8/src/base/bounded-page-allocator.h
index d09aecee05..1c8c846711 100644
--- a/deps/v8/src/base/bounded-page-allocator.h
+++ b/deps/v8/src/base/bounded-page-allocator.h
@@ -29,6 +29,8 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
BoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
size_t size, size_t allocate_page_size);
+ BoundedPageAllocator(const BoundedPageAllocator&) = delete;
+ BoundedPageAllocator& operator=(const BoundedPageAllocator&) = delete;
~BoundedPageAllocator() override = default;
// These functions are not inlined to avoid https://crbug.com/v8/8275.
@@ -75,8 +77,6 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
const size_t commit_page_size_;
v8::PageAllocator* const page_allocator_;
v8::base::RegionAllocator region_allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(BoundedPageAllocator);
};
} // namespace base
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index ad287c9290..2bfbe1ba32 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -207,6 +207,10 @@ constexpr int kReturnAddressStackSlotCount =
// PPC has large (64KB) physical pages.
const int kPageSizeBits = 19;
#else
+// Arm64 supports up to 64k OS pages on Linux; however, 4k pages are more
+// common, so we keep the V8 page size at 256k. Nonetheless, we must not
+// decrease it further in the future, since 3 OS pages are reserved for
+// every executable V8 page.
const int kPageSizeBits = 18;
#endif
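
Since kPageSizeBits is a power-of-two exponent, 18 yields V8 pages of 1 << 18 bytes. A quick compile-time check of that arithmetic:

    #include <cstddef>

    constexpr int kPageSizeBits = 18;
    constexpr size_t kPageSize = size_t{1} << kPageSizeBits;
    static_assert(kPageSize == 256 * 1024, "18 bits gives 256 KB pages");
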
diff --git a/deps/v8/src/base/debug/stack_trace_posix.cc b/deps/v8/src/base/debug/stack_trace_posix.cc
index ed602af547..270f1ca4e0 100644
--- a/deps/v8/src/base/debug/stack_trace_posix.cc
+++ b/deps/v8/src/base/debug/stack_trace_posix.cc
@@ -267,27 +267,28 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
class PrintBacktraceOutputHandler : public BacktraceOutputHandler {
public:
PrintBacktraceOutputHandler() = default;
+ PrintBacktraceOutputHandler(const PrintBacktraceOutputHandler&) = delete;
+ PrintBacktraceOutputHandler& operator=(const PrintBacktraceOutputHandler&) =
+ delete;
void HandleOutput(const char* output) override {
// NOTE: This code MUST be async-signal safe (it's used by the in-process
// stack dumping signal handler). NO malloc or stdio is allowed here.
PrintToStderr(output);
}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(PrintBacktraceOutputHandler);
};
class StreamBacktraceOutputHandler : public BacktraceOutputHandler {
public:
explicit StreamBacktraceOutputHandler(std::ostream* os) : os_(os) {}
+ StreamBacktraceOutputHandler(const StreamBacktraceOutputHandler&) = delete;
+ StreamBacktraceOutputHandler& operator=(const StreamBacktraceOutputHandler&) =
+ delete;
void HandleOutput(const char* output) override { (*os_) << output; }
private:
std::ostream* os_;
-
- DISALLOW_COPY_AND_ASSIGN(StreamBacktraceOutputHandler);
};
void WarmUpBacktrace() {
diff --git a/deps/v8/src/base/hashmap-entry.h b/deps/v8/src/base/hashmap-entry.h
index 629e734088..2f984f3c2a 100644
--- a/deps/v8/src/base/hashmap-entry.h
+++ b/deps/v8/src/base/hashmap-entry.h
@@ -6,15 +6,25 @@
#define V8_BASE_HASHMAP_ENTRY_H_
#include <cstdint>
+#include <type_traits>
+
+#include "src/base/memory.h"
namespace v8 {
namespace base {
+// Marker type for hashmaps without a value (i.e. hashsets). These won't
+// allocate space for the value in the entry.
+struct NoHashMapValue {};
+
// HashMap entries are (key, value, hash) triplets, with a boolean indicating if
// they are an empty entry. Some clients may not need to use the value slot
-// (e.g. implementers of sets, where the key is the value).
+// (e.g. implementers of sets, where the key is the value), in which case they
+// should use NoHashMapValue.
template <typename Key, typename Value>
struct TemplateHashMapEntry {
+ STATIC_ASSERT((!std::is_same<Value, NoHashMapValue>::value));
+
Key key;
Value value;
uint32_t hash; // The full hash value for key
@@ -33,6 +43,8 @@ struct TemplateHashMapEntry {
// Specialization for pointer-valued keys
template <typename Key, typename Value>
struct TemplateHashMapEntry<Key*, Value> {
+ STATIC_ASSERT((!std::is_same<Value, NoHashMapValue>::value));
+
Key* key;
Value value;
uint32_t hash; // The full hash value for key
@@ -45,8 +57,42 @@ struct TemplateHashMapEntry<Key*, Value> {
void clear() { key = nullptr; }
};
-// TODO(leszeks): There could be a specialisation for void values (e.g. for
-// sets), which omits the value field
+// Specialization for no value.
+template <typename Key>
+struct TemplateHashMapEntry<Key, NoHashMapValue> {
+ union {
+ Key key;
+ NoHashMapValue value; // Value in union with key to not take up space.
+ };
+ uint32_t hash; // The full hash value for key
+
+ TemplateHashMapEntry(Key key, NoHashMapValue value, uint32_t hash)
+ : key(key), hash(hash), exists_(true) {}
+
+ bool exists() const { return exists_; }
+
+ void clear() { exists_ = false; }
+
+ private:
+ bool exists_;
+};
+
+// Specialization for pointer-valued keys and no value.
+template <typename Key>
+struct TemplateHashMapEntry<Key*, NoHashMapValue> {
+ union {
+ Key* key;
+ NoHashMapValue value; // Value in union with key to not take up space.
+ };
+ uint32_t hash; // The full hash value for key
+
+ TemplateHashMapEntry(Key* key, NoHashMapValue value, uint32_t hash)
+ : key(key), hash(hash) {}
+
+ bool exists() const { return key != nullptr; }
+
+ void clear() { key = nullptr; }
+};
} // namespace base
} // namespace v8
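
The new specializations place the key and the empty NoHashMapValue in a union, so a hash-set entry pays no storage for the unused value slot. The shape reduced to its essentials, a sketch assuming a trivially copyable key:

    #include <cstdint>

    struct NoValue {};

    template <typename Key>
    struct SetEntry {
      union {
        Key key;
        NoValue value;  // shares storage with key, so it costs nothing
      };
      uint32_t hash;
      bool exists;
    };

    // Entry size is just key + hash + existence flag (plus padding).
    static_assert(sizeof(SetEntry<uint32_t>) <= 3 * sizeof(uint32_t), "");
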
diff --git a/deps/v8/src/base/hashmap.h b/deps/v8/src/base/hashmap.h
index 2b40b329b8..c0a7f21bf5 100644
--- a/deps/v8/src/base/hashmap.h
+++ b/deps/v8/src/base/hashmap.h
@@ -46,6 +46,9 @@ class TemplateHashMapImpl {
MatchFun match = MatchFun(),
AllocationPolicy allocator = AllocationPolicy());
+ TemplateHashMapImpl(const TemplateHashMapImpl&) = delete;
+ TemplateHashMapImpl& operator=(const TemplateHashMapImpl&) = delete;
+
// Clones the given hashmap and creates a copy with the same entries.
explicit TemplateHashMapImpl(const TemplateHashMapImpl* original,
AllocationPolicy allocator = AllocationPolicy());
@@ -72,6 +75,20 @@ class TemplateHashMapImpl {
template <typename Func>
Entry* LookupOrInsert(const Key& key, uint32_t hash, const Func& value_func);
+ // Heterogeneous version of LookupOrInsert, which allows a
+ // different lookup key type than the hashmap's key type.
+ // The requirement is that MatchFun has an overload:
+ //
+ // operator()(const LookupKey& lookup_key, const Key& entry_key)
+ //
+  // If an entry with a matching key is found, returns that entry.
+  // If no matching entry is found, a new entry is inserted with a key
+  // created by key_func, the given hash, and a value created by
+  // value_func.
+ template <typename LookupKey, typename KeyFunc, typename ValueFunc>
+ Entry* LookupOrInsert(const LookupKey& lookup_key, uint32_t hash,
+ const KeyFunc& key_func, const ValueFunc& value_func);
+
Entry* InsertNew(const Key& key, uint32_t hash);
// Removes the entry with matching key.
@@ -115,7 +132,8 @@ class TemplateHashMapImpl {
private:
Entry* map_end() const { return impl_.map_ + impl_.capacity_; }
- Entry* Probe(const Key& key, uint32_t hash) const;
+ template <typename LookupKey>
+ Entry* Probe(const LookupKey& key, uint32_t hash) const;
Entry* FillEmptyEntry(Entry* entry, const Key& key, const Value& value,
uint32_t hash);
void Resize();
@@ -160,8 +178,6 @@ class TemplateHashMapImpl {
uint32_t capacity_ = 0;
uint32_t occupancy_ = 0;
} impl_;
-
- DISALLOW_COPY_AND_ASSIGN(TemplateHashMapImpl);
};
template <typename Key, typename Value, typename MatchFun,
class AllocationPolicy>
@@ -214,13 +230,24 @@ template <typename Func>
typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::LookupOrInsert(
const Key& key, uint32_t hash, const Func& value_func) {
+ return LookupOrInsert(
+ key, hash, [&key]() { return key; }, value_func);
+}
+
+template <typename Key, typename Value, typename MatchFun,
+ class AllocationPolicy>
+template <typename LookupKey, typename KeyFunc, typename ValueFunc>
+typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::LookupOrInsert(
+ const LookupKey& lookup_key, uint32_t hash, const KeyFunc& key_func,
+ const ValueFunc& value_func) {
// Find a matching entry.
- Entry* entry = Probe(key, hash);
+ Entry* entry = Probe(lookup_key, hash);
if (entry->exists()) {
return entry;
}
- return FillEmptyEntry(entry, key, value_func(), hash);
+ return FillEmptyEntry(entry, key_func(), value_func(), hash);
}
template <typename Key, typename Value, typename MatchFun,
@@ -328,9 +355,10 @@ TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Next(
template <typename Key, typename Value, typename MatchFun,
class AllocationPolicy>
+template <typename LookupKey>
typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Probe(
- const Key& key, uint32_t hash) const {
+ const LookupKey& key, uint32_t hash) const {
DCHECK(base::bits::IsPowerOfTwo(capacity()));
size_t i = hash & (capacity() - 1);
DCHECK(i < capacity());
@@ -442,8 +470,10 @@ class CustomMatcherTemplateHashMapImpl
AllocationPolicy allocator = AllocationPolicy())
: Base(original, allocator) {}
- private:
- DISALLOW_COPY_AND_ASSIGN(CustomMatcherTemplateHashMapImpl);
+ CustomMatcherTemplateHashMapImpl(const CustomMatcherTemplateHashMapImpl&) =
+ delete;
+ CustomMatcherTemplateHashMapImpl& operator=(
+ const CustomMatcherTemplateHashMapImpl&) = delete;
};
using CustomMatcherHashMap =
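
The heterogeneous LookupOrInsert overload probes with a cheap lookup key and only materializes the real key (via key_func) when it actually inserts. Standard C++17 containers express the same idea through transparent comparators; a sketch for comparison, not V8 code:

    #include <map>
    #include <string>
    #include <string_view>

    // std::less<> enables heterogeneous lookup: probe with a string_view,
    // construct the owned std::string key only when inserting.
    using Map = std::map<std::string, int, std::less<>>;

    int& LookupOrInsert(Map& map, std::string_view lookup_key) {
      auto it = map.find(lookup_key);  // no std::string temporary here
      if (it != map.end()) return it->second;
      // The analog of key_func/value_func: build the key and initial value.
      return map.emplace(std::string(lookup_key), 0).first->second;
    }
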
diff --git a/deps/v8/src/base/lazy-instance.h b/deps/v8/src/base/lazy-instance.h
index 3ea5fc9575..75e5b06006 100644
--- a/deps/v8/src/base/lazy-instance.h
+++ b/deps/v8/src/base/lazy-instance.h
@@ -235,12 +235,13 @@ class LeakyObject {
new (&storage_) T(std::forward<Args>(args)...);
}
+ LeakyObject(const LeakyObject&) = delete;
+ LeakyObject& operator=(const LeakyObject&) = delete;
+
T* get() { return reinterpret_cast<T*>(&storage_); }
private:
typename std::aligned_storage<sizeof(T), alignof(T)>::type storage_;
-
- DISALLOW_COPY_AND_ASSIGN(LeakyObject);
};
// Define a function which returns a pointer to a lazily initialized and never
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 37cab78f08..8b39da2451 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -109,11 +109,15 @@ V8_INLINE Dest bit_cast(Source const& source) {
}
// Explicitly declare the assignment operator as deleted.
+// Note: This macro is deprecated and will be removed soon. Please explicitly
+// delete the assignment operator instead.
#define DISALLOW_ASSIGN(TypeName) TypeName& operator=(const TypeName&) = delete
// Explicitly declare the copy constructor and assignment operator as deleted.
// This also deletes the implicit move constructor and implicit move assignment
// operator, but still allows to manually define them.
+// Note: This macro is deprecated and will be removed soon. Please explicitly
+// delete the copy constructor and assignment operator instead.
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&) = delete; \
DISALLOW_ASSIGN(TypeName)
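
Most of the remaining hunks in this diff perform the mechanical migration these deprecation notes call for. The replacement keeps the deleted members visible next to the other constructors instead of hiding them behind a macro at the bottom of the class; the before/after shape on a hypothetical class:

    class Widget {
     public:
      Widget() = default;
      // Was: DISALLOW_COPY_AND_ASSIGN(Widget) in a private: section.
      Widget(const Widget&) = delete;
      Widget& operator=(const Widget&) = delete;
    };
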
diff --git a/deps/v8/src/base/platform/DIR_METADATA b/deps/v8/src/base/platform/DIR_METADATA
new file mode 100644
index 0000000000..2f8dbbcf45
--- /dev/null
+++ b/deps/v8/src/base/platform/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/base/platform/OWNERS b/deps/v8/src/base/platform/OWNERS
index bf5455c9af..782eb7c684 100644
--- a/deps/v8/src/base/platform/OWNERS
+++ b/deps/v8/src/base/platform/OWNERS
@@ -3,5 +3,3 @@ mlippautz@chromium.org
ulan@chromium.org
per-file platform-fuchsia.cc=wez@chromium.org
-
-# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/base/platform/condition-variable.h b/deps/v8/src/base/platform/condition-variable.h
index 8b5c7cf569..79e653a32a 100644
--- a/deps/v8/src/base/platform/condition-variable.h
+++ b/deps/v8/src/base/platform/condition-variable.h
@@ -36,6 +36,8 @@ class TimeDelta;
class V8_BASE_EXPORT ConditionVariable final {
public:
ConditionVariable();
+ ConditionVariable(const ConditionVariable&) = delete;
+ ConditionVariable& operator=(const ConditionVariable&) = delete;
~ConditionVariable();
// If any threads are waiting on this condition variable, calling
@@ -81,8 +83,6 @@ class V8_BASE_EXPORT ConditionVariable final {
private:
NativeHandle native_handle_;
-
- DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
// POD ConditionVariable initialized lazily (i.e. the first time Pointer() is
diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h
index 7a19b2f4aa..1b950c61ad 100644
--- a/deps/v8/src/base/platform/mutex.h
+++ b/deps/v8/src/base/platform/mutex.h
@@ -43,6 +43,8 @@ namespace base {
class V8_BASE_EXPORT Mutex final {
public:
Mutex();
+ Mutex(const Mutex&) = delete;
+ Mutex& operator=(const Mutex&) = delete;
~Mutex();
// Locks the given mutex. If the mutex is currently unlocked, it becomes
@@ -99,8 +101,6 @@ class V8_BASE_EXPORT Mutex final {
}
friend class ConditionVariable;
-
- DISALLOW_COPY_AND_ASSIGN(Mutex);
};
// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
@@ -140,6 +140,8 @@ using LazyMutex = LazyStaticInstance<Mutex, DefaultConstructTrait<Mutex>,
class V8_BASE_EXPORT RecursiveMutex final {
public:
RecursiveMutex();
+ RecursiveMutex(const RecursiveMutex&) = delete;
+ RecursiveMutex& operator=(const RecursiveMutex&) = delete;
~RecursiveMutex();
// Locks the mutex. If another thread has already locked the mutex, a call to
@@ -175,8 +177,6 @@ class V8_BASE_EXPORT RecursiveMutex final {
#ifdef DEBUG
int level_;
#endif
-
- DISALLOW_COPY_AND_ASSIGN(RecursiveMutex);
};
@@ -213,6 +213,8 @@ using LazyRecursiveMutex =
class V8_BASE_EXPORT SharedMutex final {
public:
SharedMutex();
+ SharedMutex(const SharedMutex&) = delete;
+ SharedMutex& operator=(const SharedMutex&) = delete;
~SharedMutex();
// Acquires shared ownership of the {SharedMutex}. If another thread is
@@ -262,8 +264,6 @@ class V8_BASE_EXPORT SharedMutex final {
#endif
NativeHandle native_handle_;
-
- DISALLOW_COPY_AND_ASSIGN(SharedMutex);
};
// -----------------------------------------------------------------------------
@@ -286,6 +286,8 @@ class LockGuard final {
explicit LockGuard(Mutex* mutex) : mutex_(mutex) {
if (has_mutex()) mutex_->Lock();
}
+ LockGuard(const LockGuard&) = delete;
+ LockGuard& operator=(const LockGuard&) = delete;
~LockGuard() {
if (has_mutex()) mutex_->Unlock();
}
@@ -298,8 +300,6 @@ class LockGuard final {
mutex_ != nullptr);
return Behavior == NullBehavior::kRequireNotNull || mutex_ != nullptr;
}
-
- DISALLOW_COPY_AND_ASSIGN(LockGuard);
};
using MutexGuard = LockGuard<Mutex>;
@@ -319,6 +319,8 @@ class SharedMutexGuard final {
mutex_->LockExclusive();
}
}
+ SharedMutexGuard(const SharedMutexGuard&) = delete;
+ SharedMutexGuard& operator=(const SharedMutexGuard&) = delete;
~SharedMutexGuard() {
if (!has_mutex()) return;
if (kIsShared) {
@@ -336,8 +338,6 @@ class SharedMutexGuard final {
mutex_ != nullptr);
return Behavior == NullBehavior::kRequireNotNull || mutex_ != nullptr;
}
-
- DISALLOW_COPY_AND_ASSIGN(SharedMutexGuard);
};
} // namespace base
diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc
index e1ccda2ab0..6b6a870370 100644
--- a/deps/v8/src/base/platform/platform-aix.cc
+++ b/deps/v8/src/base/platform/platform-aix.cc
@@ -130,7 +130,7 @@ void OS::SignalCodeMovingGC() {}
void OS::AdjustSchedulingParams() {}
// static
-void* Stack::GetStackStart() {
+Stack::StackSlot Stack::GetStackStart() {
// pthread_getthrds_np creates 3 values:
// __pi_stackaddr, __pi_stacksize, __pi_stackend
diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc
index ed16ad096d..edc793c662 100644
--- a/deps/v8/src/base/platform/platform-freebsd.cc
+++ b/deps/v8/src/base/platform/platform-freebsd.cc
@@ -98,7 +98,7 @@ void OS::SignalCodeMovingGC() {}
void OS::AdjustSchedulingParams() {}
// static
-void* Stack::GetStackStart() {
+Stack::StackSlot Stack::GetStackStart() {
pthread_attr_t attr;
int error;
pthread_attr_init(&attr);
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index 35a508a140..381b59a904 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -4,6 +4,7 @@
#include <zircon/process.h>
#include <zircon/syscalls.h>
+#include <zircon/threads.h>
#include "src/base/macros.h"
#include "src/base/platform/platform-posix-time.h"
@@ -151,17 +152,18 @@ void OS::SignalCodeMovingGC() {
int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
const auto kNanosPerMicrosecond = 1000ULL;
const auto kMicrosPerSecond = 1000000ULL;
- zx_time_t nanos_since_thread_started;
- zx_status_t status =
- zx_clock_get(ZX_CLOCK_THREAD, &nanos_since_thread_started);
+
+ zx_info_thread_stats_t info = {};
+ zx_status_t status = zx_object_get_info(thrd_get_zx_handle(thrd_current()),
+ ZX_INFO_THREAD_STATS, &info,
+ sizeof(info), nullptr, nullptr);
if (status != ZX_OK) {
return -1;
}
// First convert to microseconds, rounding up.
const uint64_t micros_since_thread_started =
- (nanos_since_thread_started + kNanosPerMicrosecond - 1ULL) /
- kNanosPerMicrosecond;
+ (info.total_runtime + kNanosPerMicrosecond - 1ULL) / kNanosPerMicrosecond;
*secs = static_cast<uint32_t>(micros_since_thread_started / kMicrosPerSecond);
*usecs =
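
The Fuchsia hunk swaps the deprecated zx_clock_get(ZX_CLOCK_THREAD) for the thread's total_runtime from ZX_INFO_THREAD_STATS; the nanosecond-to-microsecond conversion keeps the usual round-up division (n + d - 1) / d. That step in isolation:

    #include <cstdint>

    // Ceiling division without floating point: ceil(nanos / 1000).
    constexpr uint64_t NanosToMicrosRoundingUp(uint64_t nanos) {
      constexpr uint64_t kNanosPerMicro = 1000;
      return (nanos + kNanosPerMicro - 1) / kNanosPerMicro;
    }
    static_assert(NanosToMicrosRoundingUp(1000) == 1, "");
    static_assert(NanosToMicrosRoundingUp(1001) == 2, "");
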
diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc
index bee6b30f7c..3f1638ec0d 100644
--- a/deps/v8/src/base/platform/platform-macos.cc
+++ b/deps/v8/src/base/platform/platform-macos.cc
@@ -94,7 +94,7 @@ void OS::AdjustSchedulingParams() {
}
// static
-void* Stack::GetStackStart() {
+Stack::StackSlot Stack::GetStackStart() {
return pthread_get_stackaddr_np(pthread_self());
}
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index d5624cb8ac..ab0d7839a4 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -415,16 +415,6 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
int prot = GetProtectionFromMemoryPermission(access);
int ret = mprotect(address, size, prot);
-
- // MacOS 11.2 on Apple Silicon refuses to switch permissions from
- // rwx to none. Just use madvise instead.
-#if defined(V8_OS_MACOSX)
- if (ret != 0 && access == OS::MemoryPermission::kNoAccess) {
- ret = madvise(address, size, MADV_FREE_REUSABLE);
- return ret == 0;
- }
-#endif
-
if (ret == 0 && access == OS::MemoryPermission::kNoAccess) {
// This is advisory; ignore errors and continue execution.
USE(DiscardSystemPages(address, size));
@@ -1013,7 +1003,7 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
!defined(V8_OS_SOLARIS)
// static
-void* Stack::GetStackStart() {
+Stack::StackSlot Stack::GetStackStart() {
pthread_attr_t attr;
int error = pthread_getattr_np(pthread_self(), &attr);
if (!error) {
@@ -1039,7 +1029,9 @@ void* Stack::GetStackStart() {
// !defined(_AIX) && !defined(V8_OS_SOLARIS)
// static
-void* Stack::GetCurrentStackPosition() { return __builtin_frame_address(0); }
+Stack::StackSlot Stack::GetCurrentStackPosition() {
+ return __builtin_frame_address(0);
+}
#undef LOG_TAG
#undef MAP_ANONYMOUS
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index e7b1e51936..cee24e9876 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -1395,7 +1395,7 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
void OS::AdjustSchedulingParams() {}
// static
-void* Stack::GetStackStart() {
+Stack::StackSlot Stack::GetStackStart() {
#if defined(V8_TARGET_ARCH_X64)
return reinterpret_cast<void*>(
reinterpret_cast<NT_TIB64*>(NtCurrentTeb())->StackBase);
@@ -1414,7 +1414,7 @@ void* Stack::GetStackStart() {
}
// static
-void* Stack::GetCurrentStackPosition() {
+Stack::StackSlot Stack::GetCurrentStackPosition() {
#if V8_CC_MSVC
return _AddressOfReturnAddress();
#else
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 9c52f21804..042e4428cd 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -22,6 +22,7 @@
#define V8_BASE_PLATFORM_PLATFORM_H_
#include <cstdarg>
+#include <cstdint>
#include <string>
#include <vector>
@@ -353,6 +354,8 @@ class V8_BASE_EXPORT Thread {
// Create new thread.
explicit Thread(const Options& options);
+ Thread(const Thread&) = delete;
+ Thread& operator=(const Thread&) = delete;
virtual ~Thread();
// Start new thread by calling the Run() method on the new thread.
@@ -426,37 +429,48 @@ class V8_BASE_EXPORT Thread {
char name_[kMaxThreadNameLength];
int stack_size_;
Semaphore* start_semaphore_;
-
- DISALLOW_COPY_AND_ASSIGN(Thread);
};
// TODO(v8:10354): Make use of the stack utilities here in V8.
class V8_BASE_EXPORT Stack {
public:
+ // Convenience wrapper to use stack slots as unsigned values or void*
+ // pointers.
+ struct StackSlot {
+ // NOLINTNEXTLINE
+ StackSlot(void* value) : value(reinterpret_cast<uintptr_t>(value)) {}
+ StackSlot(uintptr_t value) : value(value) {} // NOLINT
+
+ // NOLINTNEXTLINE
+ operator void*() const { return reinterpret_cast<void*>(value); }
+ operator uintptr_t() const { return value; } // NOLINT
+
+ uintptr_t value;
+ };
+
// Gets the start of the stack of the current thread.
- static void* GetStackStart();
+ static StackSlot GetStackStart();
// Returns the current stack top. Works correctly with ASAN and SafeStack.
// GetCurrentStackPosition() should not be inlined, because it works on stack
// frames; if it were inlined into a function with a huge stack frame, it
// would return an address significantly above the current stack position.
- static V8_NOINLINE void* GetCurrentStackPosition();
+ static V8_NOINLINE StackSlot GetCurrentStackPosition();
- // Translates an ASAN-based slot to a real stack slot if necessary.
- static void* GetStackSlot(void* slot) {
+ // Returns the real stack frame if slot is part of a fake frame, and slot
+ // otherwise.
+ static StackSlot GetRealStackAddressForSlot(StackSlot slot) {
#ifdef V8_USE_ADDRESS_SANITIZER
- void* fake_stack = __asan_get_current_fake_stack();
- if (fake_stack) {
- void* fake_frame_start;
- void* real_frame = __asan_addr_is_in_fake_stack(
- fake_stack, slot, &fake_frame_start, nullptr);
- if (real_frame) {
- return reinterpret_cast<void*>(
- reinterpret_cast<uintptr_t>(real_frame) +
- (reinterpret_cast<uintptr_t>(slot) -
- reinterpret_cast<uintptr_t>(fake_frame_start)));
- }
- }
+ // ASAN fetches the real stack deeper in the __asan_addr_is_in_fake_stack()
+ // call (precisely, deeper in __asan_stack_malloc_()), which results in a
+  // real frame that could be outside of stack bounds. Adjust for this
+  // imprecision here.
+ constexpr size_t kAsanRealFrameOffsetBytes = 32;
+ void* real_frame = __asan_addr_is_in_fake_stack(
+ __asan_get_current_fake_stack(), slot, nullptr, nullptr);
+ return real_frame
+ ? (static_cast<char*>(real_frame) + kAsanRealFrameOffsetBytes)
+ : slot;
#endif // V8_USE_ADDRESS_SANITIZER
return slot;
}
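
StackSlot is a two-way implicit adapter: platform implementations keep returning pointers while callers doing bounds arithmetic read the same value as uintptr_t, with no casts at call sites. A sketch of both views against the declarations above:

    #include <cstdint>

    #include "src/base/platform/platform.h"

    void Example() {
      void* start_ptr = v8::base::Stack::GetStackStart();  // pointer view
      uintptr_t start = v8::base::Stack::GetStackStart();  // integer view
      uintptr_t position = v8::base::Stack::GetCurrentStackPosition();
      // Stacks grow downwards, so the current position sits below the start.
      bool plausible = position <= start;
      (void)start_ptr;
      (void)plausible;
    }
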
diff --git a/deps/v8/src/base/platform/semaphore.h b/deps/v8/src/base/platform/semaphore.h
index 0c0b877da2..83a7a3392f 100644
--- a/deps/v8/src/base/platform/semaphore.h
+++ b/deps/v8/src/base/platform/semaphore.h
@@ -39,6 +39,8 @@ class TimeDelta;
class V8_BASE_EXPORT Semaphore final {
public:
explicit Semaphore(int count);
+ Semaphore(const Semaphore&) = delete;
+ Semaphore& operator=(const Semaphore&) = delete;
~Semaphore();
// Increments the semaphore counter.
@@ -72,8 +74,6 @@ class V8_BASE_EXPORT Semaphore final {
private:
NativeHandle native_handle_;
-
- DISALLOW_COPY_AND_ASSIGN(Semaphore);
};
diff --git a/deps/v8/src/base/platform/wrappers.h b/deps/v8/src/base/platform/wrappers.h
new file mode 100644
index 0000000000..521b06ebe1
--- /dev/null
+++ b/deps/v8/src/base/platform/wrappers.h
@@ -0,0 +1,31 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_WRAPPERS_H_
+#define V8_BASE_PLATFORM_WRAPPERS_H_
+
+#include <stddef.h>
+#include <stdio.h>
+
+namespace v8 {
+namespace base {
+
+void* Malloc(size_t size);
+
+void* Realloc(void* memory, size_t size);
+
+void Free(void* memory);
+
+void* Calloc(size_t count, size_t size);
+
+void* Memcpy(void* dest, const void* source, size_t count);
+
+FILE* Fopen(const char* filename, const char* mode);
+
+int Fclose(FILE* stream);
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_PLATFORM_WRAPPERS_H_
diff --git a/deps/v8/src/base/platform/wrappers_starboard.cc b/deps/v8/src/base/platform/wrappers_starboard.cc
new file mode 100644
index 0000000000..199e753409
--- /dev/null
+++ b/deps/v8/src/base/platform/wrappers_starboard.cc
@@ -0,0 +1,31 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "starboard/memory.h"
+
+#include "src/base/platform/wrappers.h"
+
+namespace v8 {
+namespace base {
+
+void* Malloc(size_t size) { return SbMemoryAlloc(size); }
+
+void* Realloc(void* memory, size_t size) {
+ return SbMemoryReallocate(memory, size);
+}
+
+void Free(void* memory) { return SbMemoryDeallocate(memory); }
+
+void* Calloc(size_t count, size_t size) { return SbMemoryCalloc(count, size); }
+
+void* Memcpy(void* dest, const void* source, size_t count) {
+ return SbMemoryCopy(dest, source, count);
+}
+
+FILE* Fopen(const char* filename, const char* mode) { return NULL; }
+
+int Fclose(FILE* stream) { return -1; }
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/platform/wrappers_std.cc b/deps/v8/src/base/platform/wrappers_std.cc
new file mode 100644
index 0000000000..6b38b18e37
--- /dev/null
+++ b/deps/v8/src/base/platform/wrappers_std.cc
@@ -0,0 +1,34 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "src/base/platform/wrappers.h"
+
+namespace v8 {
+namespace base {
+
+void* Malloc(size_t size) { return malloc(size); }
+
+void* Realloc(void* memory, size_t size) { return realloc(memory, size); }
+
+void Free(void* memory) { return free(memory); }
+
+void* Calloc(size_t count, size_t size) { return calloc(count, size); }
+
+void* Memcpy(void* dest, const void* source, size_t count) {
+ return memcpy(dest, source, count);
+}
+
+FILE* Fopen(const char* filename, const char* mode) {
+ return fopen(filename, mode);
+}
+
+int Fclose(FILE* stream) { return fclose(stream); }
+
+} // namespace base
+} // namespace v8
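
The new wrappers give allocation and stdio one seam, so the Starboard (Cobalt) build can route through SbMemory* and stub out file I/O while every other platform forwards to the C library. A hedged usage sketch against the declarations in wrappers.h, with no allocation-failure handling:

    #include "src/base/platform/wrappers.h"

    void Example() {
      void* block = v8::base::Malloc(64);
      block = v8::base::Realloc(block, 128);
      void* zeroed = v8::base::Calloc(4, 32);  // 4 * 32 zeroed bytes
      v8::base::Memcpy(block, zeroed, 32);     // same shape as memcpy()
      v8::base::Free(zeroed);
      v8::base::Free(block);
    }
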
diff --git a/deps/v8/src/base/region-allocator.h b/deps/v8/src/base/region-allocator.h
index 887f123b10..adc4bd10b6 100644
--- a/deps/v8/src/base/region-allocator.h
+++ b/deps/v8/src/base/region-allocator.h
@@ -39,6 +39,8 @@ class V8_BASE_EXPORT RegionAllocator final {
};
RegionAllocator(Address address, size_t size, size_t page_size);
+ RegionAllocator(const RegionAllocator&) = delete;
+ RegionAllocator& operator=(const RegionAllocator&) = delete;
~RegionAllocator();
// Allocates region of |size| (must be |page_size|-aligned). Returns
@@ -176,8 +178,6 @@ class V8_BASE_EXPORT RegionAllocator final {
FRIEND_TEST(RegionAllocatorTest, Contains);
FRIEND_TEST(RegionAllocatorTest, FindRegion);
FRIEND_TEST(RegionAllocatorTest, Fragmentation);
-
- DISALLOW_COPY_AND_ASSIGN(RegionAllocator);
};
} // namespace base
diff --git a/deps/v8/src/base/ring-buffer.h b/deps/v8/src/base/ring-buffer.h
index b347977640..8357987083 100644
--- a/deps/v8/src/base/ring-buffer.h
+++ b/deps/v8/src/base/ring-buffer.h
@@ -14,7 +14,11 @@ template <typename T>
class RingBuffer {
public:
RingBuffer() { Reset(); }
+ RingBuffer(const RingBuffer&) = delete;
+ RingBuffer& operator=(const RingBuffer&) = delete;
+
static const int kSize = 10;
+
void Push(const T& value) {
if (count_ == kSize) {
elements_[start_++] = value;
@@ -45,7 +49,6 @@ class RingBuffer {
T elements_[kSize];
int start_;
int count_;
- DISALLOW_COPY_AND_ASSIGN(RingBuffer);
};
} // namespace base
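
RingBuffer retains only the most recent kSize (10) values, overwriting the oldest once full. A short usage sketch against the class as shown:

    #include "src/base/ring-buffer.h"

    void Example() {
      v8::base::RingBuffer<int> buffer;  // kSize == 10
      for (int i = 0; i < 15; ++i) buffer.Push(i);
      // Only the ten most recent values, 5..14, remain; Push overwrote the
      // oldest entries in place once the buffer filled up.
    }
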
diff --git a/deps/v8/src/base/safe_conversions.h b/deps/v8/src/base/safe_conversions.h
index f63f1ad99e..38aa7b9aaa 100644
--- a/deps/v8/src/base/safe_conversions.h
+++ b/deps/v8/src/base/safe_conversions.h
@@ -4,59 +4,383 @@
// Slightly adapted for inclusion in V8.
// Copyright 2014 the V8 project authors. All rights reserved.
+// List of adaptations:
+// - include guard names
+// - wrap in v8 namespace
+// - formatting (git cl format)
+// - include paths
#ifndef V8_BASE_SAFE_CONVERSIONS_H_
#define V8_BASE_SAFE_CONVERSIONS_H_
+#include <stddef.h>
+
+#include <cmath>
#include <limits>
+#include <type_traits>
#include "src/base/safe_conversions_impl.h"
+#if defined(__ARMEL__) && !defined(__native_client__)
+#include "src/base/safe_conversions_arm_impl.h"
+#define BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (1)
+#else
+#define BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (0)
+#endif
+
+#if !BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
+#include <ostream>
+#endif
+
namespace v8 {
namespace base {
+namespace internal {
+
+#if !BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+template <typename Dst, typename Src>
+struct SaturateFastAsmOp {
+ static constexpr bool is_supported = false;
+ static constexpr Dst Do(Src) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<Dst>();
+ }
+};
+#endif // BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+#undef BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
+
+// The following special case a few specific integer conversions where we can
+// eke out better performance than range checking.
+template <typename Dst, typename Src, typename Enable = void>
+struct IsValueInRangeFastOp {
+ static constexpr bool is_supported = false;
+ static constexpr bool Do(Src value) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<bool>();
+ }
+};
+
+// Signed to signed range comparison.
+template <typename Dst, typename Src>
+struct IsValueInRangeFastOp<
+ Dst, Src,
+ typename std::enable_if<
+ std::is_integral<Dst>::value && std::is_integral<Src>::value &&
+ std::is_signed<Dst>::value && std::is_signed<Src>::value &&
+ !IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
+ static constexpr bool is_supported = true;
+
+ static constexpr bool Do(Src value) {
+ // Just downcast to the smaller type, sign extend it back to the original
+ // type, and then see if it matches the original value.
+ return value == static_cast<Dst>(value);
+ }
+};
+
+// Signed to unsigned range comparison.
+template <typename Dst, typename Src>
+struct IsValueInRangeFastOp<
+ Dst, Src,
+ typename std::enable_if<
+ std::is_integral<Dst>::value && std::is_integral<Src>::value &&
+ !std::is_signed<Dst>::value && std::is_signed<Src>::value &&
+ !IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
+ static constexpr bool is_supported = true;
+
+ static constexpr bool Do(Src value) {
+ // We cast a signed as unsigned to overflow negative values to the top,
+ // then compare against whichever maximum is smaller, as our upper bound.
+ return as_unsigned(value) <= as_unsigned(CommonMax<Src, Dst>());
+ }
+};
// Convenience function that returns true if the supplied value is in range
// for the destination type.
template <typename Dst, typename Src>
-inline bool IsValueInRangeForNumericType(Src value) {
- return internal::DstRangeRelationToSrcRange<Dst>(value) ==
- internal::RANGE_VALID;
+constexpr bool IsValueInRangeForNumericType(Src value) {
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return internal::IsValueInRangeFastOp<Dst, SrcType>::is_supported
+ ? internal::IsValueInRangeFastOp<Dst, SrcType>::Do(
+ static_cast<SrcType>(value))
+ : internal::DstRangeRelationToSrcRange<Dst>(
+ static_cast<SrcType>(value))
+ .IsValid();
}
// checked_cast<> is analogous to static_cast<> for numeric types,
// except that it CHECKs that the specified numeric conversion will not
// overflow or underflow. NaN source will always trigger a CHECK.
-template <typename Dst, typename Src>
-inline Dst checked_cast(Src value) {
- CHECK(IsValueInRangeForNumericType<Dst>(value));
- return static_cast<Dst>(value);
+template <typename Dst, class CheckHandler = internal::CheckOnFailure,
+ typename Src>
+constexpr Dst checked_cast(Src value) {
+ // This throws a compile-time error on evaluating the constexpr if it can be
+ // determined at compile-time as failing, otherwise it will CHECK at runtime.
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return BASE_NUMERICS_LIKELY((IsValueInRangeForNumericType<Dst>(value)))
+ ? static_cast<Dst>(static_cast<SrcType>(value))
+ : CheckHandler::template HandleFailure<Dst>();
}
+// Default boundaries for integral/float: max/infinity, lowest/-infinity, 0/NaN.
+// You may provide your own limits (e.g. to saturated_cast) so long as you
+// implement all of the static constexpr member functions in the class below.
+template <typename T>
+struct SaturationDefaultLimits : public std::numeric_limits<T> {
+ static constexpr T NaN() {
+ return std::numeric_limits<T>::has_quiet_NaN
+ ? std::numeric_limits<T>::quiet_NaN()
+ : T();
+ }
+ using std::numeric_limits<T>::max;
+ static constexpr T Overflow() {
+ return std::numeric_limits<T>::has_infinity
+ ? std::numeric_limits<T>::infinity()
+ : std::numeric_limits<T>::max();
+ }
+ using std::numeric_limits<T>::lowest;
+ static constexpr T Underflow() {
+ return std::numeric_limits<T>::has_infinity
+ ? std::numeric_limits<T>::infinity() * -1
+ : std::numeric_limits<T>::lowest();
+ }
+};
+
+template <typename Dst, template <typename> class S, typename Src>
+constexpr Dst saturated_cast_impl(Src value, RangeCheck constraint) {
+ // For some reason clang generates much better code when the branch is
+ // structured exactly this way, rather than a sequence of checks.
+ return !constraint.IsOverflowFlagSet()
+ ? (!constraint.IsUnderflowFlagSet() ? static_cast<Dst>(value)
+ : S<Dst>::Underflow())
+ // Skip this check for integral Src, which cannot be NaN.
+ : (std::is_integral<Src>::value || !constraint.IsUnderflowFlagSet()
+ ? S<Dst>::Overflow()
+ : S<Dst>::NaN());
+}
+
+// We can reduce the number of conditions and get slightly better performance
+// for normal signed and unsigned integer ranges. And in the specific case of
+// Arm, we can use the optimized saturation instructions.
+template <typename Dst, typename Src, typename Enable = void>
+struct SaturateFastOp {
+ static constexpr bool is_supported = false;
+ static constexpr Dst Do(Src value) {
+ // Force a compile failure if instantiated.
+ return CheckOnFailure::template HandleFailure<Dst>();
+ }
+};
+
+template <typename Dst, typename Src>
+struct SaturateFastOp<
+ Dst, Src,
+ typename std::enable_if<std::is_integral<Src>::value &&
+ std::is_integral<Dst>::value &&
+ SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
+ static constexpr bool is_supported = true;
+ static constexpr Dst Do(Src value) {
+ return SaturateFastAsmOp<Dst, Src>::Do(value);
+ }
+};
+
+template <typename Dst, typename Src>
+struct SaturateFastOp<
+ Dst, Src,
+ typename std::enable_if<std::is_integral<Src>::value &&
+ std::is_integral<Dst>::value &&
+ !SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
+ static constexpr bool is_supported = true;
+ static constexpr Dst Do(Src value) {
+ // The exact order of the following is structured to hit the correct
+ // optimization heuristics across compilers. Do not change without
+ // checking the emitted code.
+ const Dst saturated = CommonMaxOrMin<Dst, Src>(
+ IsMaxInRangeForNumericType<Dst, Src>() ||
+ (!IsMinInRangeForNumericType<Dst, Src>() && IsValueNegative(value)));
+ return BASE_NUMERICS_LIKELY(IsValueInRangeForNumericType<Dst>(value))
+ ? static_cast<Dst>(value)
+ : saturated;
+ }
+};
+
// saturated_cast<> is analogous to static_cast<> for numeric types, except
-// that the specified numeric conversion will saturate rather than overflow or
-// underflow. NaN assignment to an integral will trigger a CHECK condition.
+// that the specified numeric conversion will saturate by default rather than
+// overflow or underflow, and NaN assignment to an integral will return 0.
+// All boundary condition behaviors can be overridden with a custom handler.
+template <typename Dst,
+ template <typename> class SaturationHandler = SaturationDefaultLimits,
+ typename Src>
+constexpr Dst saturated_cast(Src value) {
+ using SrcType = typename UnderlyingType<Src>::type;
+ return !IsCompileTimeConstant(value) &&
+ SaturateFastOp<Dst, SrcType>::is_supported &&
+ std::is_same<SaturationHandler<Dst>,
+ SaturationDefaultLimits<Dst>>::value
+ ? SaturateFastOp<Dst, SrcType>::Do(static_cast<SrcType>(value))
+ : saturated_cast_impl<Dst, SaturationHandler, SrcType>(
+ static_cast<SrcType>(value),
+ DstRangeRelationToSrcRange<Dst, SaturationHandler, SrcType>(
+ static_cast<SrcType>(value)));
+}
+
+// strict_cast<> is analogous to static_cast<> for numeric types, except that
+// it will cause a compile failure if the destination type is not large enough
+// to contain any value in the source type. It performs no runtime checking.
template <typename Dst, typename Src>
-inline Dst saturated_cast(Src value) {
- // Optimization for floating point values, which already saturate.
- if (std::numeric_limits<Dst>::is_iec559)
- return static_cast<Dst>(value);
+constexpr Dst strict_cast(Src value) {
+ using SrcType = typename UnderlyingType<Src>::type;
+ static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
+ static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+
+ // If you got here from a compiler error, it's because you tried to assign
+ // from a source type to a destination type that has insufficient range.
+ // The solution may be to change the destination type you're assigning to,
+ // and use one large enough to represent the source.
+ // Alternatively, you may be better served with the checked_cast<> or
+ // saturated_cast<> template functions for your particular use case.
+ static_assert(StaticDstRangeRelationToSrcRange<Dst, SrcType>::value ==
+ NUMERIC_RANGE_CONTAINED,
+ "The source type is out of range for the destination type. "
+ "Please see strict_cast<> comments for more information.");
+
+ return static_cast<Dst>(static_cast<SrcType>(value));
+}
+
+// Some wrappers to statically check that a type is in range.
+template <typename Dst, typename Src, class Enable = void>
+struct IsNumericRangeContained {
+ static constexpr bool value = false;
+};
+
+template <typename Dst, typename Src>
+struct IsNumericRangeContained<
+ Dst, Src,
+ typename std::enable_if<ArithmeticOrUnderlyingEnum<Dst>::value &&
+ ArithmeticOrUnderlyingEnum<Src>::value>::type> {
+ static constexpr bool value =
+ StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
+ NUMERIC_RANGE_CONTAINED;
+};
+
+// StrictNumeric implements compile-time range checking between numeric types by
+// wrapping assignment operations in a strict_cast. This class is intended to be
+// used for function arguments and return types, to ensure the destination type
+// can always contain the source type. This is essentially the same as enforcing
+// -Wconversion in gcc and C4302 warnings on MSVC, but it can be applied
+// incrementally at API boundaries, making it easier to convert code so that it
+// compiles cleanly with truncation warnings enabled.
+// This template should introduce no runtime overhead, but it also provides no
+// runtime checking of any of the associated mathematical operations. Use
+// CheckedNumeric for runtime range checks of the actual value being assigned.
+template <typename T>
+class StrictNumeric {
+ public:
+ using type = T;
- switch (internal::DstRangeRelationToSrcRange<Dst>(value)) {
- case internal::RANGE_VALID:
- return static_cast<Dst>(value);
+ constexpr StrictNumeric() : value_(0) {}
- case internal::RANGE_UNDERFLOW:
- return std::numeric_limits<Dst>::min();
+ // Copy constructor.
+ template <typename Src>
+ constexpr StrictNumeric(const StrictNumeric<Src>& rhs)
+ : value_(strict_cast<T>(rhs.value_)) {}
- case internal::RANGE_OVERFLOW:
- return std::numeric_limits<Dst>::max();
+ // This is not an explicit constructor because we implicitly upgrade regular
+ // numerics to StrictNumerics to make them easier to use.
+ template <typename Src>
+ constexpr StrictNumeric(Src value) // NOLINT(runtime/explicit)
+ : value_(strict_cast<T>(value)) {}
- // Should fail only on attempting to assign NaN to a saturated integer.
- case internal::RANGE_INVALID:
- UNREACHABLE();
+ // If you got here from a compiler error, it's because you tried to assign
+ // from a source type to a destination type that has insufficient range.
+ // The solution may be to change the destination type you're assigning to,
+ // and use one large enough to represent the source.
+ // If you're assigning from a CheckedNumeric<> class, you may be able to use
+ // the AssignIfValid() member function, specify a narrower destination type to
+  // the member value functions (e.g. val.template ValueOrDie<Dst>()), or use
+  // one of the value helper functions (e.g. ValueOrDieForType<Dst>(val)).
+ // If you've encountered an _ambiguous overload_ you can use a static_cast<>
+ // to explicitly cast the result to the destination type.
+ // If none of that works, you may be better served with the checked_cast<> or
+ // saturated_cast<> template functions for your particular use case.
+ template <typename Dst, typename std::enable_if<IsNumericRangeContained<
+ Dst, T>::value>::type* = nullptr>
+ constexpr operator Dst() const {
+ return static_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(value_);
}
- UNREACHABLE();
+ private:
+ const T value_;
+};
+
+// Convenience wrapper returns a StrictNumeric from the provided arithmetic type.
+template <typename T>
+constexpr StrictNumeric<typename UnderlyingType<T>::type> MakeStrictNum(
+ const T value) {
+ return value;
+}
+
+#if !BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
+// Overload the ostream output operator to make logging work nicely.
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const StrictNumeric<T>& value) {
+ os << static_cast<T>(value);
+ return os;
+}
+#endif
+
+#define BASE_NUMERIC_COMPARISON_OPERATORS(CLASS, NAME, OP) \
+ template <typename L, typename R, \
+ typename std::enable_if< \
+ internal::Is##CLASS##Op<L, R>::value>::type* = nullptr> \
+ constexpr bool operator OP(const L lhs, const R rhs) { \
+ return SafeCompare<NAME, typename UnderlyingType<L>::type, \
+ typename UnderlyingType<R>::type>(lhs, rhs); \
+ }
+
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLess, <)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLessOrEqual, <=)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreater, >)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreaterOrEqual, >=)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsEqual, ==)
+BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsNotEqual, !=)
+
+} // namespace internal
+
+using internal::as_signed;
+using internal::as_unsigned;
+using internal::checked_cast;
+using internal::IsTypeInRangeForNumericType;
+using internal::IsValueInRangeForNumericType;
+using internal::IsValueNegative;
+using internal::MakeStrictNum;
+using internal::SafeUnsignedAbs;
+using internal::saturated_cast;
+using internal::strict_cast;
+using internal::StrictNumeric;
+
+// Explicitly make a shorter size_t alias for convenience.
+using SizeT = StrictNumeric<size_t>;
+
+// floating -> integral conversions that saturate and thus can actually return
+// an integral type. In most cases, these should be preferred over the std::
+// versions.
+template <typename Dst = int, typename Src,
+ typename = std::enable_if_t<std::is_integral<Dst>::value &&
+ std::is_floating_point<Src>::value>>
+Dst ClampFloor(Src value) {
+ return saturated_cast<Dst>(std::floor(value));
+}
+template <typename Dst = int, typename Src,
+ typename = std::enable_if_t<std::is_integral<Dst>::value &&
+ std::is_floating_point<Src>::value>>
+Dst ClampCeil(Src value) {
+ return saturated_cast<Dst>(std::ceil(value));
+}
+template <typename Dst = int, typename Src,
+ typename = std::enable_if_t<std::is_integral<Dst>::value &&
+ std::is_floating_point<Src>::value>>
+Dst ClampRound(Src value) {
+ const Src rounded =
+ (value >= 0.0f) ? std::floor(value + 0.5f) : std::ceil(value - 0.5f);
+ return saturated_cast<Dst>(rounded);
}
} // namespace base
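
The rewritten header brings V8's copy in line with Chromium's base/numerics: checked_cast CHECKs on out-of-range values, saturated_cast clamps (mapping NaN to 0 for integral destinations by default), strict_cast refuses to compile lossy conversions, and ClampFloor/ClampCeil/ClampRound give saturating float-to-int rounding. A hedged usage sketch against the declarations above:

    #include <cstdint>

    #include "src/base/safe_conversions.h"

    void Example() {
      int8_t a = v8::base::saturated_cast<int8_t>(300);   // clamps to 127
      uint8_t b = v8::base::saturated_cast<uint8_t>(-5);  // clamps to 0
      int c = v8::base::ClampRound(2.5);  // rounds half away from zero: 3
      int8_t d = v8::base::checked_cast<int8_t>(100);     // in range, passes
      (void)a; (void)b; (void)c; (void)d;
    }
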
diff --git a/deps/v8/src/base/safe_conversions_arm_impl.h b/deps/v8/src/base/safe_conversions_arm_impl.h
new file mode 100644
index 0000000000..0e08a14405
--- /dev/null
+++ b/deps/v8/src/base/safe_conversions_arm_impl.h
@@ -0,0 +1,60 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2014 the V8 project authors. All rights reserved.
+// List of adaptations:
+// - include guard names
+// - wrap in v8 namespace
+// - include paths
+
+#ifndef V8_BASE_SAFE_CONVERSIONS_ARM_IMPL_H_
+#define V8_BASE_SAFE_CONVERSIONS_ARM_IMPL_H_
+
+#include <cassert>
+#include <limits>
+#include <type_traits>
+
+#include "src/base/safe_conversions_impl.h"
+
+namespace v8 {
+namespace base {
+namespace internal {
+
+// Fast saturation to a destination type.
+template <typename Dst, typename Src>
+struct SaturateFastAsmOp {
+ static constexpr bool is_supported =
+ std::is_signed<Src>::value && std::is_integral<Dst>::value &&
+ std::is_integral<Src>::value &&
+ IntegerBitsPlusSign<Src>::value <= IntegerBitsPlusSign<int32_t>::value &&
+ IntegerBitsPlusSign<Dst>::value <= IntegerBitsPlusSign<int32_t>::value &&
+ !IsTypeInRangeForNumericType<Dst, Src>::value;
+
+ __attribute__((always_inline)) static Dst Do(Src value) {
+ int32_t src = value;
+ typename std::conditional<std::is_signed<Dst>::value, int32_t,
+ uint32_t>::type result;
+ if (std::is_signed<Dst>::value) {
+ asm("ssat %[dst], %[shift], %[src]"
+ : [dst] "=r"(result)
+ : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value <= 32
+ ? IntegerBitsPlusSign<Dst>::value
+ : 32));
+ } else {
+ asm("usat %[dst], %[shift], %[src]"
+ : [dst] "=r"(result)
+ : [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value < 32
+ ? IntegerBitsPlusSign<Dst>::value
+ : 31));
+ }
+ return static_cast<Dst>(result);
+ }
+};
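+
+// Editor's sketch (illustrative; not part of the upstream change): ssat/usat
+// saturate in one instruction. For Dst = int8_t the template emits
+// "ssat <dst>, #8, <src>", clamping to [-128, 127] -- the same result as the
+// portable saturated_cast<int8_t>, but branch-free.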
+
+} // namespace internal
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_SAFE_CONVERSIONS_ARM_IMPL_H_
diff --git a/deps/v8/src/base/safe_conversions_impl.h b/deps/v8/src/base/safe_conversions_impl.h
index 90c8e19353..5d9277df24 100644
--- a/deps/v8/src/base/safe_conversions_impl.h
+++ b/deps/v8/src/base/safe_conversions_impl.h
@@ -4,28 +4,130 @@
// Slightly adapted for inclusion in V8.
// Copyright 2014 the V8 project authors. All rights reserved.
+// List of adaptations:
+// - include guard names
+// - wrap in v8 namespace
+// - formatting (git cl format)
#ifndef V8_BASE_SAFE_CONVERSIONS_IMPL_H_
#define V8_BASE_SAFE_CONVERSIONS_IMPL_H_
+#include <stdint.h>
+
#include <limits>
+#include <type_traits>
-#include "src/base/logging.h"
-#include "src/base/macros.h"
+#if defined(__GNUC__) || defined(__clang__)
+#define BASE_NUMERICS_LIKELY(x) __builtin_expect(!!(x), 1)
+#define BASE_NUMERICS_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define BASE_NUMERICS_LIKELY(x) (x)
+#define BASE_NUMERICS_UNLIKELY(x) (x)
+#endif
namespace v8 {
namespace base {
namespace internal {
// The std library doesn't provide a binary max_exponent for integers, however
-// we can compute one by adding one to the number of non-sign bits. This allows
-// for accurate range comparisons between floating point and integer types.
+// we can compute an analog using std::numeric_limits<>::digits.
template <typename NumericType>
struct MaxExponent {
- static const int value = std::numeric_limits<NumericType>::is_iec559
+ static const int value = std::is_floating_point<NumericType>::value
? std::numeric_limits<NumericType>::max_exponent
- : (sizeof(NumericType) * 8 + 1 -
- std::numeric_limits<NumericType>::is_signed);
+ : std::numeric_limits<NumericType>::digits + 1;
+};
+
+// The number of bits (including the sign) in an integer. Eliminates sizeof
+// hacks.
+template <typename NumericType>
+struct IntegerBitsPlusSign {
+ static const int value = std::numeric_limits<NumericType>::digits +
+ std::is_signed<NumericType>::value;
+};
+
+// Helper templates for integer manipulations.
+
+template <typename Integer>
+struct PositionOfSignBit {
+ static const size_t value = IntegerBitsPlusSign<Integer>::value - 1;
+};
+
+// Determines if a numeric value is negative without throwing compiler
+// warnings on: unsigned(value) < 0.
+template <typename T,
+ typename std::enable_if<std::is_signed<T>::value>::type* = nullptr>
+constexpr bool IsValueNegative(T value) {
+ static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+ return value < 0;
+}
+
+template <typename T,
+ typename std::enable_if<!std::is_signed<T>::value>::type* = nullptr>
+constexpr bool IsValueNegative(T) {
+ static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+ return false;
+}
+
+// This performs a fast negation, returning a signed value. It works on unsigned
+// arguments, but probably doesn't do what you want for any unsigned value
+// larger than max / 2 + 1 (i.e. signed min cast to unsigned).
+template <typename T>
+constexpr typename std::make_signed<T>::type ConditionalNegate(
+ T x, bool is_negative) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ using SignedT = typename std::make_signed<T>::type;
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ return static_cast<SignedT>(
+ (static_cast<UnsignedT>(x) ^ -SignedT(is_negative)) + is_negative);
+}
+
+// This performs a safe, absolute value via unsigned overflow.
+template <typename T>
+constexpr typename std::make_unsigned<T>::type SafeUnsignedAbs(T value) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ return IsValueNegative(value)
+ ? static_cast<UnsignedT>(0u - static_cast<UnsignedT>(value))
+ : static_cast<UnsignedT>(value);
+}
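+
+// Editor's illustration (not part of the upstream change): unlike std::abs,
+// SafeUnsignedAbs is well defined at the signed minimum, e.g. for 32-bit int:
+// SafeUnsignedAbs(INT_MIN) == 2147483648u // std::abs(INT_MIN) is UB
+// ConditionalNegate(5u, true) == -5 // negation with a signed result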
+
+// This allows us to switch paths on known compile-time constants.
+#if defined(__clang__) || defined(__GNUC__)
+constexpr bool CanDetectCompileTimeConstant() { return true; }
+template <typename T>
+constexpr bool IsCompileTimeConstant(const T v) {
+ return __builtin_constant_p(v);
+}
+#else
+constexpr bool CanDetectCompileTimeConstant() { return false; }
+template <typename T>
+constexpr bool IsCompileTimeConstant(const T) {
+ return false;
+}
+#endif
+template <typename T>
+constexpr bool MustTreatAsConstexpr(const T v) {
+ // Either we can't detect a compile-time constant, and must always use the
+ // constexpr path, or we know we have a compile-time constant.
+ return !CanDetectCompileTimeConstant() || IsCompileTimeConstant(v);
+}
+
+// Forces a crash, like a CHECK(false). Used for numeric boundary errors.
+// Also used in a constexpr template to trigger a compilation failure on
+// an error condition.
+struct CheckOnFailure {
+ template <typename T>
+ static T HandleFailure() {
+#if defined(_MSC_VER)
+ __debugbreak();
+#elif defined(__GNUC__) || defined(__clang__)
+ __builtin_trap();
+#else
+ ((void)(*(volatile char*)0 = 0));
+#endif
+ return T();
+ }
};
enum IntegerRepresentation {
@@ -35,7 +137,7 @@ enum IntegerRepresentation {
// A range for a given numeric Src type is contained for a given numeric Dst
// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
-// numeric_limits<Src>::min() >= numeric_limits<Dst>::min() are true.
+// numeric_limits<Src>::lowest() >= numeric_limits<Dst>::lowest() are true.
// We implement this as template specializations rather than simple static
// comparisons to ensure type correctness in our comparisons.
enum NumericRangeRepresentation {
@@ -46,16 +148,13 @@ enum NumericRangeRepresentation {
// Helper templates to statically determine if our destination type can contain
// maximum and minimum values represented by the source type.
-template <
- typename Dst,
- typename Src,
- IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
- ? INTEGER_REPRESENTATION_SIGNED
- : INTEGER_REPRESENTATION_UNSIGNED,
- IntegerRepresentation SrcSign =
- std::numeric_limits<Src>::is_signed
- ? INTEGER_REPRESENTATION_SIGNED
- : INTEGER_REPRESENTATION_UNSIGNED >
+template <typename Dst, typename Src,
+ IntegerRepresentation DstSign = std::is_signed<Dst>::value
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign = std::is_signed<Src>::value
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED>
struct StaticDstRangeRelationToSrcRange;
// Same sign: Dst is guaranteed to contain Src only if its range is equal or
@@ -90,127 +189,630 @@ struct StaticDstRangeRelationToSrcRange<Dst,
static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
};
-enum RangeConstraint {
- RANGE_VALID = 0x0, // Value can be represented by the destination type.
- RANGE_UNDERFLOW = 0x1, // Value would underflow.
- RANGE_OVERFLOW = 0x2, // Value would overflow.
- RANGE_INVALID = RANGE_UNDERFLOW | RANGE_OVERFLOW // Invalid (i.e. NaN).
+// This class wraps the range constraints as separate booleans so the compiler
+// can identify constants and eliminate unused code paths.
+class RangeCheck {
+ public:
+ constexpr RangeCheck(bool is_in_lower_bound, bool is_in_upper_bound)
+ : is_underflow_(!is_in_lower_bound), is_overflow_(!is_in_upper_bound) {}
+ constexpr RangeCheck() : is_underflow_(0), is_overflow_(0) {}
+ constexpr bool IsValid() const { return !is_overflow_ && !is_underflow_; }
+ constexpr bool IsInvalid() const { return is_overflow_ && is_underflow_; }
+ constexpr bool IsOverflow() const { return is_overflow_ && !is_underflow_; }
+ constexpr bool IsUnderflow() const { return !is_overflow_ && is_underflow_; }
+ constexpr bool IsOverflowFlagSet() const { return is_overflow_; }
+ constexpr bool IsUnderflowFlagSet() const { return is_underflow_; }
+ constexpr bool operator==(const RangeCheck rhs) const {
+ return is_underflow_ == rhs.is_underflow_ &&
+ is_overflow_ == rhs.is_overflow_;
+ }
+ constexpr bool operator!=(const RangeCheck rhs) const {
+ return !(*this == rhs);
+ }
+
+ private:
+ // Do not change the order of these member variables. The integral conversion
+ // optimization depends on this exact order.
+ const bool is_underflow_;
+ const bool is_overflow_;
};
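+
+// Editor's note (illustrative): only NaN fails both bounds at once, so a
+// RangeCheck built from two failed bound checks reports IsInvalid():
+// RangeCheck(false, false).IsInvalid() == true // e.g. NaN -> int
+// RangeCheck(false, true).IsUnderflow() == true // below the lower bound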
-// Helper function for coercing an int back to a RangeConstraint.
-inline RangeConstraint GetRangeConstraint(int integer_range_constraint) {
- DCHECK(integer_range_constraint >= RANGE_VALID &&
- integer_range_constraint <= RANGE_INVALID);
- return static_cast<RangeConstraint>(integer_range_constraint);
-}
+// The following helper template addresses a corner case in range checks for
+// conversion from a floating-point type to an integral type of smaller range
+// but larger precision (e.g. float -> unsigned). The problem is as follows:
+// 1. Integral maximum is always one less than a power of two, so it must be
+// truncated to fit the mantissa of the floating point. The direction of
+// rounding is implementation defined, but by default it's always IEEE
+// floats, which round to nearest and thus result in a value of larger
+// magnitude than the integral value.
+// Example: float f = UINT_MAX; // f is 4294967296f but UINT_MAX
+// // is 4294967295u.
+// 2. If the floating point value is equal to the promoted integral maximum
+// value, a range check will erroneously pass.
+// Example: (4294967296f <= 4294967295u) // This is true due to a precision
+// // loss in rounding up to float.
+// 3. When the floating point value is then converted to an integral, the
+// resulting value is out of range for the target integral type and
+// thus is implementation defined.
+// Example: unsigned u = (float)UINT_MAX; // u will typically overflow to 0.
+// To fix this bug we manually truncate the maximum value when the destination
+// type is an integral of larger precision than the source floating-point type,
+// such that the resulting maximum is represented exactly as a floating point.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct NarrowingRange {
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = typename std::numeric_limits<Dst>;
-// This function creates a RangeConstraint from an upper and lower bound
-// check by taking advantage of the fact that only NaN can be out of range in
-// both directions at once.
-inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound,
- bool is_in_lower_bound) {
- return GetRangeConstraint((is_in_upper_bound ? 0 : RANGE_OVERFLOW) |
- (is_in_lower_bound ? 0 : RANGE_UNDERFLOW));
-}
+ // Computes the mask required to make an accurate comparison between types.
+ static const int kShift =
+ (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
+ SrcLimits::digits < DstLimits::digits)
+ ? (DstLimits::digits - SrcLimits::digits)
+ : 0;
+ template <typename T, typename std::enable_if<
+ std::is_integral<T>::value>::type* = nullptr>
-template <
- typename Dst,
- typename Src,
- IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
- ? INTEGER_REPRESENTATION_SIGNED
- : INTEGER_REPRESENTATION_UNSIGNED,
- IntegerRepresentation SrcSign = std::numeric_limits<Src>::is_signed
- ? INTEGER_REPRESENTATION_SIGNED
- : INTEGER_REPRESENTATION_UNSIGNED,
- NumericRangeRepresentation DstRange =
- StaticDstRangeRelationToSrcRange<Dst, Src>::value >
+ // Masks out the integer bits that are beyond the precision of the
+ // intermediate type used for comparison.
+ static constexpr T Adjust(T value) {
+ static_assert(std::is_same<T, Dst>::value, "");
+ static_assert(kShift < DstLimits::digits, "");
+ return static_cast<T>(
+ ConditionalNegate(SafeUnsignedAbs(value) & ~((T(1) << kShift) - T(1)),
+ IsValueNegative(value)));
+ }
+
+ template <typename T, typename std::enable_if<
+ std::is_floating_point<T>::value>::type* = nullptr>
+ static constexpr T Adjust(T value) {
+ static_assert(std::is_same<T, Dst>::value, "");
+ static_assert(kShift == 0, "");
+ return value;
+ }
+
+ static constexpr Dst max() { return Adjust(Bounds<Dst>::max()); }
+ static constexpr Dst lowest() { return Adjust(Bounds<Dst>::lowest()); }
+};
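+
+// Editor's worked example (not part of the upstream change): for
+// Dst = uint32_t and Src = float, MaxExponent<float> (128) exceeds
+// MaxExponent<uint32_t> (33) and float carries only 24 mantissa digits, so
+// kShift = 32 - 24 = 8. Adjust() then clears the low 8 bits of UINT_MAX,
+// yielding 4294967040, which is exactly representable as a float and thus
+// safe to compare against.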
+
+template <typename Dst, typename Src, template <typename> class Bounds,
+ IntegerRepresentation DstSign = std::is_signed<Dst>::value
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign = std::is_signed<Src>::value
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ NumericRangeRepresentation DstRange =
+ StaticDstRangeRelationToSrcRange<Dst, Src>::value>
struct DstRangeRelationToSrcRangeImpl;
// The following templates are for ranges that must be verified at runtime. We
// split them into checks based on signedness to avoid confusing casts and
// compiler warnings on signed and unsigned comparisons.
-// Dst range is statically determined to contain Src: Nothing to check.
-template <typename Dst,
- typename Src,
- IntegerRepresentation DstSign,
- IntegerRepresentation SrcSign>
-struct DstRangeRelationToSrcRangeImpl<Dst,
- Src,
- DstSign,
- SrcSign,
+// Same sign narrowing: The range is contained for normal limits.
+template <typename Dst, typename Src, template <typename> class Bounds,
+ IntegerRepresentation DstSign, IntegerRepresentation SrcSign>
+struct DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds, DstSign, SrcSign,
NUMERIC_RANGE_CONTAINED> {
- static RangeConstraint Check(Src value) { return RANGE_VALID; }
+ static constexpr RangeCheck Check(Src value) {
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(
+ static_cast<Dst>(SrcLimits::lowest()) >= DstLimits::lowest() ||
+ static_cast<Dst>(value) >= DstLimits::lowest(),
+ static_cast<Dst>(SrcLimits::max()) <= DstLimits::max() ||
+ static_cast<Dst>(value) <= DstLimits::max());
+ }
};
// Signed to signed narrowing: Both the upper and lower boundaries may be
-// exceeded.
-template <typename Dst, typename Src>
-struct DstRangeRelationToSrcRangeImpl<Dst,
- Src,
- INTEGER_REPRESENTATION_SIGNED,
- INTEGER_REPRESENTATION_SIGNED,
- NUMERIC_RANGE_NOT_CONTAINED> {
- static RangeConstraint Check(Src value) {
- return std::numeric_limits<Dst>::is_iec559
- ? GetRangeConstraint(value <= std::numeric_limits<Dst>::max(),
- value >= -std::numeric_limits<Dst>::max())
- : GetRangeConstraint(value <= std::numeric_limits<Dst>::max(),
- value >= std::numeric_limits<Dst>::min());
+// exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<
+ Dst, Src, Bounds, INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_SIGNED, NUMERIC_RANGE_NOT_CONTAINED> {
+ static constexpr RangeCheck Check(Src value) {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(value >= DstLimits::lowest(), value <= DstLimits::max());
}
};
-// Unsigned to unsigned narrowing: Only the upper boundary can be exceeded.
-template <typename Dst, typename Src>
-struct DstRangeRelationToSrcRangeImpl<Dst,
- Src,
- INTEGER_REPRESENTATION_UNSIGNED,
- INTEGER_REPRESENTATION_UNSIGNED,
- NUMERIC_RANGE_NOT_CONTAINED> {
- static RangeConstraint Check(Src value) {
- return GetRangeConstraint(value <= std::numeric_limits<Dst>::max(), true);
+// Unsigned to unsigned narrowing: Only the upper bound can be exceeded for
+// standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<
+ Dst, Src, Bounds, INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED, NUMERIC_RANGE_NOT_CONTAINED> {
+ static constexpr RangeCheck Check(Src value) {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(
+ DstLimits::lowest() == Dst(0) || value >= DstLimits::lowest(),
+ value <= DstLimits::max());
}
};
-// Unsigned to signed: The upper boundary may be exceeded.
-template <typename Dst, typename Src>
-struct DstRangeRelationToSrcRangeImpl<Dst,
- Src,
- INTEGER_REPRESENTATION_SIGNED,
- INTEGER_REPRESENTATION_UNSIGNED,
- NUMERIC_RANGE_NOT_CONTAINED> {
- static RangeConstraint Check(Src value) {
- return sizeof(Dst) > sizeof(Src)
- ? RANGE_VALID
- : GetRangeConstraint(
- value <= static_cast<Src>(std::numeric_limits<Dst>::max()),
- true);
+// Unsigned to signed: Only the upper bound can be exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<
+ Dst, Src, Bounds, INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED, NUMERIC_RANGE_NOT_CONTAINED> {
+ static constexpr RangeCheck Check(Src value) {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ using Promotion = decltype(Src() + Dst());
+ return RangeCheck(DstLimits::lowest() <= Dst(0) ||
+ static_cast<Promotion>(value) >=
+ static_cast<Promotion>(DstLimits::lowest()),
+ static_cast<Promotion>(value) <=
+ static_cast<Promotion>(DstLimits::max()));
}
};
// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
-// and any negative value exceeds the lower boundary.
+// and any negative value exceeds the lower boundary for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
+struct DstRangeRelationToSrcRangeImpl<
+ Dst, Src, Bounds, INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED, NUMERIC_RANGE_NOT_CONTAINED> {
+ static constexpr RangeCheck Check(Src value) {
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ using Promotion = decltype(Src() + Dst());
+ bool ge_zero = false;
+ // Converting floating-point to integer will discard fractional part, so
+ // values in (-1.0, -0.0) will truncate to 0 and fit in Dst.
+ if (std::is_floating_point<Src>::value) {
+ ge_zero = value > Src(-1);
+ } else {
+ ge_zero = value >= Src(0);
+ }
+ return RangeCheck(
+ ge_zero && (DstLimits::lowest() == 0 ||
+ static_cast<Dst>(value) >= DstLimits::lowest()),
+ static_cast<Promotion>(SrcLimits::max()) <=
+ static_cast<Promotion>(DstLimits::max()) ||
+ static_cast<Promotion>(value) <=
+ static_cast<Promotion>(DstLimits::max()));
+ }
+};
+
+// Simple wrapper for statically checking if a type's range is contained.
template <typename Dst, typename Src>
-struct DstRangeRelationToSrcRangeImpl<Dst,
- Src,
- INTEGER_REPRESENTATION_UNSIGNED,
- INTEGER_REPRESENTATION_SIGNED,
- NUMERIC_RANGE_NOT_CONTAINED> {
- static RangeConstraint Check(Src value) {
- return (MaxExponent<Dst>::value >= MaxExponent<Src>::value)
- ? GetRangeConstraint(true, value >= static_cast<Src>(0))
- : GetRangeConstraint(
- value <= static_cast<Src>(std::numeric_limits<Dst>::max()),
- value >= static_cast<Src>(0));
+struct IsTypeInRangeForNumericType {
+ static const bool value = StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
+ NUMERIC_RANGE_CONTAINED;
+};
+
+template <typename Dst, template <typename> class Bounds = std::numeric_limits,
+ typename Src>
+constexpr RangeCheck DstRangeRelationToSrcRange(Src value) {
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+ static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+ static_assert(Bounds<Dst>::lowest() < Bounds<Dst>::max(), "");
+ return DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds>::Check(value);
+}
+
+// Integer promotion templates used by the portable checked integer arithmetic.
+template <size_t Size, bool IsSigned>
+struct IntegerForDigitsAndSign;
+
+#define INTEGER_FOR_DIGITS_AND_SIGN(I) \
+ template <> \
+ struct IntegerForDigitsAndSign<IntegerBitsPlusSign<I>::value, \
+ std::is_signed<I>::value> { \
+ using type = I; \
+ }
+
+INTEGER_FOR_DIGITS_AND_SIGN(int8_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint8_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int16_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint16_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int32_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint32_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int64_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint64_t);
+#undef INTEGER_FOR_DIGITS_AND_SIGN
+
+// WARNING: We have no IntegerForDigitsAndSign<128, *>. If we ever add one to
+// support 128-bit math, then the promotion templates below will need to be
+// updated (or more likely replaced with a decltype expression).
+static_assert(IntegerBitsPlusSign<intmax_t>::value == 64,
+ "Max integer size not supported for this toolchain.");
+
+template <typename Integer, bool IsSigned = std::is_signed<Integer>::value>
+struct TwiceWiderInteger {
+ using type =
+ typename IntegerForDigitsAndSign<IntegerBitsPlusSign<Integer>::value * 2,
+ IsSigned>::type;
+};
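+
+// Editor's examples (illustrative): TwiceWiderInteger<int16_t>::type is
+// int32_t, and TwiceWiderInteger<uint32_t, /*IsSigned=*/true>::type is
+// int64_t. There is deliberately no mapping beyond 64 bits, per the
+// static_assert above.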
+
+enum ArithmeticPromotionCategory {
+ LEFT_PROMOTION, // Use the type of the left-hand argument.
+ RIGHT_PROMOTION // Use the type of the right-hand argument.
+};
+
+// Determines the type that can represent the largest positive value.
+template <typename Lhs, typename Rhs,
+ ArithmeticPromotionCategory Promotion =
+ (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
+ ? LEFT_PROMOTION
+ : RIGHT_PROMOTION>
+struct MaxExponentPromotion;
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, LEFT_PROMOTION> {
+ using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+ using type = Rhs;
+};
+
+// Determines the type that can represent the lowest arithmetic value.
+template <typename Lhs, typename Rhs,
+ ArithmeticPromotionCategory Promotion =
+ std::is_signed<Lhs>::value
+ ? (std::is_signed<Rhs>::value
+ ? (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value
+ ? LEFT_PROMOTION
+ : RIGHT_PROMOTION)
+ : LEFT_PROMOTION)
+ : (std::is_signed<Rhs>::value
+ ? RIGHT_PROMOTION
+ : (MaxExponent<Lhs>::value < MaxExponent<Rhs>::value
+ ? LEFT_PROMOTION
+ : RIGHT_PROMOTION))>
+struct LowestValuePromotion;
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, LEFT_PROMOTION> {
+ using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+ using type = Rhs;
+};
+
+// Determines the type that is best able to represent an arithmetic result.
+template <
+ typename Lhs, typename Rhs = Lhs,
+ bool is_intmax_type =
+ std::is_integral<typename MaxExponentPromotion<Lhs, Rhs>::type>::value&&
+ IntegerBitsPlusSign<typename MaxExponentPromotion<Lhs, Rhs>::type>::
+ value == IntegerBitsPlusSign<intmax_t>::value,
+ bool is_max_exponent =
+ StaticDstRangeRelationToSrcRange<
+ typename MaxExponentPromotion<Lhs, Rhs>::type, Lhs>::value ==
+ NUMERIC_RANGE_CONTAINED&& StaticDstRangeRelationToSrcRange<
+ typename MaxExponentPromotion<Lhs, Rhs>::type, Rhs>::value ==
+ NUMERIC_RANGE_CONTAINED>
+struct BigEnoughPromotion;
+
+// The side with the max exponent is big enough.
+template <typename Lhs, typename Rhs, bool is_intmax_type>
+struct BigEnoughPromotion<Lhs, Rhs, is_intmax_type, true> {
+ using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = true;
+};
+
+// We can use a twice wider type to fit.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, false, false> {
+ using type =
+ typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+ std::is_signed<Lhs>::value ||
+ std::is_signed<Rhs>::value>::type;
+ static const bool is_contained = true;
+};
+
+// No type is large enough.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, true, false> {
+ using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = false;
+};
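+
+// Editor's examples (illustrative): BigEnoughPromotion<int32_t, uint32_t>
+// promotes to int64_t with is_contained == true, while
+// BigEnoughPromotion<int64_t, uint64_t> has no wider integer to grow into, so
+// is_contained == false and callers must fall back to runtime range checks.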
+
+// We can statically check if operations on the provided types can wrap, so we
+// can skip the checked operations if they're not needed. For an integer, this
+// means the destination type must preserve the sign and be at least twice the
+// width of the source.
+template <typename T, typename Lhs, typename Rhs = Lhs>
+struct IsIntegerArithmeticSafe {
+ static const bool value =
+ !std::is_floating_point<T>::value &&
+ !std::is_floating_point<Lhs>::value &&
+ !std::is_floating_point<Rhs>::value &&
+ std::is_signed<T>::value >= std::is_signed<Lhs>::value &&
+ IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Lhs>::value) &&
+ std::is_signed<T>::value >= std::is_signed<Rhs>::value &&
+ IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Rhs>::value);
+};
+
+// Promotes to a type that can represent any possible result of a binary
+// arithmetic operation with the source types.
+template <typename Lhs, typename Rhs,
+ bool is_promotion_possible = IsIntegerArithmeticSafe<
+ typename std::conditional<std::is_signed<Lhs>::value ||
+ std::is_signed<Rhs>::value,
+ intmax_t, uintmax_t>::type,
+ typename MaxExponentPromotion<Lhs, Rhs>::type>::value>
+struct FastIntegerArithmeticPromotion;
+
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, true> {
+ using type =
+ typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+ std::is_signed<Lhs>::value ||
+ std::is_signed<Rhs>::value>::type;
+ static_assert(IsIntegerArithmeticSafe<type, Lhs, Rhs>::value, "");
+ static const bool is_contained = true;
+};
+
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, false> {
+ using type = typename BigEnoughPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = false;
+};
+
+// Extracts the underlying type from an enum.
+template <typename T, bool is_enum = std::is_enum<T>::value>
+struct ArithmeticOrUnderlyingEnum;
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, true> {
+ using type = typename std::underlying_type<T>::type;
+ static const bool value = std::is_arithmetic<type>::value;
+};
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, false> {
+ using type = T;
+ static const bool value = std::is_arithmetic<type>::value;
+};
+
+// The following are helper templates used in the CheckedNumeric class.
+template <typename T>
+class CheckedNumeric;
+
+template <typename T>
+class ClampedNumeric;
+
+template <typename T>
+class StrictNumeric;
+
+// Used to treat CheckedNumeric and arithmetic underlying types the same.
+template <typename T>
+struct UnderlyingType {
+ using type = typename ArithmeticOrUnderlyingEnum<T>::type;
+ static const bool is_numeric = std::is_arithmetic<type>::value;
+ static const bool is_checked = false;
+ static const bool is_clamped = false;
+ static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<CheckedNumeric<T>> {
+ using type = T;
+ static const bool is_numeric = true;
+ static const bool is_checked = true;
+ static const bool is_clamped = false;
+ static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<ClampedNumeric<T>> {
+ using type = T;
+ static const bool is_numeric = true;
+ static const bool is_checked = false;
+ static const bool is_clamped = true;
+ static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<StrictNumeric<T>> {
+ using type = T;
+ static const bool is_numeric = true;
+ static const bool is_checked = false;
+ static const bool is_clamped = false;
+ static const bool is_strict = true;
+};
+
+template <typename L, typename R>
+struct IsCheckedOp {
+ static const bool value =
+ UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+ (UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
+};
+
+template <typename L, typename R>
+struct IsClampedOp {
+ static const bool value =
+ UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+ (UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped) &&
+ !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
+};
+
+template <typename L, typename R>
+struct IsStrictOp {
+ static const bool value =
+ UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+ (UnderlyingType<L>::is_strict || UnderlyingType<R>::is_strict) &&
+ !(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked) &&
+ !(UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped);
+};
+
+// as_signed<> returns the supplied integral value (or integral castable
+// Numeric template) cast as a signed integral of equivalent precision.
+// I.e. it's mostly an alias for: static_cast<std::make_signed<T>::type>(t)
+template <typename Src>
+constexpr typename std::make_signed<
+ typename base::internal::UnderlyingType<Src>::type>::type
+as_signed(const Src value) {
+ static_assert(std::is_integral<decltype(as_signed(value))>::value,
+ "Argument must be a signed or unsigned integer type.");
+ return static_cast<decltype(as_signed(value))>(value);
+}
+
+// as_unsigned<> returns the supplied integral value (or integral castable
+// Numeric template) cast as an unsigned integral of equivalent precision.
+// I.e. it's mostly an alias for: static_cast<std::make_unsigned<T>::type>(t)
+template <typename Src>
+constexpr typename std::make_unsigned<
+ typename base::internal::UnderlyingType<Src>::type>::type
+as_unsigned(const Src value) {
+ static_assert(std::is_integral<decltype(as_unsigned(value))>::value,
+ "Argument must be a signed or unsigned integer type.");
+ return static_cast<decltype(as_unsigned(value))>(value);
+}
+
+template <typename L, typename R>
+constexpr bool IsLessImpl(const L lhs, const R rhs, const RangeCheck l_range,
+ const RangeCheck r_range) {
+ return l_range.IsUnderflow() || r_range.IsOverflow() ||
+ (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) <
+ static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLess {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return IsLessImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+constexpr bool IsLessOrEqualImpl(const L lhs, const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range) {
+ return l_range.IsUnderflow() || r_range.IsOverflow() ||
+ (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) <=
+ static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLessOrEqual {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return IsLessOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+constexpr bool IsGreaterImpl(const L lhs, const R rhs, const RangeCheck l_range,
+ const RangeCheck r_range) {
+ return l_range.IsOverflow() || r_range.IsUnderflow() ||
+ (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) >
+ static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreater {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return IsGreaterImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
}
};
+template <typename L, typename R>
+constexpr bool IsGreaterOrEqualImpl(const L lhs, const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range) {
+ return l_range.IsOverflow() || r_range.IsUnderflow() ||
+ (l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) >=
+ static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreaterOrEqual {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return IsGreaterOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+struct IsEqual {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return DstRangeRelationToSrcRange<R>(lhs) ==
+ DstRangeRelationToSrcRange<L>(rhs) &&
+ static_cast<decltype(lhs + rhs)>(lhs) ==
+ static_cast<decltype(lhs + rhs)>(rhs);
+ }
+};
+
+template <typename L, typename R>
+struct IsNotEqual {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return DstRangeRelationToSrcRange<R>(lhs) !=
+ DstRangeRelationToSrcRange<L>(rhs) ||
+ static_cast<decltype(lhs + rhs)>(lhs) !=
+ static_cast<decltype(lhs + rhs)>(rhs);
+ }
+};
+
+// SafeCompare performs the mathematically correct comparison between two
+// numeric values, promoting both to a big-enough common type when one exists.
+template <template <typename, typename> class C, typename L, typename R>
+constexpr bool SafeCompare(const L lhs, const R rhs) {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ using Promotion = BigEnoughPromotion<L, R>;
+ using BigType = typename Promotion::type;
+ return Promotion::is_contained
+ // Force to a larger type for speed if both are contained.
+ ? C<BigType, BigType>::Test(
+ static_cast<BigType>(static_cast<L>(lhs)),
+ static_cast<BigType>(static_cast<R>(rhs)))
+ // Let the template functions figure it out for mixed types.
+ : C<L, R>::Test(lhs, rhs);
+}
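+
+// Editor's illustration (not part of the upstream change): SafeCompare gives
+// the mathematically correct answer where built-in comparisons convert:
+// static_assert(SafeCompare<IsLess>(-1, 1u), ""); // raw `-1 < 1u` is false
+// static_assert(!SafeCompare<IsEqual>(-1, UINT_MAX), ""); // raw `==` is true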
+
+template <typename Dst, typename Src>
+constexpr bool IsMaxInRangeForNumericType() {
+ return IsGreaterOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::max(),
+ std::numeric_limits<Src>::max());
+}
+
template <typename Dst, typename Src>
-inline RangeConstraint DstRangeRelationToSrcRange(Src value) {
- // Both source and destination must be numeric.
- STATIC_ASSERT(std::numeric_limits<Src>::is_specialized);
- STATIC_ASSERT(std::numeric_limits<Dst>::is_specialized);
- return DstRangeRelationToSrcRangeImpl<Dst, Src>::Check(value);
+constexpr bool IsMinInRangeForNumericType() {
+ return IsLessOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::lowest(),
+ std::numeric_limits<Src>::lowest());
+}
+
+template <typename Dst, typename Src>
+constexpr Dst CommonMax() {
+ return !IsMaxInRangeForNumericType<Dst, Src>()
+ ? Dst(std::numeric_limits<Dst>::max())
+ : Dst(std::numeric_limits<Src>::max());
+}
+
+template <typename Dst, typename Src>
+constexpr Dst CommonMin() {
+ return !IsMinInRangeForNumericType<Dst, Src>()
+ ? Dst(std::numeric_limits<Dst>::lowest())
+ : Dst(std::numeric_limits<Src>::lowest());
+}
+
+// This is a wrapper that returns the max or min for a supplied type. If the
+// argument is false, the returned value is the maximum. If true, the returned
+// value is the minimum.
+template <typename Dst, typename Src = Dst>
+constexpr Dst CommonMaxOrMin(bool is_min) {
+ return is_min ? CommonMin<Dst, Src>() : CommonMax<Dst, Src>();
}
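+
+// Editor's examples (illustrative): CommonMax<uint8_t, int>() is uint8_t{255}
+// (the destination limits the range), while CommonMax<int, uint8_t>() is
+// int{255} (the source limits it). The clamped math machinery uses these to
+// pick saturation bounds for mixed-type operations.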
} // namespace internal
diff --git a/deps/v8/src/base/threaded-list.h b/deps/v8/src/base/threaded-list.h
index f0eed52ede..91c726474e 100644
--- a/deps/v8/src/base/threaded-list.h
+++ b/deps/v8/src/base/threaded-list.h
@@ -29,6 +29,9 @@ template <typename T, typename BaseClass,
class ThreadedListBase final : public BaseClass {
public:
ThreadedListBase() : head_(nullptr), tail_(&head_) {}
+ ThreadedListBase(const ThreadedListBase&) = delete;
+ ThreadedListBase& operator=(const ThreadedListBase&) = delete;
+
void Add(T* v) {
DCHECK_NULL(*tail_);
DCHECK_NULL(*TLTraits::next(v));
@@ -253,7 +256,6 @@ class ThreadedListBase final : public BaseClass {
private:
T* head_;
T** tail_;
- DISALLOW_COPY_AND_ASSIGN(ThreadedListBase);
};
struct EmptyBase {};
diff --git a/deps/v8/src/builtins/DIR_METADATA b/deps/v8/src/builtins/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/builtins/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+} \ No newline at end of file
diff --git a/deps/v8/src/builtins/OWNERS b/deps/v8/src/builtins/OWNERS
index f52e1c9ca8..48d72aea5e 100644
--- a/deps/v8/src/builtins/OWNERS
+++ b/deps/v8/src/builtins/OWNERS
@@ -1,3 +1 @@
file:../../COMMON_OWNERS
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc
index 4258b07a7b..eea53bca09 100644
--- a/deps/v8/src/builtins/accessors.cc
+++ b/deps/v8/src/builtins/accessors.cc
@@ -81,12 +81,13 @@ bool Accessors::IsJSObjectFieldAccessor(Isolate* isolate, Handle<Map> map,
}
V8_WARN_UNUSED_RESULT MaybeHandle<Object>
-Accessors::ReplaceAccessorWithDataProperty(Handle<Object> receiver,
+Accessors::ReplaceAccessorWithDataProperty(Isolate* isolate,
+ Handle<Object> receiver,
Handle<JSObject> holder,
Handle<Name> name,
Handle<Object> value) {
- LookupIterator it(holder->GetIsolate(), receiver, name, holder,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
+ LookupIterator it(isolate, receiver, LookupIterator::Key(isolate, name),
+ holder, LookupIterator::OWN_SKIP_INTERCEPTOR);
// Skip any access checks we might hit. This accessor should never hit in a
// situation where the caller does not have access.
if (it.state() == LookupIterator::ACCESS_CHECK) {
@@ -114,8 +115,8 @@ void Accessors::ReconfigureToDataProperty(
Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
Handle<Name> name = Utils::OpenHandle(*key);
Handle<Object> value = Utils::OpenHandle(*val);
- MaybeHandle<Object> result =
- Accessors::ReplaceAccessorWithDataProperty(receiver, holder, name, value);
+ MaybeHandle<Object> result = Accessors::ReplaceAccessorWithDataProperty(
+ isolate, receiver, holder, name, value);
if (result.is_null()) {
isolate->OptionalRescheduleException(false);
} else {
diff --git a/deps/v8/src/builtins/accessors.h b/deps/v8/src/builtins/accessors.h
index faee0d9b67..7bc8075e55 100644
--- a/deps/v8/src/builtins/accessors.h
+++ b/deps/v8/src/builtins/accessors.h
@@ -102,8 +102,8 @@ class Accessors : public AllStatic {
FieldIndex* field_index);
static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
- Handle<Object> receiver, Handle<JSObject> holder, Handle<Name> name,
- Handle<Object> value);
+ Isolate* isolate, Handle<Object> receiver, Handle<JSObject> holder,
+ Handle<Name> name, Handle<Object> value);
// Create an AccessorInfo. The setter is optional (can be nullptr).
//
diff --git a/deps/v8/src/builtins/aggregate-error.tq b/deps/v8/src/builtins/aggregate-error.tq
index 0f4a47b3e7..9c70ffcb00 100644
--- a/deps/v8/src/builtins/aggregate-error.tq
+++ b/deps/v8/src/builtins/aggregate-error.tq
@@ -9,10 +9,6 @@ namespace error {
transitioning javascript builtin AggregateErrorConstructor(
js-implicit context: NativeContext, target: JSFunction,
newTarget: JSAny)(...arguments): JSAny {
- // This function is implementing the spec as suggested by
- // https://github.com/tc39/proposal-promise-any/pull/59 . FIXME(marja):
- // change this if the PR is declined, otherwise remove the comment.
-
// 1. If NewTarget is undefined, let newTarget be the active function
// object, else let newTarget be NewTarget.
// 2. Let O be ? OrdinaryCreateFromConstructor(newTarget,
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index e0a6ee1611..5a0a59d879 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -72,38 +72,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
-
-void LoadStackLimit(MacroAssembler* masm, Register destination,
- StackLimitKind kind) {
- DCHECK(masm->root_array_available());
- Isolate* isolate = masm->isolate();
- ExternalReference limit =
- kind == StackLimitKind::kRealStackLimit
- ? ExternalReference::address_of_real_jslimit(isolate)
- : ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
-
- intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
- CHECK(is_int32(offset));
- __ ldr(destination, MemOperand(kRootRegister, offset));
-}
-
-void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch, Label* stack_overflow) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- __ sub(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
- __ b(le, stack_overflow); // Signed comparison.
-}
-
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -118,7 +86,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
Label stack_overflow;
- Generate_StackOverflowCheck(masm, r0, scratch, &stack_overflow);
+ __ StackOverflowCheck(r0, scratch, &stack_overflow);
// Enter a construct frame.
{
@@ -129,7 +97,11 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Push(cp, r0);
__ SmiUntag(r0);
-#ifdef V8_REVERSE_JSARGS
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+ // should get the formal parameter count and copy the arguments into their
+ // correct positions (including any undefined), instead of delaying this to
+ // InvokeFunction.
+
// Set up pointer to last argument (skip receiver).
__ add(
r4, fp,
@@ -138,14 +110,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ PushArray(r4, r0, r5);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-#else
- // The receiver for the builtin/api call.
- __ PushRoot(RootIndex::kTheHoleValue);
- // Set up pointer to last argument.
- __ add(r4, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- // Copy arguments and receiver to the expression stack.
- __ PushArray(r4, r0, r5);
-#endif
// Call the function.
// r0: number of arguments (untagged)
@@ -187,165 +151,155 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
+ FrameScope scope(masm, StackFrame::MANUAL);
// Enter a construct frame.
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
- Label post_instantiation_deopt_entry, not_create_implicit_receiver;
-
- // Preserve the incoming parameters on the stack.
- __ LoadRoot(r4, RootIndex::kTheHoleValue);
- __ SmiTag(r0);
- __ Push(cp, r0, r1, r4, r3);
-
- // ----------- S t a t e -------------
- // -- sp[0*kPointerSize]: new target
- // -- sp[1*kPointerSize]: padding
- // -- r1 and sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments (tagged)
- // -- sp[4*kPointerSize]: context
- // -----------------------------------
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+ __ EnterFrame(StackFrame::CONSTRUCT);
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
- __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r4);
- __ JumpIfIsInRange(r4, kDefaultDerivedConstructor, kDerivedConstructor,
- &not_create_implicit_receiver);
-
- // If not derived class constructor: Allocate the new receiver object.
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
- r4, r5);
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ b(&post_instantiation_deopt_entry);
-
- // Else: use TheHoleValue as receiver for constructor call
- __ bind(&not_create_implicit_receiver);
- __ LoadRoot(r0, RootIndex::kTheHoleValue);
-
- // ----------- S t a t e -------------
- // -- r0: receiver
- // -- Slot 3 / sp[0*kPointerSize]: new target
- // -- Slot 2 / sp[1*kPointerSize]: constructor function
- // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[3*kPointerSize]: context
- // -----------------------------------
- // Deoptimizer enters here.
- masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
- masm->pc_offset());
- __ bind(&post_instantiation_deopt_entry);
-
- // Restore new target.
- __ Pop(r3);
-
-#ifdef V8_REVERSE_JSARGS
- // Push the allocated receiver to the stack.
- __ Push(r0);
- // We need two copies because we may have to return the original one
- // and the calling conventions dictate that the called function pops the
- // receiver. The second copy is pushed after the arguments, so we save it in
- // r6 since r0 needs to hold the number of arguments before calling
- // InvokeFunction.
- __ mov(r6, r0);
-
- // Set up pointer to first argument (skip receiver).
- __ add(
- r4, fp,
- Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
-#else
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(r0, r0);
-
- // Set up pointer to last argument.
- __ add(r4, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-#endif
+ // Preserve the incoming parameters on the stack.
+ __ LoadRoot(r4, RootIndex::kTheHoleValue);
+ __ SmiTag(r0);
+ __ Push(cp, r0, r1, r4, r3);
- // Restore constructor function and argument count.
- __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
- __ ldr(r0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
- __ SmiUntag(r0);
+ // ----------- S t a t e -------------
+ // -- sp[0*kPointerSize]: new target
+ // -- sp[1*kPointerSize]: padding
+ // -- r1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
- Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, r0, r5, &stack_overflow);
- __ b(&enough_stack_space);
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r4);
+ __ JumpIfIsInRange(r4, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver);
- __ bind(&stack_overflow);
- // Restore the context from the frame.
- __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kThrowStackOverflow);
- // Unreachable code.
- __ bkpt(0);
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, r4,
+ r5);
+ __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
+ __ b(&post_instantiation_deopt_entry);
- __ bind(&enough_stack_space);
+ // Else: use TheHoleValue as receiver for constructor call
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(r0, RootIndex::kTheHoleValue);
- // Copy arguments to the expression stack.
- __ PushArray(r4, r0, r5);
+ // ----------- S t a t e -------------
+ // -- r0: receiver
+ // -- Slot 3 / sp[0*kPointerSize]: new target
+ // -- Slot 2 / sp[1*kPointerSize]: constructor function
+ // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[3*kPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
+
+ // Restore new target.
+ __ Pop(r3);
+
+ // Push the allocated receiver to the stack.
+ __ Push(r0);
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+ // receiver. The second copy is pushed after the arguments, so we save it in
+ // r6 since r0 needs to hold the number of arguments before calling
+ // InvokeFunction.
+ __ mov(r6, r0);
+
+ // Set up pointer to first argument (skip receiver).
+ __ add(r4, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
+
+ // Restore constructor function and argument count.
+ __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ ldr(r0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(r0);
-#ifdef V8_REVERSE_JSARGS
- // Push implicit receiver.
- __ Push(r6);
-#endif
+ Label stack_overflow;
+ __ StackOverflowCheck(r0, r5, &stack_overflow);
- // Call the function.
- __ InvokeFunctionWithNewTarget(r1, r3, r0, CALL_FUNCTION);
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+ // should get the formal parameter count and copy the arguments into their
+ // correct positions (including any undefined), instead of delaying this to
+ // InvokeFunction.
- // ----------- S t a t e -------------
- // -- r0: constructor result
- // -- sp[0*kPointerSize]: implicit receiver
- // -- sp[1*kPointerSize]: padding
- // -- sp[2*kPointerSize]: constructor function
- // -- sp[3*kPointerSize]: number of arguments
- // -- sp[4*kPointerSize]: context
- // -----------------------------------
+ // Copy arguments to the expression stack.
+ __ PushArray(r4, r0, r5);
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
+ // Push implicit receiver.
+ __ Push(r6);
- // Restore the context from the frame.
- __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ // Call the function.
+ __ InvokeFunctionWithNewTarget(r1, r3, r0, CALL_FUNCTION);
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, do_throw, leave_frame;
+ // ----------- S t a t e -------------
+ // -- r0: constructor result
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
- // If the result is undefined, we jump out to using the implicit receiver.
- __ JumpIfRoot(r0, RootIndex::kUndefinedValue, &use_receiver);
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
- // Otherwise we do a smi check and fall through to check if the return value
- // is a valid receiver.
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, leave_and_return, check_receiver;
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(r0, &use_receiver);
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ JumpIfNotRoot(r0, RootIndex::kUndefinedValue, &check_receiver);
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(r0, r4, r5, FIRST_JS_RECEIVER_TYPE);
- __ b(ge, &leave_frame);
- __ b(&use_receiver);
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
- __ bind(&do_throw);
- __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
+ __ JumpIfRoot(r0, RootIndex::kTheHoleValue, &do_throw);
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
- __ JumpIfRoot(r0, RootIndex::kTheHoleValue, &do_throw);
+ __ bind(&leave_and_return);
+ // Restore smi-tagged arguments count from the frame.
+ __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
+ __ LeaveFrame(StackFrame::CONSTRUCT);
- __ bind(&leave_frame);
- // Restore smi-tagged arguments count from the frame.
- __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
- // Leave construct frame.
- }
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(sp, sp, Operand(kPointerSize));
__ Jump(lr);
+
+ __ bind(&check_receiver);
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(r0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r0, r4, r5, FIRST_JS_RECEIVER_TYPE);
+ __ b(ge, &leave_and_return);
+ __ b(&use_receiver);
+
+ __ bind(&do_throw);
+ // Restore the context from the frame.
+ __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ __ bkpt(0);
+
+ __ bind(&stack_overflow);
+ // Restore the context from the frame.
+ __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ bkpt(0);
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
@@ -408,16 +362,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
__ cmp(sp, scratch);
__ b(lo, &stack_overflow);
-#ifndef V8_REVERSE_JSARGS
- // Push receiver.
- __ ldr(scratch, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
- __ Push(scratch);
-#endif
-
// ----------- S t a t e -------------
// -- r1 : the JSGeneratorObject to resume
// -- r4 : generator function
@@ -433,7 +381,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ ldr(r2,
FieldMemOperand(r1, JSGeneratorObject::kParametersAndRegistersOffset));
{
-#ifdef V8_REVERSE_JSARGS
Label done_loop, loop;
__ mov(r6, r3);
@@ -450,21 +397,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Push receiver.
__ ldr(scratch, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
__ Push(scratch);
-#else
- Label done_loop, loop;
- __ mov(r6, Operand(0));
-
- __ bind(&loop);
- __ cmp(r6, r3);
- __ b(ge, &done_loop);
- __ add(scratch, r2, Operand(r6, LSL, kTaggedSizeLog2));
- __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ Push(scratch);
- __ add(r6, r6, Operand(1));
- __ b(&loop);
-
- __ bind(&done_loop);
-#endif
}
// Underlying function needs to have bytecode available.
@@ -767,7 +699,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Clobbers r5.
Label enough_stack_space, stack_overflow;
__ add(r6, r0, Operand(1)); // Add one for receiver.
- Generate_StackOverflowCheck(masm, r6, r5, &stack_overflow);
+ __ StackOverflowCheck(r6, r5, &stack_overflow);
__ b(&enough_stack_space);
__ bind(&stack_overflow);
__ CallRuntime(Runtime::kThrowStackOverflow);
@@ -782,7 +714,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r3: receiver
// r0: argc
// r4: argv, i.e. points to first arg
-#ifdef V8_REVERSE_JSARGS
Label loop, entry;
__ add(r6, r4, Operand(r0, LSL, kSystemPointerSizeLog2));
// r6 points past last arg.
@@ -798,23 +729,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Push the receiver.
__ Push(r3);
-#else
- // Push the receiver.
- __ Push(r3);
-
- Label loop, entry;
- __ add(r3, r4, Operand(r0, LSL, kSystemPointerSizeLog2));
- // r3 points past last arg.
- __ b(&entry);
- __ bind(&loop);
- __ ldr(r5, MemOperand(r4, kSystemPointerSize,
- PostIndex)); // read next parameter
- __ ldr(r5, MemOperand(r5)); // dereference handle
- __ push(r5); // push parameter
- __ bind(&entry);
- __ cmp(r4, r3);
- __ b(ne, &loop);
-#endif
// Setup new.target and function.
__ mov(r3, r1);
@@ -877,29 +791,43 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
OMIT_SMI_CHECK);
}
-static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
- Register args_count = scratch;
-
- // Get the arguments + receiver count.
- __ ldr(args_count,
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+ Register scratch2) {
+ Register params_size = scratch1;
+ // Get the size of the formal parameters + receiver (in bytes).
+ __ ldr(params_size,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ ldr(args_count,
- FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+ __ ldr(params_size,
+ FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
+
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ Register actual_params_size = scratch2;
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ ldr(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ lsl(actual_params_size, actual_params_size, Operand(kPointerSizeLog2));
+ __ add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+
+  // If the actual size is bigger than the formal size, use the actual size
+  // when dropping the stack arguments.
+ __ cmp(params_size, actual_params_size);
+ __ mov(params_size, actual_params_size, LeaveCC, lt);
+#endif
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
- __ add(sp, sp, args_count, LeaveCC);
+ __ add(sp, sp, params_size, LeaveCC);
}
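// [Editorial note] A minimal sketch, not patch code, of the size selection
// above under V8_NO_ARGUMENTS_ADAPTOR: without an adaptor frame the caller
// may have pushed more arguments than the callee declares, so the frame
// teardown has to drop max(formal, actual) bytes. Names are illustrative.
#include <algorithm>
#include <cstdint>
inline uint32_t ParamsBytesToDrop(uint32_t formal_params_bytes,
                                  uint32_t actual_argc,
                                  uint32_t pointer_size) {
  // actual_argc excludes the receiver, hence the extra slot (mirroring the
  // add of kSystemPointerSize above).
  uint32_t actual_bytes = actual_argc * pointer_size + pointer_size;
  return std::max(formal_params_bytes, actual_bytes);
}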
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register smi_entry,
- OptimizationMarker marker,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ cmp(smi_entry, Operand(Smi::FromEnum(marker)));
+ __ cmp_raw_immediate(actual_marker, expected_marker);
__ b(ne, &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
@@ -916,16 +844,21 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(!AreAliased(r1, r3, optimized_code_entry, scratch));
Register closure = r1;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+ &heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- Label found_deoptimized_code;
__ ldr(scratch,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ ldr(scratch,
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ b(ne, &found_deoptimized_code);
+ __ b(ne, &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure
// into the optimized functions list, then tail call the optimized code.
@@ -934,10 +867,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ LoadCodeObjectEntry(r2, optimized_code_entry);
__ Jump(r2);
- // Optimized code slot contains deoptimized code, evict it and re-enter
- // the closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+  // The optimized code slot contains deoptimized code, or the code has been
+  // cleared while the optimization marker was not updated. Evict the code,
+  // update the marker and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}
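// [Editorial note] An illustrative rendering -- assumed types, not V8's --
// of the decision above: the slot holds a weak pointer to optimized code,
// and both a GC-cleared pointer and code marked for deoptimization now
// funnel into one path that evicts the code and rewrites the marker via
// Runtime::kHealOptimizedCodeSlot.
struct CodeSketch { bool marked_for_deoptimization; };
enum class SlotAction { kTailCallCode, kHealSlot };
inline SlotAction ClassifyOptimizedCodeSlot(const CodeSketch* weak_code) {
  if (weak_code == nullptr) return SlotAction::kHealSlot;  // cleared weak ref
  if (weak_code->marked_for_deoptimization) return SlotAction::kHealSlot;
  return SlotAction::kTailCallCode;  // good code: link the closure and jump
}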
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
@@ -947,7 +881,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -- r3 : new target (preserved for callee if needed, and caller)
// -- r1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a Smi containing a non-zero optimization marker.
+  // -- optimization_marker : an int32 containing a non-zero optimization
+  //                          marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, r1, r3, optimization_marker));
@@ -964,12 +899,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
+  // The marker should be one of LogFirstExecution / CompileOptimized /
+  // CompileOptimizedConcurrent. InOptimizationQueue and None should not
+  // reach here.
if (FLAG_debug_code) {
- __ cmp(optimization_marker,
- Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
- __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
+ __ stop();
}
}
@@ -1099,18 +1033,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(r4, Operand(FEEDBACK_VECTOR_TYPE));
__ b(ne, &push_stack_frame);
- Register optimized_code_entry = r4;
+ Register optimization_state = r4;
- // Read off the optimized code slot in the feedback vector.
- __ ldr(optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+ // Read off the optimization state in the feedback vector.
+ __ ldr(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if the optimized code slot is not empty.
- Label optimized_code_slot_not_empty;
- __ cmp(optimized_code_entry,
- Operand(Smi::FromEnum(OptimizationMarker::kNone)));
- __ b(ne, &optimized_code_slot_not_empty);
+  // Check if the optimized code slot is not empty or has an optimization marker.
+ Label has_optimized_code_or_marker;
+ __ tst(
+ optimization_state,
+ Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ b(ne, &has_optimized_code_or_marker);
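// [Editorial note] A sketch of the one-word test above. The mask name is
// from the patch; the bit layout below is an assumption for illustration.
// The feedback vector now packs the optimization marker and a "has
// optimized code" bit into a single 32-bit flags field, so one TST
// replaces the old Smi comparison against OptimizationMarker::kNone.
#include <cstdint>
constexpr uint32_t kAssumedMarkerBits = 0x7u;     // assumed bit positions
constexpr uint32_t kAssumedHasCodeBit = 1u << 3;  // assumed bit position
inline bool HasOptimizedCodeOrMarker(uint32_t optimization_state) {
  return (optimization_state & (kAssumedMarkerBits | kAssumedHasCodeBit)) != 0;
}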
Label not_optimized;
__ bind(&not_optimized);
@@ -1156,7 +1090,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ sub(r9, sp, Operand(r4));
- LoadStackLimit(masm, r2, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(r2, StackLimitKind::kRealStackLimit);
__ cmp(r9, Operand(r2));
__ b(lo, &stack_overflow);
@@ -1185,7 +1119,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Perform interrupt stack check.
// TODO(solanes): Merge with the real stack limit check above.
Label stack_check_interrupt, after_stack_check_interrupt;
- LoadStackLimit(masm, r4, StackLimitKind::kInterruptStackLimit);
+ __ LoadStackLimit(r4, StackLimitKind::kInterruptStackLimit);
__ cmp(sp, r4);
__ b(lo, &stack_check_interrupt);
__ bind(&after_stack_check_interrupt);
@@ -1228,7 +1162,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&do_return);
// The return value is in r0.
- LeaveInterpreterFrame(masm, r2);
+ LeaveInterpreterFrame(masm, r2, r4);
__ Jump(lr);
__ bind(&stack_check_interrupt);
@@ -1255,19 +1189,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&optimized_code_slot_not_empty);
+ __ bind(&has_optimized_code_or_marker);
Label maybe_has_optimized_code;
- // Check if optimized code marker is actually a weak reference to the
- // optimized code.
- __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
- MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+
+  // Check if optimized code is available.
+ __ tst(
+ optimization_state,
+ Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ b(eq, &maybe_has_optimized_code);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
- // Load code entry from the weak reference, if it was cleared, resume
- // execution of unoptimized code.
- __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ Register optimized_code_entry = optimization_state;
+ __ ldr(optimization_marker,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
__ bind(&compile_lazy);
@@ -1287,12 +1228,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ mov(scratch, Operand(scratch, LSL, kSystemPointerSizeLog2));
__ sub(start_address, start_address, scratch);
// Push the arguments.
-#ifdef V8_REVERSE_JSARGS
__ PushArray(start_address, num_args, scratch,
TurboAssembler::PushArrayOrder::kReverse);
-#else
- __ PushArray(start_address, num_args, scratch);
-#endif
}
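// [Editorial note] A portable sketch, not the macro-assembler code, of the
// reversed push above. With the reversed JS calling convention the first
// argument must end up closest to the stack pointer, so the array is
// pushed from its last element back to its first. This is my reading of
// PushArrayOrder::kReverse; treat the exact ordering as an assumption.
#include <cstdint>
#include <vector>
inline void PushArrayReverse(std::vector<uint32_t>& stack,
                             const uint32_t* args, int num_args) {
  // push_back models a push toward lower stack addresses.
  for (int i = num_args - 1; i >= 0; --i) stack.push_back(args[i]);
  // args[0] is now on top, i.e. nearest sp once the receiver follows.
}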
// static
@@ -1309,18 +1246,15 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// -----------------------------------
Label stack_overflow;
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ sub(r0, r0, Operand(1));
}
-#endif
__ add(r3, r0, Operand(1)); // Add one for receiver.
- Generate_StackOverflowCheck(masm, r3, r4, &stack_overflow);
+ __ StackOverflowCheck(r3, r4, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Don't copy receiver. Argument count is correct.
__ mov(r3, r0);
@@ -1341,21 +1275,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ sub(r2, r2, Operand(kSystemPointerSize));
__ ldr(r2, MemOperand(r2));
}
-#else
- // Push "undefined" as the receiver arg if we need to.
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(RootIndex::kUndefinedValue);
- __ mov(r3, r0); // Argument count is correct.
- }
-
- // Push the arguments. r2 and r4 will be modified.
- Generate_InterpreterPushArgs(masm, r3, r2, r4);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(r2); // Pass the spread in a register
- __ sub(r0, r0, Operand(1)); // Subtract one for spread
- }
-#endif
// Call the target.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
@@ -1388,9 +1307,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ add(r5, r0, Operand(1)); // Add one for receiver.
- Generate_StackOverflowCheck(masm, r5, r6, &stack_overflow);
+ __ StackOverflowCheck(r5, r6, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ sub(r0, r0, Operand(1));
@@ -1412,21 +1330,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
} else {
__ AssertUndefinedOrAllocationSite(r2, r5);
}
-#else
- // Push a slot for the receiver to be constructed.
- __ mov(r5, Operand::Zero());
- __ push(r5);
-
- // Push the arguments. r4 and r5 will be modified.
- Generate_InterpreterPushArgs(masm, r0, r4, r5);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(r2); // Pass the spread in a register
- __ sub(r0, r0, Operand(1)); // Subtract one for spread
- } else {
- __ AssertUndefinedOrAllocationSite(r2, r5);
- }
-#endif
if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(r1);
@@ -1590,7 +1493,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire(); // Temp register is not allocatable.
if (with_result) {
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin) {
__ mov(scratch, r0);
} else {
@@ -1602,14 +1504,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
sp, config->num_allocatable_general_registers() * kPointerSize +
BuiltinContinuationFrameConstants::kFixedFrameSize));
}
-#else
- // Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point.
- __ str(r0,
- MemOperand(
- sp, config->num_allocatable_general_registers() * kPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize));
-#endif
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
@@ -1618,7 +1512,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ SmiUntag(Register::from_code(code));
}
}
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin && with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. r0 contains the arguments count, the return value
@@ -1628,7 +1521,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// Recover arguments count.
__ sub(r0, r0, Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
}
-#endif
__ ldr(fp, MemOperand(
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
// Load builtin index (stored as a Smi) and use it to get the builtin start
@@ -1715,9 +1607,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc
- // -- sp[0] : argArray
+ // -- sp[0] : receiver
// -- sp[4] : thisArg
- // -- sp[8] : receiver
+ // -- sp[8] : argArray
// -----------------------------------
// 1. Load receiver into r1, argArray into r2 (if present), remove all
@@ -1726,20 +1618,11 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
{
__ LoadRoot(r5, RootIndex::kUndefinedValue);
__ mov(r2, r5);
-#ifdef V8_REVERSE_JSARGS
__ ldr(r1, MemOperand(sp, 0)); // receiver
__ cmp(r0, Operand(1));
__ ldr(r5, MemOperand(sp, kSystemPointerSize), ge); // thisArg
__ cmp(r0, Operand(2), ge);
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argArray
-#else
- __ ldr(r1, MemOperand(sp, r0, LSL, kSystemPointerSizeLog2)); // receiver
- __ sub(r4, r0, Operand(1), SetCC);
- __ ldr(r5, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2), ge); // thisArg
- __ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r2, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2),
- ge); // argArray
-#endif
__ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
__ str(r5, MemOperand(sp, 0));
}
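// [Editorial note] A branch-free C++ analogue, not patch code, of the
// predicated loads above: each cmp / conditional ldr pair means "load the
// k-th optional argument only if argc >= k", and the later cmp
// instructions are themselves conditional so the chain short-circuits
// without branches. Struct and names are assumptions.
#include <cstdint>
struct ApplyArgsSketch { uint32_t receiver, this_arg, arg_array; };
inline ApplyArgsSketch LoadApplyArgs(const uint32_t* sp_slots, uint32_t argc,
                                     uint32_t undefined_value) {
  ApplyArgsSketch out{sp_slots[0], undefined_value, undefined_value};
  if (argc >= 1) out.this_arg = sp_slots[1];   // ldr ..., ge after cmp r0, #1
  if (argc >= 2) out.arg_array = sp_slots[2];  // ldr ..., ge after cmp r0, #2
  return out;
}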
@@ -1774,7 +1657,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
-#ifdef V8_REVERSE_JSARGS
// 1. Get the callable to call (passed as receiver) from the stack.
__ Pop(r1);
@@ -1791,45 +1673,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Adjust the actual number of arguments.
__ sub(r0, r0, Operand(1));
-#else
- // 1. Make sure we have at least one argument.
- // r0: actual number of arguments
- {
- Label done;
- __ cmp(r0, Operand::Zero());
- __ b(ne, &done);
- __ PushRoot(RootIndex::kUndefinedValue);
- __ add(r0, r0, Operand(1));
- __ bind(&done);
- }
-
- // 2. Get the callable to call (passed as receiver) from the stack.
- // r0: actual number of arguments
- __ ldr(r1, __ ReceiverOperand(r0));
-
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- // r0: actual number of arguments
- // r1: callable
- {
- Register scratch = r3;
- Label loop;
- // Calculate the copy start address (destination). Copy end address is sp.
- __ add(r2, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
-
- __ bind(&loop);
- __ ldr(scratch, MemOperand(r2, -kSystemPointerSize));
- __ str(scratch, MemOperand(r2));
- __ sub(r2, r2, Operand(kSystemPointerSize));
- __ cmp(r2, sp);
- __ b(ne, &loop);
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ sub(r0, r0, Operand(1));
- __ pop();
- }
-#endif
// 4. Call the callable.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -1838,12 +1681,11 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc
- // -- sp[0] : argumentsList
- // -- sp[4] : thisArgument
- // -- sp[8] : target
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[4] : target (if argc >= 1)
+ // -- sp[8] : thisArgument (if argc >= 2)
+ // -- sp[12] : argumentsList (if argc == 3)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
// 1. Load target into r1 (if present), argumentsList into r2 (if present),
// remove all arguments from the stack (including the receiver), and push
@@ -1852,23 +1694,12 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadRoot(r1, RootIndex::kUndefinedValue);
__ mov(r5, r1);
__ mov(r2, r1);
-#ifdef V8_REVERSE_JSARGS
__ cmp(r0, Operand(1));
__ ldr(r1, MemOperand(sp, kSystemPointerSize), ge); // target
__ cmp(r0, Operand(2), ge);
__ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge); // thisArgument
__ cmp(r0, Operand(3), ge);
__ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge); // argumentsList
-#else
- __ sub(r4, r0, Operand(1), SetCC);
- __ ldr(r1, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2), ge); // target
- __ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r5, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2),
- ge); // thisArgument
- __ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r2, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2),
- ge); // argumentsList
-#endif
__ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
__ str(r5, MemOperand(sp, 0));
}
@@ -1891,12 +1722,11 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc
- // -- sp[0] : new.target (optional)
- // -- sp[4] : argumentsList
- // -- sp[8] : target
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[4] : target
+ // -- sp[8] : argumentsList
+ // -- sp[12] : new.target (optional)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
// 1. Load target into r1 (if present), argumentsList into r2 (if present),
// new.target into r3 (if present, otherwise use target), remove all
@@ -1905,7 +1735,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
{
__ LoadRoot(r1, RootIndex::kUndefinedValue);
__ mov(r2, r1);
-#ifdef V8_REVERSE_JSARGS
__ mov(r4, r1);
__ cmp(r0, Operand(1));
__ ldr(r1, MemOperand(sp, kSystemPointerSize), ge); // target
@@ -1916,19 +1745,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge); // new.target
__ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
__ str(r4, MemOperand(sp, 0)); // set undefined to the receiver
-#else
- __ str(r2, MemOperand(sp, r0, LSL, kSystemPointerSizeLog2)); // receiver
- __ sub(r4, r0, Operand(1), SetCC);
- __ ldr(r1, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2), ge); // target
- __ mov(r3, r1); // new.target defaults to target
- __ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r2, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2),
- ge); // argumentsList
- __ sub(r4, r4, Operand(1), SetCC, ge);
- __ ldr(r3, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2),
- ge); // new.target
- __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
-#endif
}
// ----------- S t a t e -------------
@@ -2006,9 +1822,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
}
Label stack_overflow;
- Generate_StackOverflowCheck(masm, r4, scratch, &stack_overflow);
+ __ StackOverflowCheck(r4, scratch, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Move the arguments already in the stack,
// including the receiver and the return address.
{
@@ -2028,7 +1843,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bind(&check);
__ b(ge, &copy);
}
-#endif
// Copy arguments onto the stack (thisArgument is already on the stack).
{
@@ -2043,11 +1857,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ cmp(scratch, r5);
// Turn the hole into undefined as we go.
__ LoadRoot(scratch, RootIndex::kUndefinedValue, eq);
-#ifdef V8_REVERSE_JSARGS
__ str(scratch, MemOperand(r9, kSystemPointerSize, PostIndex));
-#else
- __ Push(scratch);
-#endif
__ add(r6, r6, Operand(1));
__ b(&loop);
__ bind(&done);
@@ -2092,6 +1902,12 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&new_target_constructor);
}
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
+ // code is erased.
+ __ mov(r4, fp);
+ __ ldr(r5, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+#else
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -2115,6 +1931,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ SmiUntag(r5);
}
__ bind(&arguments_done);
+#endif
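// [Editorial note] A sketch with assumed names of the two paths above:
// under V8_NO_ARGUMENTS_ADAPTOR the actual argument count is read straight
// from the caller's standard frame (kArgCOffset), while the legacy path
// probes for an arguments-adaptor frame to find the real count.
#include <cstdint>
struct FrameSketch {
  const FrameSketch* caller;
  uint32_t arg_count;  // models the kArgCOffset / adaptor length slot
  bool is_arguments_adaptor;
};
inline uint32_t ActualArgc(const FrameSketch* fp, uint32_t formal_count,
                           bool no_arguments_adaptor) {
  if (no_arguments_adaptor) return fp->arg_count;
  const FrameSketch* caller = fp->caller;
  return caller->is_arguments_adaptor ? caller->arg_count : formal_count;
}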
Label stack_done, stack_overflow;
__ sub(r5, r5, r2, SetCC);
@@ -2131,10 +1948,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -----------------------------------
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, r5, scratch, &stack_overflow);
+ __ StackOverflowCheck(r5, scratch, &stack_overflow);
// Forward the arguments from the caller frame.
-#ifdef V8_REVERSE_JSARGS
// Point to the first argument to copy (skipping the receiver).
__ add(r4, r4,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
@@ -2161,26 +1977,17 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&check);
__ b(ge, &copy);
}
-#endif
// Copy arguments from the caller frame.
// TODO(victorgomes): Consider using forward order as potentially more cache
// friendly.
{
Label loop;
-#ifndef V8_REVERSE_JSARGS
- // Skips frame pointer.
- __ add(r4, r4, Operand(CommonFrameConstants::kFixedFrameSizeAboveFp));
-#endif
__ add(r0, r0, r5);
__ bind(&loop);
{
__ sub(r5, r5, Operand(1), SetCC);
__ ldr(scratch, MemOperand(r4, r5, LSL, kSystemPointerSizeLog2));
-#ifdef V8_REVERSE_JSARGS
__ str(scratch, MemOperand(r2, r5, LSL, kSystemPointerSizeLog2));
-#else
- __ push(scratch);
-#endif
__ b(ne, &loop);
}
}
@@ -2334,8 +2141,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Compute the space we have left. The stack might already be overflowed
// here which will cause remaining_stack_size to become negative.
- LoadStackLimit(masm, remaining_stack_size,
- StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(remaining_stack_size,
+ StackLimitKind::kRealStackLimit);
__ sub(remaining_stack_size, sp, remaining_stack_size);
// Check if the arguments will overflow the stack.
@@ -2350,7 +2157,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
// Pop receiver.
__ Pop(r5);
@@ -2368,39 +2174,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Push receiver.
__ Push(r5);
-#else
- // Reserve stack space for the [[BoundArguments]].
- __ AllocateStackSpace(scratch);
-
- // Relocate arguments down the stack.
- {
- Label loop, done_loop;
- __ mov(r5, Operand(0));
- __ bind(&loop);
- __ cmp(r5, r0);
- __ b(gt, &done_loop);
- __ ldr(scratch, MemOperand(sp, r4, LSL, kSystemPointerSizeLog2));
- __ str(scratch, MemOperand(sp, r5, LSL, kSystemPointerSizeLog2));
- __ add(r4, r4, Operand(1));
- __ add(r5, r5, Operand(1));
- __ b(&loop);
- __ bind(&done_loop);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop;
- __ ldr(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ SmiUntag(r4);
- __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- __ sub(r4, r4, Operand(1), SetCC);
- __ ldr(scratch, MemOperand(r2, r4, LSL, kPointerSizeLog2));
- __ str(scratch, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ add(r0, r0, Operand(1));
- __ b(gt, &loop);
- }
-#endif
}
__ bind(&no_bound_arguments);
}
@@ -2588,19 +2361,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r3 : new target (passed through to callee)
// -----------------------------------
- Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
+ Label dont_adapt_arguments, stack_overflow;
__ cmp(r2, Operand(kDontAdaptArgumentsSentinel));
__ b(eq, &dont_adapt_arguments);
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
-#ifndef V8_REVERSE_JSARGS
- // This optimization is disabled when the arguments are reversed.
- __ tst(r4,
- Operand(SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask));
- __ b(ne, &skip_adapt_arguments);
-#endif
-
// -------------------------------------------
// Adapt arguments.
// -------------------------------------------
@@ -2613,18 +2379,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&over_application);
{
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, r2, r5, &stack_overflow);
+ __ StackOverflowCheck(r2, r5, &stack_overflow);
// Calculate copy start address into r0 and copy end address into r4.
// r0: actual number of arguments as a smi
// r1: function
// r2: expected number of arguments
// r3: new target (passed through to callee)
-#ifdef V8_REVERSE_JSARGS
__ add(r0, fp, Operand(r2, LSL, kSystemPointerSizeLog2));
-#else
- __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
-#endif
// adjust for return address and receiver
__ add(r0, r0, Operand(2 * kSystemPointerSize));
__ sub(r4, r0, Operand(r2, LSL, kSystemPointerSizeLog2));
@@ -2651,9 +2413,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&under_application);
{
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, r2, r5, &stack_overflow);
+ __ StackOverflowCheck(r2, r5, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Fill the remaining expected arguments with undefined.
// r0: actual number of arguments as a smi
// r1: function
@@ -2695,47 +2456,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ cmp(r0, fp); // Compare before moving to next argument.
__ sub(r0, r0, Operand(kPointerSize));
__ b(ne, &copy);
-#else
- // Calculate copy start address into r0 and copy end address is fp.
- // r0: actual number of arguments as a smi
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r0: copy start address
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- Label copy;
- __ bind(&copy);
-
- // Adjust load for return address and receiver.
- __ ldr(r5, MemOperand(r0, 2 * kPointerSize));
- __ push(r5);
-
- __ cmp(r0, fp); // Compare before moving to next argument.
- __ sub(r0, r0, Operand(kPointerSize));
- __ b(ne, &copy);
-
- // Fill the remaining expected arguments with undefined.
- // r1: function
- // r2: expected number of arguments
- // r3: new target (passed through to callee)
- __ LoadRoot(r5, RootIndex::kUndefinedValue);
- __ sub(r4, fp, Operand(r2, LSL, kPointerSizeLog2));
- // Adjust for frame.
- __ sub(r4, r4,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ push(r5);
- __ cmp(sp, r4);
- __ b(ne, &fill);
-#endif
}
// Call the entry point.
@@ -2758,41 +2478,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
// -------------------------------------------
- // Skip adapt arguments.
- // -------------------------------------------
- __ bind(&skip_adapt_arguments);
- {
- // The callee cannot observe the actual arguments, so it's safe to just
- // pass the expected arguments by massaging the stack appropriately. See
- // http://bit.ly/v8-faster-calls-with-arguments-mismatch for details.
- Label under_application, over_application;
- __ cmp(r0, r2);
- __ b(lt, &under_application);
-
- __ bind(&over_application);
- {
- // Remove superfluous parameters from the stack.
- __ sub(r4, r0, r2);
- __ mov(r0, r2);
- __ add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
- __ b(&dont_adapt_arguments);
- }
-
- __ bind(&under_application);
- {
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ LoadRoot(r4, RootIndex::kUndefinedValue);
- __ bind(&fill);
- __ add(r0, r0, Operand(1));
- __ push(r4);
- __ cmp(r0, r2);
- __ b(lt, &fill);
- __ b(&dont_adapt_arguments);
- }
- }
-
- // -------------------------------------------
// Dont adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
@@ -3241,12 +2926,11 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- r2 : arguments count (not including the receiver)
// -- r3 : call data
// -- r0 : holder
- // -- sp[0] : last argument
+ // -- sp[0] : receiver
+  // -- sp[4] : first argument
// -- ...
- // -- sp[(argc - 1) * 4] : first argument
- // -- sp[(argc + 0) * 4] : receiver
+  // -- sp[(argc) * 4] : last argument
// -----------------------------------
- // NOTE: The order of args are reversed if V8_REVERSE_JSARGS
Register api_function_address = r1;
Register argc = r2;
@@ -3314,12 +2998,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
-#ifdef V8_REVERSE_JSARGS
__ add(scratch, scratch, Operand((FCA::kArgsLength + 1) * kPointerSize));
-#else
- __ add(scratch, scratch, Operand((FCA::kArgsLength - 1) * kPointerSize));
- __ add(scratch, scratch, Operand(argc, LSL, kPointerSizeLog2));
-#endif
__ str(scratch, MemOperand(sp, 2 * kPointerSize));
// FunctionCallbackInfo::length_.
@@ -3461,6 +3140,251 @@ void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
__ Ret();
}
+namespace {
+
+// This code tries to stay close to the ia32 version so that any changes can
+// be ported over easily.
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // Note: This is an overapproximation; we always reserve space for 32 double
+ // registers, even though the actual CPU may only support 16. In the latter
+ // case, SaveFPRegs and RestoreFPRegs still use 32 stack slots, but only fill
+ // 16.
+ static constexpr int kDoubleRegsSize =
+ kDoubleSize * DwVfpRegister::kNumRegisters;
+
+ // Save all allocatable VFP registers before messing with them.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ SaveFPRegs(sp, scratch);
+ }
+
+ // Save all general purpose registers before messing with them.
+ static constexpr int kNumberOfRegisters = Register::kNumRegisters;
+ STATIC_ASSERT(kNumberOfRegisters == 16);
+
+  // Everything but sp, lr and pc which will be saved but not restored.
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
+
+ // Push all 16 registers (needed to populate FrameDescription::registers_).
+ // TODO(v8:1588): Note that using pc with stm is deprecated, so we should
+ // perhaps handle this a bit differently.
+ __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
+
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ Move(scratch, ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, isolate));
+ __ str(fp, MemOperand(scratch));
+ }
+
+ static constexpr int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+ __ mov(r2, Operand(Deoptimizer::kFixedExitSizeMarker));
+ // Get the address of the location in the code object (r3) (return
+ // address for lazy deoptimization) and compute the fp-to-sp delta in
+ // register r4.
+ __ mov(r3, lr);
+ __ add(r4, sp, Operand(kSavedRegistersAreaSize));
+ __ sub(r4, fp, r4);
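// [Editorial note] A one-function sketch, not patch code, of the delta
// computed above: after the register save area has been pushed, the
// deoptimizer wants the distance from the optimized frame's fp down to the
// sp the function had before the save, so the saved area is added back
// before subtracting.
#include <cstdint>
inline uintptr_t FpToSpDelta(uintptr_t fp, uintptr_t sp,
                             uintptr_t saved_registers_area_size) {
  // Mirrors: add r4, sp, #kSavedRegistersAreaSize; sub r4, fp, r4.
  return fp - (sp + saved_registers_area_size);
}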
+
+ // Allocate a new deoptimizer object.
+ // Pass four arguments in r0 to r3 and fifth argument on stack.
+ __ PrepareCallCFunction(6);
+ __ mov(r0, Operand(0));
+ Label context_check;
+ __ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(r1, &context_check);
+ __ ldr(r0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ mov(r1, Operand(static_cast<int>(deopt_kind)));
+ // r2: bailout id already loaded.
+ // r3: code address or 0 already loaded.
+ __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
+ __ Move(r5, ExternalReference::isolate_address(isolate));
+ __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+  // Preserve the "deoptimizer" object in register r0 and get the input
+  // frame descriptor pointer into r1 (deoptimizer->input_).
+ __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_.
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ ldr(r2, MemOperand(sp, i * kPointerSize));
+ __ str(r2, MemOperand(r1, offset));
+ }
+
+ // Copy double registers to double_registers_.
+ static constexpr int kDoubleRegsOffset =
+ FrameDescription::double_registers_offset();
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ Register src_location = r4;
+ __ add(src_location, sp, Operand(kNumberOfRegisters * kPointerSize));
+ __ RestoreFPRegs(src_location, scratch);
+
+ Register dst_location = r4;
+ __ add(dst_location, r1, Operand(kDoubleRegsOffset));
+ __ SaveFPRegsToHeap(dst_location, scratch);
+ }
+
+  // Mark the stack as not iterable for the CPU profiler, which won't be able
+  // to walk the stack without the return address.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register zero = r4;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ mov(zero, Operand(0));
+ __ strb(zero, MemOperand(is_iterable));
+ }
+
+ // Remove the saved registers from the stack.
+ __ add(sp, sp, Operand(kSavedRegistersAreaSize));
+
+ // Compute a pointer to the unwinding limit in register r2; that is
+ // the first stack slot not part of the input frame.
+ __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
+ __ add(r2, r2, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ Label pop_loop_header;
+ __ b(&pop_loop_header);
+ __ bind(&pop_loop);
+ __ pop(r4);
+ __ str(r4, MemOperand(r3, 0));
+ __ add(r3, r3, Operand(sizeof(uint32_t)));
+ __ bind(&pop_loop_header);
+ __ cmp(r2, sp);
+ __ b(ne, &pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ push(r0); // Preserve deoptimizer object across call.
+ // r0: deoptimizer object; r1: scratch.
+ __ PrepareCallCFunction(1);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ pop(r0); // Restore deoptimizer object (class Deoptimizer).
+
+ __ ldr(sp, MemOperand(r0, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: r4 = current "FrameDescription** output_",
+ // r1 = one past the last FrameDescription**.
+ __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
+ __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_.
+ __ add(r1, r4, Operand(r1, LSL, 2));
+ __ jmp(&outer_loop_header);
+ __ bind(&outer_push_loop);
+ // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
+ __ ldr(r2, MemOperand(r4, 0)); // output_[ix]
+ __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
+ __ bind(&inner_push_loop);
+ __ sub(r3, r3, Operand(sizeof(uint32_t)));
+ __ add(r6, r2, Operand(r3));
+ __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
+ __ push(r6);
+ __ bind(&inner_loop_header);
+ __ cmp(r3, Operand::Zero());
+  __ b(ne, &inner_push_loop);  // TODO: should this test use gt instead of ne?
+ __ add(r4, r4, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ cmp(r4, r1);
+ __ b(lt, &outer_push_loop);
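// [Editorial note] A C++ rendering -- illustrative types, not V8's -- of
// the nested loop above: the outer loop walks the output_ array of
// FrameDescription pointers, and the inner loop pushes each frame's
// contents from the highest offset down, matching a downward-growing stack.
#include <cstdint>
#include <vector>
struct FrameDescSketch {
  uint32_t frame_size;       // bytes, a multiple of sizeof(uint32_t)
  const uint32_t* contents;  // models frame_content_offset() data
};
inline void PushOutputFrames(std::vector<uint32_t>& stack,
                             FrameDescSketch* const* output, int count) {
  for (int i = 0; i < count; ++i) {  // outer_push_loop
    const FrameDescSketch* frame = output[i];
    for (uint32_t off = frame->frame_size; off > 0;
         off -= sizeof(uint32_t)) {  // inner_push_loop
      stack.push_back(frame->contents[off / sizeof(uint32_t) - 1]);
    }
  }
}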
+
+ __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
+
+ // State:
+ // r1: Deoptimizer::input_ (FrameDescription*).
+ // r2: The last output FrameDescription pointer (FrameDescription*).
+
+ // Restore double registers from the input frame description.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ Register src_location = r6;
+ __ add(src_location, r1, Operand(kDoubleRegsOffset));
+ __ RestoreFPRegsFromHeap(src_location, scratch);
+ }
+
+ // Push pc and continuation from the last output frame.
+ __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
+ __ push(r6);
+ __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
+ __ push(r6);
+
+ // Push the registers from the last output frame.
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ ldr(r6, MemOperand(r2, offset));
+ __ push(r6);
+ }
+
+ // Restore the registers from the stack.
+  __ ldm(ia_w, sp, restored_regs); // Restore all but sp, lr and pc.
+
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register one = r4;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ mov(one, Operand(1));
+ __ strb(one, MemOperand(is_iterable));
+ }
+
+ // Remove sp, lr and pc.
+ __ Drop(3);
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ pop(scratch); // get continuation, leave pc on stack
+ __ pop(lr);
+ __ Jump(scratch);
+ }
+
+ __ stop();
+}
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index be6d70eb08..92c1fefa0a 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -74,41 +74,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
-
-void LoadStackLimit(MacroAssembler* masm, Register destination,
- StackLimitKind kind) {
- DCHECK(masm->root_array_available());
- Isolate* isolate = masm->isolate();
- ExternalReference limit =
- kind == StackLimitKind::kRealStackLimit
- ? ExternalReference::address_of_real_jslimit(isolate)
- : ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
-
- intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
- __ Ldr(destination, MemOperand(kRootRegister, offset));
-}
-
-void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Label* stack_overflow) {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.AcquireX();
-
- // Check the stack for overflow.
- // We are not trying to catch interruptions (e.g. debug break and
- // preemption) here, so the "real stack limit" is checked.
-
- LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- __ Sub(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ Cmp(scratch, Operand(num_args, LSL, kSystemPointerSizeLog2));
- __ B(le, stack_overflow);
-}
-
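// [Editorial note] A portable sketch of the check the deleted helper above
// performed and that TurboAssembler::StackOverflowCheck now centralizes
// (my reading of the replacement; signature assumed). The span is signed
// because sp may already sit below the limit.
#include <cstdint>
inline bool WouldOverflowStack(uintptr_t sp, uintptr_t real_stack_limit,
                               uintptr_t num_args,
                               unsigned pointer_size_log2) {
  intptr_t space_left = static_cast<intptr_t>(sp - real_stack_limit);
  intptr_t bytes_needed = static_cast<intptr_t>(num_args << pointer_size_log2);
  return space_left <= bytes_needed;  // B(le, stack_overflow) above
}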
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
@@ -122,7 +87,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
Label stack_overflow;
- Generate_StackOverflowCheck(masm, x0, &stack_overflow);
+ __ StackOverflowCheck(x0, &stack_overflow);
// Enter a construct frame.
{
@@ -155,32 +120,28 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// stack to which arguments will be later copied.
__ SlotAddress(x2, argc);
-#ifndef V8_REVERSE_JSARGS
- // Poke the hole (receiver) in the highest slot.
- __ Str(x4, MemOperand(x2));
-#endif
-
// Store padding, if needed.
__ Tbnz(slot_count_without_rounding, 0, &already_aligned);
__ Str(padreg, MemOperand(x2, 1 * kSystemPointerSize));
__ Bind(&already_aligned);
+  // TODO(victorgomes): When the arguments adaptor is completely removed, we
+  // should get the formal parameter count and copy the arguments into their
+  // correct positions (including any undefined), instead of delaying this to
+  // InvokeFunction.
+
// Copy arguments to the expression stack.
{
Register count = x2;
Register dst = x10;
Register src = x11;
__ SlotAddress(dst, 0);
-#ifdef V8_REVERSE_JSARGS
// Poke the hole (receiver).
__ Str(x4, MemOperand(dst));
__ Add(dst, dst, kSystemPointerSize); // Skip receiver.
__ Add(src, fp,
StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize); // Skip receiver.
-#else
- __ Add(src, fp, StandardFrameConstants::kCallerSPOffset);
-#endif
__ Mov(count, argc);
__ CopyDoubleWords(dst, src, count);
}
@@ -190,24 +151,25 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- x1: constructor function
// -- x3: new target
// If argc is odd:
- // -- sp[0*kSystemPointerSize]: argument n - 1
+ // -- sp[0*kSystemPointerSize]: the hole (receiver)
+ // -- sp[1*kSystemPointerSize]: argument 1
// -- ...
- // -- sp[(n-1)*kSystemPointerSize]: argument 1
- // -- sp[(n+0)*kSystemPointerSize]: the hole (receiver)
+ // -- sp[(n-1)*kSystemPointerSize]: argument (n - 1)
+ // -- sp[(n+0)*kSystemPointerSize]: argument n
// -- sp[(n+1)*kSystemPointerSize]: padding
// -- sp[(n+2)*kSystemPointerSize]: padding
// -- sp[(n+3)*kSystemPointerSize]: number of arguments (tagged)
// -- sp[(n+4)*kSystemPointerSize]: context (pushed by FrameScope)
// If argc is even:
- // -- sp[0*kSystemPointerSize]: argument n - 1
+ // -- sp[0*kSystemPointerSize]: the hole (receiver)
+ // -- sp[1*kSystemPointerSize]: argument 1
// -- ...
- // -- sp[(n-1)*kSystemPointerSize]: argument 1
- // -- sp[(n+0)*kSystemPointerSize]: the hole (receiver)
+ // -- sp[(n-1)*kSystemPointerSize]: argument (n - 1)
+ // -- sp[(n+0)*kSystemPointerSize]: argument n
// -- sp[(n+1)*kSystemPointerSize]: padding
// -- sp[(n+2)*kSystemPointerSize]: number of arguments (tagged)
// -- sp[(n+3)*kSystemPointerSize]: context (pushed by FrameScope)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
// Call the function.
__ InvokeFunctionWithNewTarget(x1, x3, argc, CALL_FUNCTION);
@@ -248,194 +210,192 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_JSConstructStubGeneric");
+ FrameScope scope(masm, StackFrame::MANUAL);
// Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
- Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+ __ EnterFrame(StackFrame::CONSTRUCT);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
- if (__ emit_debug_code()) {
- // Check that FrameScope pushed the context on to the stack already.
- __ Peek(x2, 0);
- __ Cmp(x2, cp);
- __ Check(eq, AbortReason::kUnexpectedValue);
- }
-
- // Preserve the incoming parameters on the stack.
- __ SmiTag(x0);
- __ Push(x0, x1, padreg, x3);
-
- // ----------- S t a t e -------------
- // -- sp[0*kSystemPointerSize]: new target
- // -- sp[1*kSystemPointerSize]: padding
- // -- x1 and sp[2*kSystemPointerSize]: constructor function
- // -- sp[3*kSystemPointerSize]: number of arguments (tagged)
- // -- sp[4*kSystemPointerSize]: context (pushed by FrameScope)
- // -----------------------------------
-
- __ LoadTaggedPointerField(
- x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
- __ DecodeField<SharedFunctionInfo::FunctionKindBits>(w4);
- __ JumpIfIsInRange(w4, kDefaultDerivedConstructor, kDerivedConstructor,
- &not_create_implicit_receiver);
-
- // If not derived class constructor: Allocate the new receiver object.
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
- x4, x5);
-
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
+ if (__ emit_debug_code()) {
+ // Check that FrameScope pushed the context on to the stack already.
+ __ Peek(x2, 0);
+ __ Cmp(x2, cp);
+ __ Check(eq, AbortReason::kUnexpectedValue);
+ }
- __ B(&post_instantiation_deopt_entry);
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(x0);
+ __ Push(x0, x1, padreg, x3);
- // Else: use TheHoleValue as receiver for constructor call
- __ Bind(&not_create_implicit_receiver);
- __ LoadRoot(x0, RootIndex::kTheHoleValue);
+ // ----------- S t a t e -------------
+ // -- sp[0*kSystemPointerSize]: new target
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- x1 and sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[4*kSystemPointerSize]: context (pushed by FrameScope)
+ // -----------------------------------
- // ----------- S t a t e -------------
- // -- x0: receiver
- // -- Slot 4 / sp[0*kSystemPointerSize]: new target
- // -- Slot 3 / sp[1*kSystemPointerSize]: padding
- // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
- // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[4*kSystemPointerSize]: context
- // -----------------------------------
- // Deoptimizer enters here.
- masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
- masm->pc_offset());
+ __ LoadTaggedPointerField(
+ x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(w4);
+ __ JumpIfIsInRange(w4, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver);
- __ Bind(&post_instantiation_deopt_entry);
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, x4,
+ x5);
- // Restore new target from the top of the stack.
- __ Peek(x3, 0 * kSystemPointerSize);
+ __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
- // Restore constructor function and argument count.
- __ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
- __ SmiUntag(x12, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ B(&post_instantiation_deopt_entry);
- // Copy arguments to the expression stack. The called function pops the
- // receiver along with its arguments, so we need an extra receiver on the
- // stack, in case we have to return it later.
+ // Else: use TheHoleValue as receiver for constructor call
+ __ Bind(&not_create_implicit_receiver);
+ __ LoadRoot(x0, RootIndex::kTheHoleValue);
- // Overwrite the new target with a receiver.
- __ Poke(x0, 0);
+ // ----------- S t a t e -------------
+ // -- x0: receiver
+ // -- Slot 4 / sp[0*kSystemPointerSize]: new target
+ // -- Slot 3 / sp[1*kSystemPointerSize]: padding
+ // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kSystemPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
- // Push two further copies of the receiver. One will be popped by the called
- // function. The second acts as padding if the number of arguments plus
- // receiver is odd - pushing receiver twice avoids branching. It also means
- // that we don't have to handle the even and odd cases specially on
- // InvokeFunction's return, as top of stack will be the receiver in either
- // case.
- __ Push(x0, x0);
+ __ Bind(&post_instantiation_deopt_entry);
- // ----------- S t a t e -------------
- // -- x3: new target
- // -- x12: number of arguments (untagged)
- // -- sp[0*kSystemPointerSize]: implicit receiver (overwrite if argc
- // odd)
- // -- sp[1*kSystemPointerSize]: implicit receiver
- // -- sp[2*kSystemPointerSize]: implicit receiver
- // -- sp[3*kSystemPointerSize]: padding
- // -- x1 and sp[4*kSystemPointerSize]: constructor function
- // -- sp[5*kSystemPointerSize]: number of arguments (tagged)
- // -- sp[6*kSystemPointerSize]: context
- // -----------------------------------
+ // Restore new target from the top of the stack.
+ __ Peek(x3, 0 * kSystemPointerSize);
- // Round the number of arguments down to the next even number, and claim
- // slots for the arguments. If the number of arguments was odd, the last
- // argument will overwrite one of the receivers pushed above.
- __ Bic(x10, x12, 1);
+ // Restore constructor function and argument count.
+ __ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ SmiUntag(x12, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
- // Check if we have enough stack space to push all arguments.
- Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, x10, &stack_overflow);
- __ B(&enough_stack_space);
+ // Copy arguments to the expression stack. The called function pops the
+ // receiver along with its arguments, so we need an extra receiver on the
+ // stack, in case we have to return it later.
- __ Bind(&stack_overflow);
- // Restore the context from the frame.
- __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kThrowStackOverflow);
- __ Unreachable();
+ // Overwrite the new target with a receiver.
+ __ Poke(x0, 0);
- __ Bind(&enough_stack_space);
- __ Claim(x10);
+ // Push two further copies of the receiver. One will be popped by the called
+ // function. The second acts as padding if the number of arguments plus
+ // receiver is odd - pushing receiver twice avoids branching. It also means
+ // that we don't have to handle the even and odd cases specially on
+ // InvokeFunction's return, as top of stack will be the receiver in either
+ // case.
+ __ Push(x0, x0);
- // Copy the arguments.
- {
- Register count = x2;
- Register dst = x10;
- Register src = x11;
- __ Mov(count, x12);
-#ifdef V8_REVERSE_JSARGS
- __ Poke(x0, 0); // Add the receiver.
- __ SlotAddress(dst, 1); // Skip receiver.
- __ Add(src, fp,
- StandardFrameConstants::kCallerSPOffset + kSystemPointerSize);
-#else
- __ SlotAddress(dst, 0);
- __ Add(src, fp, StandardFrameConstants::kCallerSPOffset);
-#endif
- __ CopyDoubleWords(dst, src, count);
- }
+ // ----------- S t a t e -------------
+ // -- x3: new target
+ // -- x12: number of arguments (untagged)
+ // -- sp[0*kSystemPointerSize]: implicit receiver (overwrite if argc
+ // odd)
+ // -- sp[1*kSystemPointerSize]: implicit receiver
+ // -- sp[2*kSystemPointerSize]: implicit receiver
+ // -- sp[3*kSystemPointerSize]: padding
+ // -- x1 and sp[4*kSystemPointerSize]: constructor function
+ // -- sp[5*kSystemPointerSize]: number of arguments (tagged)
+ // -- sp[6*kSystemPointerSize]: context
+ // -----------------------------------
- // Call the function.
- __ Mov(x0, x12);
- __ InvokeFunctionWithNewTarget(x1, x3, x0, CALL_FUNCTION);
+ // Round the number of arguments down to the next even number, and claim
+ // slots for the arguments. If the number of arguments was odd, the last
+ // argument will overwrite one of the receivers pushed above.
+ __ Bic(x10, x12, 1);
- // ----------- S t a t e -------------
- // -- sp[0*kSystemPointerSize]: implicit receiver
- // -- sp[1*kSystemPointerSize]: padding
- // -- sp[2*kSystemPointerSize]: constructor function
- // -- sp[3*kSystemPointerSize]: number of arguments
- // -- sp[4*kSystemPointerSize]: context
- // -----------------------------------
+ // Check if we have enough stack space to push all arguments.
+ Label stack_overflow;
+ __ StackOverflowCheck(x10, &stack_overflow);
+ __ Claim(x10);
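// [Editorial note] A tiny sketch, not patch code, of the rounding above:
// AArch64 requires sp to stay 16-byte aligned, so the stub claims an even
// number of 8-byte slots. Bic(x10, x12, 1) clears bit 0 of the argument
// count, and an odd final argument later lands in one of the receiver
// copies pushed as padding.
#include <cstdint>
inline uint64_t SlotsToClaim(uint64_t argc) {
  return argc & ~uint64_t{1};  // Bic #1: round down to an even count
}
// Example: argc == 5 claims 4 slots; the fifth argument overwrites padding.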
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
+  // TODO(victorgomes): When the arguments adaptor is completely removed, we
+  // should get the formal parameter count and copy the arguments into their
+  // correct positions (including any undefined), instead of delaying this to
+  // InvokeFunction.
- // Restore the context from the frame.
- __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ // Copy the arguments.
+ {
+ Register count = x2;
+ Register dst = x10;
+ Register src = x11;
+ __ Mov(count, x12);
+ __ Poke(x0, 0); // Add the receiver.
+ __ SlotAddress(dst, 1); // Skip receiver.
+ __ Add(src, fp,
+ StandardFrameConstants::kCallerSPOffset + kSystemPointerSize);
+ __ CopyDoubleWords(dst, src, count);
+ }
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, do_throw, leave_frame;
+ // Call the function.
+ __ Mov(x0, x12);
+ __ InvokeFunctionWithNewTarget(x1, x3, x0, CALL_FUNCTION);
- // If the result is undefined, we jump out to using the implicit receiver.
- __ CompareRoot(x0, RootIndex::kUndefinedValue);
- __ B(eq, &use_receiver);
+ // ----------- S t a t e -------------
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: number of arguments
+ // -- sp[4*kSystemPointerSize]: context
+ // -----------------------------------
- // Otherwise we do a smi check and fall through to check if the return value
- // is a valid receiver.
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, leave_and_return, check_receiver;
+
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ CompareRoot(x0, RootIndex::kUndefinedValue);
+ __ B(ne, &check_receiver);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ Bind(&use_receiver);
+ __ Peek(x0, 0 * kSystemPointerSize);
+ __ CompareRoot(x0, RootIndex::kTheHoleValue);
+ __ B(eq, &do_throw);
+
+ __ Bind(&leave_and_return);
+ // Restore smi-tagged arguments count from the frame.
+ __ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
+ __ LeaveFrame(StackFrame::CONSTRUCT);
+ // Remove caller arguments from the stack and return.
+ __ DropArguments(x1, TurboAssembler::kCountExcludesReceiver);
+ __ Ret();
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(x0, &use_receiver);
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+ __ bind(&check_receiver);
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ JumpIfObjectType(x0, x4, x5, FIRST_JS_RECEIVER_TYPE, &leave_frame, ge);
- __ B(&use_receiver);
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(x0, &use_receiver);
- __ Bind(&do_throw);
- __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(x0, x4, x5, FIRST_JS_RECEIVER_TYPE, &leave_and_return,
+ ge);
+ __ B(&use_receiver);
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ Bind(&use_receiver);
- __ Peek(x0, 0 * kSystemPointerSize);
- __ CompareRoot(x0, RootIndex::kTheHoleValue);
- __ B(eq, &do_throw);
+ __ Bind(&do_throw);
+ // Restore the context from the frame.
+ __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ __ Unreachable();
- __ Bind(&leave_frame);
- // Restore smi-tagged arguments count from the frame.
- __ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
- // Leave construct frame.
- }
- // Remove caller arguments from the stack and return.
- __ DropArguments(x1, TurboAssembler::kCountExcludesReceiver);
- __ Ret();
+ __ Bind(&stack_overflow);
+ // Restore the context from the frame.
+ __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ Unreachable();
}
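The use_receiver/check_receiver dance above implements ECMA-262's rule that a constructor's return value only replaces the implicit receiver when it is an object. As a rough standalone model (the Kind/Value types here are illustrative stand-ins, not V8's):

#include <stdexcept>

enum class Kind { kUndefined, kSmi, kJSReceiver, kOtherHeapObject, kTheHole };
struct Value { Kind kind; };

// Only a JSReceiver result replaces the implicit receiver; undefined, smis
// and other heap objects fall back to the on-stack receiver.
Value ConstructResult(Value result, Value implicit_receiver) {
  if (result.kind == Kind::kJSReceiver) return result;
  // The hole marks a derived constructor that never called super(); that is
  // the stub's do_throw path (kThrowConstructorReturnedNonObject).
  if (implicit_receiver.kind == Kind::kTheHole)
    throw std::runtime_error("constructor returned non-object");
  return implicit_receiver;
}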
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
@@ -501,7 +461,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadStackLimit(masm, x10, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(x10, StackLimitKind::kRealStackLimit);
__ Cmp(sp, x10);
__ B(lo, &stack_overflow);
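The three instructions above boil down to one unsigned comparison; as a sketch, with the limit modeled as a plain address:

#include <cstdint>

// B(lo, ...) is an unsigned "below": the overflow branch is taken exactly
// when sp has dropped under the real stack limit.
bool HitRealStackLimit(uintptr_t sp, uintptr_t real_stack_limit) {
  return sp < real_stack_limit;
}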
@@ -541,7 +501,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
{
Label loop, done;
__ Cbz(x10, &done);
-#ifdef V8_REVERSE_JSARGS
__ SlotAddress(x12, x10);
__ Add(x5, x5, Operand(x10, LSL, kTaggedSizeLog2));
__ Add(x5, x5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -549,15 +508,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Sub(x10, x10, 1);
__ LoadAnyTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex));
__ Str(x11, MemOperand(x12, -kSystemPointerSize, PostIndex));
-#else
- __ Mov(x12, 0);
- __ Bind(&loop);
- __ Sub(x10, x10, 1);
- __ Add(x11, x5, Operand(x12, LSL, kTaggedSizeLog2));
- __ LoadAnyTaggedField(x11, FieldMemOperand(x11, FixedArray::kHeaderSize));
- __ Poke(x11, Operand(x10, LSL, kSystemPointerSizeLog2));
- __ Add(x12, x12, 1);
-#endif
__ Cbnz(x10, &loop);
__ Bind(&done);
}
@@ -882,7 +832,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments.
Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, slots_to_claim, &stack_overflow);
+ __ StackOverflowCheck(slots_to_claim, &stack_overflow);
__ B(&enough_stack_space);
__ Bind(&stack_overflow);
@@ -896,17 +846,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ SlotAddress(scratch, slots_to_claim);
__ Str(padreg, MemOperand(scratch, -kSystemPointerSize));
-#ifdef V8_REVERSE_JSARGS
// Store receiver on the stack.
__ Poke(receiver, 0);
// Store function on the stack.
__ SlotAddress(scratch, argc);
__ Str(function, MemOperand(scratch, kSystemPointerSize));
-#else
- // Store receiver and function on the stack.
- __ SlotAddress(scratch, argc);
- __ Stp(receiver, function, MemOperand(scratch));
-#endif
// Copy arguments to the stack in a loop, in reverse order.
// x4: argc.
@@ -918,7 +862,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// scratch has been set to point to the location of the function, which
// marks the end of the argument copy.
-#ifdef V8_REVERSE_JSARGS
__ SlotAddress(x0, 1); // Skips receiver.
__ Bind(&loop);
// Load the handle.
@@ -930,18 +873,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Loop if we've not reached the end of copy marker.
__ Cmp(x0, scratch);
__ B(le, &loop);
-#else
- __ Bind(&loop);
- // Load the handle.
- __ Ldr(x11, MemOperand(argv, kSystemPointerSize, PostIndex));
- // Dereference the handle.
- __ Ldr(x11, MemOperand(x11));
- // Poke the result into the stack.
- __ Str(x11, MemOperand(scratch, -kSystemPointerSize, PreIndex));
- // Loop if we've not reached the end of copy marker.
- __ Cmp(sp, scratch);
- __ B(lt, &loop);
-#endif
__ Bind(&done);
@@ -1010,35 +941,51 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
OMIT_SMI_CHECK);
}
-static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
- Register args_size = scratch;
-
- // Get the arguments + receiver count.
- __ Ldr(args_size,
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+ Register scratch2) {
+ Register params_size = scratch1;
+ // Get the size of the formal parameters + receiver (in bytes).
+ __ Ldr(params_size,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ Ldr(args_size.W(),
- FieldMemOperand(args_size, BytecodeArray::kParameterSizeOffset));
+ __ Ldr(params_size.W(),
+ FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
+
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ Register actual_params_size = scratch2;
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ Ldr(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ lsl(actual_params_size, actual_params_size, kSystemPointerSizeLog2);
+ __ Add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+
+ // If the actual count is bigger than the formal count, use it to free up
+ // the stack arguments.
+ Label corrected_args_count;
+ __ Cmp(params_size, actual_params_size);
+ __ B(ge, &corrected_args_count);
+ __ Mov(params_size, actual_params_size);
+ __ Bind(&corrected_args_count);
+#endif
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
if (__ emit_debug_code()) {
- __ Tst(args_size, kSystemPointerSize - 1);
+ __ Tst(params_size, kSystemPointerSize - 1);
__ Check(eq, AbortReason::kUnexpectedValue);
}
- __ Lsr(args_size, args_size, kSystemPointerSizeLog2);
- __ DropArguments(args_size);
+ __ Lsr(params_size, params_size, kSystemPointerSizeLog2);
+ __ DropArguments(params_size);
}
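Without the adaptor frame, surplus arguments from over-application live in this very frame, so the exit path must drop whichever byte count is larger. The correction amounts to the following (a sketch; sizes in bytes and receiver included, as in the code above):

#include <algorithm>
#include <cstddef>

size_t BytesToDrop(size_t formal_params_bytes, size_t actual_argc,
                   size_t pointer_size) {
  // Mirrors actual_params_size above: argc shifted to bytes plus one
  // receiver slot.
  size_t actual_bytes = actual_argc * pointer_size + pointer_size;
  return std::max(formal_params_bytes, actual_bytes);
}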
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register smi_entry,
- OptimizationMarker marker,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ CompareTaggedAndBranch(smi_entry, Operand(Smi::FromEnum(marker)), ne,
- &no_match);
+ __ CompareAndBranch(actual_marker, Operand(expected_marker), ne, &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
@@ -1054,17 +1001,22 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(!AreAliased(x1, x3, optimized_code_entry, scratch));
Register closure = x1;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+ &heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- Label found_deoptimized_code;
__ LoadTaggedPointerField(
scratch,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ Ldr(scratch.W(),
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
- &found_deoptimized_code);
+ &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
@@ -1079,10 +1031,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ Jump(x17);
}
- // Optimized code slot contains deoptimized code, evict it and re-enter the
- // closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ // The optimized code slot contains deoptimized code, or the code reference
+ // was cleared and the optimization marker has not been updated yet. Evict
+ // the code, update the marker and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}
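Both failure modes now funnel into a single heal label; schematically (hypothetical struct and enum, not V8 types):

struct OptimizedCodeSlot {
  bool weak_ref_cleared;           // LoadWeakValue bailed out
  bool marked_for_deoptimization;  // kMarkedForDeoptimizationBit is set
};

enum class Dispatch { kTailCallOptimizedCode, kHealSlotViaRuntime };

Dispatch Classify(const OptimizedCodeSlot& slot) {
  if (slot.weak_ref_cleared || slot.marked_for_deoptimization)
    return Dispatch::kHealSlotViaRuntime;  // Runtime::kHealOptimizedCodeSlot
  return Dispatch::kTailCallOptimizedCode;
}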
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
@@ -1092,7 +1045,7 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -- x3 : new target (preserved for callee if needed, and caller)
// -- x1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -- optimization_marker : int32 containing a non-zero optimization marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, x1, x3, optimization_marker));
@@ -1109,13 +1062,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
if (FLAG_debug_code) {
- __ CmpTagged(
- optimization_marker,
- Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
- __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
+ __ Unreachable();
}
}
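Written out as a switch, the dispatch reads as below. Only kCompileOptimized_Concurrent is visible in this hunk; the other two runtime-function names are assumed from the same pattern.

enum class Marker {
  kLogFirstExecution, kCompileOptimized, kCompileOptimizedConcurrent
};
enum class RuntimeFn {
  kFunctionFirstExecution,          // assumed
  kCompileOptimized_NotConcurrent,  // assumed
  kCompileOptimized_Concurrent
};

RuntimeFn DispatchMarker(Marker m) {
  switch (m) {
    case Marker::kLogFirstExecution:
      return RuntimeFn::kFunctionFirstExecution;
    case Marker::kCompileOptimized:
      return RuntimeFn::kCompileOptimized_NotConcurrent;
    case Marker::kCompileOptimizedConcurrent:
      return RuntimeFn::kCompileOptimized_Concurrent;
  }
  // kNone / kInOptimizationQueue never reach here; debug builds hit
  // Unreachable() instead of falling through.
  __builtin_unreachable();
}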
@@ -1245,19 +1196,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Cmp(x7, FEEDBACK_VECTOR_TYPE);
__ B(ne, &push_stack_frame);
- // Read off the optimized code slot in the feedback vector, and if there
+ // Read off the optimized state in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
- Register optimized_code_entry = x7;
- __ LoadAnyTaggedField(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+ Register optimization_state = w7;
+ __ Ldr(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if the optimized code slot is not empty.
- Label optimized_code_slot_not_empty;
- __ CompareTaggedAndBranch(optimized_code_entry,
- Operand(Smi::FromEnum(OptimizationMarker::kNone)),
- ne, &optimized_code_slot_not_empty);
+ // Check if there is optimized code or an optimization marker that needs to
+ // be processed.
+ Label has_optimized_code_or_marker;
+ __ TestAndBranchIfAnySet(
+ optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask,
+ &has_optimized_code_or_marker);
Label not_optimized;
__ bind(&not_optimized);
@@ -1295,10 +1246,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Push actual argument count, bytecode array, Smi tagged bytecode array
// offset and an undefined (to properly align the stack pointer).
STATIC_ASSERT(TurboAssembler::kExtraSlotClaimedByPrologue == 1);
- __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ SmiTag(x6, kInterpreterBytecodeOffsetRegister);
- __ Push(kJavaScriptCallArgCountRegister, kInterpreterBytecodeArrayRegister,
- x6, kInterpreterAccumulatorRegister);
+ __ Push(kJavaScriptCallArgCountRegister, kInterpreterBytecodeArrayRegister);
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ __ Push(x6, kInterpreterAccumulatorRegister);
// Allocate the local and temporary register file on the stack.
Label stack_overflow;
@@ -1312,7 +1263,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
- LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
__ Cmp(x10, scratch);
}
__ B(lo, &stack_overflow);
@@ -1343,7 +1294,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Perform interrupt stack check.
// TODO(solanes): Merge with the real stack limit check above.
Label stack_check_interrupt, after_stack_check_interrupt;
- LoadStackLimit(masm, x10, StackLimitKind::kInterruptStackLimit);
+ __ LoadStackLimit(x10, StackLimitKind::kInterruptStackLimit);
__ Cmp(sp, x10);
__ B(lo, &stack_check_interrupt);
__ Bind(&after_stack_check_interrupt);
@@ -1385,7 +1336,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&do_return);
// The return value is in x0.
- LeaveInterpreterFrame(masm, x2);
+ LeaveInterpreterFrame(masm, x2, x4);
__ Ret();
__ bind(&stack_check_interrupt);
@@ -1412,19 +1363,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&optimized_code_slot_not_empty);
+ __ bind(&has_optimized_code_or_marker);
+
Label maybe_has_optimized_code;
- // Check if optimized code marker is actually a weak reference to the
- // optimized code as opposed to an optimization marker.
- __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
- MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+ // Check if optimized code is available.
+ __ TestAndBranchIfAllClear(
+ optimization_state,
+ FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
+ &maybe_has_optimized_code);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
- // Load code entry from the weak reference, if it was cleared, resume
- // execution of unoptimized code.
- __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ Register optimized_code_entry = x7;
+ __ LoadAnyTaggedField(
+ optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);
__ bind(&compile_lazy);
@@ -1464,7 +1423,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// Add a stack check before pushing arguments.
Label stack_overflow, done;
- Generate_StackOverflowCheck(masm, slots_to_claim, &stack_overflow);
+ __ StackOverflowCheck(slots_to_claim, &stack_overflow);
__ B(&done);
__ Bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
@@ -1484,7 +1443,6 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ Poke(padreg, Operand(scratch, LSL, kSystemPointerSizeLog2));
}
-#ifdef V8_REVERSE_JSARGS
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ Mov(slots_to_copy, num_args);
__ SlotAddress(stack_addr, 1);
@@ -1513,33 +1471,6 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ LoadRoot(receiver, RootIndex::kUndefinedValue);
__ Poke(receiver, 0);
}
-#else // !V8_REVERSE_JSARGS
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- // Store "undefined" as the receiver arg if we need to.
- Register receiver = x14;
- __ LoadRoot(receiver, RootIndex::kUndefinedValue);
- __ SlotAddress(stack_addr, num_args);
- __ Str(receiver, MemOperand(stack_addr));
- __ Mov(slots_to_copy, num_args);
- } else {
- // If we're not given an explicit receiver to store, we'll need to copy it
- // together with the rest of the arguments.
- __ Add(slots_to_copy, num_args, 1);
- }
-
- __ Sub(last_arg_addr, first_arg_index,
- Operand(slots_to_copy, LSL, kSystemPointerSizeLog2));
- __ Add(last_arg_addr, last_arg_addr, kSystemPointerSize);
-
- // Load the final spread argument into spread_arg_out, if necessary.
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Ldr(spread_arg_out, MemOperand(last_arg_addr, -kSystemPointerSize));
- }
-
- // Copy the rest of the arguments.
- __ SlotAddress(stack_addr, 0);
- __ CopyDoubleWords(stack_addr, last_arg_addr, slots_to_copy);
-#endif // !V8_REVERSE_JSARGS
}
// static
@@ -1764,7 +1695,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ Add(fp, sp, frame_size);
if (with_result) {
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin) {
__ mov(scratch, x0);
} else {
@@ -1773,12 +1703,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ Str(x0, MemOperand(
fp, BuiltinContinuationFrameConstants::kCallerSPOffset));
}
-#else
- // Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point.
- __ Str(x0,
- MemOperand(fp, BuiltinContinuationFrameConstants::kCallerSPOffset));
-#endif
}
// Restore registers in pairs.
@@ -1801,7 +1725,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
if (java_script_builtin) __ SmiUntag(kJavaScriptCallArgCountRegister);
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin && with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. r0 contains the arguments count, the return value
@@ -1815,7 +1738,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
BuiltinContinuationFrameConstants::kCallerSPOffset /
kSystemPointerSize);
}
-#endif
// Load builtin index (stored as a Smi) and use it to get the builtin start
// address from the builtins table.
@@ -1904,11 +1826,10 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
- // -- sp[0] : argArray (if argc == 2)
+ // -- sp[0] : receiver
// -- sp[8] : thisArg (if argc >= 1)
- // -- sp[16] : receiver
+ // -- sp[16] : argArray (if argc == 2)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
ASM_LOCATION("Builtins::Generate_FunctionPrototypeApply");
@@ -1925,7 +1846,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// 1. Load receiver into x1, argArray into x2 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
-#ifdef V8_REVERSE_JSARGS
{
Label done;
__ Mov(this_arg, undefined_value);
@@ -1938,32 +1858,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Peek(arg_array, 2 * kSystemPointerSize);
__ bind(&done);
}
-#else // !V8_REVERSE_JSARGS
- {
- Register scratch = x11;
-
- // Push two undefined values on the stack, to put it in a consistent state
- // so that we can always read three arguments from it.
- __ Push(undefined_value, undefined_value);
-
- // The state of the stack (with arrows pointing to the slots we will read)
- // is as follows:
- //
- // argc = 0 argc = 1 argc = 2
- // -> sp[16]: receiver -> sp[24]: receiver -> sp[32]: receiver
- // -> sp[8]: undefined -> sp[16]: this_arg -> sp[24]: this_arg
- // -> sp[0]: undefined -> sp[8]: undefined -> sp[16]: arg_array
- // sp[0]: undefined sp[8]: undefined
- // sp[0]: undefined
- //
- // There are now always three arguments to read, in the slots starting from
- // slot argc.
- __ SlotAddress(scratch, argc);
- __ Ldp(arg_array, this_arg, MemOperand(scratch));
- __ Ldr(receiver, MemOperand(scratch, 2 * kSystemPointerSize));
- __ Drop(2); // Drop the undefined values we pushed above.
- }
-#endif // !V8_REVERSE_JSARGS
__ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
__ PushArgument(this_arg);
@@ -2022,7 +1916,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
}
Label arguments_ready;
-#ifdef V8_REVERSE_JSARGS
// 3. Shift arguments. It depends on whether the argument count is even or
// odd, that is, whether a padding slot exists or not.
{
@@ -2051,30 +1944,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
TurboAssembler::kSrcLessThanDst);
__ Drop(2);
}
-#else // !V8_REVERSE_JSARGS
- // 3. Overwrite the receiver with padding. If argc is odd, this is all we
- // need to do.
- __ Poke(padreg, Operand(argc, LSL, kXRegSizeLog2));
- __ Tbnz(argc, 0, &arguments_ready);
-
- // 4. If argc is even:
- // Copy arguments two slots higher in memory, overwriting the original
- // receiver and padding.
- {
- Register copy_from = x10;
- Register copy_to = x11;
- Register count = x12;
- Register last_arg_slot = x13;
- __ Mov(count, argc);
- __ Sub(last_arg_slot, argc, 1);
- __ SlotAddress(copy_from, last_arg_slot);
- __ Add(copy_to, copy_from, 2 * kSystemPointerSize);
- __ CopyDoubleWords(copy_to, copy_from, count,
- TurboAssembler::kSrcLessThanDst);
- // Drop two slots. These are copies of the last two arguments.
- __ Drop(2);
- }
-#endif // !V8_REVERSE_JSARGS
// 5. Adjust argument count to make the original first argument the new
// receiver and call the callable.
@@ -2085,13 +1954,12 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : argc
- // -- sp[0] : argumentsList (if argc == 3)
- // -- sp[8] : thisArgument (if argc >= 2)
- // -- sp[16] : target (if argc >= 1)
- // -- sp[24] : receiver
+ // -- x0 : argc
+ // -- sp[0] : receiver
+ // -- sp[8] : target (if argc >= 1)
+ // -- sp[16] : thisArgument (if argc >= 2)
+ // -- sp[24] : argumentsList (if argc == 3)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
ASM_LOCATION("Builtins::Generate_ReflectApply");
@@ -2106,7 +1974,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// 1. Load target into x1 (if present), argumentsList into x2 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
-#ifdef V8_REVERSE_JSARGS
{
Label done;
__ Mov(target, undefined_value);
@@ -2122,45 +1989,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Peek(arguments_list, 3 * kSystemPointerSize);
__ bind(&done);
}
-#else // !V8_REVERSE_JSARGS
- {
- // Push four undefined values on the stack, to put it in a consistent state
- // so that we can always read the three arguments we need from it. The
- // fourth value is used for stack alignment.
- __ Push(undefined_value, undefined_value, undefined_value, undefined_value);
-
- // The state of the stack (with arrows pointing to the slots we will read)
- // is as follows:
- //
- // argc = 0 argc = 1 argc = 2
- // sp[32]: receiver sp[40]: receiver sp[48]: receiver
- // -> sp[24]: undefined -> sp[32]: target -> sp[40]: target
- // -> sp[16]: undefined -> sp[24]: undefined -> sp[32]: this_argument
- // -> sp[8]: undefined -> sp[16]: undefined -> sp[24]: undefined
- // sp[0]: undefined sp[8]: undefined sp[16]: undefined
- // sp[0]: undefined sp[8]: undefined
- // sp[0]: undefined
- // argc = 3
- // sp[56]: receiver
- // -> sp[48]: target
- // -> sp[40]: this_argument
- // -> sp[32]: arguments_list
- // sp[24]: undefined
- // sp[16]: undefined
- // sp[8]: undefined
- // sp[0]: undefined
- //
- // There are now always three arguments to read, in the slots starting from
- // slot (argc + 1).
- Register scratch = x10;
- __ SlotAddress(scratch, argc);
- __ Ldp(arguments_list, this_argument,
- MemOperand(scratch, 1 * kSystemPointerSize));
- __ Ldr(target, MemOperand(scratch, 3 * kSystemPointerSize));
-
- __ Drop(4); // Drop the undefined values we pushed above.
- }
-#endif // !V8_REVERSE_JSARGS
__ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
__ PushArgument(this_argument);
@@ -2182,12 +2010,11 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
- // -- sp[0] : new.target (optional)
- // -- sp[8] : argumentsList
- // -- sp[16] : target
- // -- sp[24] : receiver
+ // -- sp[0] : receiver
+ // -- sp[8] : target
+ // -- sp[16] : argumentsList
+ // -- sp[24] : new.target (optional)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
ASM_LOCATION("Builtins::Generate_ReflectConstruct");
@@ -2203,7 +2030,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// new.target into x3 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
-#ifdef V8_REVERSE_JSARGS
{
Label done;
__ Mov(target, undefined_value);
@@ -2220,48 +2046,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Peek(new_target, 3 * kSystemPointerSize);
__ bind(&done);
}
-#else // !V8_REVERSE_JSARGS
- {
- // Push four undefined values on the stack, to put it in a consistent state
- // so that we can always read the three arguments we need from it. The
- // fourth value is used for stack alignment.
- __ Push(undefined_value, undefined_value, undefined_value, undefined_value);
-
- // The state of the stack (with arrows pointing to the slots we will read)
- // is as follows:
- //
- // argc = 0 argc = 1 argc = 2
- // sp[32]: receiver sp[40]: receiver sp[48]: receiver
- // -> sp[24]: undefined -> sp[32]: target -> sp[40]: target
- // -> sp[16]: undefined -> sp[24]: undefined -> sp[32]: arguments_list
- // -> sp[8]: undefined -> sp[16]: undefined -> sp[24]: undefined
- // sp[0]: undefined sp[8]: undefined sp[16]: undefined
- // sp[0]: undefined sp[8]: undefined
- // sp[0]: undefined
- // argc = 3
- // sp[56]: receiver
- // -> sp[48]: target
- // -> sp[40]: arguments_list
- // -> sp[32]: new_target
- // sp[24]: undefined
- // sp[16]: undefined
- // sp[8]: undefined
- // sp[0]: undefined
- //
- // There are now always three arguments to read, in the slots starting from
- // slot (argc + 1).
- Register scratch = x10;
- __ SlotAddress(scratch, argc);
- __ Ldp(new_target, arguments_list,
- MemOperand(scratch, 1 * kSystemPointerSize));
- __ Ldr(target, MemOperand(scratch, 3 * kSystemPointerSize));
-
- __ Cmp(argc, 2);
- __ CmovX(new_target, target, ls); // target if argc <= 2.
-
- __ Drop(4); // Drop the undefined values we pushed above.
- }
-#endif // !V8_REVERSE_JSARGS
__ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
@@ -2319,9 +2103,7 @@ void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// one slot up or one slot down, as needed.
void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
Register len) {
- Label exit;
-#ifdef V8_REVERSE_JSARGS
- Label even;
+ Label exit, even;
Register slots_to_copy = x10;
Register slots_to_claim = x12;
@@ -2353,60 +2135,6 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
__ SlotAddress(dst, 0);
__ CopyDoubleWords(dst, src, slots_to_copy);
}
-#else // !V8_REVERSE_JSARGS
- Label len_odd;
- Register slots_to_copy = x10; // If needed.
- __ Add(slots_to_copy, argc, 1);
- __ Add(argc, argc, len);
- __ Tbnz(len, 0, &len_odd);
- __ Claim(len);
- __ B(&exit);
-
- __ Bind(&len_odd);
- // Claim space we need. If argc is even, slots_to_claim = len + 1, as we need
- // one extra padding slot. If argc is odd, we know that the original arguments
- // will have a padding slot we can reuse (since len is odd), so
- // slots_to_claim = len - 1.
- {
- Register scratch = x11;
- Register slots_to_claim = x12;
- __ Add(slots_to_claim, len, 1);
- __ And(scratch, argc, 1);
- __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
- __ Claim(slots_to_claim);
- }
-
- Label copy_down;
- __ Tbz(slots_to_copy, 0, &copy_down);
-
- // Copy existing arguments one slot up.
- {
- Register src = x11;
- Register dst = x12;
- Register scratch = x13;
- __ Sub(scratch, argc, 1);
- __ SlotAddress(src, scratch);
- __ SlotAddress(dst, argc);
- __ CopyDoubleWords(dst, src, slots_to_copy,
- TurboAssembler::kSrcLessThanDst);
- }
- __ B(&exit);
-
- // Copy existing arguments one slot down and add padding.
- __ Bind(&copy_down);
- {
- Register src = x11;
- Register dst = x12;
- Register scratch = x13;
- __ Add(src, len, 1);
- __ Mov(dst, len); // CopySlots will corrupt dst.
- __ CopySlots(dst, src, slots_to_copy);
- __ Add(scratch, argc, 1);
- __ Poke(padreg,
- Operand(scratch, LSL, kSystemPointerSizeLog2)); // Store padding.
- }
-
-#endif // !V8_REVERSE_JSARGS
__ Bind(&exit);
}
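All of this claiming and shifting exists because AAPCS64 keeps sp 16-byte aligned, so an odd number of 8-byte slots always costs one padding slot. The rounding rule, as a one-liner (sketch):

#include <cstddef>

// Round an 8-byte slot count up to even so the claimed area preserves the
// arm64 16-byte stack alignment.
size_t AlignedSlotCount(size_t slots) { return slots + (slots & 1); }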
@@ -2446,7 +2174,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Register len = x4;
Label stack_overflow;
- Generate_StackOverflowCheck(masm, len, &stack_overflow);
+ __ StackOverflowCheck(len, &stack_overflow);
// Skip argument setup if we don't need to push any varargs.
Label done;
@@ -2467,7 +2195,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// We do not use the CompareRoot macro as it would do a LoadRoot behind the
// scenes and we want to avoid that in a loop.
// TODO(all): Consider using Ldp and Stp.
-#ifdef V8_REVERSE_JSARGS
Register dst = x16;
__ Add(dst, argc, Immediate(1)); // Consider the receiver as well.
__ SlotAddress(dst, dst);
@@ -2479,15 +2206,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Csel(scratch, scratch, undefined_value, ne);
__ Str(scratch, MemOperand(dst, kSystemPointerSize, PostIndex));
__ Cbnz(len, &loop);
-#else
- __ Bind(&loop);
- __ Sub(len, len, 1);
- __ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
- __ CmpTagged(scratch, the_hole_value);
- __ Csel(scratch, scratch, undefined_value, ne);
- __ Poke(scratch, Operand(len, LSL, kSystemPointerSizeLog2));
- __ Cbnz(len, &loop);
-#endif
}
__ Bind(&done);
// Tail-call to the actual Call or Construct builtin.
@@ -2529,12 +2247,18 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ Bind(&new_target_constructor);
}
+ Register args_fp = x5;
+ Register len = x6;
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
+ // code is erased.
+ __ Mov(args_fp, fp);
+ __ Ldr(len, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+#else
// Check if we have an arguments adaptor frame below the function frame.
// args_fp will point to the frame that contains the actual arguments, which
// will be the current frame unless we have an arguments adaptor frame, in
// which case args_fp points to the arguments adaptor frame.
- Register args_fp = x5;
- Register len = x6;
{
Label arguments_adaptor, arguments_done;
Register scratch = x10;
@@ -2563,19 +2287,19 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
}
__ Bind(&arguments_done);
}
+#endif
Label stack_done, stack_overflow;
__ Subs(len, len, start_index);
__ B(le, &stack_done);
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, x6, &stack_overflow);
+ __ StackOverflowCheck(len, &stack_overflow);
Generate_PrepareForCopyingVarargs(masm, argc, len);
// Push varargs.
{
Register dst = x13;
-#ifdef V8_REVERSE_JSARGS
// Point to the first argument to copy from (skipping receiver).
__ Add(args_fp, args_fp,
CommonFrameConstants::kFixedFrameSizeAboveFp + kSystemPointerSize);
@@ -2586,10 +2310,6 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ SlotAddress(dst, x10);
// Update total number of arguments.
__ Add(argc, argc, len);
-#else
- __ Add(args_fp, args_fp, CommonFrameConstants::kFixedFrameSizeAboveFp);
- __ SlotAddress(dst, 0);
-#endif
__ CopyDoubleWords(dst, args_fp, len);
}
__ B(&stack_done);
@@ -2739,7 +2459,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
Label done;
- LoadStackLimit(masm, x10, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(x10, StackLimitKind::kRealStackLimit);
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
__ Sub(x10, sp, x10);
@@ -2750,7 +2470,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ Bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
Label copy_bound_args;
Register total_argc = x15;
Register slots_to_claim = x12;
@@ -2826,80 +2545,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
}
// Update argc.
__ Mov(argc, total_argc);
-#else // !V8_REVERSE_JSARGS
- // Check if we need padding.
- Label copy_args, copy_bound_args;
- Register total_argc = x15;
- Register slots_to_claim = x12;
- __ Add(total_argc, argc, bound_argc);
- __ Mov(slots_to_claim, bound_argc);
- __ Tbz(bound_argc, 0, &copy_args);
-
- // Load receiver before we start moving the arguments. We will only
- // need this in this path because the bound arguments are odd.
- Register receiver = x14;
- __ Peek(receiver, Operand(argc, LSL, kSystemPointerSizeLog2));
-
- // Claim space we need. If argc is even, slots_to_claim = bound_argc + 1,
- // as we need one extra padding slot. If argc is odd, we know that the
- // original arguments will have a padding slot we can reuse (since
- // bound_argc is odd), so slots_to_claim = bound_argc - 1.
- {
- Register scratch = x11;
- __ Add(slots_to_claim, bound_argc, 1);
- __ And(scratch, total_argc, 1);
- __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
- }
-
- // Copy bound arguments.
- __ Bind(&copy_args);
- // Skip claim and copy of existing arguments in the special case where we
- // do not need to claim any slots (this will be the case when
- // bound_argc == 1 and the existing arguments have padding we can reuse).
- __ Cbz(slots_to_claim, &copy_bound_args);
- __ Claim(slots_to_claim);
- {
- Register count = x10;
- // Relocate arguments to a lower address.
- __ Mov(count, argc);
- __ CopySlots(0, slots_to_claim, count);
-
- __ Bind(&copy_bound_args);
- // Copy [[BoundArguments]] to the stack (below the arguments). The first
- // element of the array is copied to the highest address.
- {
- Label loop;
- Register counter = x10;
- Register scratch = x11;
- Register copy_to = x12;
- __ Add(bound_argv, bound_argv,
- FixedArray::kHeaderSize - kHeapObjectTag);
- __ SlotAddress(copy_to, argc);
- __ Add(argc, argc,
- bound_argc); // Update argc to include bound arguments.
- __ Lsl(counter, bound_argc, kTaggedSizeLog2);
- __ Bind(&loop);
- __ Sub(counter, counter, kTaggedSize);
- __ LoadAnyTaggedField(scratch, MemOperand(bound_argv, counter));
- // Poke into claimed area of stack.
- __ Str(scratch, MemOperand(copy_to, kSystemPointerSize, PostIndex));
- __ Cbnz(counter, &loop);
- }
-
- {
- Label done;
- Register scratch = x10;
- __ Tbz(bound_argc, 0, &done);
- // Store receiver.
- __ Add(scratch, sp, Operand(total_argc, LSL, kSystemPointerSizeLog2));
- __ Str(receiver, MemOperand(scratch, kSystemPointerSize, PostIndex));
- __ Tbnz(total_argc, 0, &done);
- // Store padding.
- __ Str(padreg, MemOperand(scratch));
- __ Bind(&done);
- }
- }
-#endif // !V8_REVERSE_JSARGS
}
__ Bind(&no_bound_arguments);
}
@@ -3160,26 +2805,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Cmp(argc_expected, kDontAdaptArgumentsSentinel);
__ B(eq, &dont_adapt_arguments);
-#ifndef V8_REVERSE_JSARGS
- // This optimization is disabled when the arguments are reversed.
- Label adapt_arguments_in_place;
- Register argc_actual_minus_expected = x5;
-
- // When the difference between argc_actual and argc_expected is odd, we
- // create an arguments adaptor frame.
- __ Sub(argc_actual_minus_expected, argc_actual, argc_expected);
- __ Tbnz(argc_actual_minus_expected, 0, &create_adaptor_frame);
-
- // When the difference is even, check if we are allowed to adjust the
- // existing frame instead.
- __ LoadTaggedPointerField(
- x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
- __ TestAndBranchIfAnySet(
- w4, SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask,
- &adapt_arguments_in_place);
-#endif
-
// -------------------------------------------
// Create an arguments adaptor frame.
// -------------------------------------------
@@ -3198,7 +2823,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// receiver.
__ RecordComment("-- Stack check --");
__ Add(scratch1, argc_expected, 1);
- Generate_StackOverflowCheck(masm, scratch1, &stack_overflow);
+ __ StackOverflowCheck(scratch1, &stack_overflow);
// Round up number of slots to be even, to maintain stack alignment.
__ RecordComment("-- Allocate callee frame slots --");
@@ -3206,7 +2831,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bic(scratch1, scratch1, 1);
__ Claim(scratch1, kSystemPointerSize);
-#ifdef V8_REVERSE_JSARGS
// If we don't have enough arguments, fill the remaining expected
// arguments with undefined, otherwise skip this step.
Label enough_arguments;
@@ -3251,84 +2875,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Add(copy_from, fp, 2 * kSystemPointerSize);
__ CopyDoubleWords(copy_to, copy_from, argc_to_copy);
-#else // !V8_REVERSE_JSARGS
- Register argc_unused_actual = x14;
- Register scratch2 = x16;
-
- // Preparing the expected arguments is done in four steps, the order of
- // which is chosen so we can use LDP/STP and avoid conditional branches as
- // much as possible.
-
- __ Mov(copy_to, sp);
-
- // (1) If we don't have enough arguments, fill the remaining expected
- // arguments with undefined, otherwise skip this step.
- Label enough_arguments;
- __ Subs(scratch1, argc_actual, argc_expected);
- __ Csel(argc_unused_actual, xzr, scratch1, lt);
- __ Csel(argc_to_copy, argc_expected, argc_actual, ge);
- __ B(ge, &enough_arguments);
-
- // Fill the remaining expected arguments with undefined.
- __ RecordComment("-- Fill slots with undefined --");
- __ Sub(copy_end, copy_to, Operand(scratch1, LSL, kSystemPointerSizeLog2));
- __ LoadRoot(scratch1, RootIndex::kUndefinedValue);
-
- Label fill;
- __ Bind(&fill);
- __ Stp(scratch1, scratch1,
- MemOperand(copy_to, 2 * kSystemPointerSize, PostIndex));
- // We might write one slot extra, but that is ok because we'll overwrite it
- // below.
- __ Cmp(copy_end, copy_to);
- __ B(hi, &fill);
-
- // Correct copy_to, for the case where we wrote one additional slot.
- __ Mov(copy_to, copy_end);
-
- __ Bind(&enough_arguments);
- // (2) Copy all of the actual arguments, or as many as we need.
- Label skip_copy;
- __ RecordComment("-- Copy actual arguments --");
- __ Cbz(argc_to_copy, &skip_copy);
- __ Add(copy_end, copy_to,
- Operand(argc_to_copy, LSL, kSystemPointerSizeLog2));
- __ Add(copy_from, fp, 2 * kSystemPointerSize);
- // Adjust for difference between actual and expected arguments.
- __ Add(copy_from, copy_from,
- Operand(argc_unused_actual, LSL, kSystemPointerSizeLog2));
-
- // Copy arguments. We use load/store pair instructions, so we might
- // overshoot by one slot, but since we copy the arguments starting from the
- // last one, if we do overshoot, the extra slot will be overwritten later by
- // the receiver.
- Label copy_2_by_2;
- __ Bind(&copy_2_by_2);
- __ Ldp(scratch1, scratch2,
- MemOperand(copy_from, 2 * kSystemPointerSize, PostIndex));
- __ Stp(scratch1, scratch2,
- MemOperand(copy_to, 2 * kSystemPointerSize, PostIndex));
- __ Cmp(copy_end, copy_to);
- __ B(hi, &copy_2_by_2);
- __ Bind(&skip_copy);
-
- // (3) Store padding, which might be overwritten by the receiver, if it is
- // not necessary.
- __ RecordComment("-- Store padding --");
- __ Str(padreg, MemOperand(fp, -5 * kSystemPointerSize));
-
- // (4) Store receiver. Calculate target address from the sp to avoid
- // checking for padding. Storing the receiver will overwrite either the
- // extra slot we copied with the actual arguments, if we did copy one, or
- // the padding we stored above.
- __ RecordComment("-- Store receiver --");
- __ Add(copy_from, fp, 2 * kSystemPointerSize);
- __ Ldr(scratch1,
- MemOperand(copy_from, argc_actual, LSL, kSystemPointerSizeLog2));
- __ Str(scratch1,
- MemOperand(sp, argc_expected, LSL, kSystemPointerSizeLog2));
-#endif
-
// Arguments have been adapted. Now call the entry point.
__ RecordComment("-- Call entry point --");
__ Mov(argc_actual, argc_expected);
@@ -3349,46 +2895,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Ret();
}
-#ifndef V8_REVERSE_JSARGS
- // -----------------------------------------
- // Adapt arguments in the existing frame.
- // -----------------------------------------
- __ Bind(&adapt_arguments_in_place);
- {
- __ RecordComment("-- Update arguments in place --");
- // The callee cannot observe the actual arguments, so it's safe to just
- // pass the expected arguments by massaging the stack appropriately. See
- // http://bit.ly/v8-faster-calls-with-arguments-mismatch for details.
- Label under_application, over_application;
- __ Tbnz(argc_actual_minus_expected, kXSignBit, &under_application);
-
- __ Bind(&over_application);
- {
- // Remove superfluous arguments from the stack. The number of superflous
- // arguments is even.
- __ RecordComment("-- Over-application --");
- __ Mov(argc_actual, argc_expected);
- __ Drop(argc_actual_minus_expected);
- __ B(&dont_adapt_arguments);
- }
-
- __ Bind(&under_application);
- {
- // Fill remaining expected arguments with undefined values.
- __ RecordComment("-- Under-application --");
- Label fill;
- Register undef_value = x16;
- __ LoadRoot(undef_value, RootIndex::kUndefinedValue);
- __ Bind(&fill);
- __ Add(argc_actual, argc_actual, 2);
- __ Push(undef_value, undef_value);
- __ Cmp(argc_actual, argc_expected);
- __ B(lt, &fill);
- __ B(&dont_adapt_arguments);
- }
- }
-#endif
-
// -------------------------------------------
// Dont adapt arguments.
// -------------------------------------------
@@ -3915,12 +3421,11 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- x2 : arguments count (not including the receiver)
// -- x3 : call data
// -- x0 : holder
- // -- sp[0] : last argument
+ // -- sp[0] : receiver
+ // -- sp[8] : first argument
// -- ...
- // -- sp[(argc - 1) * 8] : first argument
- // -- sp[(argc + 0) * 8] : receiver
+ // -- sp[(argc) * 8] : last argument
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
Register api_function_address = x1;
Register argc = x2;
@@ -3990,14 +3495,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
-#ifdef V8_REVERSE_JSARGS
__ Add(scratch, scratch,
Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
-#else
- __ Add(scratch, scratch,
- Operand((FCA::kArgsLength - 1) * kSystemPointerSize));
- __ Add(scratch, scratch, Operand(argc, LSL, kSystemPointerSizeLog2));
-#endif
__ Str(scratch, MemOperand(sp, 2 * kSystemPointerSize));
// FunctionCallbackInfo::length_.
@@ -4128,6 +3627,303 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
__ Ret();
}
+namespace {
+
+void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
+ int dst_offset, const CPURegList& reg_list,
+ const Register& temp0, const Register& temp1,
+ int src_offset = 0) {
+ DCHECK_EQ(reg_list.Count() % 2, 0);
+ UseScratchRegisterScope temps(masm);
+ CPURegList copy_to_input = reg_list;
+ int reg_size = reg_list.RegisterSizeInBytes();
+ DCHECK_EQ(temp0.SizeInBytes(), reg_size);
+ DCHECK_EQ(temp1.SizeInBytes(), reg_size);
+
+ // Compute some temporary addresses to avoid having the macro assembler set
+ // up a temp with an offset for accesses out of the range of the addressing
+ // mode.
+ Register src = temps.AcquireX();
+ masm->Add(src, sp, src_offset);
+ masm->Add(dst, dst, dst_offset);
+
+ // Write reg_list into the frame pointed to by dst.
+ for (int i = 0; i < reg_list.Count(); i += 2) {
+ masm->Ldp(temp0, temp1, MemOperand(src, i * reg_size));
+
+ CPURegister reg0 = copy_to_input.PopLowestIndex();
+ CPURegister reg1 = copy_to_input.PopLowestIndex();
+ int offset0 = reg0.code() * reg_size;
+ int offset1 = reg1.code() * reg_size;
+
+ // Pair up adjacent stores, otherwise write them separately.
+ if (offset1 == offset0 + reg_size) {
+ masm->Stp(temp0, temp1, MemOperand(dst, offset0));
+ } else {
+ masm->Str(temp0, MemOperand(dst, offset0));
+ masm->Str(temp1, MemOperand(dst, offset1));
+ }
+ }
+ masm->Sub(dst, dst, dst_offset);
+}
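The pairing condition used above is simply that the two destination offsets are adjacent; as a predicate (sketch):

// Two saves fuse into one Stp only when the second register's frame slot
// directly follows the first (slot offset is code * reg_size).
bool CanUseStp(int code0, int code1, int reg_size_bytes) {
  return code1 * reg_size_bytes == code0 * reg_size_bytes + reg_size_bytes;
}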
+
+void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
+ const Register& src_base, int src_offset) {
+ DCHECK_EQ(reg_list.Count() % 2, 0);
+ UseScratchRegisterScope temps(masm);
+ CPURegList restore_list = reg_list;
+ int reg_size = restore_list.RegisterSizeInBytes();
+
+ // Compute a temporary address to avoid having the macro assembler set
+ // up a temp with an offset for accesses out of the range of the addressing
+ // mode.
+ Register src = temps.AcquireX();
+ masm->Add(src, src_base, src_offset);
+
+ // No need to restore padreg.
+ restore_list.Remove(padreg);
+
+ // Restore every register in restore_list from src.
+ while (!restore_list.IsEmpty()) {
+ CPURegister reg0 = restore_list.PopLowestIndex();
+ CPURegister reg1 = restore_list.PopLowestIndex();
+ int offset0 = reg0.code() * reg_size;
+
+ if (reg1 == NoCPUReg) {
+ masm->Ldr(reg0, MemOperand(src, offset0));
+ break;
+ }
+
+ int offset1 = reg1.code() * reg_size;
+
+ // Pair up adjacent loads, otherwise read them separately.
+ if (offset1 == offset0 + reg_size) {
+ masm->Ldp(reg0, reg1, MemOperand(src, offset0));
+ } else {
+ masm->Ldr(reg0, MemOperand(src, offset0));
+ masm->Ldr(reg1, MemOperand(src, offset1));
+ }
+ }
+}
+
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // TODO(all): This code needs to be revisited. We probably only need to save
+ // caller-saved registers here. Callee-saved registers can be stored directly
+ // in the input frame.
+
+ // Save all allocatable double registers.
+ CPURegList saved_double_registers(
+ CPURegister::kVRegister, kDRegSizeInBits,
+ RegisterConfiguration::Default()->allocatable_double_codes_mask());
+ DCHECK_EQ(saved_double_registers.Count() % 2, 0);
+ __ PushCPURegList(saved_double_registers);
+
+ // We save all the registers except sp, lr, platform register (x18) and the
+ // masm scratches.
+ CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 28);
+ saved_registers.Remove(ip0);
+ saved_registers.Remove(ip1);
+ saved_registers.Remove(x18);
+ saved_registers.Combine(fp);
+ saved_registers.Align();
+ DCHECK_EQ(saved_registers.Count() % 2, 0);
+ __ PushCPURegList(saved_registers);
+
+ __ Mov(x3, Operand(ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, isolate)));
+ __ Str(fp, MemOperand(x3));
+
+ const int kSavedRegistersAreaSize =
+ (saved_registers.Count() * kXRegSize) +
+ (saved_double_registers.Count() * kDRegSize);
+
+ // Floating point registers are saved on the stack above core registers.
+ const int kDoubleRegistersOffset = saved_registers.Count() * kXRegSize;
+
+ Register bailout_id = x2;
+ Register code_object = x3;
+ Register fp_to_sp = x4;
+ __ Mov(bailout_id, Deoptimizer::kFixedExitSizeMarker);
+ // Get the address of the location in the code object. This is the return
+ // address for lazy deoptimization.
+ __ Mov(code_object, lr);
+ // Compute the fp-to-sp delta.
+ __ Add(fp_to_sp, sp, kSavedRegistersAreaSize);
+ __ Sub(fp_to_sp, fp, fp_to_sp);
+
+ // Allocate a new deoptimizer object.
+ __ Ldr(x1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+
+ // Ensure we can safely load from below fp.
+ DCHECK_GT(kSavedRegistersAreaSize, -StandardFrameConstants::kFunctionOffset);
+ __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+
+ // If x1 is a smi, zero x0.
+ __ Tst(x1, kSmiTagMask);
+ __ CzeroX(x0, eq);
+
+ __ Mov(x1, static_cast<int>(deopt_kind));
+ // Following arguments are already loaded:
+ // - x2: bailout id
+ // - x3: code object address
+ // - x4: fp-to-sp delta
+ __ Mov(x5, ExternalReference::isolate_address(isolate));
+
+ {
+ // Call Deoptimizer::New().
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+ // Preserve "deoptimizer" object in register x0.
+ Register deoptimizer = x0;
+
+ // Get the input frame descriptor pointer.
+ __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
+
+ // Copy core registers into the input frame.
+ CopyRegListToFrame(masm, x1, FrameDescription::registers_offset(),
+ saved_registers, x2, x3);
+
+ // Copy double registers to the input frame.
+ CopyRegListToFrame(masm, x1, FrameDescription::double_registers_offset(),
+ saved_double_registers, x2, x3, kDoubleRegistersOffset);
+
+ // Mark the stack as not iterable for the CPU profiler which won't be able to
+ // walk the stack without the return address.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.AcquireX();
+ __ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ strb(xzr, MemOperand(is_iterable));
+ }
+
+ // Remove the saved registers from the stack.
+ DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0);
+ __ Drop(kSavedRegistersAreaSize / kXRegSize);
+
+ // Compute a pointer to the unwinding limit in register x2; that is
+ // the first stack slot not part of the input frame.
+ Register unwind_limit = x2;
+ __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ Add(x3, x1, FrameDescription::frame_content_offset());
+ __ SlotAddress(x1, 0);
+ __ Lsr(unwind_limit, unwind_limit, kSystemPointerSizeLog2);
+ __ Mov(x5, unwind_limit);
+ __ CopyDoubleWords(x3, x1, x5);
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // Since {unwind_limit} is the frame size up to the parameter count, we might
+ // end up with an unaligned stack pointer. This is later recovered when
+ // setting the stack pointer to {caller_frame_top_offset}.
+ __ Bic(unwind_limit, unwind_limit, 1);
+#endif
+ __ Drop(unwind_limit);
+
+ // Compute the output frame in the deoptimizer.
+ __ Push(padreg, x0); // Preserve deoptimizer object across call.
+ {
+ // Call Deoptimizer::ComputeOutputFrames().
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ Pop(x4, padreg); // Restore deoptimizer object (class Deoptimizer).
+
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.AcquireX();
+ __ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
+ __ Mov(sp, scratch);
+ }
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, outer_loop_header;
+ __ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
+ __ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
+ __ Add(x1, x0, Operand(x1, LSL, kSystemPointerSizeLog2));
+ __ B(&outer_loop_header);
+
+ __ Bind(&outer_push_loop);
+ Register current_frame = x2;
+ Register frame_size = x3;
+ __ Ldr(current_frame, MemOperand(x0, kSystemPointerSize, PostIndex));
+ __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
+ __ Lsr(frame_size, x3, kSystemPointerSizeLog2);
+ __ Claim(frame_size);
+
+ __ Add(x7, current_frame, FrameDescription::frame_content_offset());
+ __ SlotAddress(x6, 0);
+ __ CopyDoubleWords(x6, x7, frame_size);
+
+ __ Bind(&outer_loop_header);
+ __ Cmp(x0, x1);
+ __ B(lt, &outer_push_loop);
+
+ __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
+ RestoreRegList(masm, saved_double_registers, x1,
+ FrameDescription::double_registers_offset());
+
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.AcquireX();
+ Register one = x4;
+ __ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ Mov(one, Operand(1));
+ __ strb(one, MemOperand(is_iterable));
+ }
+
+ // TODO(all): ARM copies a lot (if not all) of the last output frame onto the
+ // stack, then pops it all into registers. Here, we try to load it directly
+ // into the relevant registers. Is this correct? If so, we should improve the
+ // ARM code.
+
+ // Restore registers from the last output frame.
+ // Note that lr is not in the list of saved_registers and will be restored
+ // later. We can use it to hold the address of last output frame while
+ // reloading the other registers.
+ DCHECK(!saved_registers.IncludesAliasOf(lr));
+ Register last_output_frame = lr;
+ __ Mov(last_output_frame, current_frame);
+
+ RestoreRegList(masm, saved_registers, last_output_frame,
+ FrameDescription::registers_offset());
+
+ UseScratchRegisterScope temps(masm);
+ temps.Exclude(x17);
+ Register continuation = x17;
+ __ Ldr(continuation, MemOperand(last_output_frame,
+ FrameDescription::continuation_offset()));
+ __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ __ Autibsp();
+#endif
+ __ Br(continuation);
+}
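The fp-to-sp delta handed to Deoptimizer::New() measures from the deoptimizing frame's fp down to where sp stood before this stub pushed its register save area; in plain arithmetic (sketch):

#include <cstdint>

uintptr_t FpToSpDelta(uintptr_t fp, uintptr_t sp,
                      uintptr_t saved_registers_area_bytes) {
  // Adding the save area back undoes the pushes done by the stub itself.
  return fp - (sp + saved_registers_area_bytes);
}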
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index eed7bc6e97..e068cfa9d1 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -28,7 +28,7 @@ type never;
type Tagged generates 'TNode<MaybeObject>' constexpr 'MaybeObject';
type StrongTagged extends Tagged
- generates 'TNode<Object>' constexpr 'ObjectPtr';
+ generates 'TNode<Object>' constexpr 'Object';
type Smi extends StrongTagged generates 'TNode<Smi>' constexpr 'Smi';
type TaggedIndex extends StrongTagged
generates 'TNode<TaggedIndex>' constexpr 'TaggedIndex';
@@ -50,10 +50,11 @@ type Zero extends PositiveSmi;
type Uninitialized extends Tagged;
extern macro MakeWeak(HeapObject): WeakHeapObject;
-extern macro GetHeapObjectAssumeWeak(WeakHeapObject):
- HeapObject labels ClearedWeakPointer;
+extern macro GetHeapObjectAssumeWeak(MaybeObject): HeapObject labels IfCleared;
+extern macro GetHeapObjectIfStrong(MaybeObject): HeapObject labels IfNotStrong;
extern macro IsWeakOrCleared(MaybeObject): bool;
extern macro IsWeakReferenceToObject(MaybeObject, Object): bool;
+extern macro IsStrong(MaybeObject): bool;
macro StrongToWeak<T: type>(x: T): Weak<T> {
return %RawDownCast<Weak<T>>(MakeWeak(x));
@@ -110,7 +111,7 @@ type bint generates 'TNode<BInt>' constexpr 'BInt';
type string constexpr 'const char*';
// A Smi value containing a bitfield struct as its integer data.
-type SmiTagged<T : type extends uint31> extends Smi;
+@useParentTypeChecker type SmiTagged<T : type extends uint31> extends Smi;
// WARNING: The memory representation (i.e., in class fields and arrays) of
// float64_or_hole is just a float64 that may be the hole-representing
@@ -149,7 +150,7 @@ type ObjectHashTable extends HashTable
generates 'TNode<ObjectHashTable>';
extern class NumberDictionary extends HashTable;
-type RawPtr generates 'TNode<RawPtrT>' constexpr 'void*';
+type RawPtr generates 'TNode<RawPtrT>' constexpr 'Address';
type ExternalPointer
generates 'TNode<ExternalPointerT>' constexpr 'ExternalPointer_t';
extern class Code extends HeapObject;
@@ -166,6 +167,9 @@ type LayoutDescriptor extends ByteArray
generates 'TNode<LayoutDescriptor>';
extern class TransitionArray extends WeakFixedArray;
+extern operator '.length_intptr' macro LoadAndUntagWeakFixedArrayLength(
+ WeakFixedArray): intptr;
+
type InstanceType extends uint16 constexpr 'InstanceType';
type NoSharedNameSentinel extends Smi;
@@ -278,6 +282,7 @@ extern enum MessageTemplate {
kFirstArgumentNotRegExp,
kBigIntMixedTypes,
kTypedArrayTooShort,
+ kTypedArrayTooLargeToSort,
kInvalidCountValue,
kConstructorNotFunction,
kSymbolToString,
@@ -320,7 +325,6 @@ extern enum MessageTemplate {
kWasmTrapDivUnrepresentable,
kWasmTrapRemByZero,
kWasmTrapFloatUnrepresentable,
- kWasmTrapFuncInvalid,
kWasmTrapFuncSigMismatch,
kWasmTrapDataSegmentDropped,
kWasmTrapElemSegmentDropped,
@@ -330,7 +334,6 @@ extern enum MessageTemplate {
kWasmTrapNullDereference,
kWasmTrapIllegalCast,
kWasmTrapArrayOutOfBounds,
- kWasmTrapWasmJSFunction,
kWeakRefsRegisterTargetAndHoldingsMustNotBeSame,
kWeakRefsRegisterTargetMustBeObject,
kWeakRefsUnregisterTokenMustBeObject,
@@ -831,6 +834,10 @@ extern operator '==' macro
ConstexprInt31Equal(constexpr int31, constexpr int31): constexpr bool;
extern operator '!=' macro
ConstexprInt31NotEqual(constexpr int31, constexpr int31): constexpr bool;
+extern operator '==' macro
+ConstexprUint32Equal(constexpr uint32, constexpr uint32): constexpr bool;
+extern operator '!=' macro
+ConstexprUint32NotEqual(constexpr uint32, constexpr uint32): constexpr bool;
extern operator '>=' macro
ConstexprInt31GreaterThanEqual(
constexpr int31, constexpr int31): constexpr bool;
@@ -1555,6 +1562,7 @@ namespace runtime {
extern runtime
GetDerivedMap(Context, JSFunction, JSReceiver): Map;
}
+extern macro IsDeprecatedMap(Map): bool;
transitioning builtin FastCreateDataProperty(implicit context: Context)(
receiver: JSReceiver, key: JSAny, value: JSAny): Object {
@@ -1703,3 +1711,9 @@ struct ConstantIterator<T: type> {
macro ConstantIterator<T: type>(value: T): ConstantIterator<T> {
return ConstantIterator{value};
}
+
+extern macro FeedbackIteratorSizeFor(constexpr int32): intptr;
+extern macro FeedbackIteratorMapIndexForEntry(constexpr int32): intptr;
+extern macro FeedbackIteratorHandlerIndexForEntry(constexpr int32): intptr;
+extern operator '[]' macro LoadWeakFixedArrayElement(
+ WeakFixedArray, intptr): MaybeObject;
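
Aside (illustrative sketch, not part of the patch): the new Torque signatures above — GetHeapObjectAssumeWeak(MaybeObject) with an IfCleared label, GetHeapObjectIfStrong, IsStrong — all branch on how a MaybeObject is tagged. A toy C++ version with an invented low-bit encoding (not V8's actual MaybeObject representation) shows the control flow:

#include <cassert>
#include <cstdint>
#include <optional>

// Toy tagging scheme: bit 0 marks a weak reference, and a weak reference
// whose payload is zero counts as "cleared".
using MaybeObject = uintptr_t;
constexpr uintptr_t kWeakBit = 1;

bool IsStrong(MaybeObject m) { return (m & kWeakBit) == 0; }
bool IsWeakOrCleared(MaybeObject m) { return (m & kWeakBit) != 0; }

// Analogue of GetHeapObjectAssumeWeak(...) labels IfCleared; nullopt plays
// the role of the IfCleared branch.
std::optional<uintptr_t> GetHeapObjectAssumeWeak(MaybeObject m) {
  assert(IsWeakOrCleared(m));
  uintptr_t payload = m & ~kWeakBit;
  if (payload == 0) return std::nullopt;  // cleared weak pointer
  return payload;
}

int main() {
  MaybeObject strong = 0x1000;
  MaybeObject weak = 0x2000 | kWeakBit;
  assert(IsStrong(strong));
  assert(GetHeapObjectAssumeWeak(weak).value() == 0x2000);
}
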
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index 6eb6f87c74..e42760d4d2 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -99,7 +99,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> HandleApiCallHelper(
}
}
- Object raw_call_data = fun_data->call_code();
+ Object raw_call_data = fun_data->call_code(kAcquireLoad);
if (!raw_call_data.IsUndefined(isolate)) {
DCHECK(raw_call_data.IsCallHandlerInfo());
CallHandlerInfo call_data = CallHandlerInfo::cast(raw_call_data);
@@ -206,7 +206,6 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
} else {
argv = new Address[frame_argc];
}
-#ifdef V8_REVERSE_JSARGS
argv[BuiltinArguments::kNewTargetOffset] = new_target->ptr();
argv[BuiltinArguments::kTargetOffset] = function->ptr();
argv[BuiltinArguments::kArgcOffset] = Smi::FromInt(frame_argc).ptr();
@@ -217,19 +216,6 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
for (int i = 0; i < argc; ++i) {
argv[cursor++] = args[i]->ptr();
}
-#else
- int cursor = frame_argc - 1;
- argv[cursor--] = receiver->ptr();
- for (int i = 0; i < argc; ++i) {
- argv[cursor--] = args[i]->ptr();
- }
- DCHECK_EQ(cursor, BuiltinArguments::kPaddingOffset);
- argv[BuiltinArguments::kPaddingOffset] =
- ReadOnlyRoots(isolate).the_hole_value().ptr();
- argv[BuiltinArguments::kArgcOffset] = Smi::FromInt(frame_argc).ptr();
- argv[BuiltinArguments::kTargetOffset] = function->ptr();
- argv[BuiltinArguments::kNewTargetOffset] = new_target->ptr();
-#endif
MaybeHandle<Object> result;
{
RelocatableArguments arguments(isolate, frame_argc, &argv[frame_argc - 1]);
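
Aside (illustrative sketch, not part of the patch): with V8_REVERSE_JSARGS now unconditional, only the layout from the kept branch survives — fixed header slots first, then receiver and arguments written at increasing indices, instead of the removed decreasing-cursor layout. A stand-alone C++ model with hypothetical offsets:

#include <cstddef>
#include <cstdio>
#include <vector>

// Offsets are invented stand-ins for BuiltinArguments::k*Offset.
enum { kNewTargetOffset = 0, kTargetOffset = 1, kArgcOffset = 2,
       kReceiverOffset = 3 };

std::vector<int> BuildFrame(int new_target, int target, int receiver,
                            const std::vector<int>& args) {
  size_t frame_argc = kReceiverOffset + 1 + args.size();
  std::vector<int> argv(frame_argc);
  argv[kNewTargetOffset] = new_target;
  argv[kTargetOffset] = target;
  argv[kArgcOffset] = static_cast<int>(frame_argc);
  size_t cursor = kReceiverOffset;
  argv[cursor++] = receiver;
  for (int a : args) argv[cursor++] = a;  // ascending, like the kept branch
  return argv;
}

int main() {
  for (int v : BuildFrame(1, 2, 3, {4, 5})) std::printf("%d ", v);
  std::printf("\n");
}
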
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 134baeb96e..7a8ee5c415 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -221,10 +221,9 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
}
TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget)));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ CSA_ASSERT(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
@@ -246,11 +245,12 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
TNode<JSArray> array_receiver = CAST(receiver);
TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(array_receiver));
Label return_undefined(this), fast_elements(this);
- GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
// 2) Ensure that the length is writable.
EnsureArrayLengthWritable(context, LoadMap(array_receiver), &runtime);
+ GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
+
// 3) Check that the elements backing store isn't copy-on-write.
TNode<FixedArrayBase> elements = LoadElements(array_receiver);
GotoIf(TaggedEqual(LoadMap(elements), FixedCOWArrayMapConstant()),
@@ -321,10 +321,9 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
Label double_transition(this);
Label runtime(this, Label::kDeferred);
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget)));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ CSA_ASSERT(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
@@ -438,10 +437,10 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
}
TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSArray> array = CAST(Parameter(Descriptor::kSource));
- TNode<BInt> begin = SmiToBInt(CAST(Parameter(Descriptor::kBegin)));
- TNode<BInt> count = SmiToBInt(CAST(Parameter(Descriptor::kCount)));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto array = Parameter<JSArray>(Descriptor::kSource);
+ TNode<BInt> begin = SmiToBInt(Parameter<Smi>(Descriptor::kBegin));
+ TNode<BInt> count = SmiToBInt(Parameter<Smi>(Descriptor::kCount));
CSA_ASSERT(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid()));
@@ -449,8 +448,8 @@ TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) {
}
TF_BUILTIN(CloneFastJSArray, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSArray> array = CAST(Parameter(Descriptor::kSource));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto array = Parameter<JSArray>(Descriptor::kSource);
CSA_ASSERT(this,
Word32Or(Word32BinaryNot(IsHoleyFastElementsKindForRead(
@@ -468,8 +467,8 @@ TF_BUILTIN(CloneFastJSArray, ArrayBuiltinsAssembler) {
// - If there are holes in the source, the ElementsKind of the "copy" will be
// PACKED_ELEMENTS (such that undefined can be stored).
TF_BUILTIN(CloneFastJSArrayFillingHoles, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSArray> array = CAST(Parameter(Descriptor::kSource));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto array = Parameter<JSArray>(Descriptor::kSource);
CSA_ASSERT(this,
Word32Or(Word32BinaryNot(IsHoleyFastElementsKindForRead(
@@ -543,9 +542,9 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> callbackfn = args.GetOptionalArgumentValue(0);
TNode<Object> this_arg = args.GetOptionalArgumentValue(1);
@@ -1068,28 +1067,28 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(
TF_BUILTIN(ArrayIncludes, ArrayIncludesIndexofAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
Generate(kIncludes, argc, context);
}
TF_BUILTIN(ArrayIncludesSmiOrObject, ArrayIncludesIndexofAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<FixedArray> elements = CAST(Parameter(Descriptor::kElements));
- TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
- TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
- TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto elements = Parameter<FixedArray>(Descriptor::kElements);
+ auto search_element = Parameter<Object>(Descriptor::kSearchElement);
+ auto array_length = Parameter<Smi>(Descriptor::kLength);
+ auto from_index = Parameter<Smi>(Descriptor::kFromIndex);
GenerateSmiOrObject(kIncludes, context, elements, search_element,
array_length, from_index);
}
TF_BUILTIN(ArrayIncludesPackedDoubles, ArrayIncludesIndexofAssembler) {
- TNode<FixedArrayBase> elements = CAST(Parameter(Descriptor::kElements));
- TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
- TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
- TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
+ auto elements = Parameter<FixedArrayBase>(Descriptor::kElements);
+ auto search_element = Parameter<Object>(Descriptor::kSearchElement);
+ auto array_length = Parameter<Smi>(Descriptor::kLength);
+ auto from_index = Parameter<Smi>(Descriptor::kFromIndex);
ReturnIfEmpty(array_length, FalseConstant());
GeneratePackedDoubles(kIncludes, CAST(elements), search_element, array_length,
@@ -1097,10 +1096,10 @@ TF_BUILTIN(ArrayIncludesPackedDoubles, ArrayIncludesIndexofAssembler) {
}
TF_BUILTIN(ArrayIncludesHoleyDoubles, ArrayIncludesIndexofAssembler) {
- TNode<FixedArrayBase> elements = CAST(Parameter(Descriptor::kElements));
- TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
- TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
- TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
+ auto elements = Parameter<FixedArrayBase>(Descriptor::kElements);
+ auto search_element = Parameter<Object>(Descriptor::kSearchElement);
+ auto array_length = Parameter<Smi>(Descriptor::kLength);
+ auto from_index = Parameter<Smi>(Descriptor::kFromIndex);
ReturnIfEmpty(array_length, FalseConstant());
GenerateHoleyDoubles(kIncludes, CAST(elements), search_element, array_length,
@@ -1109,28 +1108,28 @@ TF_BUILTIN(ArrayIncludesHoleyDoubles, ArrayIncludesIndexofAssembler) {
TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
Generate(kIndexOf, argc, context);
}
TF_BUILTIN(ArrayIndexOfSmiOrObject, ArrayIncludesIndexofAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<FixedArray> elements = CAST(Parameter(Descriptor::kElements));
- TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
- TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
- TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto elements = Parameter<FixedArray>(Descriptor::kElements);
+ auto search_element = Parameter<Object>(Descriptor::kSearchElement);
+ auto array_length = Parameter<Smi>(Descriptor::kLength);
+ auto from_index = Parameter<Smi>(Descriptor::kFromIndex);
GenerateSmiOrObject(kIndexOf, context, elements, search_element, array_length,
from_index);
}
TF_BUILTIN(ArrayIndexOfPackedDoubles, ArrayIncludesIndexofAssembler) {
- TNode<FixedArrayBase> elements = CAST(Parameter(Descriptor::kElements));
- TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
- TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
- TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
+ auto elements = Parameter<FixedArrayBase>(Descriptor::kElements);
+ auto search_element = Parameter<Object>(Descriptor::kSearchElement);
+ auto array_length = Parameter<Smi>(Descriptor::kLength);
+ auto from_index = Parameter<Smi>(Descriptor::kFromIndex);
ReturnIfEmpty(array_length, NumberConstant(-1));
GeneratePackedDoubles(kIndexOf, CAST(elements), search_element, array_length,
@@ -1138,10 +1137,10 @@ TF_BUILTIN(ArrayIndexOfPackedDoubles, ArrayIncludesIndexofAssembler) {
}
TF_BUILTIN(ArrayIndexOfHoleyDoubles, ArrayIncludesIndexofAssembler) {
- TNode<FixedArrayBase> elements = CAST(Parameter(Descriptor::kElements));
- TNode<Object> search_element = CAST(Parameter(Descriptor::kSearchElement));
- TNode<Smi> array_length = CAST(Parameter(Descriptor::kLength));
- TNode<Smi> from_index = CAST(Parameter(Descriptor::kFromIndex));
+ auto elements = Parameter<FixedArrayBase>(Descriptor::kElements);
+ auto search_element = Parameter<Object>(Descriptor::kSearchElement);
+ auto array_length = Parameter<Smi>(Descriptor::kLength);
+ auto from_index = Parameter<Smi>(Descriptor::kFromIndex);
ReturnIfEmpty(array_length, NumberConstant(-1));
GenerateHoleyDoubles(kIndexOf, CAST(elements), search_element, array_length,
@@ -1150,24 +1149,24 @@ TF_BUILTIN(ArrayIndexOfHoleyDoubles, ArrayIncludesIndexofAssembler) {
// ES #sec-array.prototype.values
TF_BUILTIN(ArrayPrototypeValues, CodeStubAssembler) {
- TNode<NativeContext> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<NativeContext>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Return(CreateArrayIterator(context, ToObject_Inline(context, receiver),
IterationKind::kValues));
}
// ES #sec-array.prototype.entries
TF_BUILTIN(ArrayPrototypeEntries, CodeStubAssembler) {
- TNode<NativeContext> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<NativeContext>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Return(CreateArrayIterator(context, ToObject_Inline(context, receiver),
IterationKind::kEntries));
}
// ES #sec-array.prototype.keys
TF_BUILTIN(ArrayPrototypeKeys, CodeStubAssembler) {
- TNode<NativeContext> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<NativeContext>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Return(CreateArrayIterator(context, ToObject_Inline(context, receiver),
IterationKind::kKeys));
}
@@ -1176,8 +1175,8 @@ TF_BUILTIN(ArrayPrototypeKeys, CodeStubAssembler) {
TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
const char* method_name = "Array Iterator.prototype.next";
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> maybe_iterator = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_iterator = Parameter<Object>(Descriptor::kReceiver);
TVARIABLE(Oddball, var_done, TrueConstant());
TVARIABLE(Object, var_value, UndefinedConstant());
@@ -1504,12 +1503,12 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
// https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
TF_BUILTIN(FlattenIntoArray, ArrayFlattenAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSReceiver> target = CAST(Parameter(Descriptor::kTarget));
- TNode<JSReceiver> source = CAST(Parameter(Descriptor::kSource));
- TNode<Number> source_length = CAST(Parameter(Descriptor::kSourceLength));
- TNode<Number> start = CAST(Parameter(Descriptor::kStart));
- TNode<Number> depth = CAST(Parameter(Descriptor::kDepth));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto target = Parameter<JSReceiver>(Descriptor::kTarget);
+ auto source = Parameter<JSReceiver>(Descriptor::kSource);
+ auto source_length = Parameter<Number>(Descriptor::kSourceLength);
+ auto start = Parameter<Number>(Descriptor::kStart);
+ auto depth = Parameter<Number>(Descriptor::kDepth);
// FlattenIntoArray might get called recursively; check the stack for overflow
// manually as it has stub linkage.

@@ -1521,15 +1520,14 @@ TF_BUILTIN(FlattenIntoArray, ArrayFlattenAssembler) {
// https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
TF_BUILTIN(FlatMapIntoArray, ArrayFlattenAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSReceiver> target = CAST(Parameter(Descriptor::kTarget));
- TNode<JSReceiver> source = CAST(Parameter(Descriptor::kSource));
- TNode<Number> source_length = CAST(Parameter(Descriptor::kSourceLength));
- TNode<Number> start = CAST(Parameter(Descriptor::kStart));
- TNode<Number> depth = CAST(Parameter(Descriptor::kDepth));
- TNode<HeapObject> mapper_function =
- CAST(Parameter(Descriptor::kMapperFunction));
- TNode<Object> this_arg = CAST(Parameter(Descriptor::kThisArg));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto target = Parameter<JSReceiver>(Descriptor::kTarget);
+ auto source = Parameter<JSReceiver>(Descriptor::kSource);
+ auto source_length = Parameter<Number>(Descriptor::kSourceLength);
+ auto start = Parameter<Number>(Descriptor::kStart);
+ auto depth = Parameter<Number>(Descriptor::kDepth);
+ auto mapper_function = Parameter<HeapObject>(Descriptor::kMapperFunction);
+ auto this_arg = Parameter<Object>(Descriptor::kThisArg);
Return(FlattenIntoArray(context, target, source, source_length, start, depth,
mapper_function, this_arg));
@@ -1538,9 +1536,9 @@ TF_BUILTIN(FlatMapIntoArray, ArrayFlattenAssembler) {
// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flat
TF_BUILTIN(ArrayPrototypeFlat, CodeStubAssembler) {
const TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto context = Parameter<Context>(Descriptor::kContext);
const TNode<Object> receiver = args.GetReceiver();
const TNode<Object> depth = args.GetOptionalArgumentValue(0);
@@ -1580,9 +1578,9 @@ TF_BUILTIN(ArrayPrototypeFlat, CodeStubAssembler) {
// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatMap
TF_BUILTIN(ArrayPrototypeFlatMap, CodeStubAssembler) {
const TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto context = Parameter<Context>(Descriptor::kContext);
const TNode<Object> receiver = args.GetReceiver();
const TNode<Object> mapper_function = args.GetOptionalArgumentValue(0);
@@ -1620,11 +1618,10 @@ TF_BUILTIN(ArrayPrototypeFlatMap, CodeStubAssembler) {
TF_BUILTIN(ArrayConstructor, ArrayBuiltinsAssembler) {
// This is a trampoline to ArrayConstructorImpl which just adds
// allocation_site parameter value and sets new_target if necessary.
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSFunction> function = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto function = Parameter<JSFunction>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
// If new_target is undefined, then this is the 'Call' case, so set new_target
// to function.
@@ -1785,12 +1782,11 @@ void ArrayBuiltinsAssembler::GenerateDispatchToArrayStub(
}
TF_BUILTIN(ArrayConstructorImpl, ArrayBuiltinsAssembler) {
- TNode<JSFunction> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<HeapObject> maybe_allocation_site =
- CAST(Parameter(Descriptor::kAllocationSite));
+ auto target = Parameter<JSFunction>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto maybe_allocation_site =
+ Parameter<HeapObject>(Descriptor::kAllocationSite);
// Initial map for the builtin Array functions should be Map.
CSA_ASSERT(this, IsMap(CAST(LoadObjectField(
@@ -1877,12 +1873,12 @@ void ArrayBuiltinsAssembler::GenerateArrayNoArgumentConstructor(
ElementsKind kind, AllocationSiteOverrideMode mode) {
using Descriptor = ArrayNoArgumentConstructorDescriptor;
TNode<NativeContext> native_context = LoadObjectField<NativeContext>(
- CAST(Parameter(Descriptor::kFunction)), JSFunction::kContextOffset);
+ Parameter<HeapObject>(Descriptor::kFunction), JSFunction::kContextOffset);
bool track_allocation_site =
AllocationSite::ShouldTrack(kind) && mode != DISABLE_ALLOCATION_SITES;
base::Optional<TNode<AllocationSite>> allocation_site =
track_allocation_site
- ? CAST(Parameter(Descriptor::kAllocationSite))
+ ? Parameter<AllocationSite>(Descriptor::kAllocationSite)
: base::Optional<TNode<AllocationSite>>(base::nullopt);
TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
TNode<JSArray> array = AllocateJSArray(
@@ -1894,8 +1890,8 @@ void ArrayBuiltinsAssembler::GenerateArrayNoArgumentConstructor(
void ArrayBuiltinsAssembler::GenerateArraySingleArgumentConstructor(
ElementsKind kind, AllocationSiteOverrideMode mode) {
using Descriptor = ArraySingleArgumentConstructorDescriptor;
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> function = CAST(Parameter(Descriptor::kFunction));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto function = Parameter<HeapObject>(Descriptor::kFunction);
TNode<NativeContext> native_context =
CAST(LoadObjectField(function, JSFunction::kContextOffset));
TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
@@ -1907,11 +1903,9 @@ void ArrayBuiltinsAssembler::GenerateArraySingleArgumentConstructor(
: DONT_TRACK_ALLOCATION_SITE;
}
- TNode<Object> array_size =
- CAST(Parameter(Descriptor::kArraySizeSmiParameter));
+ auto array_size = Parameter<Object>(Descriptor::kArraySizeSmiParameter);
// allocation_site can be Undefined or an AllocationSite
- TNode<HeapObject> allocation_site =
- CAST(Parameter(Descriptor::kAllocationSite));
+ auto allocation_site = Parameter<HeapObject>(Descriptor::kAllocationSite);
GenerateConstructor(context, function, array_map, array_size, allocation_site,
kind, allocation_site_mode);
@@ -1934,12 +1928,11 @@ void ArrayBuiltinsAssembler::GenerateArrayNArgumentsConstructor(
}
TF_BUILTIN(ArrayNArgumentsConstructor, ArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSFunction> target = CAST(Parameter(Descriptor::kFunction));
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<HeapObject> maybe_allocation_site =
- CAST(Parameter(Descriptor::kAllocationSite));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto target = Parameter<JSFunction>(Descriptor::kFunction);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto maybe_allocation_site =
+ Parameter<HeapObject>(Descriptor::kAllocationSite);
GenerateArrayNArgumentsConstructor(context, target, target, argc,
maybe_allocation_site);
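
Aside (illustrative sketch, not part of the patch): the mechanical change throughout this file replaces CAST(Parameter(Descriptor::kX)) with Parameter<T>(Descriptor::kX), and UncheckedCast<Int32T>(Parameter(...)) with UncheckedParameter<Int32T>(...), folding the cast into the accessor. A generic C++ sketch of that shape, with entirely invented machinery:

#include <any>
#include <cassert>
#include <string>

// Invented stand-ins: a Slot wraps a value; Parameter<T> performs the
// checked downcast that used to be spelled as a separate CAST, while
// UncheckedParameter<T> trusts the caller, like UncheckedCast did.
struct Slot { std::any value; };

template <class T>
T Parameter(const Slot& s) {
  return std::any_cast<T>(s.value);  // throws std::bad_any_cast on mismatch
}

template <class T>
T UncheckedParameter(const Slot& s) {
  // No verification: a type mismatch here is the caller's bug, which is
  // exactly the contract of the unchecked variant.
  return *std::any_cast<T>(&s.value);
}

int main() {
  Slot context{std::string("context")};
  assert(Parameter<std::string>(context) == "context");
  assert(UncheckedParameter<std::string>(context) == "context");
}
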
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 3c2fe33c5b..5467cf7c85 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -457,11 +457,11 @@ BUILTIN(ArrayPop) {
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
uint32_t len = static_cast<uint32_t>(array->length().Number());
- if (len == 0) return ReadOnlyRoots(isolate).undefined_value();
if (JSArray::HasReadOnlyLength(array)) {
return GenericArrayPop(isolate, &args);
}
+ if (len == 0) return ReadOnlyRoots(isolate).undefined_value();
Handle<Object> result;
if (IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
@@ -988,7 +988,7 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
Handle<String> string(String::cast(js_value->value()), isolate);
uint32_t length = static_cast<uint32_t>(string->length());
uint32_t i = 0;
- uint32_t limit = Min(length, range);
+ uint32_t limit = std::min(length, range);
for (; i < limit; i++) {
indices->push_back(i);
}
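
Aside (illustrative sketch, not part of the patch): both the CSA and C++ hunks above move the read-only-length check ahead of the len == 0 fast return, apparently so that popping an empty array whose length is read-only still reaches the generic path instead of silently returning undefined. A stand-alone C++ model of the reordering, with invented names; here GenericPop merely reports an error, whereas V8's generic path implements the full spec behavior:

#include <cassert>
#include <stdexcept>
#include <vector>

struct Arr { std::vector<int> elems; bool read_only_length = false; };

int GenericPop(Arr& a) {
  if (a.read_only_length) throw std::runtime_error("length is read-only");
  if (a.elems.empty()) return -1;  // stands in for undefined
  int v = a.elems.back(); a.elems.pop_back(); return v;
}

int FastPop(Arr& a) {
  if (a.read_only_length) return GenericPop(a);  // check moved up
  if (a.elems.empty()) return -1;                // fast return now second
  int v = a.elems.back(); a.elems.pop_back(); return v;
}

int main() {
  Arr frozen_empty{{}, true};
  try {
    FastPop(frozen_empty);
    assert(false);  // must not take the silent fast path
  } catch (const std::runtime_error&) {
    // the generic path reported the read-only length
  }
}
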
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index 62d7d820c0..0f5f905186 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -139,8 +139,8 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
// * If relativeStart < 0, let first be max((len + relativeStart), 0); else
// let first be min(relativeStart, len).
double const first = (relative_start->Number() < 0)
- ? Max(len + relative_start->Number(), 0.0)
- : Min(relative_start->Number(), len);
+ ? std::max(len + relative_start->Number(), 0.0)
+ : std::min(relative_start->Number(), len);
Handle<Object> first_obj = isolate->factory()->NewNumber(first);
// * If end is undefined, let relativeEnd be len; else let relativeEnd be ?
@@ -157,11 +157,11 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
// * If relativeEnd < 0, let final be max((len + relativeEnd), 0); else let
// final be min(relativeEnd, len).
- double const final_ = (relative_end < 0) ? Max(len + relative_end, 0.0)
- : Min(relative_end, len);
+ double const final_ = (relative_end < 0) ? std::max(len + relative_end, 0.0)
+ : std::min(relative_end, len);
// * Let newLen be max(final-first, 0).
- double const new_len = Max(final_ - first, 0.0);
+ double const new_len = std::max(final_ - first, 0.0);
Handle<Object> new_len_obj = isolate->factory()->NewNumber(new_len);
// * [AB] Let ctor be ? SpeciesConstructor(O, %ArrayBuffer%).
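
Aside (not part of the patch): the clamping quoted in the spec-step comments above, written as plain C++ using the std::min/std::max this hunk switches to:

#include <algorithm>
#include <cassert>

// relativeStart/relativeEnd < 0 count from the end; both are clamped to
// [0, len], and the new length never goes negative.
double Clamp(double relative, double len) {
  return relative < 0 ? std::max(len + relative, 0.0)
                      : std::min(relative, len);
}

int main() {
  double len = 10;
  double first = Clamp(-3, len);    // start of slice(-3)
  double final_ = Clamp(len, len);  // end defaults to len
  double new_len = std::max(final_ - first, 0.0);
  assert(first == 7 && new_len == 3);
}
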
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index e84442295c..49b00caa04 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -77,9 +77,9 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
}
TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
- TNode<JSFunction> closure = CAST(Parameter(Descriptor::kClosure));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto closure = Parameter<JSFunction>(Descriptor::kClosure);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto context = Parameter<Context>(Descriptor::kContext);
// Compute the number of registers and parameters.
TNode<SharedFunctionInfo> shared = LoadObjectField<SharedFunctionInfo>(
@@ -175,11 +175,11 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
}
TF_BUILTIN(AsyncFunctionReject, AsyncFunctionBuiltinsAssembler) {
- TNode<JSAsyncFunctionObject> async_function_object =
- CAST(Parameter(Descriptor::kAsyncFunctionObject));
- TNode<Object> reason = CAST(Parameter(Descriptor::kReason));
- TNode<Oddball> can_suspend = CAST(Parameter(Descriptor::kCanSuspend));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto async_function_object =
+ Parameter<JSAsyncFunctionObject>(Descriptor::kAsyncFunctionObject);
+ auto reason = Parameter<Object>(Descriptor::kReason);
+ auto can_suspend = Parameter<Oddball>(Descriptor::kCanSuspend);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<JSPromise> promise = LoadObjectField<JSPromise>(
async_function_object, JSAsyncFunctionObject::kPromiseOffset);
@@ -200,11 +200,11 @@ TF_BUILTIN(AsyncFunctionReject, AsyncFunctionBuiltinsAssembler) {
}
TF_BUILTIN(AsyncFunctionResolve, AsyncFunctionBuiltinsAssembler) {
- TNode<JSAsyncFunctionObject> async_function_object =
- CAST(Parameter(Descriptor::kAsyncFunctionObject));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Oddball> can_suspend = CAST(Parameter(Descriptor::kCanSuspend));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto async_function_object =
+ Parameter<JSAsyncFunctionObject>(Descriptor::kAsyncFunctionObject);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto can_suspend = Parameter<Oddball>(Descriptor::kCanSuspend);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<JSPromise> promise = LoadObjectField<JSPromise>(
async_function_object, JSAsyncFunctionObject::kPromiseOffset);
@@ -224,14 +224,14 @@ TF_BUILTIN(AsyncFunctionResolve, AsyncFunctionBuiltinsAssembler) {
// the promise instead of the result of RejectPromise or ResolvePromise
// respectively from a lazy deoptimization.
TF_BUILTIN(AsyncFunctionLazyDeoptContinuation, AsyncFunctionBuiltinsAssembler) {
- TNode<JSPromise> promise = CAST(Parameter(Descriptor::kPromise));
+ auto promise = Parameter<JSPromise>(Descriptor::kPromise);
Return(promise);
}
TF_BUILTIN(AsyncFunctionAwaitRejectClosure, AsyncFunctionBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 1);
- const TNode<Object> sentError = CAST(Parameter(Descriptor::kSentError));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto sentError = Parameter<Object>(Descriptor::kSentError);
+ const auto context = Parameter<Context>(Descriptor::kContext);
AsyncFunctionAwaitResumeClosure(context, sentError,
JSGeneratorObject::kThrow);
@@ -240,8 +240,8 @@ TF_BUILTIN(AsyncFunctionAwaitRejectClosure, AsyncFunctionBuiltinsAssembler) {
TF_BUILTIN(AsyncFunctionAwaitResolveClosure, AsyncFunctionBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 1);
- const TNode<Object> sentValue = CAST(Parameter(Descriptor::kSentValue));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto sentValue = Parameter<Object>(Descriptor::kSentValue);
+ const auto context = Parameter<Context>(Descriptor::kContext);
AsyncFunctionAwaitResumeClosure(context, sentValue, JSGeneratorObject::kNext);
Return(UndefinedConstant());
@@ -258,10 +258,10 @@ TF_BUILTIN(AsyncFunctionAwaitResolveClosure, AsyncFunctionBuiltinsAssembler) {
template <typename Descriptor>
void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
const bool is_predicted_as_caught) {
- TNode<JSAsyncFunctionObject> async_function_object =
- CAST(Parameter(Descriptor::kAsyncFunctionObject));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto async_function_object =
+ Parameter<JSAsyncFunctionObject>(Descriptor::kAsyncFunctionObject);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<JSPromise> outer_promise = LoadObjectField<JSPromise>(
async_function_object, JSAsyncFunctionObject::kPromiseOffset);
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 383289fd0f..fa05e9b32a 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -269,7 +269,7 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(
StoreObjectFieldNoWriteBarrier(function, JSFunction::kContextOffset, context);
// For the native closures that are initialized here (for `await`)
- // we know that their SharedFunctionInfo::function_data() slot
+ // we know that their SharedFunctionInfo::function_data(kAcquireLoad) slot
// contains a builtin index (as Smi), so there's no need to use
// CodeStubAssembler::GetSharedFunctionInfoCode() helper here,
// which almost doubles the size of `await` builtins (unnecessarily).
@@ -303,8 +303,8 @@ TNode<Context> AsyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext(
}
TF_BUILTIN(AsyncIteratorValueUnwrap, AsyncBuiltinsAssembler) {
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
const TNode<Object> done =
LoadContextElement(context, ValueUnwrapContext::kDoneSlot);
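
Aside (generic illustration, not V8's accessor machinery): the kAcquireLoad tag appearing in this patch marks loads that pair with release stores, so that everything written before the store is visible after the load. In portable C++:

#include <atomic>
#include <cassert>
#include <thread>

std::atomic<int*> slot{nullptr};
int payload = 0;

int main() {
  std::thread writer([] {
    payload = 42;                                     // plain write
    slot.store(&payload, std::memory_order_release);  // publish
  });
  int* p;
  while ((p = slot.load(std::memory_order_acquire)) == nullptr) {
  }
  assert(*p == 42);  // guaranteed by the acquire/release pairing
  writer.join();
}
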
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index 2b6d720880..c847d838b6 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -232,10 +232,10 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
template <typename Descriptor>
void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) {
- TNode<JSAsyncGeneratorObject> async_generator_object =
- CAST(Parameter(Descriptor::kAsyncGeneratorObject));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto async_generator_object =
+ Parameter<JSAsyncGeneratorObject>(Descriptor::kAsyncGeneratorObject);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<AsyncGeneratorRequest> request =
CAST(LoadFirstAsyncGeneratorRequestFromQueue(async_generator_object));
@@ -310,12 +310,12 @@ TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Object> generator = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kNext,
@@ -328,12 +328,12 @@ TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Object> generator = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kReturn,
@@ -346,12 +346,12 @@ TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) {
const int kValueArg = 0;
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Object> generator = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
AsyncGeneratorEnqueue(&args, context, generator, value,
JSAsyncGeneratorObject::kThrow,
@@ -359,15 +359,15 @@ TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) {
}
TF_BUILTIN(AsyncGeneratorAwaitResolveClosure, AsyncGeneratorBuiltinsAssembler) {
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
AsyncGeneratorAwaitResumeClosure(context, value,
JSAsyncGeneratorObject::kNext);
}
TF_BUILTIN(AsyncGeneratorAwaitRejectClosure, AsyncGeneratorBuiltinsAssembler) {
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
AsyncGeneratorAwaitResumeClosure(context, value,
JSAsyncGeneratorObject::kThrow);
}
@@ -384,9 +384,9 @@ TF_BUILTIN(AsyncGeneratorAwaitCaught, AsyncGeneratorBuiltinsAssembler) {
TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
using Descriptor = AsyncGeneratorResumeNextDescriptor;
- const TNode<JSAsyncGeneratorObject> generator =
- CAST(Parameter(Descriptor::kGenerator));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto generator =
+ Parameter<JSAsyncGeneratorObject>(Descriptor::kGenerator);
+ const auto context = Parameter<Context>(Descriptor::kContext);
// The penultimate step of proposal-async-iteration/#sec-asyncgeneratorresolve
// and proposal-async-iteration/#sec-asyncgeneratorreject both recursively
@@ -475,11 +475,11 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
}
TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
- const TNode<JSAsyncGeneratorObject> generator =
- CAST(Parameter(Descriptor::kGenerator));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- const TNode<Object> done = CAST(Parameter(Descriptor::kDone));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto generator =
+ Parameter<JSAsyncGeneratorObject>(Descriptor::kGenerator);
+ const auto value = Parameter<Object>(Descriptor::kValue);
+ const auto done = Parameter<Object>(Descriptor::kDone);
+ const auto context = Parameter<Context>(Descriptor::kContext);
CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
@@ -546,10 +546,10 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
TF_BUILTIN(AsyncGeneratorReject, AsyncGeneratorBuiltinsAssembler) {
using Descriptor = AsyncGeneratorRejectDescriptor;
- const TNode<JSAsyncGeneratorObject> generator =
- CAST(Parameter(Descriptor::kGenerator));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto generator =
+ Parameter<JSAsyncGeneratorObject>(Descriptor::kGenerator);
+ const auto value = Parameter<Object>(Descriptor::kValue);
+ const auto context = Parameter<Context>(Descriptor::kContext);
TNode<AsyncGeneratorRequest> next =
TakeFirstAsyncGeneratorRequestFromQueue(generator);
@@ -560,11 +560,10 @@ TF_BUILTIN(AsyncGeneratorReject, AsyncGeneratorBuiltinsAssembler) {
}
TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) {
- const TNode<JSGeneratorObject> generator =
- CAST(Parameter(Descriptor::kGenerator));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- const TNode<Oddball> is_caught = CAST(Parameter(Descriptor::kIsCaught));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto generator = Parameter<JSGeneratorObject>(Descriptor::kGenerator);
+ const auto value = Parameter<Object>(Descriptor::kValue);
+ const auto is_caught = Parameter<Oddball>(Descriptor::kIsCaught);
+ const auto context = Parameter<Context>(Descriptor::kContext);
const TNode<AsyncGeneratorRequest> request =
CAST(LoadFirstAsyncGeneratorRequestFromQueue(generator));
@@ -579,8 +578,8 @@ TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) {
}
TF_BUILTIN(AsyncGeneratorYieldResolveClosure, AsyncGeneratorBuiltinsAssembler) {
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const auto context = Parameter<Context>(Descriptor::kContext);
+ const auto value = Parameter<Object>(Descriptor::kValue);
const TNode<JSAsyncGeneratorObject> generator =
CAST(LoadContextElement(context, Context::EXTENSION_INDEX));
@@ -611,10 +610,9 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) {
// (per proposal-async-iteration/#sec-asyncgeneratorresumenext step 10.b.i)
//
// In all cases, the final step is to jump back to AsyncGeneratorResumeNext.
- const TNode<JSGeneratorObject> generator =
- CAST(Parameter(Descriptor::kGenerator));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- const TNode<Oddball> is_caught = CAST(Parameter(Descriptor::kIsCaught));
+ const auto generator = Parameter<JSGeneratorObject>(Descriptor::kGenerator);
+ const auto value = Parameter<Object>(Descriptor::kValue);
+ const auto is_caught = Parameter<Oddball>(Descriptor::kIsCaught);
const TNode<AsyncGeneratorRequest> req =
CAST(LoadFirstAsyncGeneratorRequestFromQueue(generator));
@@ -635,7 +633,7 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) {
BIND(&perform_await);
SetGeneratorAwaiting(generator);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
const TNode<JSPromise> outer_promise =
LoadPromiseFromAsyncGeneratorRequest(req);
Await(context, generator, value, outer_promise, var_on_resolve.value(),
@@ -650,8 +648,8 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) {
// proposal-async-iteration/#sec-asyncgeneratoryield step 8.e
TF_BUILTIN(AsyncGeneratorReturnResolveClosure,
AsyncGeneratorBuiltinsAssembler) {
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const auto context = Parameter<Context>(Descriptor::kContext);
+ const auto value = Parameter<Object>(Descriptor::kValue);
AsyncGeneratorAwaitResumeClosure(context, value, JSGeneratorObject::kReturn);
}
@@ -660,8 +658,8 @@ TF_BUILTIN(AsyncGeneratorReturnResolveClosure,
// AsyncGeneratorResumeNext.
TF_BUILTIN(AsyncGeneratorReturnClosedResolveClosure,
AsyncGeneratorBuiltinsAssembler) {
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const auto context = Parameter<Context>(Descriptor::kContext);
+ const auto value = Parameter<Object>(Descriptor::kValue);
const TNode<JSAsyncGeneratorObject> generator =
CAST(LoadContextElement(context, Context::EXTENSION_INDEX));
@@ -678,8 +676,8 @@ TF_BUILTIN(AsyncGeneratorReturnClosedResolveClosure,
TF_BUILTIN(AsyncGeneratorReturnClosedRejectClosure,
AsyncGeneratorBuiltinsAssembler) {
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ const auto context = Parameter<Context>(Descriptor::kContext);
+ const auto value = Parameter<Object>(Descriptor::kValue);
const TNode<JSAsyncGeneratorObject> generator =
CAST(LoadContextElement(context, Context::EXTENSION_INDEX));
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index 73e5605ccc..9e6223073f 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -274,12 +274,12 @@ AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
// Section #sec-%asyncfromsynciteratorprototype%.next
TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
const TNode<Object> iterator = args.GetReceiver();
const TNode<Object> value = args.GetOptionalArgumentValue(kValueOrReasonArg);
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto context = Parameter<Context>(Descriptor::kContext);
auto get_method = [=](const TNode<JSReceiver> unused) {
return LoadObjectField(CAST(iterator),
@@ -295,12 +295,12 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) {
TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
AsyncFromSyncBuiltinsAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
const TNode<Object> iterator = args.GetReceiver();
const TNode<Object> value = args.GetOptionalArgumentValue(kValueOrReasonArg);
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto context = Parameter<Context>(Descriptor::kContext);
auto if_return_undefined = [=, &args](
const TNode<NativeContext> native_context,
@@ -328,12 +328,12 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrow,
AsyncFromSyncBuiltinsAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
const TNode<Object> iterator = args.GetReceiver();
const TNode<Object> reason = args.GetOptionalArgumentValue(kValueOrReasonArg);
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto context = Parameter<Context>(Descriptor::kContext);
auto if_throw_undefined = [=](const TNode<NativeContext> native_context,
const TNode<JSPromise> promise,
diff --git a/deps/v8/src/builtins/builtins-bigint-gen.cc b/deps/v8/src/builtins/builtins-bigint-gen.cc
index f8fe460c45..e424c53caf 100644
--- a/deps/v8/src/builtins/builtins-bigint-gen.cc
+++ b/deps/v8/src/builtins/builtins-bigint-gen.cc
@@ -17,8 +17,8 @@ TF_BUILTIN(BigIntToI64, CodeStubAssembler) {
return;
}
- TNode<Object> value = CAST(Parameter(Descriptor::kArgument));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto value = Parameter<Object>(Descriptor::kArgument);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<BigInt> n = ToBigInt(context, value);
TVARIABLE(UintPtrT, var_low);
@@ -35,8 +35,8 @@ TF_BUILTIN(BigIntToI32Pair, CodeStubAssembler) {
return;
}
- TNode<Object> value = CAST(Parameter(Descriptor::kArgument));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto value = Parameter<Object>(Descriptor::kArgument);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<BigInt> bigint = ToBigInt(context, value);
TVARIABLE(UintPtrT, var_low);
@@ -53,8 +53,7 @@ TF_BUILTIN(I64ToBigInt, CodeStubAssembler) {
return;
}
- TNode<IntPtrT> argument =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kArgument));
+ auto argument = UncheckedParameter<IntPtrT>(Descriptor::kArgument);
Return(BigIntFromInt64(argument));
}
@@ -66,8 +65,8 @@ TF_BUILTIN(I32PairToBigInt, CodeStubAssembler) {
return;
}
- TNode<IntPtrT> low = UncheckedCast<IntPtrT>(Parameter(Descriptor::kLow));
- TNode<IntPtrT> high = UncheckedCast<IntPtrT>(Parameter(Descriptor::kHigh));
+ auto low = UncheckedParameter<IntPtrT>(Descriptor::kLow);
+ auto high = UncheckedParameter<IntPtrT>(Descriptor::kHigh);
Return(BigIntFromInt32Pair(low, high));
}
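
Aside (not part of the patch): what BigIntToI32Pair and I32PairToBigInt shuttle across the boundary on 32-bit targets, modeled with plain integers — a 64-bit value as a (low, high) pair of 32-bit halves:

#include <cassert>
#include <cstdint>

void Split(uint64_t v, uint32_t* low, uint32_t* high) {
  *low = static_cast<uint32_t>(v);
  *high = static_cast<uint32_t>(v >> 32);
}

uint64_t Join(uint32_t low, uint32_t high) {
  return (static_cast<uint64_t>(high) << 32) | low;
}

int main() {
  uint32_t lo, hi;
  Split(0x0123456789abcdefULL, &lo, &hi);
  assert(lo == 0x89abcdefu && hi == 0x01234567u);
  assert(Join(lo, hi) == 0x0123456789abcdefULL);
}
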
diff --git a/deps/v8/src/builtins/bigint.tq b/deps/v8/src/builtins/builtins-bigint.tq
index 409301dcc9..067fb235de 100644
--- a/deps/v8/src/builtins/bigint.tq
+++ b/deps/v8/src/builtins/builtins-bigint.tq
@@ -4,24 +4,6 @@
#include 'src/builtins/builtins-bigint-gen.h'
-// TODO(nicohartmann): Discuss whether types used by multiple builtins should be
-// in global namespace
-extern class BigIntBase extends PrimitiveHeapObject
- generates 'TNode<BigInt>' {}
-
-type BigInt extends BigIntBase;
-
-@noVerifier
-@hasSameInstanceTypeAsParent
-@doNotGenerateCast
-extern class MutableBigInt extends BigIntBase generates 'TNode<BigInt>' {
-}
-
-Convert<BigInt, MutableBigInt>(i: MutableBigInt): BigInt {
- assert(bigint::IsCanonicalized(i));
- return %RawDownCast<BigInt>(Convert<BigIntBase>(i));
-}
-
namespace bigint {
const kPositiveSign: uint32 = 0;
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index 61ae06bf9e..ffe7aa40e9 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -66,13 +66,12 @@ void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
TF_BUILTIN(Call_ReceiverIsNullOrUndefined_WithFeedback,
CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kFunction));
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ auto target = Parameter<Object>(Descriptor::kFunction);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
CollectCallFeedback(target, context, maybe_feedback_vector,
Unsigned(ChangeInt32ToIntPtr(slot)));
TailCallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined, context, target,
@@ -81,13 +80,12 @@ TF_BUILTIN(Call_ReceiverIsNullOrUndefined_WithFeedback,
TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_WithFeedback,
CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kFunction));
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ auto target = Parameter<Object>(Descriptor::kFunction);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
CollectCallFeedback(target, context, maybe_feedback_vector,
Unsigned(ChangeInt32ToIntPtr(slot)));
TailCallBuiltin(Builtins::kCall_ReceiverIsNotNullOrUndefined, context, target,
@@ -95,13 +93,12 @@ TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_WithFeedback,
}
TF_BUILTIN(Call_ReceiverIsAny_WithFeedback, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kFunction));
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ auto target = Parameter<Object>(Descriptor::kFunction);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
CollectCallFeedback(target, context, maybe_feedback_vector,
Unsigned(ChangeInt32ToIntPtr(slot)));
TailCallBuiltin(Builtins::kCall_ReceiverIsAny, context, target, argc);
@@ -425,46 +422,44 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
}
TF_BUILTIN(CallWithArrayLike, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ auto target = Parameter<Object>(Descriptor::kTarget);
base::Optional<TNode<Object>> new_target = base::nullopt;
- TNode<Object> arguments_list = CAST(Parameter(Descriptor::kArgumentsList));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto arguments_list = Parameter<Object>(Descriptor::kArgumentsList);
+ auto context = Parameter<Context>(Descriptor::kContext);
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}
TF_BUILTIN(CallWithArrayLike_WithFeedback, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ auto target = Parameter<Object>(Descriptor::kTarget);
base::Optional<TNode<Object>> new_target = base::nullopt;
- TNode<Object> arguments_list = CAST(Parameter(Descriptor::kArgumentsList));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ auto arguments_list = Parameter<Object>(Descriptor::kArgumentsList);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
CollectCallFeedback(target, context, maybe_feedback_vector,
Unsigned(ChangeInt32ToIntPtr(slot)));
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}
TF_BUILTIN(CallWithSpread, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ auto target = Parameter<Object>(Descriptor::kTarget);
base::Optional<TNode<Object>> new_target = base::nullopt;
- TNode<Object> spread = CAST(Parameter(Descriptor::kSpread));
- TNode<Int32T> args_count =
- UncheckedCast<Int32T>(Parameter(Descriptor::kArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto spread = Parameter<Object>(Descriptor::kSpread);
+ auto args_count = UncheckedParameter<Int32T>(Descriptor::kArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
TF_BUILTIN(CallWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ auto target = Parameter<Object>(Descriptor::kTarget);
base::Optional<TNode<Object>> new_target = base::nullopt;
- TNode<Object> spread = CAST(Parameter(Descriptor::kSpread));
- TNode<Int32T> args_count =
- UncheckedCast<Int32T>(Parameter(Descriptor::kArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ auto spread = Parameter<Object>(Descriptor::kSpread);
+ auto args_count = UncheckedParameter<Int32T>(Descriptor::kArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
CollectCallFeedback(target, context, maybe_feedback_vector,
Unsigned(ChangeInt32ToIntPtr(slot)));
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
@@ -647,8 +642,7 @@ void CallOrConstructBuiltinsAssembler::CallFunctionTemplate(
function_template_info, FunctionTemplateInfo::kCallCodeOffset);
TNode<Foreign> foreign = LoadObjectField<Foreign>(
call_handler_info, CallHandlerInfo::kJsCallbackOffset);
- TNode<RawPtrT> callback =
- DecodeExternalPointer(LoadForeignForeignAddress(foreign));
+ TNode<RawPtrT> callback = LoadForeignForeignAddressPtr(foreign);
TNode<Object> call_data =
LoadObjectField<Object>(call_handler_info, CallHandlerInfo::kDataOffset);
TailCallStub(CodeFactory::CallApiCallback(isolate()), context, callback, argc,
@@ -656,33 +650,30 @@ void CallOrConstructBuiltinsAssembler::CallFunctionTemplate(
}
TF_BUILTIN(CallFunctionTemplate_CheckAccess, CallOrConstructBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<FunctionTemplateInfo> function_template_info =
- CAST(Parameter(Descriptor::kFunctionTemplateInfo));
- TNode<IntPtrT> argc =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto function_template_info = UncheckedParameter<FunctionTemplateInfo>(
+ Descriptor::kFunctionTemplateInfo);
+ auto argc = UncheckedParameter<IntPtrT>(Descriptor::kArgumentsCount);
CallFunctionTemplate(CallFunctionTemplateMode::kCheckAccess,
function_template_info, argc, context);
}
TF_BUILTIN(CallFunctionTemplate_CheckCompatibleReceiver,
CallOrConstructBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<FunctionTemplateInfo> function_template_info =
- CAST(Parameter(Descriptor::kFunctionTemplateInfo));
- TNode<IntPtrT> argc =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto function_template_info = UncheckedParameter<FunctionTemplateInfo>(
+ Descriptor::kFunctionTemplateInfo);
+ auto argc = UncheckedParameter<IntPtrT>(Descriptor::kArgumentsCount);
CallFunctionTemplate(CallFunctionTemplateMode::kCheckCompatibleReceiver,
function_template_info, argc, context);
}
TF_BUILTIN(CallFunctionTemplate_CheckAccessAndCompatibleReceiver,
CallOrConstructBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<FunctionTemplateInfo> function_template_info =
- CAST(Parameter(Descriptor::kFunctionTemplateInfo));
- TNode<IntPtrT> argc =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto function_template_info = UncheckedParameter<FunctionTemplateInfo>(
+ Descriptor::kFunctionTemplateInfo);
+ auto argc = UncheckedParameter<IntPtrT>(Descriptor::kArgumentsCount);
CallFunctionTemplate(
CallFunctionTemplateMode::kCheckAccessAndCompatibleReceiver,
function_template_info, argc, context);
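
Note: the hunks above are the heart of this file's change: CAST(Parameter(Descriptor::kX)) becomes the typed accessor Parameter<X>(Descriptor::kX), and UncheckedCast<Int32T>(Parameter(...)) becomes UncheckedParameter<Int32T>(...), folding the downcast into the parameter read. A minimal self-contained sketch of that checked-versus-unchecked split; all names below (AssemblerModel and friends) are illustrative stand-ins, not V8's API:

#include <cassert>

struct Node { virtual ~Node() = default; };
struct Object : Node {};
struct Context : Object {};

class AssemblerModel {
 public:
  explicit AssemblerModel(Node** params) : params_(params) {}

  // Like Parameter<T>(i), i.e. the old CAST(Parameter(i)): downcast with a
  // (debug-time) type check.
  template <typename T>
  T* Parameter(int i) {
    T* typed = dynamic_cast<T*>(params_[i]);
    assert(typed != nullptr);
    return typed;
  }

  // Like UncheckedParameter<T>(i): the caller vouches for the type and no
  // check is emitted.
  template <typename T>
  T* UncheckedParameter(int i) {
    return static_cast<T*>(params_[i]);
  }

 private:
  Node** params_;
};

int main() {
  Context ctx;
  Node* params[] = {&ctx};
  AssemblerModel assembler(params);
  Context* checked = assembler.Parameter<Context>(0);
  Context* unchecked = assembler.UncheckedParameter<Context>(0);
  return (checked == unchecked) ? 0 : 1;
}
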
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index 63e4d7a572..5b7807ed4a 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -53,22 +53,6 @@ BUILTIN(CallSitePrototypeGetColumnNumber) {
return PositiveNumberOrNull(it.Frame()->GetColumnNumber(), isolate);
}
-BUILTIN(CallSitePrototypeGetEnclosingColumnNumber) {
- HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getEnclosingColumnNumber");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return PositiveNumberOrNull(it.Frame()->GetEnclosingColumnNumber(), isolate);
-}
-
-BUILTIN(CallSitePrototypeGetEnclosingLineNumber) {
- HandleScope scope(isolate);
- CHECK_CALLSITE(recv, "getEnclosingLineNumber");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- return PositiveNumberOrNull(it.Frame()->GetEnclosingLineNumber(), isolate);
-}
-
BUILTIN(CallSitePrototypeGetEvalOrigin) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "getEvalOrigin");
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index 9769d785b5..9046c7d008 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -850,20 +850,20 @@ TNode<HeapObject> CollectionsBuiltinsAssembler::AllocateTable(
}
TF_BUILTIN(MapConstructor, CollectionsBuiltinsAssembler) {
- TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
+ auto new_target = Parameter<Object>(Descriptor::kJSNewTarget);
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
GenerateConstructor(kMap, isolate()->factory()->Map_string(), new_target,
argc, context);
}
TF_BUILTIN(SetConstructor, CollectionsBuiltinsAssembler) {
- TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
+ auto new_target = Parameter<Object>(Descriptor::kJSNewTarget);
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
GenerateConstructor(kSet, isolate()->factory()->Set_string(), new_target,
argc, context);
@@ -1160,8 +1160,8 @@ TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList(
}
TF_BUILTIN(MapIteratorToList, CollectionsBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSMapIterator> iterator = CAST(Parameter(Descriptor::kSource));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterator = Parameter<JSMapIterator>(Descriptor::kSource);
Return(MapIteratorToList(context, iterator));
}
@@ -1247,8 +1247,8 @@ TNode<JSArray> CollectionsBuiltinsAssembler::SetOrSetIteratorToList(
}
TF_BUILTIN(SetOrSetIteratorToList, CollectionsBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> object = CAST(Parameter(Descriptor::kSource));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto object = Parameter<HeapObject>(Descriptor::kSource);
Return(SetOrSetIteratorToList(context, object));
}
@@ -1421,8 +1421,8 @@ void CollectionsBuiltinsAssembler::SameValueZeroHeapNumber(
}
TF_BUILTIN(OrderedHashTableHealIndex, CollectionsBuiltinsAssembler) {
- TNode<HeapObject> table = CAST(Parameter(Descriptor::kTable));
- TNode<Smi> index = CAST(Parameter(Descriptor::kIndex));
+ auto table = Parameter<HeapObject>(Descriptor::kTable);
+ auto index = Parameter<Smi>(Descriptor::kIndex);
Label return_index(this), return_zero(this);
// Check if we need to update the {index}.
@@ -1561,9 +1561,9 @@ CollectionsBuiltinsAssembler::NextSkipHoles(TNode<TableType> table,
}
TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.get");
@@ -1587,9 +1587,9 @@ TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(MapPrototypeHas, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.has");
@@ -1628,10 +1628,10 @@ const TNode<Object> CollectionsBuiltinsAssembler::NormalizeNumberKey(
}
TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ const auto value = Parameter<Object>(Descriptor::kValue);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.set");
@@ -1746,9 +1746,9 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashMapNewEntry(
}
TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
"Map.prototype.delete");
@@ -1805,9 +1805,9 @@ TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.add");
@@ -1914,9 +1914,9 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashSetNewEntry(
}
TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
"Set.prototype.delete");
@@ -1969,8 +1969,8 @@ TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(MapPrototypeEntries, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
"Map.prototype.entries");
Return(AllocateJSCollectionIterator<JSMapIterator>(
@@ -1978,8 +1978,8 @@ TF_BUILTIN(MapPrototypeEntries, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(MapPrototypeGetSize, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
"get Map.prototype.size");
const TNode<OrderedHashMap> table =
@@ -1989,9 +1989,8 @@ TF_BUILTIN(MapPrototypeGetSize, CollectionsBuiltinsAssembler) {
TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
const char* const kMethodName = "Map.prototype.forEach";
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
+ const auto context = Parameter<Context>(Descriptor::kContext);
CodeStubArguments args(this, argc);
const TNode<Object> receiver = args.GetReceiver();
const TNode<Object> callback = args.GetOptionalArgumentValue(0);
@@ -2051,16 +2050,16 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(MapPrototypeKeys, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.keys");
Return(AllocateJSCollectionIterator<JSMapIterator>(
context, Context::MAP_KEY_ITERATOR_MAP_INDEX, CAST(receiver)));
}
TF_BUILTIN(MapPrototypeValues, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
"Map.prototype.values");
Return(AllocateJSCollectionIterator<JSMapIterator>(
@@ -2069,8 +2068,8 @@ TF_BUILTIN(MapPrototypeValues, CollectionsBuiltinsAssembler) {
TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
const char* const kMethodName = "Map Iterator.prototype.next";
- const TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto maybe_receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
// Ensure that {maybe_receiver} is actually a JSMapIterator.
Label if_receiver_valid(this), if_receiver_invalid(this, Label::kDeferred);
@@ -2145,9 +2144,9 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.has");
@@ -2206,8 +2205,8 @@ TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(SetPrototypeEntries, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
"Set.prototype.entries");
Return(AllocateJSCollectionIterator<JSSetIterator>(
@@ -2215,8 +2214,8 @@ TF_BUILTIN(SetPrototypeEntries, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(SetPrototypeGetSize, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
"get Set.prototype.size");
const TNode<OrderedHashSet> table =
@@ -2226,9 +2225,8 @@ TF_BUILTIN(SetPrototypeGetSize, CollectionsBuiltinsAssembler) {
TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) {
const char* const kMethodName = "Set.prototype.forEach";
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
+ const auto context = Parameter<Context>(Descriptor::kContext);
CodeStubArguments args(this, argc);
const TNode<Object> receiver = args.GetReceiver();
const TNode<Object> callback = args.GetOptionalArgumentValue(0);
@@ -2281,8 +2279,8 @@ TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) {
}
TF_BUILTIN(SetPrototypeValues, CollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
"Set.prototype.values");
Return(AllocateJSCollectionIterator<JSSetIterator>(
@@ -2291,8 +2289,8 @@ TF_BUILTIN(SetPrototypeValues, CollectionsBuiltinsAssembler) {
TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
const char* const kMethodName = "Set Iterator.prototype.next";
- const TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto maybe_receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto context = Parameter<Context>(Descriptor::kContext);
// Ensure that {maybe_receiver} is actually a JSSetIterator.
Label if_receiver_valid(this), if_receiver_invalid(this, Label::kDeferred);
@@ -2404,8 +2402,8 @@ void CollectionsBuiltinsAssembler::TryLookupOrderedHashTableIndex(
}
TF_BUILTIN(FindOrderedHashMapEntry, CollectionsBuiltinsAssembler) {
- const TNode<OrderedHashMap> table = CAST(Parameter(Descriptor::kTable));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ const auto table = Parameter<OrderedHashMap>(Descriptor::kTable);
+ const auto key = Parameter<Object>(Descriptor::kKey);
TVARIABLE(IntPtrT, entry_start_position, IntPtrConstant(0));
Label entry_found(this), not_found(this);
@@ -2698,28 +2696,28 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::ValueIndexFromKeyIndex(
}
TF_BUILTIN(WeakMapConstructor, WeakCollectionsBuiltinsAssembler) {
- TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
+ auto new_target = Parameter<Object>(Descriptor::kJSNewTarget);
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
GenerateConstructor(kWeakMap, isolate()->factory()->WeakMap_string(),
new_target, argc, context);
}
TF_BUILTIN(WeakSetConstructor, WeakCollectionsBuiltinsAssembler) {
- TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
+ auto new_target = Parameter<Object>(Descriptor::kJSNewTarget);
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
GenerateConstructor(kWeakSet, isolate()->factory()->WeakSet_string(),
new_target, argc, context);
}
TF_BUILTIN(WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) {
- TNode<EphemeronHashTable> table = CAST(Parameter(Descriptor::kTable));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ auto table = Parameter<EphemeronHashTable>(Descriptor::kTable);
+ auto key = Parameter<Object>(Descriptor::kKey);
Label if_not_found(this);
@@ -2736,9 +2734,9 @@ TF_BUILTIN(WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) {
}
TF_BUILTIN(WeakMapGet, WeakCollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
Label return_undefined(this);
@@ -2758,9 +2756,9 @@ TF_BUILTIN(WeakMapGet, WeakCollectionsBuiltinsAssembler) {
}
TF_BUILTIN(WeakMapPrototypeHas, WeakCollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
Label return_false(this);
@@ -2782,9 +2780,9 @@ TF_BUILTIN(WeakMapPrototypeHas, WeakCollectionsBuiltinsAssembler) {
// Helper that removes the entry with a given key from the backing store
// (EphemeronHashTable) of a WeakMap or WeakSet.
TF_BUILTIN(WeakCollectionDelete, WeakCollectionsBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSWeakCollection> collection = CAST(Parameter(Descriptor::kCollection));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto collection = Parameter<JSWeakCollection>(Descriptor::kCollection);
+ auto key = Parameter<Object>(Descriptor::kKey);
Label call_runtime(this), if_not_found(this);
@@ -2812,10 +2810,10 @@ TF_BUILTIN(WeakCollectionDelete, WeakCollectionsBuiltinsAssembler) {
// Helper that sets the key and value to the backing store (EphemeronHashTable)
// of a WeakMap or WeakSet.
TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSWeakCollection> collection = CAST(Parameter(Descriptor::kCollection));
- TNode<JSReceiver> key = CAST(Parameter(Descriptor::kKey));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto collection = Parameter<JSWeakCollection>(Descriptor::kCollection);
+ auto key = Parameter<JSReceiver>(Descriptor::kKey);
+ auto value = Parameter<Object>(Descriptor::kValue);
CSA_ASSERT(this, IsJSReceiver(key));
@@ -2862,9 +2860,9 @@ TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) {
}
TF_BUILTIN(WeakMapPrototypeDelete, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kKey);
ThrowIfNotInstanceType(context, receiver, JS_WEAK_MAP_TYPE,
"WeakMap.prototype.delete");
@@ -2873,10 +2871,10 @@ TF_BUILTIN(WeakMapPrototypeDelete, CodeStubAssembler) {
}
TF_BUILTIN(WeakMapPrototypeSet, WeakCollectionsBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ auto value = Parameter<Object>(Descriptor::kValue);
ThrowIfNotInstanceType(context, receiver, JS_WEAK_MAP_TYPE,
"WeakMap.prototype.set");
@@ -2892,9 +2890,9 @@ TF_BUILTIN(WeakMapPrototypeSet, WeakCollectionsBuiltinsAssembler) {
}
TF_BUILTIN(WeakSetPrototypeAdd, WeakCollectionsBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto value = Parameter<Object>(Descriptor::kValue);
ThrowIfNotInstanceType(context, receiver, JS_WEAK_SET_TYPE,
"WeakSet.prototype.add");
@@ -2910,9 +2908,9 @@ TF_BUILTIN(WeakSetPrototypeAdd, WeakCollectionsBuiltinsAssembler) {
}
TF_BUILTIN(WeakSetPrototypeDelete, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto value = Parameter<Object>(Descriptor::kValue);
ThrowIfNotInstanceType(context, receiver, JS_WEAK_SET_TYPE,
"WeakSet.prototype.delete");
@@ -2922,9 +2920,9 @@ TF_BUILTIN(WeakSetPrototypeDelete, CodeStubAssembler) {
}
TF_BUILTIN(WeakSetPrototypeHas, WeakCollectionsBuiltinsAssembler) {
- const TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto key = Parameter<Object>(Descriptor::kKey);
+ const auto context = Parameter<Context>(Descriptor::kContext);
Label return_false(this);
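
Note: nearly every Map/Set builtin in this file opens the same way: pull receiver, key, and context off the descriptor, then call ThrowIfNotInstanceType with the method name so that a bad receiver raises a TypeError naming the method. A tiny stand-alone analog of that guard (TaggedObject and the enum are hypothetical, not V8 types):

#include <stdexcept>
#include <string>

enum InstanceType { JS_MAP_TYPE, JS_SET_TYPE, OTHER_TYPE };
struct TaggedObject { InstanceType type; };

void ThrowIfNotInstanceType(const TaggedObject& receiver, InstanceType expected,
                            const char* method_name) {
  if (receiver.type != expected) {
    throw std::runtime_error(std::string("TypeError: Method ") + method_name +
                             " called on incompatible receiver");
  }
}

int main() {
  TaggedObject map{JS_MAP_TYPE};
  ThrowIfNotInstanceType(map, JS_MAP_TYPE, "Map.prototype.get");  // passes
  try {
    TaggedObject plain{OTHER_TYPE};
    ThrowIfNotInstanceType(plain, JS_SET_TYPE, "Set.prototype.has");
  } catch (const std::exception&) {
    return 0;  // the expected TypeError path
  }
  return 1;
}
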
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index ecab531e2c..3cd4503471 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -37,14 +37,13 @@ void Builtins::Generate_ConstructFunctionForwardVarargs(MacroAssembler* masm) {
}
TF_BUILTIN(Construct_WithFeedback, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ auto target = Parameter<Object>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
TVARIABLE(AllocationSite, allocation_site);
Label if_construct_generic(this), if_construct_array(this);
@@ -62,22 +61,22 @@ TF_BUILTIN(Construct_WithFeedback, CallOrConstructBuiltinsAssembler) {
}
TF_BUILTIN(ConstructWithArrayLike, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Object> arguments_list = CAST(Parameter(Descriptor::kArgumentsList));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto target = Parameter<Object>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto arguments_list = Parameter<Object>(Descriptor::kArgumentsList);
+ auto context = Parameter<Context>(Descriptor::kContext);
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}
TF_BUILTIN(ConstructWithArrayLike_WithFeedback,
CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Object> arguments_list = CAST(Parameter(Descriptor::kArgumentsList));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ auto target = Parameter<Object>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto arguments_list = Parameter<Object>(Descriptor::kArgumentsList);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
TVARIABLE(AllocationSite, allocation_site);
Label if_construct_generic(this), if_construct_array(this);
@@ -94,25 +93,25 @@ TF_BUILTIN(ConstructWithArrayLike_WithFeedback,
}
TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Object> spread = CAST(Parameter(Descriptor::kSpread));
- TNode<Int32T> args_count =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto target = Parameter<Object>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto spread = Parameter<Object>(Descriptor::kSpread);
+ auto args_count =
+ UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
TF_BUILTIN(ConstructWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) {
- TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Object> spread = CAST(Parameter(Descriptor::kSpread));
- TNode<Int32T> args_count =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ auto target = Parameter<Object>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto spread = Parameter<Object>(Descriptor::kSpread);
+ auto args_count =
+ UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<Int32T>(Descriptor::kSlot);
TVARIABLE(AllocationSite, allocation_site);
Label if_construct_generic(this), if_construct_array(this);
@@ -131,11 +130,10 @@ TF_BUILTIN(ConstructWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) {
using Node = compiler::Node;
TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
- TNode<SharedFunctionInfo> shared_function_info =
- CAST(Parameter(Descriptor::kSharedFunctionInfo));
- TNode<FeedbackCell> feedback_cell =
- CAST(Parameter(Descriptor::kFeedbackCell));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto shared_function_info =
+ Parameter<SharedFunctionInfo>(Descriptor::kSharedFunctionInfo);
+ auto feedback_cell = Parameter<FeedbackCell>(Descriptor::kFeedbackCell);
+ auto context = Parameter<Context>(Descriptor::kContext);
IncrementCounter(isolate()->counters()->fast_new_closure_total(), 1);
@@ -219,9 +217,9 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
}
TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSFunction> target = CAST(Parameter(Descriptor::kTarget));
- TNode<JSReceiver> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto target = Parameter<JSFunction>(Descriptor::kTarget);
+ auto new_target = Parameter<JSReceiver>(Descriptor::kNewTarget);
Label call_runtime(this);
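
Note: the *_WithFeedback builtins above all funnel the raw Int32 slot through Unsigned(ChangeInt32ToIntPtr(slot)) before the feedback vector is touched, widening the 32-bit machine value to an unsigned word-sized index. A plain C++ analog of just that widening step (WidenSlot is a made-up name):

#include <cstdint>
#include <vector>

uintptr_t WidenSlot(int32_t slot) {
  // Sign-extend to pointer width (ChangeInt32ToIntPtr), then treat the
  // result as unsigned (Unsigned).
  return static_cast<uintptr_t>(static_cast<intptr_t>(slot));
}

int main() {
  std::vector<int> feedback(8, 0);
  feedback[WidenSlot(3)] += 1;  // record one event at slot 3
  return feedback[3] == 1 ? 0 : 1;
}
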
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index cf56c5366c..35865c70cb 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -14,22 +14,22 @@ namespace internal {
// ES6 section 7.1.3 ToNumber ( argument )
TF_BUILTIN(ToNumber, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto input = Parameter<Object>(Descriptor::kArgument);
Return(ToNumber(context, input));
}
TF_BUILTIN(PlainPrimitiveToNumber, CodeStubAssembler) {
- TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
+ auto input = Parameter<Object>(Descriptor::kArgument);
Return(PlainPrimitiveToNumber(input));
}
// Like ToNumber, but also converts BigInts.
TF_BUILTIN(ToNumberConvertBigInt, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto input = Parameter<Object>(Descriptor::kArgument);
Return(ToNumber(context, input, BigIntHandling::kConvertToNumber));
}
@@ -38,7 +38,7 @@ TF_BUILTIN(ToNumberConvertBigInt, CodeStubAssembler) {
// Requires parameter on stack so that it can be used as a continuation from a
// LAZY deopt.
TF_BUILTIN(ToBooleanLazyDeoptContinuation, CodeStubAssembler) {
- TNode<Object> value = CAST(Parameter(Descriptor::kArgument));
+ auto value = Parameter<Object>(Descriptor::kArgument);
Label return_true(this), return_false(this);
BranchIfToBooleanIsTrue(value, &return_true, &return_false);
@@ -52,7 +52,7 @@ TF_BUILTIN(ToBooleanLazyDeoptContinuation, CodeStubAssembler) {
// ES6 section 12.5.5 typeof operator
TF_BUILTIN(Typeof, CodeStubAssembler) {
- TNode<Object> object = CAST(Parameter(Descriptor::kObject));
+ auto object = Parameter<Object>(Descriptor::kObject);
Return(Typeof(object));
}
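
Note: this file gives one entry point per conversion flavor; per the comment above, the only difference between ToNumber and ToNumberConvertBigInt is the BigIntHandling::kConvertToNumber mode. A compact model of that dispatch, using std::variant as a stand-in for a tagged JS value (the types here are illustrative only):

#include <stdexcept>
#include <variant>

using Value = std::variant<double, long long>;  // long long models a BigInt

enum class BigIntHandling { kThrow, kConvertToNumber };

double ToNumber(const Value& v, BigIntHandling mode = BigIntHandling::kThrow) {
  if (std::holds_alternative<double>(v)) return std::get<double>(v);
  if (mode == BigIntHandling::kConvertToNumber) {
    return static_cast<double>(std::get<long long>(v));  // convert the BigInt
  }
  throw std::runtime_error("TypeError: Cannot convert a BigInt to a number");
}

int main() {
  Value number = 1.5;
  Value bigint = 42LL;
  double converted = ToNumber(bigint, BigIntHandling::kConvertToNumber);
  return (ToNumber(number) == 1.5 && converted == 42.0) ? 0 : 1;
}
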
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index 1718ea97ad..3ae331f5d7 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -90,6 +90,7 @@ BUILTIN(DataViewConstructor) {
isolate, result,
JSObject::New(target, new_target, Handle<AllocationSite>::null()));
for (int i = 0; i < ArrayBufferView::kEmbedderFieldCount; ++i) {
+ // TODO(v8:10391, saelo): Handle external pointers in EmbedderDataSlot
Handle<JSDataView>::cast(result)->SetEmbedderField(i, Smi::zero());
}
@@ -101,6 +102,7 @@ BUILTIN(DataViewConstructor) {
// 13. Set O's [[ByteOffset]] internal slot to offset.
Handle<JSDataView>::cast(result)->set_byte_offset(view_byte_offset);
+ Handle<JSDataView>::cast(result)->AllocateExternalPointerEntries(isolate);
Handle<JSDataView>::cast(result)->set_data_pointer(
isolate,
static_cast<uint8_t*>(array_buffer->backing_store()) + view_byte_offset);
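
Note: the added AllocateExternalPointerEntries call lines up with the TODO above referencing v8:10391 (external pointer handling): raw off-heap pointers such as the DataView's data pointer are stored behind a per-isolate indirection, so the entry has to exist before set_data_pointer writes through it. A rough model of that ordering, assuming a simple table-of-pointers scheme; IsolateModel and DataViewModel are invented for illustration:

#include <cstdint>
#include <vector>

struct IsolateModel {
  std::vector<void*> external_pointer_table;
};

struct DataViewModel {
  uint32_t data_pointer_handle = 0;

  void AllocateExternalPointerEntries(IsolateModel* isolate) {
    // Reserve the table slot first...
    data_pointer_handle =
        static_cast<uint32_t>(isolate->external_pointer_table.size());
    isolate->external_pointer_table.push_back(nullptr);
  }

  void set_data_pointer(IsolateModel* isolate, void* p) {
    // ...so the pointer write has somewhere to go.
    isolate->external_pointer_table[data_pointer_handle] = p;
  }
};

int main() {
  IsolateModel isolate;
  DataViewModel view;
  uint8_t backing[16];
  view.AllocateExternalPointerEntries(&isolate);
  view.set_data_pointer(&isolate, backing + 4);  // backing_store + byte_offset
  return isolate.external_pointer_table[view.data_pointer_handle] == backing + 4
             ? 0
             : 1;
}
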
diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc
index 05fcc53f12..6d43013501 100644
--- a/deps/v8/src/builtins/builtins-date-gen.cc
+++ b/deps/v8/src/builtins/builtins-date-gen.cc
@@ -69,123 +69,123 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(
}
TF_BUILTIN(DatePrototypeGetDate, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kDay);
}
TF_BUILTIN(DatePrototypeGetDay, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kWeekday);
}
TF_BUILTIN(DatePrototypeGetFullYear, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kYear);
}
TF_BUILTIN(DatePrototypeGetHours, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kHour);
}
TF_BUILTIN(DatePrototypeGetMilliseconds, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kMillisecond);
}
TF_BUILTIN(DatePrototypeGetMinutes, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kMinute);
}
TF_BUILTIN(DatePrototypeGetMonth, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kMonth);
}
TF_BUILTIN(DatePrototypeGetSeconds, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kSecond);
}
TF_BUILTIN(DatePrototypeGetTime, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kDateValue);
}
TF_BUILTIN(DatePrototypeGetTimezoneOffset, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kTimezoneOffset);
}
TF_BUILTIN(DatePrototypeGetUTCDate, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kDayUTC);
}
TF_BUILTIN(DatePrototypeGetUTCDay, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kWeekdayUTC);
}
TF_BUILTIN(DatePrototypeGetUTCFullYear, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kYearUTC);
}
TF_BUILTIN(DatePrototypeGetUTCHours, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kHourUTC);
}
TF_BUILTIN(DatePrototypeGetUTCMilliseconds, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kMillisecondUTC);
}
TF_BUILTIN(DatePrototypeGetUTCMinutes, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kMinuteUTC);
}
TF_BUILTIN(DatePrototypeGetUTCMonth, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kMonthUTC);
}
TF_BUILTIN(DatePrototypeGetUTCSeconds, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kSecondUTC);
}
TF_BUILTIN(DatePrototypeValueOf, DateBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Generate_DatePrototype_GetField(context, receiver, JSDate::kDateValue);
}
TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> hint = CAST(Parameter(Descriptor::kHint));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto hint = Parameter<Object>(Descriptor::kHint);
// Check if the {receiver} is actually a JSReceiver.
Label receiver_is_invalid(this, Label::kDeferred);
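
Note: every Date getter in this file is a one-line wrapper that forwards a field index (JSDate::kDay, kHour, kDateValue, and so on, plus the UTC variants) to the shared Generate_DatePrototype_GetField body; only the parameter-reading boilerplate changed. The shape of that pattern, in a self-contained sketch with made-up types:

#include <array>

enum DateField { kDay, kHour, kDateValue, kNumFields };

struct DateModel {
  std::array<double, kNumFields> fields{};
};

double GetField(const DateModel& date, DateField field) {
  return date.fields[field];
}

// Each "builtin" merely fixes the field argument:
double PrototypeGetDate(const DateModel& d) { return GetField(d, kDay); }
double PrototypeGetHours(const DateModel& d) { return GetField(d, kHour); }
double PrototypeValueOf(const DateModel& d) { return GetField(d, kDateValue); }

int main() {
  DateModel date;
  date.fields[kDay] = 17;
  return (PrototypeGetDate(date) == 17 && PrototypeGetHours(date) == 0) ? 0 : 1;
}
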
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 10bbd12f8a..a30520d150 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -138,6 +138,10 @@ namespace internal {
TFC(CompileLazyDeoptimizedCode, JSTrampoline) \
TFC(InstantiateAsmJs, JSTrampoline) \
ASM(NotifyDeoptimized, Dummy) \
+ ASM(DeoptimizationEntry_Eager, DeoptimizationEntry) \
+ ASM(DeoptimizationEntry_Soft, DeoptimizationEntry) \
+ ASM(DeoptimizationEntry_Bailout, DeoptimizationEntry) \
+ ASM(DeoptimizationEntry_Lazy, DeoptimizationEntry) \
\
/* Trampolines called when returning from a deoptimization that expects */ \
/* to continue in a JavaScript builtin to finish the functionality of a */ \
@@ -192,7 +196,6 @@ namespace internal {
TFC(PlainPrimitiveToNumber, TypeConversionNoContext) \
TFC(ToNumberConvertBigInt, TypeConversion) \
TFC(Typeof, Typeof) \
- TFC(GetSuperConstructor, Typeof) \
TFC(BigIntToI64, BigIntToI64) \
TFC(BigIntToI32Pair, BigIntToI32Pair) \
TFC(I64ToBigInt, I64ToBigInt) \
@@ -364,8 +367,6 @@ namespace internal {
\
/* CallSite */ \
CPP(CallSitePrototypeGetColumnNumber) \
- CPP(CallSitePrototypeGetEnclosingColumnNumber) \
- CPP(CallSitePrototypeGetEnclosingLineNumber) \
CPP(CallSitePrototypeGetEvalOrigin) \
CPP(CallSitePrototypeGetFileName) \
CPP(CallSitePrototypeGetFunction) \
@@ -762,9 +763,6 @@ namespace internal {
TFJ(StringPrototypeSearch, 1, kReceiver, kRegexp) \
/* ES6 #sec-string.prototype.split */ \
TFJ(StringPrototypeSplit, kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrim, kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrimEnd, kDontAdaptArgumentsSentinel) \
- TFJ(StringPrototypeTrimStart, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.raw */ \
CPP(StringRaw) \
\
@@ -813,6 +811,7 @@ namespace internal {
TFS(WasmAllocateArrayWithRtt, kMap, kLength, kElementSize) \
TFC(WasmI32AtomicWait32, WasmI32AtomicWait32) \
TFC(WasmI64AtomicWait32, WasmI64AtomicWait32) \
+ TFS(WasmAllocatePair, kValue1, kValue2) \
\
/* WeakMap */ \
TFJ(WeakMapConstructor, kDontAdaptArgumentsSentinel) \
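
Note: builtins-definitions.h is an X-macro list: each TFC/TFJ/TFS/CPP/ASM line expands under whatever macro the includer supplies, which is why adding the DeoptimizationEntry_* and WasmAllocatePair entries, or deleting the CallSite and String trim entries, is a one-line change per builtin. A miniature, compilable illustration of the idiom (the real list macros carry more bookkeeping arguments; this sketch flattens every entry to a hypothetical (name, argc) shape):

#include <cstdio>

#define BUILTIN_LIST(CPP, TFS)         \
  CPP(CallSitePrototypeGetFileName, 0) \
  TFS(WasmAllocatePair, 2)

// One expansion builds an enum of builtin ids...
#define DECLARE_ENUM(name, argc) k##name,
enum BuiltinId { BUILTIN_LIST(DECLARE_ENUM, DECLARE_ENUM) kBuiltinCount };

// ...another walks the same list to print each entry.
#define PRINT_ENTRY(name, argc) std::printf("%s/%d\n", #name, argc);

int main() {
  BUILTIN_LIST(PRINT_ENTRY, PRINT_ENTRY)
  return kBuiltinCount == 2 ? 0 : 1;
}
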
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index b3fbc4fd94..23ee4da8c1 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -80,6 +80,14 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
}
}
+ bool is_code_like = true;
+ for (int i = 0; i < argc; ++i) {
+ if (!args.at(i + 1)->IsCodeLike(isolate)) {
+ is_code_like = false;
+ break;
+ }
+ }
+
// Compile the string in the constructor and not a helper so that errors
// come from here.
Handle<JSFunction> function;
@@ -88,7 +96,7 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
isolate, function,
Compiler::GetFunctionFromString(
handle(target->native_context(), isolate), source,
- ONLY_SINGLE_FUNCTION_LITERAL, parameters_end_pos),
+ ONLY_SINGLE_FUNCTION_LITERAL, parameters_end_pos, is_code_like),
Object);
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
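
Note: the new loop computes a single all-of flag: the dynamically built source is treated as code-like only when every constructor argument passes IsCodeLike, and that flag is threaded into Compiler::GetFunctionFromString. A plain fold capturing the same logic (Arg is a hypothetical stand-in for args.at(i + 1)):

#include <vector>

struct Arg {
  bool code_like;
  bool IsCodeLike() const { return code_like; }
};

bool AllCodeLike(const std::vector<Arg>& args) {
  for (const Arg& arg : args) {
    if (!arg.IsCodeLike()) return false;  // one plain string clears the flag
  }
  return true;  // vacuously true with zero args, matching the builtin's loop
}

int main() {
  std::vector<Arg> all_marked = {{true}, {true}};
  std::vector<Arg> mixed = {{true}, {false}};
  return (AllCodeLike(all_marked) && !AllCodeLike(mixed) && AllCodeLike({}))
             ? 0
             : 1;
}
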
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index 8693cd61f4..d93ab2e103 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -137,13 +137,12 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
TF_BUILTIN(AsyncModuleEvaluate, GeneratorBuiltinsAssembler) {
const int kValueArg = 0;
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
- TNode<Context> context = Cast(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
// AsyncModules act like JSAsyncFunctions. Thus we check here
// that the {receiver} is a JSAsyncFunction.
@@ -159,13 +158,12 @@ TF_BUILTIN(AsyncModuleEvaluate, GeneratorBuiltinsAssembler) {
TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) {
const int kValueArg = 0;
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
GeneratorPrototypeResume(&args, receiver, value, context,
JSGeneratorObject::kNext,
@@ -176,13 +174,12 @@ TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) {
TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) {
const int kValueArg = 0;
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> value = args.GetOptionalArgumentValue(kValueArg);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
GeneratorPrototypeResume(&args, receiver, value, context,
JSGeneratorObject::kReturn,
@@ -193,13 +190,12 @@ TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) {
TF_BUILTIN(GeneratorPrototypeThrow, GeneratorBuiltinsAssembler) {
const int kExceptionArg = 0;
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> exception = args.GetOptionalArgumentValue(kExceptionArg);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
GeneratorPrototypeResume(&args, receiver, exception, context,
JSGeneratorObject::kThrow,
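
Note: the four generator builtins above share one calling convention: read the raw argument count, wrap it in CodeStubArguments, then pull out the receiver and one optional argument that defaults to undefined when the caller passed fewer arguments. A small stand-in for that access pattern (ArgsModel is invented; strings stand in for tagged values):

#include <cstddef>
#include <string>
#include <vector>

struct ArgsModel {
  std::vector<std::string> argv;  // argv[0] plays the receiver

  const std::string& GetReceiver() const { return argv[0]; }

  std::string GetOptionalArgumentValue(std::size_t index) const {
    return (index + 1 < argv.size()) ? argv[index + 1] : "undefined";
  }
};

int main() {
  ArgsModel with_value{{"generator", "value"}};
  ArgsModel without{{"generator"}};
  return (with_value.GetOptionalArgumentValue(0) == "value" &&
          without.GetOptionalArgumentValue(0) == "undefined")
             ? 0
             : 1;
}
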
diff --git a/deps/v8/src/builtins/builtins-global-gen.cc b/deps/v8/src/builtins/builtins-global-gen.cc
index 43d30cc6b1..d33fc3c37b 100644
--- a/deps/v8/src/builtins/builtins-global-gen.cc
+++ b/deps/v8/src/builtins/builtins-global-gen.cc
@@ -11,14 +11,14 @@ namespace internal {
// ES #sec-isfinite-number
TF_BUILTIN(GlobalIsFinite, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
Label return_true(this), return_false(this);
// We might need to loop once for ToNumber conversion.
TVARIABLE(Object, var_num);
Label loop(this, &var_num);
- var_num = CAST(Parameter(Descriptor::kNumber));
+ var_num = Parameter<Object>(Descriptor::kNumber);
Goto(&loop);
BIND(&loop);
{
@@ -60,14 +60,14 @@ TF_BUILTIN(GlobalIsFinite, CodeStubAssembler) {
// ES6 #sec-isnan-number
TF_BUILTIN(GlobalIsNaN, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
Label return_true(this), return_false(this);
// We might need to loop once for ToNumber conversion.
TVARIABLE(Object, var_num);
Label loop(this, &var_num);
- var_num = CAST(Parameter(Descriptor::kNumber));
+ var_num = Parameter<Object>(Descriptor::kNumber);
Goto(&loop);
BIND(&loop);
{
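
Note: GlobalIsFinite (and GlobalIsNaN below) are written as a loop that runs at most twice: if var_num is not yet a number, the body converts it with ToNumber and jumps back to re-test. A minimal analog of that convert-then-retry shape, with std::variant standing in for a tagged value:

#include <cmath>
#include <string>
#include <variant>

using Value = std::variant<double, std::string>;

bool IsFiniteGlobal(Value v) {
  for (;;) {
    if (const double* num = std::get_if<double>(&v)) {
      return std::isfinite(*num);  // number path: classify and return
    }
    v = std::stod(std::get<std::string>(v));  // ToNumber, then loop once more
  }
}

int main() {
  bool ok = IsFiniteGlobal(1.0) && IsFiniteGlobal(std::string("2.5")) &&
            !IsFiniteGlobal(INFINITY);
  return ok ? 0 : 1;
}
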
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index 8075a597e8..3cbd626b8e 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -43,12 +43,12 @@ class HandlerBuiltinsAssembler : public CodeStubAssembler {
};
TF_BUILTIN(LoadIC_StringLength, CodeStubAssembler) {
- TNode<String> string = CAST(Parameter(Descriptor::kReceiver));
+ auto string = Parameter<String>(Descriptor::kReceiver);
Return(LoadStringLengthAsSmi(string));
}
TF_BUILTIN(LoadIC_StringWrapperLength, CodeStubAssembler) {
- TNode<JSPrimitiveWrapper> value = CAST(Parameter(Descriptor::kReceiver));
+ auto value = Parameter<JSPrimitiveWrapper>(Descriptor::kReceiver);
TNode<String> string = CAST(LoadJSPrimitiveWrapperValue(value));
Return(LoadStringLengthAsSmi(string));
}
@@ -130,13 +130,13 @@ void HandlerBuiltinsAssembler::DispatchForElementsKindTransition(
void HandlerBuiltinsAssembler::Generate_ElementsTransitionAndStore(
KeyedAccessStoreMode store_mode) {
using Descriptor = StoreTransitionDescriptor;
- TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Map> map = CAST(Parameter(Descriptor::kMap));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<FeedbackVector> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto map = Parameter<Map>(Descriptor::kMap);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<FeedbackVector>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
Comment("ElementsTransitionAndStore: store_mode=", store_mode);
@@ -262,12 +262,12 @@ void HandlerBuiltinsAssembler::DispatchByElementsKind(
void HandlerBuiltinsAssembler::Generate_StoreFastElementIC(
KeyedAccessStoreMode store_mode) {
using Descriptor = StoreWithVectorDescriptor;
- TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
Comment("StoreFastElementStub: store_mode=", store_mode);
@@ -312,11 +312,11 @@ TF_BUILTIN(StoreFastElementIC_NoTransitionHandleCOW, HandlerBuiltinsAssembler) {
}
TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) {
- TNode<JSFunction> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Name> name = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<FeedbackVector> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSFunction>(Descriptor::kReceiver);
+ auto name = Parameter<Name>(Descriptor::kName);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<FeedbackVector>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label miss(this, Label::kDeferred);
Return(LoadJSFunctionPrototype(receiver, &miss));
@@ -326,12 +326,12 @@ TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) {
}
TF_BUILTIN(StoreGlobalIC_Slow, CodeStubAssembler) {
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Name> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<FeedbackVector> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Name>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<FeedbackVector>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
@@ -340,11 +340,11 @@ TF_BUILTIN(StoreGlobalIC_Slow, CodeStubAssembler) {
}
TF_BUILTIN(KeyedLoadIC_SloppyArguments, HandlerBuiltinsAssembler) {
- TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label miss(this);
@@ -361,12 +361,12 @@ TF_BUILTIN(KeyedLoadIC_SloppyArguments, HandlerBuiltinsAssembler) {
void HandlerBuiltinsAssembler::Generate_KeyedStoreIC_SloppyArguments() {
using Descriptor = StoreWithVectorDescriptor;
- TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label miss(this);
@@ -398,11 +398,11 @@ TF_BUILTIN(KeyedStoreIC_SloppyArguments_NoTransitionHandleCOW,
}
TF_BUILTIN(LoadIndexedInterceptorIC, CodeStubAssembler) {
- TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label if_keyispositivesmi(this), if_keyisinvalid(this);
Branch(TaggedIsPositiveSmi(key), &if_keyispositivesmi, &if_keyisinvalid);
@@ -415,11 +415,11 @@ TF_BUILTIN(LoadIndexedInterceptorIC, CodeStubAssembler) {
}
TF_BUILTIN(KeyedHasIC_SloppyArguments, HandlerBuiltinsAssembler) {
- TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label miss(this);
@@ -435,11 +435,11 @@ TF_BUILTIN(KeyedHasIC_SloppyArguments, HandlerBuiltinsAssembler) {
}
TF_BUILTIN(HasIndexedInterceptorIC, CodeStubAssembler) {
- TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kName));
- TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<Smi>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label if_keyispositivesmi(this), if_keyisinvalid(this);
Branch(TaggedIsPositiveSmi(key), &if_keyispositivesmi, &if_keyisinvalid);
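The hunks above are one mechanical refactor applied throughout: `TNode<T> x = CAST(Parameter(Descriptor::kX))` becomes `auto x = Parameter<T>(Descriptor::kX)`, folding the checked cast into a typed accessor so the type is written exactly once, at the lookup. A minimal standalone sketch of the pattern follows; the names mirror the CSA API, but the bodies are stand-ins, not V8's code.

    #include <cassert>
    #include <cstdio>

    struct Node { int tag; };                 // stand-in for an untyped IR node
    template <typename T>
    struct TNode { Node* node; };             // stand-in for a typed node handle

    struct Smi {};
    struct Context {};

    Node* UntypedParameter(int index) {       // raw lookup, as Parameter() was
      static Node params[4] = {{0}, {1}, {2}, {3}};
      return &params[index];
    }

    template <typename T>
    TNode<T> Parameter(int index) {           // typed accessor: the (checked) cast
      Node* n = UntypedParameter(index);      // lives here once, so call sites can
      assert(n != nullptr);                   // use `auto` without restating T
      return TNode<T>{n};
    }

    int main() {
      auto slot = Parameter<Smi>(0);          // reads like the rewritten call sites
      auto context = Parameter<Context>(3);
      std::printf("%d %d\n", slot.node->tag, context.node->tag);
    }

Later hunks use the same idea for the unchecked forms: UncheckedParameter<T> replaces UncheckedCast<T>(Parameter(...)), and UntypedParameter keeps the raw node where a BitcastTaggedToWord follows anyway.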
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 13698758e6..4108d897f6 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -30,7 +30,7 @@ void Builtins::Generate_StackCheck(MacroAssembler* masm) {
// TurboFan support builtins.
TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) {
- TNode<JSObject> js_object = CAST(Parameter(Descriptor::kObject));
+ auto js_object = Parameter<JSObject>(Descriptor::kObject);
// Load the {object}s elements.
TNode<FixedArrayBase> source =
@@ -42,8 +42,8 @@ TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) {
}
TF_BUILTIN(GrowFastDoubleElements, CodeStubAssembler) {
- TNode<JSObject> object = CAST(Parameter(Descriptor::kObject));
- TNode<Smi> key = CAST(Parameter(Descriptor::kKey));
+ auto object = Parameter<JSObject>(Descriptor::kObject);
+ auto key = Parameter<Smi>(Descriptor::kKey);
Label runtime(this, Label::kDeferred);
TNode<FixedArrayBase> elements = LoadElements(object);
@@ -57,8 +57,8 @@ TF_BUILTIN(GrowFastDoubleElements, CodeStubAssembler) {
}
TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
- TNode<JSObject> object = CAST(Parameter(Descriptor::kObject));
- TNode<Smi> key = CAST(Parameter(Descriptor::kKey));
+ auto object = Parameter<JSObject>(Descriptor::kObject);
+ auto key = Parameter<Smi>(Descriptor::kKey);
Label runtime(this, Label::kDeferred);
TNode<FixedArrayBase> elements = LoadElements(object);
@@ -72,17 +72,17 @@ TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
}
TF_BUILTIN(ReturnReceiver, CodeStubAssembler) {
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Return(receiver);
}
TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) {
Label tailcall_to_shared(this);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
- TNode<Int32T> arg_count =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- TNode<JSFunction> function = CAST(Parameter(Descriptor::kJSTarget));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto new_target = Parameter<Object>(Descriptor::kJSNewTarget);
+ auto arg_count =
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
+ auto function = Parameter<JSFunction>(Descriptor::kJSTarget);
// Check break-at-entry flag on the debug info.
TNode<SharedFunctionInfo> shared =
@@ -311,8 +311,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
Label incremental_wb(this);
Label exit(this);
- TNode<Smi> remembered_set =
- UncheckedCast<Smi>(Parameter(Descriptor::kRememberedSet));
+ auto remembered_set = UncheckedParameter<Smi>(Descriptor::kRememberedSet);
Branch(ShouldEmitRememberSet(remembered_set), &generational_wb,
&incremental_wb);
@@ -327,7 +326,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
// `kPointersToHereAreInterestingMask` in
// `src/compiler/<arch>/code-generator-<arch>.cc` before calling this stub,
// which serves as the cross generation checking.
- TNode<IntPtrT> slot = UncheckedCast<IntPtrT>(Parameter(Descriptor::kSlot));
+ auto slot = UncheckedParameter<IntPtrT>(Descriptor::kSlot);
Branch(IsMarking(), &test_old_to_young_flags, &store_buffer_exit);
BIND(&test_old_to_young_flags);
@@ -343,7 +342,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
GotoIfNot(value_is_young, &incremental_wb);
TNode<IntPtrT> object =
- BitcastTaggedToWord(Parameter(Descriptor::kObject));
+ BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
TNode<BoolT> object_is_young =
IsPageFlagSet(object, MemoryChunk::kIsInYoungGenerationMask);
Branch(object_is_young, &incremental_wb, &store_buffer_incremental_wb);
@@ -351,17 +350,17 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
BIND(&store_buffer_exit);
{
- TNode<Smi> fp_mode = UncheckedCast<Smi>(Parameter(Descriptor::kFPMode));
+ auto fp_mode = UncheckedParameter<Smi>(Descriptor::kFPMode);
TNode<IntPtrT> object =
- BitcastTaggedToWord(Parameter(Descriptor::kObject));
+ BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
InsertIntoRememberedSetAndGoto(object, slot, fp_mode, &exit);
}
BIND(&store_buffer_incremental_wb);
{
- TNode<Smi> fp_mode = UncheckedCast<Smi>(Parameter(Descriptor::kFPMode));
+ auto fp_mode = UncheckedParameter<Smi>(Descriptor::kFPMode);
TNode<IntPtrT> object =
- BitcastTaggedToWord(Parameter(Descriptor::kObject));
+ BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
InsertIntoRememberedSetAndGoto(object, slot, fp_mode, &incremental_wb);
}
}
@@ -370,7 +369,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
{
Label call_incremental_wb(this);
- TNode<IntPtrT> slot = UncheckedCast<IntPtrT>(Parameter(Descriptor::kSlot));
+ auto slot = UncheckedParameter<IntPtrT>(Descriptor::kSlot);
TNode<IntPtrT> value =
BitcastTaggedToWord(Load(MachineType::TaggedPointer(), slot));
@@ -383,7 +382,8 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
GotoIfNot(IsPageFlagSet(value, MemoryChunk::kEvacuationCandidateMask),
&exit);
- TNode<IntPtrT> object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
+ TNode<IntPtrT> object =
+ BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
Branch(
IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask),
&exit, &call_incremental_wb);
@@ -392,9 +392,9 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
{
TNode<ExternalReference> function = ExternalConstant(
ExternalReference::write_barrier_marking_from_code_function());
- TNode<Smi> fp_mode = UncheckedCast<Smi>(Parameter(Descriptor::kFPMode));
+ auto fp_mode = UncheckedParameter<Smi>(Descriptor::kFPMode);
TNode<IntPtrT> object =
- BitcastTaggedToWord(Parameter(Descriptor::kObject));
+ BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
CallCFunction2WithCallerSavedRegistersMode<Int32T, IntPtrT, IntPtrT>(
function, object, slot, fp_mode, &exit);
}
@@ -412,10 +412,10 @@ TF_BUILTIN(EphemeronKeyBarrier, RecordWriteCodeStubAssembler) {
ExternalReference::ephemeron_key_write_barrier_function());
TNode<ExternalReference> isolate_constant =
ExternalConstant(ExternalReference::isolate_address(isolate()));
- TNode<IntPtrT> address =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kSlotAddress));
- TNode<IntPtrT> object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
- TNode<Smi> fp_mode = UncheckedCast<Smi>(Parameter(Descriptor::kFPMode));
+ auto address = UncheckedParameter<IntPtrT>(Descriptor::kSlotAddress);
+ TNode<IntPtrT> object =
+ BitcastTaggedToWord(UntypedParameter(Descriptor::kObject));
+ TNode<Smi> fp_mode = UncheckedParameter<Smi>(Descriptor::kFPMode);
CallCFunction3WithCallerSavedRegistersMode<Int32T, IntPtrT, IntPtrT,
ExternalReference>(
function, object, address, isolate_constant, fp_mode, &exit);
@@ -476,10 +476,10 @@ class DeletePropertyBaseAssembler : public AccessorAssembler {
};
TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
- TNode<Object> receiver = CAST(Parameter(Descriptor::kObject));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- TNode<Smi> language_mode = CAST(Parameter(Descriptor::kLanguageMode));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kObject);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ auto language_mode = Parameter<Smi>(Descriptor::kLanguageMode);
+ auto context = Parameter<Context>(Descriptor::kContext);
TVARIABLE(IntPtrT, var_index);
TVARIABLE(Name, var_unique);
@@ -641,9 +641,9 @@ class SetOrCopyDataPropertiesAssembler : public CodeStubAssembler {
// ES #sec-copydataproperties
TF_BUILTIN(CopyDataProperties, SetOrCopyDataPropertiesAssembler) {
- TNode<JSObject> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> source = CAST(Parameter(Descriptor::kSource));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto target = Parameter<JSObject>(Descriptor::kTarget);
+ auto source = Parameter<Object>(Descriptor::kSource);
+ auto context = Parameter<Context>(Descriptor::kContext);
CSA_ASSERT(this, TaggedNotEqual(target, source));
@@ -655,9 +655,9 @@ TF_BUILTIN(CopyDataProperties, SetOrCopyDataPropertiesAssembler) {
}
TF_BUILTIN(SetDataProperties, SetOrCopyDataPropertiesAssembler) {
- TNode<JSReceiver> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> source = CAST(Parameter(Descriptor::kSource));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto target = Parameter<JSReceiver>(Descriptor::kTarget);
+ auto source = Parameter<Object>(Descriptor::kSource);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label if_runtime(this, Label::kDeferred);
Return(SetOrCopyDataProperties(context, target, source, &if_runtime, true));
@@ -667,8 +667,8 @@ TF_BUILTIN(SetDataProperties, SetOrCopyDataPropertiesAssembler) {
}
TF_BUILTIN(ForInEnumerate, CodeStubAssembler) {
- TNode<JSReceiver> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<JSReceiver>(Descriptor::kReceiver);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label if_empty(this), if_runtime(this, Label::kDeferred);
TNode<Map> receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime);
@@ -682,9 +682,9 @@ TF_BUILTIN(ForInEnumerate, CodeStubAssembler) {
}
TF_BUILTIN(ForInFilter, CodeStubAssembler) {
- TNode<String> key = CAST(Parameter(Descriptor::kKey));
- TNode<HeapObject> object = CAST(Parameter(Descriptor::kObject));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto key = Parameter<String>(Descriptor::kKey);
+ auto object = Parameter<HeapObject>(Descriptor::kObject);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label if_true(this), if_false(this);
TNode<Oddball> result = HasProperty(context, object, key, kForInHasProperty);
@@ -698,8 +698,8 @@ TF_BUILTIN(ForInFilter, CodeStubAssembler) {
}
TF_BUILTIN(SameValue, CodeStubAssembler) {
- TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft));
- TNode<Object> rhs = CAST(Parameter(Descriptor::kRight));
+ auto lhs = Parameter<Object>(Descriptor::kLeft);
+ auto rhs = Parameter<Object>(Descriptor::kRight);
Label if_true(this), if_false(this);
BranchIfSameValue(lhs, rhs, &if_true, &if_false);
@@ -712,8 +712,8 @@ TF_BUILTIN(SameValue, CodeStubAssembler) {
}
TF_BUILTIN(SameValueNumbersOnly, CodeStubAssembler) {
- TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft));
- TNode<Object> rhs = CAST(Parameter(Descriptor::kRight));
+ auto lhs = Parameter<Object>(Descriptor::kLeft);
+ auto rhs = Parameter<Object>(Descriptor::kRight);
Label if_true(this), if_false(this);
BranchIfSameValue(lhs, rhs, &if_true, &if_false, SameValueMode::kNumbersOnly);
@@ -726,10 +726,9 @@ TF_BUILTIN(SameValueNumbersOnly, CodeStubAssembler) {
}
TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
- TNode<JSFunction> target = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<WordT> c_function =
- UncheckedCast<WordT>(Parameter(Descriptor::kCFunction));
+ auto target = Parameter<JSFunction>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto c_function = UncheckedParameter<WordT>(Descriptor::kCFunction);
// The logic contained here is mirrored for TurboFan inlining in
// JSTypedLowering::ReduceJSCall{Function,Construct}. Keep these in sync.
@@ -740,8 +739,8 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
// ordinary functions).
TNode<Context> context = LoadJSFunctionContext(target);
- TNode<Int32T> actual_argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ auto actual_argc =
+ UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
TVARIABLE(Int32T, pushed_argc, actual_argc);
@@ -789,8 +788,7 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
}
TF_BUILTIN(AllocateInYoungGeneration, CodeStubAssembler) {
- TNode<IntPtrT> requested_size =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+ auto requested_size = UncheckedParameter<IntPtrT>(Descriptor::kRequestedSize);
CSA_CHECK(this, IsValidPositiveSmi(requested_size));
TNode<Smi> allocation_flags =
@@ -801,8 +799,7 @@ TF_BUILTIN(AllocateInYoungGeneration, CodeStubAssembler) {
}
TF_BUILTIN(AllocateRegularInYoungGeneration, CodeStubAssembler) {
- TNode<IntPtrT> requested_size =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+ auto requested_size = UncheckedParameter<IntPtrT>(Descriptor::kRequestedSize);
CSA_CHECK(this, IsValidPositiveSmi(requested_size));
TNode<Smi> allocation_flags =
@@ -813,8 +810,7 @@ TF_BUILTIN(AllocateRegularInYoungGeneration, CodeStubAssembler) {
}
TF_BUILTIN(AllocateInOldGeneration, CodeStubAssembler) {
- TNode<IntPtrT> requested_size =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+ auto requested_size = UncheckedParameter<IntPtrT>(Descriptor::kRequestedSize);
CSA_CHECK(this, IsValidPositiveSmi(requested_size));
TNode<Smi> runtime_flags =
@@ -825,8 +821,7 @@ TF_BUILTIN(AllocateInOldGeneration, CodeStubAssembler) {
}
TF_BUILTIN(AllocateRegularInOldGeneration, CodeStubAssembler) {
- TNode<IntPtrT> requested_size =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+ auto requested_size = UncheckedParameter<IntPtrT>(Descriptor::kRequestedSize);
CSA_CHECK(this, IsValidPositiveSmi(requested_size));
TNode<Smi> runtime_flags =
@@ -837,12 +832,12 @@ TF_BUILTIN(AllocateRegularInOldGeneration, CodeStubAssembler) {
}
TF_BUILTIN(Abort, CodeStubAssembler) {
- TNode<Smi> message_id = CAST(Parameter(Descriptor::kMessageOrMessageId));
+ auto message_id = Parameter<Smi>(Descriptor::kMessageOrMessageId);
TailCallRuntime(Runtime::kAbort, NoContextConstant(), message_id);
}
TF_BUILTIN(AbortCSAAssert, CodeStubAssembler) {
- TNode<String> message = CAST(Parameter(Descriptor::kMessageOrMessageId));
+ auto message = Parameter<String>(Descriptor::kMessageOrMessageId);
TailCallRuntime(Runtime::kAbortCSAAssert, NoContextConstant(), message);
}
@@ -912,9 +907,9 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
// ES6 [[Get]] operation.
TF_BUILTIN(GetProperty, CodeStubAssembler) {
- TNode<Object> object = CAST(Parameter(Descriptor::kObject));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto object = Parameter<Object>(Descriptor::kObject);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ auto context = Parameter<Context>(Descriptor::kContext);
// TODO(duongn): consider tailcalling to GetPropertyWithReceiver(object,
// object, key, OnNonExistent::kReturnUndefined).
Label if_notfound(this), if_proxy(this, Label::kDeferred),
@@ -967,11 +962,11 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) {
// ES6 [[Get]] operation with Receiver.
TF_BUILTIN(GetPropertyWithReceiver, CodeStubAssembler) {
- TNode<Object> object = CAST(Parameter(Descriptor::kObject));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> on_non_existent = CAST(Parameter(Descriptor::kOnNonExistent));
+ auto object = Parameter<Object>(Descriptor::kObject);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto on_non_existent = Parameter<Object>(Descriptor::kOnNonExistent);
Label if_notfound(this), if_proxy(this, Label::kDeferred),
if_slow(this, Label::kDeferred);
@@ -1035,10 +1030,10 @@ TF_BUILTIN(GetPropertyWithReceiver, CodeStubAssembler) {
// ES6 [[Set]] operation.
TF_BUILTIN(SetProperty, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ auto value = Parameter<Object>(Descriptor::kValue);
KeyedStoreGenericGenerator::SetProperty(state(), context, receiver, key,
value, LanguageMode::kStrict);
@@ -1049,10 +1044,10 @@ TF_BUILTIN(SetProperty, CodeStubAssembler) {
// any operation here should be unobservable until after the object has been
// returned.
TF_BUILTIN(SetPropertyInLiteral, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSObject> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<JSObject>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ auto value = Parameter<Object>(Descriptor::kValue);
KeyedStoreGenericGenerator::SetPropertyInLiteral(state(), context, receiver,
key, value);
@@ -1060,11 +1055,11 @@ TF_BUILTIN(SetPropertyInLiteral, CodeStubAssembler) {
TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) {
Label tailcall_to_function(this);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Int32T> arg_count =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<JSFunction> function = CAST(Parameter(Descriptor::kTarget));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto arg_count =
+ UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto function = Parameter<JSFunction>(Descriptor::kTarget);
// Retrieve arguments from caller (stdlib, foreign, heap).
CodeStubArguments args(this, arg_count);
@@ -1077,6 +1072,22 @@ TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) {
TNode<Object> maybe_result_or_smi_zero = CallRuntime(
Runtime::kInstantiateAsmJs, context, function, stdlib, foreign, heap);
GotoIf(TaggedIsSmi(maybe_result_or_smi_zero), &tailcall_to_function);
+
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ TNode<SharedFunctionInfo> shared = LoadJSFunctionSharedFunctionInfo(function);
+ TNode<Int32T> parameter_count =
+ UncheckedCast<Int32T>(LoadSharedFunctionInfoFormalParameterCount(shared));
+ // This builtin intercepts a call to {function}, where the number of arguments
+ // pushed is the maximum of actual arguments count and formal parameters
+ // count.
+ Label argc_lt_param_count(this), argc_ge_param_count(this);
+ Branch(Int32LessThan(arg_count, parameter_count), &argc_lt_param_count,
+ &argc_ge_param_count);
+ BIND(&argc_lt_param_count);
+ PopAndReturn(Int32Add(parameter_count, Int32Constant(1)),
+ maybe_result_or_smi_zero);
+ BIND(&argc_ge_param_count);
+#endif
args.PopAndReturn(maybe_result_or_smi_zero);
BIND(&tailcall_to_function);
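The new V8_NO_ARGUMENTS_ADAPTOR block changes how many stack slots InstantiateAsmJs pops on return: per the comment in the hunk, the caller pushes the maximum of the actual and formal argument counts, plus the receiver, so when arg_count is below parameter_count the builtin must pop parameter_count + 1 slots itself instead of letting args.PopAndReturn count from argc. A worked example of that slot count, with illustrative values not taken from the diff:

    #include <algorithm>
    #include <cstdio>

    // Mirrors the Int32LessThan branch: when argc is below the formal count the
    // frame was padded, so parameter_count (+1 for the receiver) governs the pop.
    int SlotsToPop(int arg_count, int parameter_count) {
      return std::max(arg_count, parameter_count) + 1;
    }

    int main() {
      std::printf("%d\n", SlotsToPop(1, 3));  // padded frame: pops 3 + 1 = 4
      std::printf("%d\n", SlotsToPop(5, 3));  // argc governs:  pops 5 + 1 = 6
    }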
diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc
index 51546f98e8..42ccbebcbc 100644
--- a/deps/v8/src/builtins/builtins-intl-gen.cc
+++ b/deps/v8/src/builtins/builtins-intl-gen.cc
@@ -40,7 +40,7 @@ class IntlBuiltinsAssembler : public CodeStubAssembler {
};
TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
- const TNode<String> string = CAST(Parameter(Descriptor::kString));
+ const auto string = Parameter<String>(Descriptor::kString);
Label call_c(this), return_string(this), runtime(this, Label::kDeferred);
@@ -136,8 +136,8 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
}
TF_BUILTIN(StringPrototypeToLowerCaseIntl, IntlBuiltinsAssembler) {
- TNode<Object> maybe_string = CAST(Parameter(Descriptor::kReceiver));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto maybe_string = Parameter<Object>(Descriptor::kReceiver);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<String> string =
ToThisString(context, maybe_string, "String.prototype.toLowerCase");
@@ -183,15 +183,15 @@ TNode<JSArray> IntlBuiltinsAssembler::AllocateEmptyJSArray(
TF_BUILTIN(ListFormatPrototypeFormat, IntlBuiltinsAssembler) {
ListFormatCommon(
- CAST(Parameter(Descriptor::kContext)),
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)),
+ Parameter<Context>(Descriptor::kContext),
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount),
Runtime::kFormatList, "Intl.ListFormat.prototype.format");
}
TF_BUILTIN(ListFormatPrototypeFormatToParts, IntlBuiltinsAssembler) {
ListFormatCommon(
- CAST(Parameter(Descriptor::kContext)),
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)),
+ Parameter<Context>(Descriptor::kContext),
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount),
Runtime::kFormatListToParts, "Intl.ListFormat.prototype.formatToParts");
}
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index 9f3ec5c323..8cf52e5368 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -181,25 +181,25 @@ void IteratorBuiltinsAssembler::FillFixedArrayFromIterable(
}
TF_BUILTIN(IterableToList, IteratorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
- TNode<Object> iterator_fn = CAST(Parameter(Descriptor::kIteratorFn));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterable = Parameter<Object>(Descriptor::kIterable);
+ auto iterator_fn = Parameter<Object>(Descriptor::kIteratorFn);
Return(IterableToList(context, iterable, iterator_fn));
}
TF_BUILTIN(IterableToFixedArray, IteratorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
- TNode<Object> iterator_fn = CAST(Parameter(Descriptor::kIteratorFn));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterable = Parameter<Object>(Descriptor::kIterable);
+ auto iterator_fn = Parameter<Object>(Descriptor::kIteratorFn);
Return(IterableToFixedArray(context, iterable, iterator_fn));
}
TF_BUILTIN(IterableToFixedArrayForWasm, IteratorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
- TNode<Smi> expected_length = CAST(Parameter(Descriptor::kExpectedLength));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterable = Parameter<Object>(Descriptor::kIterable);
+ auto expected_length = Parameter<Smi>(Descriptor::kExpectedLength);
TNode<Object> iterator_fn = GetIteratorMethod(context, iterable);
GrowableFixedArray values(state());
@@ -280,8 +280,8 @@ TNode<JSArray> IteratorBuiltinsAssembler::StringListFromIterable(
}
TF_BUILTIN(StringListFromIterable, IteratorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterable = Parameter<Object>(Descriptor::kIterable);
Return(StringListFromIterable(context, iterable));
}
@@ -296,9 +296,9 @@ TF_BUILTIN(StringListFromIterable, IteratorBuiltinsAssembler) {
// prototype has no elements). To maintain the correct behavior for holey
// arrays, use the builtins IterableToList or IterableToListWithSymbolLookup.
TF_BUILTIN(IterableToListMayPreserveHoles, IteratorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
- TNode<Object> iterator_fn = CAST(Parameter(Descriptor::kIteratorFn));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterable = Parameter<Object>(Descriptor::kIterable);
+ auto iterator_fn = Parameter<Object>(Descriptor::kIteratorFn);
Label slow_path(this);
@@ -389,8 +389,8 @@ TNode<JSArray> IteratorBuiltinsAssembler::FastIterableToList(
// iterator is not partially consumed. To be spec-compliant, after spreading
// the iterator is set to be exhausted.
TF_BUILTIN(IterableToListWithSymbolLookup, IteratorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterable = Parameter<Object>(Descriptor::kIterable);
Label slow_path(this);
@@ -409,13 +409,13 @@ TF_BUILTIN(IterableToListWithSymbolLookup, IteratorBuiltinsAssembler) {
TF_BUILTIN(GetIteratorWithFeedbackLazyDeoptContinuation,
IteratorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
// TODO(v8:10047): Use TaggedIndex here once TurboFan supports it.
- TNode<Smi> call_slot_smi = CAST(Parameter(Descriptor::kCallSlot));
+ auto call_slot_smi = Parameter<Smi>(Descriptor::kCallSlot);
TNode<TaggedIndex> call_slot = SmiToTaggedIndex(call_slot_smi);
- TNode<FeedbackVector> feedback = CAST(Parameter(Descriptor::kFeedback));
- TNode<Object> iterator_method = CAST(Parameter(Descriptor::kResult));
+ auto feedback = Parameter<FeedbackVector>(Descriptor::kFeedback);
+ auto iterator_method = Parameter<Object>(Descriptor::kResult);
TNode<Object> result =
CallBuiltin(Builtins::kCallIteratorWithFeedback, context, receiver,
@@ -427,8 +427,8 @@ TF_BUILTIN(GetIteratorWithFeedbackLazyDeoptContinuation,
// fast path for anything.
TF_BUILTIN(IterableToFixedArrayWithSymbolLookupSlow,
IteratorBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto iterable = Parameter<Object>(Descriptor::kIterable);
TNode<Object> iterator_fn = GetIteratorMethod(context, iterable);
TailCallBuiltin(Builtins::kIterableToFixedArray, context, iterable,
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc
index 95d5229974..bd28cbc160 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.cc
+++ b/deps/v8/src/builtins/builtins-lazy-gen.cc
@@ -15,82 +15,72 @@ namespace internal {
void LazyBuiltinsAssembler::GenerateTailCallToJSCode(
TNode<Code> code, TNode<JSFunction> function) {
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
TailCallJSCode(code, context, function, new_target, argc);
}
void LazyBuiltinsAssembler::GenerateTailCallToReturnedCode(
Runtime::FunctionId function_id, TNode<JSFunction> function) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<Code> code = CAST(CallRuntime(function_id, context, function));
GenerateTailCallToJSCode(code, function);
}
void LazyBuiltinsAssembler::TailCallRuntimeIfMarkerEquals(
- TNode<Smi> marker, OptimizationMarker expected_marker,
+ TNode<Uint32T> marker, OptimizationMarker expected_marker,
Runtime::FunctionId function_id, TNode<JSFunction> function) {
Label no_match(this);
- GotoIfNot(SmiEqual(marker, SmiConstant(expected_marker)), &no_match);
+ GotoIfNot(Word32Equal(marker, Uint32Constant(expected_marker)), &no_match);
GenerateTailCallToReturnedCode(function_id, function);
BIND(&no_match);
}
void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
TNode<JSFunction> function, TNode<FeedbackVector> feedback_vector) {
- Label fallthrough(this);
-
- TNode<MaybeObject> maybe_optimized_code_entry = LoadMaybeWeakObjectField(
- feedback_vector, FeedbackVector::kOptimizedCodeWeakOrSmiOffset);
-
- // Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret it as a weak reference to a code
- // object.
- Label optimized_code_slot_is_smi(this), optimized_code_slot_is_weak_ref(this);
- Branch(TaggedIsSmi(maybe_optimized_code_entry), &optimized_code_slot_is_smi,
- &optimized_code_slot_is_weak_ref);
-
- BIND(&optimized_code_slot_is_smi);
- {
- // Optimized code slot is a Smi optimization marker.
- TNode<Smi> marker = CAST(maybe_optimized_code_entry);
-
- // Fall through if no optimization trigger.
- GotoIf(SmiEqual(marker, SmiConstant(OptimizationMarker::kNone)),
- &fallthrough);
-
- // TODO(ishell): introduce Runtime::kHandleOptimizationMarker and check
- // all these marker values there.
- TailCallRuntimeIfMarkerEquals(marker,
- OptimizationMarker::kLogFirstExecution,
- Runtime::kFunctionFirstExecution, function);
- TailCallRuntimeIfMarkerEquals(marker, OptimizationMarker::kCompileOptimized,
- Runtime::kCompileOptimized_NotConcurrent,
- function);
- TailCallRuntimeIfMarkerEquals(
- marker, OptimizationMarker::kCompileOptimizedConcurrent,
- Runtime::kCompileOptimized_Concurrent, function);
-
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
- CSA_ASSERT(this,
- SmiEqual(marker,
- SmiConstant(OptimizationMarker::kInOptimizationQueue)));
- Goto(&fallthrough);
- }
-
- BIND(&optimized_code_slot_is_weak_ref);
+ Label fallthrough(this), may_have_optimized_code(this);
+
+ TNode<Uint32T> optimization_state =
+ LoadObjectField<Uint32T>(feedback_vector, FeedbackVector::kFlagsOffset);
+
+ // Fall through if no optimization trigger or optimized code.
+ GotoIfNot(IsSetWord32(
+ optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask),
+ &fallthrough);
+
+ GotoIfNot(IsSetWord32(
+ optimization_state,
+ FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker),
+ &may_have_optimized_code);
+
+ // TODO(ishell): introduce Runtime::kHandleOptimizationMarker and check
+ // all these marker values there.
+ TNode<Uint32T> marker =
+ DecodeWord32<FeedbackVector::OptimizationMarkerBits>(optimization_state);
+ TailCallRuntimeIfMarkerEquals(marker, OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution, function);
+ TailCallRuntimeIfMarkerEquals(marker, OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent,
+ function);
+ TailCallRuntimeIfMarkerEquals(
+ marker, OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent, function);
+
+ Unreachable();
+ BIND(&may_have_optimized_code);
{
+ Label heal_optimized_code_slot(this);
+ TNode<MaybeObject> maybe_optimized_code_entry = LoadMaybeWeakObjectField(
+ feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset);
// Optimized code slot is a weak reference.
- TNode<Code> optimized_code =
- CAST(GetHeapObjectAssumeWeak(maybe_optimized_code_entry, &fallthrough));
+ TNode<Code> optimized_code = CAST(GetHeapObjectAssumeWeak(
+ maybe_optimized_code_entry, &heal_optimized_code_slot));
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- Label found_deoptimized_code(this);
TNode<CodeDataContainer> code_data_container =
CAST(LoadObjectField(optimized_code, Code::kCodeDataContainerOffset));
@@ -98,17 +88,18 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
code_data_container, CodeDataContainer::kKindSpecificFlagsOffset);
GotoIf(IsSetWord32<Code::MarkedForDeoptimizationField>(
code_kind_specific_flags),
- &found_deoptimized_code);
+ &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
StoreObjectField(function, JSFunction::kCodeOffset, optimized_code);
GenerateTailCallToJSCode(optimized_code, function);
- // Optimized code slot contains deoptimized code, evict it and re-enter the
- // closure's code.
- BIND(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(Runtime::kEvictOptimizedCodeSlot, function);
+ // Optimized code slot contains deoptimized code or code is cleared and
+ // optimized code marker isn't updated. Evict the code, update the marker
+ // and re-enter the closure's code.
+ BIND(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(Runtime::kHealOptimizedCodeSlot, function);
}
// Fall-through if the optimized code cell is clear and there is no
@@ -156,13 +147,13 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
}
TF_BUILTIN(CompileLazy, LazyBuiltinsAssembler) {
- TNode<JSFunction> function = CAST(Parameter(Descriptor::kTarget));
+ auto function = Parameter<JSFunction>(Descriptor::kTarget);
CompileLazy(function);
}
TF_BUILTIN(CompileLazyDeoptimizedCode, LazyBuiltinsAssembler) {
- TNode<JSFunction> function = CAST(Parameter(Descriptor::kTarget));
+ auto function = Parameter<JSFunction>(Descriptor::kTarget);
// Set the code slot inside the JSFunction to CompileLazy.
TNode<Code> code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
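The rewritten MaybeTailCallOptimizedCodeSlot above stops treating the feedback slot as "Smi marker or weak code ref" and instead reads a packed Uint32 flags word: one aligned load, then bit tests decide between falling through, dispatching on the decoded marker, or inspecting the (possibly cleared) weak code pointer, with kHealOptimizedCodeSlot as the single repair path for stale state. A standalone sketch of such a flags word; the layout and mask values here are invented for illustration and are not V8's actual FeedbackVector encoding.

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kMarkerShift = 0;
    constexpr uint32_t kMarkerMask = 0x7u << kMarkerShift;  // OptimizationMarkerBits
    constexpr uint32_t kHasOptimizedCodeBit = 1u << 3;      // weak code ref present

    enum Marker : uint32_t {
      kNone = 0,
      kLogFirstExecution = 1,
      kCompileOptimized = 2,
      kCompileOptimizedConcurrent = 3,
    };

    uint32_t DecodeMarker(uint32_t flags) {                 // like DecodeWord32<Bits>
      return (flags & kMarkerMask) >> kMarkerShift;
    }

    int main() {
      uint32_t flags = kHasOptimizedCodeBit | kCompileOptimized;
      // Cheap bail-out first: one bit test covers "no marker and no code".
      if ((flags & (kMarkerMask | kHasOptimizedCodeBit)) != 0) {
        std::printf("marker=%u has_code=%d\n", DecodeMarker(flags),
                    (flags & kHasOptimizedCodeBit) != 0);
      }
    }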
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.h b/deps/v8/src/builtins/builtins-lazy-gen.h
index 6036da4661..b51dcb58d4 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.h
+++ b/deps/v8/src/builtins/builtins-lazy-gen.h
@@ -21,7 +21,7 @@ class LazyBuiltinsAssembler : public CodeStubAssembler {
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id,
TNode<JSFunction> function);
- void TailCallRuntimeIfMarkerEquals(TNode<Smi> marker,
+ void TailCallRuntimeIfMarkerEquals(TNode<Uint32T> marker,
OptimizationMarker expected_marker,
Runtime::FunctionId function_id,
TNode<JSFunction> function);
diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
index 1da6f54c82..9f16186d13 100644
--- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
+++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
@@ -53,8 +53,9 @@ class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler {
TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueue(
TNode<Context> native_context) {
CSA_ASSERT(this, IsNativeContext(native_context));
- return DecodeExternalPointer(LoadObjectField<ExternalPointerT>(
- native_context, NativeContext::kMicrotaskQueueOffset));
+ return LoadExternalPointerFromObject(native_context,
+ NativeContext::kMicrotaskQueueOffset,
+ kNativeContextMicrotaskQueueTag);
}
TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskRingBuffer(
@@ -489,8 +490,8 @@ void MicrotaskQueueBuiltinsAssembler::RunPromiseHook(
}
TF_BUILTIN(EnqueueMicrotask, MicrotaskQueueBuiltinsAssembler) {
- TNode<Microtask> microtask = CAST(Parameter(Descriptor::kMicrotask));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto microtask = Parameter<Microtask>(Descriptor::kMicrotask);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<RawPtrT> microtask_queue = GetMicrotaskQueue(native_context);
@@ -541,8 +542,8 @@ TF_BUILTIN(RunMicrotasks, MicrotaskQueueBuiltinsAssembler) {
// Load the current context from the isolate.
TNode<Context> current_context = GetCurrentContext();
- TNode<RawPtrT> microtask_queue =
- UncheckedCast<RawPtrT>(Parameter(Descriptor::kMicrotaskQueue));
+ auto microtask_queue =
+ UncheckedParameter<RawPtrT>(Descriptor::kMicrotaskQueue);
Label loop(this), done(this);
Goto(&loop);
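GetMicrotaskQueue now goes through LoadExternalPointerFromObject with an explicit type tag, part of the external-pointer scheme in which raw pointers stored on the heap are encoded per type and each load must name the tag it expects. A sketch of the idea only: the XOR encoding below is illustrative, not V8's actual sandbox encoding.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    using ExternalPointerT = uint64_t;
    constexpr uint64_t kNativeContextMicrotaskQueueTag = 0xABull << 48;

    ExternalPointerT EncodeExternalPointer(void* ptr, uint64_t tag) {
      return reinterpret_cast<uint64_t>(ptr) ^ tag;
    }

    void* DecodeExternalPointer(ExternalPointerT encoded, uint64_t tag) {
      // Decoding with the wrong tag yields a garbage pointer rather than a
      // usable one, which is the point: every load names the type it expects.
      return reinterpret_cast<void*>(encoded ^ tag);
    }

    int main() {
      int queue = 42;  // stands in for the MicrotaskQueue object
      ExternalPointerT stored =
          EncodeExternalPointer(&queue, kNativeContextMicrotaskQueueTag);
      int* p = static_cast<int*>(
          DecodeExternalPointer(stored, kNativeContextMicrotaskQueueTag));
      assert(*p == 42);
      std::printf("%d\n", *p);
    }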
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index 4e8bcae60b..0e57959aad 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -16,13 +16,12 @@ namespace internal {
#define DEF_BINOP(Name, Generator) \
TF_BUILTIN(Name, CodeStubAssembler) { \
- TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft)); \
- TNode<Object> rhs = CAST(Parameter(Descriptor::kRight)); \
- TNode<Context> context = CAST(Parameter(Descriptor::kContext)); \
- TNode<HeapObject> maybe_feedback_vector = \
- CAST(Parameter(Descriptor::kMaybeFeedbackVector)); \
- TNode<UintPtrT> slot = \
- UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot)); \
+ auto lhs = Parameter<Object>(Descriptor::kLeft); \
+ auto rhs = Parameter<Object>(Descriptor::kRight); \
+ auto context = Parameter<Context>(Descriptor::kContext); \
+ auto maybe_feedback_vector = \
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector); \
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
\
BinaryOpAssembler binop_asm(state()); \
TNode<Object> result = binop_asm.Generator(context, lhs, rhs, slot, \
@@ -45,20 +44,19 @@ DEF_BINOP(ShiftRightLogical_WithFeedback,
Generate_ShiftRightLogicalWithFeedback)
#undef DEF_BINOP
-#define DEF_UNOP(Name, Generator) \
- TF_BUILTIN(Name, CodeStubAssembler) { \
- TNode<Object> value = CAST(Parameter(Descriptor::kValue)); \
- TNode<Context> context = CAST(Parameter(Descriptor::kContext)); \
- TNode<HeapObject> maybe_feedback_vector = \
- CAST(Parameter(Descriptor::kMaybeFeedbackVector)); \
- TNode<UintPtrT> slot = \
- UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot)); \
- \
- UnaryOpAssembler a(state()); \
- TNode<Object> result = \
- a.Generator(context, value, slot, maybe_feedback_vector); \
- \
- Return(result); \
+#define DEF_UNOP(Name, Generator) \
+ TF_BUILTIN(Name, CodeStubAssembler) { \
+ auto value = Parameter<Object>(Descriptor::kValue); \
+ auto context = Parameter<Context>(Descriptor::kContext); \
+ auto maybe_feedback_vector = \
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector); \
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
+ \
+ UnaryOpAssembler a(state()); \
+ TNode<Object> result = \
+ a.Generator(context, value, slot, maybe_feedback_vector); \
+ \
+ Return(result); \
}
DEF_UNOP(BitwiseNot_WithFeedback, Generate_BitwiseNotWithFeedback)
DEF_UNOP(Decrement_WithFeedback, Generate_DecrementWithFeedback)
@@ -68,13 +66,12 @@ DEF_UNOP(Negate_WithFeedback, Generate_NegateWithFeedback)
#define DEF_COMPARE(Name) \
TF_BUILTIN(Name##_WithFeedback, CodeStubAssembler) { \
- TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft)); \
- TNode<Object> rhs = CAST(Parameter(Descriptor::kRight)); \
- TNode<Context> context = CAST(Parameter(Descriptor::kContext)); \
- TNode<HeapObject> maybe_feedback_vector = \
- CAST(Parameter(Descriptor::kMaybeFeedbackVector)); \
- TNode<UintPtrT> slot = \
- UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot)); \
+ auto lhs = Parameter<Object>(Descriptor::kLeft); \
+ auto rhs = Parameter<Object>(Descriptor::kRight); \
+ auto context = Parameter<Context>(Descriptor::kContext); \
+ auto maybe_feedback_vector = \
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector); \
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
\
TVARIABLE(Smi, var_type_feedback); \
TNode<Oddball> result = RelationalComparison(Operation::k##Name, lhs, rhs, \
@@ -90,12 +87,12 @@ DEF_COMPARE(GreaterThanOrEqual)
#undef DEF_COMPARE
TF_BUILTIN(Equal_WithFeedback, CodeStubAssembler) {
- TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft));
- TNode<Object> rhs = CAST(Parameter(Descriptor::kRight));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<UintPtrT> slot = UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot));
+ auto lhs = Parameter<Object>(Descriptor::kLeft);
+ auto rhs = Parameter<Object>(Descriptor::kRight);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
TVARIABLE(Smi, var_type_feedback);
TNode<Oddball> result = Equal(lhs, rhs, context, &var_type_feedback);
@@ -105,11 +102,11 @@ TF_BUILTIN(Equal_WithFeedback, CodeStubAssembler) {
}
TF_BUILTIN(StrictEqual_WithFeedback, CodeStubAssembler) {
- TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft));
- TNode<Object> rhs = CAST(Parameter(Descriptor::kRight));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<UintPtrT> slot = UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot));
+ auto lhs = Parameter<Object>(Descriptor::kLeft);
+ auto rhs = Parameter<Object>(Descriptor::kRight);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
TVARIABLE(Smi, var_type_feedback);
TNode<Oddball> result = StrictEqual(lhs, rhs, &var_type_feedback);
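The DEF_BINOP/DEF_UNOP/DEF_COMPARE hunks above are the same Parameter refactor applied inside macro bodies that stamp out one builtin per operation from a shared skeleton, which is why each macro edit shows up as a full rewrite of the backslash-continued block. A toy version of that stamping pattern, with illustrative names:

    #include <cstdio>

    // One shared body, stamped out per operation; each expansion is a distinct
    // function, so a single edit to the macro body updates all of them at once.
    #define DEF_BINOP(Name, op) \
      int Name(int lhs, int rhs) { return lhs op rhs; }

    DEF_BINOP(Add_WithFeedback, +)
    DEF_BINOP(Subtract_WithFeedback, -)
    DEF_BINOP(Multiply_WithFeedback, *)
    #undef DEF_BINOP

    int main() { std::printf("%d\n", Multiply_WithFeedback(6, 7)); }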
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index bcc2f8ea64..7d133a6198 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -350,9 +350,9 @@ ObjectEntriesValuesBuiltinsAssembler::FinalizeValuesOrEntriesJSArray(
}
TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
- TNode<Object> object = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto object = Parameter<Object>(Descriptor::kReceiver);
+ auto key = Parameter<Object>(Descriptor::kKey);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label call_runtime(this), return_true(this), return_false(this),
to_primitive(this);
@@ -421,10 +421,10 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) {
// ES #sec-object.assign
TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<Object> target = args.GetOptionalArgumentValue(0);
// 1. Let to be ? ToObject(target).
@@ -451,8 +451,8 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
// ES #sec-object.keys
TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
- TNode<Object> object = CAST(Parameter(Descriptor::kObject));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto object = Parameter<Object>(Descriptor::kObject);
+ auto context = Parameter<Context>(Descriptor::kContext);
TVARIABLE(Smi, var_length);
TVARIABLE(FixedArrayBase, var_elements);
@@ -541,8 +541,8 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
// ES #sec-object.getOwnPropertyNames
TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
- TNode<Object> object = CAST(Parameter(Descriptor::kObject));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto object = Parameter<Object>(Descriptor::kObject);
+ auto context = Parameter<Context>(Descriptor::kContext);
TVARIABLE(Smi, var_length);
TVARIABLE(FixedArrayBase, var_elements);
@@ -650,26 +650,22 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
}
TF_BUILTIN(ObjectValues, ObjectEntriesValuesBuiltinsAssembler) {
- TNode<JSObject> object =
- TNode<JSObject>::UncheckedCast(Parameter(Descriptor::kObject));
- TNode<Context> context =
- TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
+ auto object = UncheckedParameter<JSObject>(Descriptor::kObject);
+ auto context = UncheckedParameter<Context>(Descriptor::kContext);
GetOwnValuesOrEntries(context, object, CollectType::kValues);
}
TF_BUILTIN(ObjectEntries, ObjectEntriesValuesBuiltinsAssembler) {
- TNode<JSObject> object =
- TNode<JSObject>::UncheckedCast(Parameter(Descriptor::kObject));
- TNode<Context> context =
- TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext));
+ auto object = UncheckedParameter<JSObject>(Descriptor::kObject);
+ auto context = UncheckedParameter<Context>(Descriptor::kContext);
GetOwnValuesOrEntries(context, object, CollectType::kEntries);
}
// ES #sec-object.prototype.isprototypeof
TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label if_receiverisnullorundefined(this, Label::kDeferred),
if_valueisnotreceiver(this, Label::kDeferred);
@@ -723,8 +719,8 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
if_symbol(this, Label::kDeferred), if_value(this),
if_bigint(this, Label::kDeferred);
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto context = Parameter<Context>(Descriptor::kContext);
TVARIABLE(String, var_default);
TVARIABLE(HeapObject, var_holder);
@@ -1024,12 +1020,12 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
int const kPropertiesArg = 1;
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Object> prototype = args.GetOptionalArgumentValue(kPrototypeArg);
TNode<Object> properties = args.GetOptionalArgumentValue(kPropertiesArg);
- TNode<NativeContext> native_context = CAST(Parameter(Descriptor::kContext));
+ auto native_context = Parameter<NativeContext>(Descriptor::kContext);
Label call_runtime(this, Label::kDeferred), prototype_valid(this),
no_properties(this);
@@ -1111,8 +1107,8 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
// ES #sec-object.is
TF_BUILTIN(ObjectIs, ObjectBuiltinsAssembler) {
- const TNode<Object> left = CAST(Parameter(Descriptor::kLeft));
- const TNode<Object> right = CAST(Parameter(Descriptor::kRight));
+ const auto left = Parameter<Object>(Descriptor::kLeft);
+ const auto right = Parameter<Object>(Descriptor::kRight);
Label return_true(this), return_false(this);
BranchIfSameValue(left, right, &return_true, &return_false);
@@ -1125,9 +1121,9 @@ TF_BUILTIN(ObjectIs, ObjectBuiltinsAssembler) {
}
TF_BUILTIN(CreateIterResultObject, ObjectBuiltinsAssembler) {
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- const TNode<Oddball> done = CAST(Parameter(Descriptor::kDone));
- const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ const auto value = Parameter<Object>(Descriptor::kValue);
+ const auto done = Parameter<Oddball>(Descriptor::kDone);
+ const auto context = Parameter<Context>(Descriptor::kContext);
const TNode<NativeContext> native_context = LoadNativeContext(context);
const TNode<Map> map = CAST(
@@ -1142,28 +1138,28 @@ TF_BUILTIN(CreateIterResultObject, ObjectBuiltinsAssembler) {
}
TF_BUILTIN(HasProperty, ObjectBuiltinsAssembler) {
- TNode<Object> key = CAST(Parameter(Descriptor::kKey));
- TNode<Object> object = CAST(Parameter(Descriptor::kObject));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto key = Parameter<Object>(Descriptor::kKey);
+ auto object = Parameter<Object>(Descriptor::kObject);
+ auto context = Parameter<Context>(Descriptor::kContext);
Return(HasProperty(context, object, key, kHasProperty));
}
TF_BUILTIN(InstanceOf, ObjectBuiltinsAssembler) {
- TNode<Object> object = CAST(Parameter(Descriptor::kLeft));
- TNode<Object> callable = CAST(Parameter(Descriptor::kRight));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto object = Parameter<Object>(Descriptor::kLeft);
+ auto callable = Parameter<Object>(Descriptor::kRight);
+ auto context = Parameter<Context>(Descriptor::kContext);
Return(InstanceOf(object, callable, context));
}
TF_BUILTIN(InstanceOf_WithFeedback, ObjectBuiltinsAssembler) {
- TNode<Object> object = CAST(Parameter(Descriptor::kLeft));
- TNode<Object> callable = CAST(Parameter(Descriptor::kRight));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> maybe_feedback_vector =
- CAST(Parameter(Descriptor::kMaybeFeedbackVector));
- TNode<UintPtrT> slot = UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot));
+ auto object = Parameter<Object>(Descriptor::kLeft);
+ auto callable = Parameter<Object>(Descriptor::kRight);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_feedback_vector =
+ Parameter<HeapObject>(Descriptor::kMaybeFeedbackVector);
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
CollectInstanceOfFeedback(callable, context, maybe_feedback_vector, slot);
Return(InstanceOf(object, callable, context));
@@ -1171,24 +1167,17 @@ TF_BUILTIN(InstanceOf_WithFeedback, ObjectBuiltinsAssembler) {
// ES6 section 7.3.19 OrdinaryHasInstance ( C, O )
TF_BUILTIN(OrdinaryHasInstance, ObjectBuiltinsAssembler) {
- TNode<Object> constructor = CAST(Parameter(Descriptor::kLeft));
- TNode<Object> object = CAST(Parameter(Descriptor::kRight));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto constructor = Parameter<Object>(Descriptor::kLeft);
+ auto object = Parameter<Object>(Descriptor::kRight);
+ auto context = Parameter<Context>(Descriptor::kContext);
Return(OrdinaryHasInstance(context, constructor, object));
}
-TF_BUILTIN(GetSuperConstructor, ObjectBuiltinsAssembler) {
- TNode<JSFunction> object = CAST(Parameter(Descriptor::kObject));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- Return(GetSuperConstructor(context, object));
-}
-
TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
- TNode<JSFunction> closure = CAST(Parameter(Descriptor::kClosure));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto closure = Parameter<JSFunction>(Descriptor::kClosure);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto context = Parameter<Context>(Descriptor::kContext);
// Get the initial map from the function, jumping to the runtime if we don't
// have one.
@@ -1253,10 +1242,9 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
// ES6 section 19.1.2.7 Object.getOwnPropertyDescriptor ( O, P )
TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget)));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ CSA_ASSERT(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, argc);
TNode<Object> object_input = args.GetOptionalArgumentValue(0);
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index b482c6ba02..16f81dc3d0 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -38,7 +38,7 @@ BUILTIN(ObjectPrototypePropertyIsEnumerable) {
// ES6 section 19.1.2.3 Object.defineProperties
BUILTIN(ObjectDefineProperties) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_LE(3, args.length());
Handle<Object> target = args.at(1);
Handle<Object> properties = args.at(2);
@@ -49,7 +49,7 @@ BUILTIN(ObjectDefineProperties) {
// ES6 section 19.1.2.4 Object.defineProperty
BUILTIN(ObjectDefineProperty) {
HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
+ DCHECK_LE(4, args.length());
Handle<Object> target = args.at(1);
Handle<Object> key = args.at(2);
Handle<Object> attributes = args.at(3);
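Relaxing DCHECK_EQ to DCHECK_LE here (and in builtins-reflect.cc below) presumably tracks the argument-count padding seen earlier in this patch: once frames may carry extra trailing slots, a C++ builtin can only rely on a lower bound for args.length(). A minimal illustration; BuiltinArguments here is a stand-in, not V8's class.

    #include <cassert>

    struct BuiltinArguments {        // stand-in, not V8's class
      int length_;
      int length() const { return length_; }
    };

    void ObjectDefineProperty(const BuiltinArguments& args) {
      // Only a lower bound is guaranteed once frames may be padded with
      // extra trailing slots.
      assert(4 <= args.length());    // was: assert(4 == args.length())
    }

    int main() {
      ObjectDefineProperty(BuiltinArguments{4});  // exact count: fine
      ObjectDefineProperty(BuiltinArguments{6});  // padded frame: still fine
    }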
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 5b4b9d2536..74ac2b6681 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -83,11 +83,10 @@ TNode<JSFunction> ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(
}
TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
TNode<IntPtrT> argc_ptr = ChangeInt32ToIntPtr(argc);
- TNode<JSProxy> proxy = CAST(Parameter(Descriptor::kFunction));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto proxy = Parameter<JSProxy>(Descriptor::kFunction);
+ auto context = Parameter<Context>(Descriptor::kContext);
CSA_ASSERT(this, IsCallable(proxy));
@@ -139,12 +138,11 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
}
TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
TNode<IntPtrT> argc_ptr = ChangeInt32ToIntPtr(argc);
- TNode<JSProxy> proxy = CAST(Parameter(Descriptor::kTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto proxy = Parameter<JSProxy>(Descriptor::kTarget);
+ auto new_target = Parameter<Object>(Descriptor::kNewTarget);
+ auto context = Parameter<Context>(Descriptor::kContext);
CSA_ASSERT(this, IsCallable(proxy));
diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc
index 8ad1e56143..cf835b3476 100644
--- a/deps/v8/src/builtins/builtins-reflect.cc
+++ b/deps/v8/src/builtins/builtins-reflect.cc
@@ -19,7 +19,7 @@ namespace internal {
// ES6 section 26.1.3 Reflect.defineProperty
BUILTIN(ReflectDefineProperty) {
HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
+ DCHECK_LE(4, args.length());
Handle<Object> target = args.at(1);
Handle<Object> key = args.at(2);
Handle<Object> attributes = args.at(3);
@@ -49,7 +49,7 @@ BUILTIN(ReflectDefineProperty) {
// ES6 section 26.1.7 Reflect.getOwnPropertyDescriptor
BUILTIN(ReflectGetOwnPropertyDescriptor) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_LE(3, args.length());
Handle<Object> target = args.at(1);
Handle<Object> key = args.at(2);
@@ -75,7 +75,7 @@ BUILTIN(ReflectGetOwnPropertyDescriptor) {
// ES6 section 26.1.11 Reflect.ownKeys
BUILTIN(ReflectOwnKeys) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+ DCHECK_LE(2, args.length());
Handle<Object> target = args.at(1);
if (!target->IsJSReceiver()) {
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 8be87180eb..fa0f45e831 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -372,7 +372,8 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
ToDirectStringAssembler to_direct(state(), string);
TVARIABLE(HeapObject, var_result);
- Label out(this), atom(this), runtime(this, Label::kDeferred);
+ Label out(this), atom(this), runtime(this, Label::kDeferred),
+ retry_experimental(this, Label::kDeferred);
// External constants.
TNode<ExternalReference> isolate_address =
@@ -595,6 +596,10 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
GotoIf(IntPtrEqual(int_result,
IntPtrConstant(RegExp::kInternalRegExpException)),
&if_exception);
+ GotoIf(IntPtrEqual(
+ int_result,
+ IntPtrConstant(RegExp::kInternalRegExpFallbackToExperimental)),
+ &retry_experimental);
CSA_ASSERT(this, IntPtrEqual(int_result,
IntPtrConstant(RegExp::kInternalRegExpRetry)));
@@ -672,6 +677,14 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
Unreachable();
}
+ BIND(&retry_experimental);
+ {
+ var_result =
+ CAST(CallRuntime(Runtime::kRegExpExperimentalOneshotExec, context,
+ regexp, string, last_index, match_info));
+ Goto(&out);
+ }
+
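
The new label wires up a third outcome from the irregexp backend: besides exception and retry, the engine can now report kInternalRegExpFallbackToExperimental, and the builtin then runs the match once through Runtime::kRegExpExperimentalOneshotExec (the experimental linear-time engine) instead of the generic runtime path. A sketch of the dispatch, with illustrative enum values:

    #include <cstdio>

    enum class ExecResult { kSuccess, kException, kRetry, kFallbackToExperimental };

    void DispatchSketch(ExecResult r) {
      switch (r) {
        case ExecResult::kFallbackToExperimental:
          std::puts("one-shot exec via the experimental engine");  // new path
          break;
        case ExecResult::kRetry:
          std::puts("fall back to Runtime::kRegExpExec");  // pre-existing path
          break;
        default:
          std::puts("success or exception, handled as before");
      }
    }
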
BIND(&runtime);
{
var_result = CAST(CallRuntime(Runtime::kRegExpExec, context, regexp, string,
@@ -813,11 +826,11 @@ void RegExpBuiltinsAssembler::BranchIfRegExpResult(const TNode<Context> context,
// and {match_info} is updated on success.
// The slow path is implemented in RegExp::AtomExec.
TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
- TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kRegExp));
- TNode<String> subject_string = CAST(Parameter(Descriptor::kString));
- TNode<Smi> last_index = CAST(Parameter(Descriptor::kLastIndex));
- TNode<FixedArray> match_info = CAST(Parameter(Descriptor::kMatchInfo));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto regexp = Parameter<JSRegExp>(Descriptor::kRegExp);
+ auto subject_string = Parameter<String>(Descriptor::kString);
+ auto last_index = Parameter<Smi>(Descriptor::kLastIndex);
+ auto match_info = Parameter<FixedArray>(Descriptor::kMatchInfo);
+ auto context = Parameter<Context>(Descriptor::kContext);
CSA_ASSERT(this, TaggedIsPositiveSmi(last_index));
@@ -874,11 +887,11 @@ TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
}
TF_BUILTIN(RegExpExecInternal, RegExpBuiltinsAssembler) {
- TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kRegExp));
- TNode<String> string = CAST(Parameter(Descriptor::kString));
- TNode<Number> last_index = CAST(Parameter(Descriptor::kLastIndex));
- TNode<RegExpMatchInfo> match_info = CAST(Parameter(Descriptor::kMatchInfo));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto regexp = Parameter<JSRegExp>(Descriptor::kRegExp);
+ auto string = Parameter<String>(Descriptor::kString);
+ auto last_index = Parameter<Number>(Descriptor::kLastIndex);
+ auto match_info = Parameter<RegExpMatchInfo>(Descriptor::kMatchInfo);
+ auto context = Parameter<Context>(Descriptor::kContext);
CSA_ASSERT(this, IsNumberNormalized(last_index));
CSA_ASSERT(this, IsNumberPositive(last_index));
@@ -916,6 +929,7 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
CASE_FOR_FLAG(JSRegExp::kGlobal);
CASE_FOR_FLAG(JSRegExp::kIgnoreCase);
+ CASE_FOR_FLAG(JSRegExp::kLinear);
CASE_FOR_FLAG(JSRegExp::kMultiline);
CASE_FOR_FLAG(JSRegExp::kDotAll);
CASE_FOR_FLAG(JSRegExp::kUnicode);
@@ -948,6 +962,32 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
CASE_FOR_FLAG("unicode", JSRegExp::kUnicode);
CASE_FOR_FLAG("sticky", JSRegExp::kSticky);
#undef CASE_FOR_FLAG
+
+ {
+ Label next(this);
+
+ // Check the runtime value of FLAG_enable_experimental_regexp_engine
+ // first.
+ TNode<Word32T> flag_value = UncheckedCast<Word32T>(
+ Load(MachineType::Uint8(),
+ ExternalConstant(
+ ExternalReference::
+ address_of_enable_experimental_regexp_engine())));
+ GotoIf(Word32Equal(Word32And(flag_value, Int32Constant(0xFF)),
+ Int32Constant(0)),
+ &next);
+
+ const TNode<Object> flag = GetProperty(
+ context, regexp, isolate->factory()->InternalizeUtf8String("linear"));
+ Label if_isflagset(this);
+ BranchIfToBooleanIsTrue(flag, &if_isflagset, &next);
+ BIND(&if_isflagset);
+ var_length = Uint32Add(var_length.value(), Uint32Constant(1));
+ var_flags =
+ Signed(WordOr(var_flags.value(), IntPtrConstant(JSRegExp::kLinear)));
+ Goto(&next);
+ BIND(&next);
+ }
}
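
The added block makes the slow-path flags getter count a "linear" property, but only when the experimental engine is enabled at runtime; the enablement byte is read directly from the C++ flag variable through an ExternalReference. A sketch of the probe the Load above compiles to, with the address left as an assumption:

    #include <cstdint>

    bool ExperimentalRegexpEnabledSketch(const uint8_t* flag_address) {
      // One byte, masked and compared against zero, mirroring
      // Word32And(flag_value, 0xFF) == 0 in the CSA code.
      return (*flag_address & 0xFF) != 0;
    }
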
// Allocate a string of the required length and fill it with the corresponding
@@ -973,6 +1013,7 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
CASE_FOR_FLAG(JSRegExp::kGlobal, 'g');
CASE_FOR_FLAG(JSRegExp::kIgnoreCase, 'i');
+ CASE_FOR_FLAG(JSRegExp::kLinear, 'l');
CASE_FOR_FLAG(JSRegExp::kMultiline, 'm');
CASE_FOR_FLAG(JSRegExp::kDotAll, 's');
CASE_FOR_FLAG(JSRegExp::kUnicode, 'u');
@@ -1007,10 +1048,10 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpInitialize(
// ES#sec-regexp-pattern-flags
// RegExp ( pattern, flags )
TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
- TNode<Object> pattern = CAST(Parameter(Descriptor::kPattern));
- TNode<Object> flags = CAST(Parameter(Descriptor::kFlags));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto pattern = Parameter<Object>(Descriptor::kPattern);
+ auto flags = Parameter<Object>(Descriptor::kFlags);
+ auto new_target = Parameter<Object>(Descriptor::kJSNewTarget);
+ auto context = Parameter<Context>(Descriptor::kContext);
Isolate* isolate = this->isolate();
@@ -1128,10 +1169,10 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
// ES#sec-regexp.prototype.compile
// RegExp.prototype.compile ( pattern, flags )
TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
- TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> maybe_pattern = CAST(Parameter(Descriptor::kPattern));
- TNode<Object> maybe_flags = CAST(Parameter(Descriptor::kFlags));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto maybe_receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto maybe_pattern = Parameter<Object>(Descriptor::kPattern);
+ auto maybe_flags = Parameter<Object>(Descriptor::kFlags);
+ auto context = Parameter<Context>(Descriptor::kContext);
ThrowIfNotInstanceType(context, maybe_receiver, JS_REG_EXP_TYPE,
"RegExp.prototype.compile");
@@ -1188,11 +1229,24 @@ TNode<BoolT> RegExpBuiltinsAssembler::FastFlagGetter(TNode<JSRegExp> regexp,
TNode<BoolT> RegExpBuiltinsAssembler::SlowFlagGetter(TNode<Context> context,
TNode<Object> regexp,
JSRegExp::Flag flag) {
- Label out(this);
+ Label out(this), if_true(this), if_false(this);
TVARIABLE(BoolT, var_result);
+ // Only enabled based on a runtime flag.
+ if (flag == JSRegExp::kLinear) {
+ TNode<Word32T> flag_value = UncheckedCast<Word32T>(Load(
+ MachineType::Uint8(),
+ ExternalConstant(ExternalReference::
+ address_of_enable_experimental_regexp_engine())));
+ GotoIf(Word32Equal(Word32And(flag_value, Int32Constant(0xFF)),
+ Int32Constant(0)),
+ &if_false);
+ }
+
Handle<String> name;
switch (flag) {
+ case JSRegExp::kNone:
+ UNREACHABLE();
case JSRegExp::kGlobal:
name = isolate()->factory()->global_string();
break;
@@ -1211,13 +1265,12 @@ TNode<BoolT> RegExpBuiltinsAssembler::SlowFlagGetter(TNode<Context> context,
case JSRegExp::kUnicode:
name = isolate()->factory()->unicode_string();
break;
- default:
- UNREACHABLE();
+ case JSRegExp::kLinear:
+ name = isolate()->factory()->linear_string();
+ break;
}
TNode<Object> value = GetProperty(context, regexp, name);
-
- Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
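
Replacing default: UNREACHABLE() with explicit kNone and kLinear cases makes the switch exhaustive over JSRegExp::Flag, so a future flag that is not handled here becomes a compile-time -Wswitch diagnostic instead of a runtime crash. The idea in miniature:

    #include <cstdlib>

    enum class FlagSketch { kNone, kGlobal, kLinear };

    const char* FlagNameSketch(FlagSketch f) {
      switch (f) {  // no default: -Wswitch flags any unhandled enumerator
        case FlagSketch::kNone:
          std::abort();  // callers never ask for kNone
        case FlagSketch::kGlobal:
          return "global";
        case FlagSketch::kLinear:
          return "linear";
      }
      std::abort();  // not reached; keeps -Wreturn-type quiet
    }
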
@@ -1243,7 +1296,6 @@ TNode<BoolT> RegExpBuiltinsAssembler::FlagGetter(TNode<Context> context,
TNode<Number> RegExpBuiltinsAssembler::AdvanceStringIndex(
TNode<String> string, TNode<Number> index, TNode<BoolT> is_unicode,
bool is_fastpath) {
- CSA_ASSERT(this, IsString(string));
CSA_ASSERT(this, IsNumberNormalized(index));
if (is_fastpath) CSA_ASSERT(this, TaggedIsPositiveSmi(index));
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index 26cf4fe159..1a004c4939 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -19,10 +19,10 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
protected:
- using AssemblerFunction = Node* (CodeAssembler::*)(MachineType type,
- Node* base, Node* offset,
- Node* value,
- Node* value_high);
+ using AssemblerFunction =
+ Node* (CodeAssembler::*)(MachineType type, TNode<RawPtrT> base,
+ TNode<UintPtrT> offset, Node* value,
+ base::Optional<TNode<UintPtrT>> value_high);
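
The member-function-pointer type for the atomic helpers trades two raw Node* slots for typed TNodes plus a base::Optional high word: 32-bit operations simply pass base::nullopt, while 64-bit operations supply both halves. A sketch with std::optional standing in for base::Optional:

    #include <cstdint>
    #include <optional>

    uint64_t CombineHalvesSketch(uint32_t low, std::optional<uint32_t> high) {
      // An absent high word means a 32-bit operation.
      return (static_cast<uint64_t>(high.value_or(0)) << 32) | low;
    }
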
TNode<JSArrayBuffer> ValidateIntegerTypedArray(
TNode<Object> maybe_array, TNode<Context> context,
TNode<Int32T>* out_elements_kind, TNode<RawPtrT>* out_backing_store,
@@ -32,8 +32,8 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
TNode<Object> index,
TNode<Context> context);
- inline void DebugSanityCheckAtomicIndex(TNode<JSTypedArray> array,
- TNode<UintPtrT> index);
+ inline void DebugCheckAtomicIndex(TNode<JSTypedArray> array,
+ TNode<UintPtrT> index);
void AtomicBinopBuiltinCommon(TNode<Object> maybe_array, TNode<Object> index,
TNode<Object> value, TNode<Context> context,
@@ -127,7 +127,7 @@ TNode<UintPtrT> SharedArrayBufferBuiltinsAssembler::ValidateAtomicAccess(
return index_uintptr;
}
-void SharedArrayBufferBuiltinsAssembler::DebugSanityCheckAtomicIndex(
+void SharedArrayBufferBuiltinsAssembler::DebugCheckAtomicIndex(
TNode<JSTypedArray> array, TNode<UintPtrT> index) {
// In Debug mode, we re-validate the index as a sanity check because ToInteger
// above calls out to JavaScript. Atomics work on ArrayBuffers, which may be
@@ -165,9 +165,9 @@ TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromUnsigned64(
// https://tc39.es/ecma262/#sec-atomicload
TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
- TNode<Object> maybe_array = CAST(Parameter(Descriptor::kArray));
- TNode<Object> index = CAST(Parameter(Descriptor::kIndex));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto maybe_array = Parameter<Object>(Descriptor::kArray);
+ auto index = Parameter<Object>(Descriptor::kIndex);
+ auto context = Parameter<Context>(Descriptor::kContext);
// 1. Let buffer be ? ValidateIntegerTypedArray(typedArray).
Label detached(this);
@@ -258,10 +258,10 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
// https://tc39.es/ecma262/#sec-atomics.store
TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
- TNode<Object> maybe_array = CAST(Parameter(Descriptor::kArray));
- TNode<Object> index = CAST(Parameter(Descriptor::kIndex));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto maybe_array = Parameter<Object>(Descriptor::kArray);
+ auto index = Parameter<Object>(Descriptor::kIndex);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
// 1. Let buffer be ? ValidateIntegerTypedArray(typedArray).
Label detached(this);
@@ -295,7 +295,7 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
TNode<Word32T> value_word32 = TruncateTaggedToWord32(context, value_integer);
- DebugSanityCheckAtomicIndex(array, index_word);
+ DebugCheckAtomicIndex(array, index_word);
// Steps 8-13.
//
@@ -336,7 +336,7 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
// 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
GotoIf(IsDetachedBuffer(array_buffer), &detached);
- DebugSanityCheckAtomicIndex(array, index_word);
+ DebugCheckAtomicIndex(array, index_word);
TVARIABLE(UintPtrT, var_low);
TVARIABLE(UintPtrT, var_high);
@@ -360,10 +360,10 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
// https://tc39.es/ecma262/#sec-atomics.exchange
TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
- TNode<Object> maybe_array = CAST(Parameter(Descriptor::kArray));
- TNode<Object> index = CAST(Parameter(Descriptor::kIndex));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto maybe_array = Parameter<Object>(Descriptor::kArray);
+ auto index = Parameter<Object>(Descriptor::kIndex);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
// Inlines AtomicReadModifyWrite
// https://tc39.es/ecma262/#sec-atomicreadmodifywrite
@@ -405,7 +405,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
// buffer to become detached.
GotoIf(IsDetachedBuffer(array_buffer), &detached);
- DebugSanityCheckAtomicIndex(array, index_word);
+ DebugCheckAtomicIndex(array, index_word);
TNode<Word32T> value_word32 = TruncateTaggedToWord32(context, value_integer);
@@ -424,29 +424,31 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
BIND(&i8);
Return(SmiFromInt32(AtomicExchange(MachineType::Int8(), backing_store,
- index_word, value_word32)));
+ index_word, value_word32, base::nullopt)));
BIND(&u8);
Return(SmiFromInt32(AtomicExchange(MachineType::Uint8(), backing_store,
- index_word, value_word32)));
+ index_word, value_word32, base::nullopt)));
BIND(&i16);
Return(SmiFromInt32(AtomicExchange(MachineType::Int16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ WordShl(index_word, UintPtrConstant(1)),
+ value_word32, base::nullopt)));
BIND(&u16);
Return(SmiFromInt32(AtomicExchange(MachineType::Uint16(), backing_store,
- WordShl(index_word, 1), value_word32)));
+ WordShl(index_word, UintPtrConstant(1)),
+ value_word32, base::nullopt)));
BIND(&i32);
- Return(ChangeInt32ToTagged(AtomicExchange(MachineType::Int32(), backing_store,
- WordShl(index_word, 2),
- value_word32)));
+ Return(ChangeInt32ToTagged(AtomicExchange(
+ MachineType::Int32(), backing_store,
+ WordShl(index_word, UintPtrConstant(2)), value_word32, base::nullopt)));
BIND(&u32);
- Return(ChangeUint32ToTagged(
- AtomicExchange(MachineType::Uint32(), backing_store,
- WordShl(index_word, 2), value_word32)));
+ Return(ChangeUint32ToTagged(AtomicExchange(
+ MachineType::Uint32(), backing_store,
+ WordShl(index_word, UintPtrConstant(2)), value_word32, base::nullopt)));
BIND(&big);
// 4. If typedArray.[[ContentType]] is BigInt, let v be ? ToBigInt(value).
@@ -455,7 +457,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
// 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
GotoIf(IsDetachedBuffer(array_buffer), &detached);
- DebugSanityCheckAtomicIndex(array, index_word);
+ DebugCheckAtomicIndex(array, index_word);
TVARIABLE(UintPtrT, var_low);
TVARIABLE(UintPtrT, var_high);
@@ -469,14 +471,14 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
// This uses Uint64() intentionally: AtomicExchange is not implemented for
// Int64(), which is fine because the machine instruction only cares
// about words.
- Return(BigIntFromSigned64(AtomicExchange(MachineType::Uint64(), backing_store,
- WordShl(index_word, 3),
- var_low.value(), high)));
+ Return(BigIntFromSigned64(AtomicExchange(
+ MachineType::Uint64(), backing_store,
+ WordShl(index_word, UintPtrConstant(3)), var_low.value(), high)));
BIND(&u64);
- Return(BigIntFromUnsigned64(
- AtomicExchange(MachineType::Uint64(), backing_store,
- WordShl(index_word, 3), var_low.value(), high)));
+ Return(BigIntFromUnsigned64(AtomicExchange(
+ MachineType::Uint64(), backing_store,
+ WordShl(index_word, UintPtrConstant(3)), var_low.value(), high)));
// This shouldn't happen, we've already validated the type.
BIND(&other);
@@ -492,11 +494,11 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
// https://tc39.es/ecma262/#sec-atomics.compareexchange
TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
- TNode<Object> maybe_array = CAST(Parameter(Descriptor::kArray));
- TNode<Object> index = CAST(Parameter(Descriptor::kIndex));
- TNode<Object> old_value = CAST(Parameter(Descriptor::kOldValue));
- TNode<Object> new_value = CAST(Parameter(Descriptor::kNewValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto maybe_array = Parameter<Object>(Descriptor::kArray);
+ auto index = Parameter<Object>(Descriptor::kIndex);
+ auto old_value = Parameter<Object>(Descriptor::kOldValue);
+ auto new_value = Parameter<Object>(Descriptor::kNewValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
// 1. Let buffer be ? ValidateIntegerTypedArray(typedArray).
Label detached(this);
@@ -540,7 +542,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
// buffer to become detached.
GotoIf(IsDetachedBuffer(array_buffer), &detached);
- DebugSanityCheckAtomicIndex(array, index_word);
+ DebugCheckAtomicIndex(array, index_word);
TNode<Word32T> old_value_word32 =
TruncateTaggedToWord32(context, old_value_integer);
@@ -600,7 +602,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
// 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
GotoIf(IsDetachedBuffer(array_buffer), &detached);
- DebugSanityCheckAtomicIndex(array, index_word);
+ DebugCheckAtomicIndex(array, index_word);
TVARIABLE(UintPtrT, var_old_low);
TVARIABLE(UintPtrT, var_old_high);
@@ -640,15 +642,15 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
}
}
-#define BINOP_BUILTIN(op, method_name) \
- TF_BUILTIN(Atomics##op, SharedArrayBufferBuiltinsAssembler) { \
- TNode<Object> array = CAST(Parameter(Descriptor::kArray)); \
- TNode<Object> index = CAST(Parameter(Descriptor::kIndex)); \
- TNode<Object> value = CAST(Parameter(Descriptor::kValue)); \
- TNode<Context> context = CAST(Parameter(Descriptor::kContext)); \
- AtomicBinopBuiltinCommon(array, index, value, context, \
- &CodeAssembler::Atomic##op, \
- Runtime::kAtomics##op, method_name); \
+#define BINOP_BUILTIN(op, method_name) \
+ TF_BUILTIN(Atomics##op, SharedArrayBufferBuiltinsAssembler) { \
+ auto array = Parameter<Object>(Descriptor::kArray); \
+ auto index = Parameter<Object>(Descriptor::kIndex); \
+ auto value = Parameter<Object>(Descriptor::kValue); \
+ auto context = Parameter<Context>(Descriptor::kContext); \
+ AtomicBinopBuiltinCommon(array, index, value, context, \
+ &CodeAssembler::Atomic##op, \
+ Runtime::kAtomics##op, method_name); \
}
// https://tc39.es/ecma262/#sec-atomics.add
BINOP_BUILTIN(Add, "Atomics.add")
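
BINOP_BUILTIN stamps out one TF_BUILTIN per read-modify-write operation; the hunk above only rewraps it for the Parameter<T> spelling. The token-pasting technique itself, outside CSA and with no real atomicity (illustration only):

    #define BINOP_SKETCH(op, expr)            \
      int Atomic##op(int* cell, int value) {  \
        int old = *cell;                      \
        *cell = (expr);                       \
        return old;                           \
      }
    BINOP_SKETCH(Add, old + value)
    BINOP_SKETCH(Sub, old - value)
    #undef BINOP_SKETCH
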
@@ -703,7 +705,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
// buffer to become detached.
GotoIf(IsDetachedBuffer(array_buffer), &detached);
- DebugSanityCheckAtomicIndex(array, index_word);
+ DebugCheckAtomicIndex(array, index_word);
TNode<Word32T> value_word32 = TruncateTaggedToWord32(context, value_integer);
@@ -721,33 +723,29 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
arraysize(case_labels));
BIND(&i8);
- Return(SmiFromInt32((this->*function)(MachineType::Int8(), backing_store,
- index_word, value_word32, nullptr)));
-
+ Return(
+ SmiFromInt32((this->*function)(MachineType::Int8(), backing_store,
+ index_word, value_word32, base::nullopt)));
BIND(&u8);
- Return(SmiFromInt32((this->*function)(MachineType::Uint8(), backing_store,
- index_word, value_word32, nullptr)));
-
+ Return(
+ SmiFromInt32((this->*function)(MachineType::Uint8(), backing_store,
+ index_word, value_word32, base::nullopt)));
BIND(&i16);
Return(SmiFromInt32((this->*function)(MachineType::Int16(), backing_store,
- WordShl(index_word, 1), value_word32,
- nullptr)));
-
+ WordShl(index_word, UintPtrConstant(1)),
+ value_word32, base::nullopt)));
BIND(&u16);
Return(SmiFromInt32((this->*function)(MachineType::Uint16(), backing_store,
- WordShl(index_word, 1), value_word32,
- nullptr)));
-
+ WordShl(index_word, UintPtrConstant(1)),
+ value_word32, base::nullopt)));
BIND(&i32);
- Return(ChangeInt32ToTagged(
- (this->*function)(MachineType::Int32(), backing_store,
- WordShl(index_word, 2), value_word32, nullptr)));
-
+ Return(ChangeInt32ToTagged((this->*function)(
+ MachineType::Int32(), backing_store,
+ WordShl(index_word, UintPtrConstant(2)), value_word32, base::nullopt)));
BIND(&u32);
- Return(ChangeUint32ToTagged(
- (this->*function)(MachineType::Uint32(), backing_store,
- WordShl(index_word, 2), value_word32, nullptr)));
-
+ Return(ChangeUint32ToTagged((this->*function)(
+ MachineType::Uint32(), backing_store,
+ WordShl(index_word, UintPtrConstant(2)), value_word32, base::nullopt)));
BIND(&big);
// 4. If typedArray.[[ContentType]] is BigInt, let v be ? ToBigInt(value).
TNode<BigInt> value_bigint = ToBigInt(context, value);
@@ -755,7 +753,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
// 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
GotoIf(IsDetachedBuffer(array_buffer), &detached);
- DebugSanityCheckAtomicIndex(array, index_word);
+ DebugCheckAtomicIndex(array, index_word);
TVARIABLE(UintPtrT, var_low);
TVARIABLE(UintPtrT, var_high);
@@ -769,15 +767,13 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
// This uses Uint64() intentionally: Atomic* ops are not implemented for
// Int64(), which is fine because the machine instructions only care
// about words.
- Return(BigIntFromSigned64(
- (this->*function)(MachineType::Uint64(), backing_store,
- WordShl(index_word, 3), var_low.value(), high)));
-
+ Return(BigIntFromSigned64((this->*function)(
+ MachineType::Uint64(), backing_store,
+ WordShl(index_word, UintPtrConstant(3)), var_low.value(), high)));
BIND(&u64);
- Return(BigIntFromUnsigned64(
- (this->*function)(MachineType::Uint64(), backing_store,
- WordShl(index_word, 3), var_low.value(), high)));
-
+ Return(BigIntFromUnsigned64((this->*function)(
+ MachineType::Uint64(), backing_store,
+ WordShl(index_word, UintPtrConstant(3)), var_low.value(), high)));
// This shouldn't happen, we've already validated the type.
BIND(&other);
Unreachable();
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 9920369136..aa982a3c1b 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -46,8 +46,7 @@ TNode<RawPtrT> StringBuiltinsAssembler::DirectStringData(
Word32And(string_instance_type,
Int32Constant(kUncachedExternalStringMask)),
Int32Constant(kUncachedExternalStringTag)));
- var_data =
- DecodeExternalPointer(LoadExternalStringResourceData(CAST(string)));
+ var_data = LoadExternalStringResourceDataPtr(CAST(string));
Goto(&if_join);
}
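
DirectStringData stops decoding the external pointer inline; LoadExternalStringResourceDataPtr fuses the load and the decode behind one accessor, part of the external-pointer (heap sandbox) plumbing this release reworks. The shape of the refactor, with a stand-in encoding that is purely illustrative:

    #include <cstdint>

    using EncodedPtr = uintptr_t;

    // Stand-in decode; the real scheme stays private to the sandbox build.
    inline uintptr_t DecodeSketch(EncodedPtr p) { return p ^ 0xA5A5u; }

    inline uintptr_t LoadResourceDataPtrSketch(EncodedPtr stored) {
      return DecodeSketch(stored);  // load + decode fused in one call
    }
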
@@ -344,9 +343,11 @@ TNode<String> StringBuiltinsAssembler::AllocateConsString(TNode<Uint32T> length,
return CAST(result);
}
-TNode<String> StringBuiltinsAssembler::StringAdd(SloppyTNode<Context> context,
- TNode<String> left,
- TNode<String> right) {
+TNode<String> StringBuiltinsAssembler::StringAdd(
+ TNode<ContextOrEmptyContext> context, TNode<String> left,
+ TNode<String> right) {
+ CSA_ASSERT(this, IsZeroOrContext(context));
+
TVARIABLE(String, result);
Label check_right(this), runtime(this, Label::kDeferred), cons(this),
done(this, &result), done_native(this, &result);
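
StringAdd now takes a ContextOrEmptyContext and asserts IsZeroOrContext: call sites lowered without a JS context pass Smi zero, which works as long as every fast path either succeeds or bails out before the runtime call that actually needs a context. The contract in miniature, values illustrative:

    #include <stdexcept>
    #include <string>

    std::string StringAddSketch(const std::string& left, const std::string& right,
                                void* context_or_null) {
      if (left.size() + right.size() < 13) return left + right;  // no context needed
      if (context_or_null == nullptr) throw std::runtime_error("needs a context");
      return left + right;  // stands in for the runtime fallback
    }
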
@@ -540,16 +541,18 @@ TNode<String> StringBuiltinsAssembler::DerefIndirectString(
}
TF_BUILTIN(StringAdd_CheckNone, StringBuiltinsAssembler) {
- TNode<String> left = CAST(Parameter(Descriptor::kLeft));
- TNode<String> right = CAST(Parameter(Descriptor::kRight));
- Node* context = Parameter(Descriptor::kContext);
+ auto left = Parameter<String>(Descriptor::kLeft);
+ auto right = Parameter<String>(Descriptor::kRight);
+ TNode<ContextOrEmptyContext> context =
+ UncheckedParameter<ContextOrEmptyContext>(Descriptor::kContext);
+ CSA_ASSERT(this, IsZeroOrContext(context));
Return(StringAdd(context, left, right));
}
TF_BUILTIN(SubString, StringBuiltinsAssembler) {
- TNode<String> string = CAST(Parameter(Descriptor::kString));
- TNode<Smi> from = CAST(Parameter(Descriptor::kFrom));
- TNode<Smi> to = CAST(Parameter(Descriptor::kTo));
+ auto string = Parameter<String>(Descriptor::kString);
+ auto from = Parameter<Smi>(Descriptor::kFrom);
+ auto to = Parameter<Smi>(Descriptor::kTo);
Return(SubString(string, SmiUntag(from), SmiUntag(to)));
}
@@ -723,40 +726,39 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(
}
TF_BUILTIN(StringEqual, StringBuiltinsAssembler) {
- TNode<String> left = CAST(Parameter(Descriptor::kLeft));
- TNode<String> right = CAST(Parameter(Descriptor::kRight));
+ auto left = Parameter<String>(Descriptor::kLeft);
+ auto right = Parameter<String>(Descriptor::kRight);
GenerateStringEqual(left, right);
}
TF_BUILTIN(StringLessThan, StringBuiltinsAssembler) {
- TNode<String> left = CAST(Parameter(Descriptor::kLeft));
- TNode<String> right = CAST(Parameter(Descriptor::kRight));
+ auto left = Parameter<String>(Descriptor::kLeft);
+ auto right = Parameter<String>(Descriptor::kRight);
GenerateStringRelationalComparison(left, right, Operation::kLessThan);
}
TF_BUILTIN(StringLessThanOrEqual, StringBuiltinsAssembler) {
- TNode<String> left = CAST(Parameter(Descriptor::kLeft));
- TNode<String> right = CAST(Parameter(Descriptor::kRight));
+ auto left = Parameter<String>(Descriptor::kLeft);
+ auto right = Parameter<String>(Descriptor::kRight);
GenerateStringRelationalComparison(left, right, Operation::kLessThanOrEqual);
}
TF_BUILTIN(StringGreaterThan, StringBuiltinsAssembler) {
- TNode<String> left = CAST(Parameter(Descriptor::kLeft));
- TNode<String> right = CAST(Parameter(Descriptor::kRight));
+ auto left = Parameter<String>(Descriptor::kLeft);
+ auto right = Parameter<String>(Descriptor::kRight);
GenerateStringRelationalComparison(left, right, Operation::kGreaterThan);
}
TF_BUILTIN(StringGreaterThanOrEqual, StringBuiltinsAssembler) {
- TNode<String> left = CAST(Parameter(Descriptor::kLeft));
- TNode<String> right = CAST(Parameter(Descriptor::kRight));
+ auto left = Parameter<String>(Descriptor::kLeft);
+ auto right = Parameter<String>(Descriptor::kRight);
GenerateStringRelationalComparison(left, right,
Operation::kGreaterThanOrEqual);
}
TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) {
- TNode<String> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<IntPtrT> position =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kPosition));
+ auto receiver = Parameter<String>(Descriptor::kReceiver);
+ auto position = UncheckedParameter<IntPtrT>(Descriptor::kPosition);
// TODO(sigurds) Figure out if passing length as argument pays off.
TNode<IntPtrT> length = LoadStringLengthAsWord(receiver);
@@ -770,9 +772,8 @@ TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) {
}
TF_BUILTIN(StringFromCodePointAt, StringBuiltinsAssembler) {
- TNode<String> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<IntPtrT> position =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kPosition));
+ auto receiver = Parameter<String>(Descriptor::kReceiver);
+ auto position = UncheckedParameter<IntPtrT>(Descriptor::kPosition);
// TODO(sigurds) Figure out if passing length as argument pays off.
TNode<IntPtrT> length = LoadStringLengthAsWord(receiver);
@@ -791,9 +792,8 @@ TF_BUILTIN(StringFromCodePointAt, StringBuiltinsAssembler) {
TF_BUILTIN(StringFromCharCode, StringBuiltinsAssembler) {
// TODO(ishell): use constants from Descriptor once the JSFunction linkage
// arguments are reordered.
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
+ auto context = Parameter<Context>(Descriptor::kContext);
CodeStubArguments arguments(this, argc);
// Check if we have exactly one argument (plus the implicit receiver), i.e.
@@ -1064,9 +1064,9 @@ void StringBuiltinsAssembler::StringIndexOf(
// #sec-string.prototype.indexof
// Unchecked helper for builtins lowering.
TF_BUILTIN(StringIndexOf, StringBuiltinsAssembler) {
- TNode<String> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<String> search_string = CAST(Parameter(Descriptor::kSearchString));
- TNode<Smi> position = CAST(Parameter(Descriptor::kPosition));
+ auto receiver = Parameter<String>(Descriptor::kReceiver);
+ auto search_string = Parameter<String>(Descriptor::kSearchString);
+ auto position = Parameter<Smi>(Descriptor::kPosition);
StringIndexOf(receiver, search_string, position,
[this](TNode<Smi> result) { this->Return(result); });
}
@@ -1075,8 +1075,8 @@ TF_BUILTIN(StringIndexOf, StringBuiltinsAssembler) {
// #sec-string.prototype.includes
TF_BUILTIN(StringPrototypeIncludes, StringIncludesIndexOfAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
Generate(kIncludes, argc, context);
}
@@ -1084,8 +1084,8 @@ TF_BUILTIN(StringPrototypeIncludes, StringIncludesIndexOfAssembler) {
// #sec-string.prototype.indexof
TF_BUILTIN(StringPrototypeIndexOf, StringIncludesIndexOfAssembler) {
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
+ auto context = Parameter<Context>(Descriptor::kContext);
Generate(kIndexOf, argc, context);
}
@@ -1276,10 +1276,10 @@ TNode<String> StringBuiltinsAssembler::GetSubstitution(
TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
Label out(this);
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> search = CAST(Parameter(Descriptor::kSearch));
- const TNode<Object> replace = CAST(Parameter(Descriptor::kReplace));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ const auto search = Parameter<Object>(Descriptor::kSearch);
+ const auto replace = Parameter<Object>(Descriptor::kReplace);
+ auto context = Parameter<Context>(Descriptor::kContext);
const TNode<Smi> smi_zero = SmiConstant(0);
@@ -1503,9 +1503,9 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
// ES6 #sec-string.prototype.match
TF_BUILTIN(StringPrototypeMatch, StringMatchSearchAssembler) {
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> maybe_regexp = CAST(Parameter(Descriptor::kRegexp));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto maybe_regexp = Parameter<Object>(Descriptor::kRegexp);
+ auto context = Parameter<Context>(Descriptor::kContext);
Generate(kMatch, "String.prototype.match", receiver, maybe_regexp, context);
}
@@ -1514,9 +1514,9 @@ TF_BUILTIN(StringPrototypeMatch, StringMatchSearchAssembler) {
TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
char const* method_name = "String.prototype.matchAll";
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> maybe_regexp = CAST(Parameter(Descriptor::kRegexp));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto maybe_regexp = Parameter<Object>(Descriptor::kRegexp);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
TNode<NativeContext> native_context = LoadNativeContext(context);
// 1. Let O be ? RequireObjectCoercible(this value).
@@ -1611,9 +1611,9 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
// ES6 #sec-string.prototype.search
TF_BUILTIN(StringPrototypeSearch, StringMatchSearchAssembler) {
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> maybe_regexp = CAST(Parameter(Descriptor::kRegexp));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto maybe_regexp = Parameter<Object>(Descriptor::kRegexp);
+ auto context = Parameter<Context>(Descriptor::kContext);
Generate(kSearch, "String.prototype.search", receiver, maybe_regexp, context);
}
@@ -1703,13 +1703,13 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
const int kLimitArg = 1;
const TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
const TNode<Object> separator = args.GetOptionalArgumentValue(kSeparatorArg);
const TNode<Object> limit = args.GetOptionalArgumentValue(kLimitArg);
- TNode<NativeContext> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<NativeContext>(Descriptor::kContext);
TNode<Smi> smi_zero = SmiConstant(0);
@@ -1800,185 +1800,13 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
}
TF_BUILTIN(StringSubstring, StringBuiltinsAssembler) {
- TNode<String> string = CAST(Parameter(Descriptor::kString));
- TNode<IntPtrT> from = UncheckedCast<IntPtrT>(Parameter(Descriptor::kFrom));
- TNode<IntPtrT> to = UncheckedCast<IntPtrT>(Parameter(Descriptor::kTo));
+ auto string = Parameter<String>(Descriptor::kString);
+ auto from = UncheckedParameter<IntPtrT>(Descriptor::kFrom);
+ auto to = UncheckedParameter<IntPtrT>(Descriptor::kTo);
Return(SubString(string, from, to));
}
-// ES6 #sec-string.prototype.trim
-TF_BUILTIN(StringPrototypeTrim, StringTrimAssembler) {
- TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- Generate(String::kTrim, "String.prototype.trim", argc, context);
-}
-
-// https://github.com/tc39/proposal-string-left-right-trim
-TF_BUILTIN(StringPrototypeTrimStart, StringTrimAssembler) {
- TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- Generate(String::kTrimStart, "String.prototype.trimLeft", argc, context);
-}
-
-// https://github.com/tc39/proposal-string-left-right-trim
-TF_BUILTIN(StringPrototypeTrimEnd, StringTrimAssembler) {
- TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- Generate(String::kTrimEnd, "String.prototype.trimRight", argc, context);
-}
-
-void StringTrimAssembler::Generate(String::TrimMode mode,
- const char* method_name, TNode<IntPtrT> argc,
- TNode<Context> context) {
- Label return_emptystring(this), if_runtime(this);
-
- CodeStubArguments arguments(this, argc);
- TNode<Object> receiver = arguments.GetReceiver();
-
- // Check that {receiver} is coercible to Object and convert it to a String.
- const TNode<String> string = ToThisString(context, receiver, method_name);
- const TNode<IntPtrT> string_length = LoadStringLengthAsWord(string);
-
- ToDirectStringAssembler to_direct(state(), string);
- to_direct.TryToDirect(&if_runtime);
- const TNode<RawPtrT> string_data = to_direct.PointerToData(&if_runtime);
- const TNode<Int32T> instance_type = to_direct.instance_type();
- const TNode<BoolT> is_stringonebyte =
- IsOneByteStringInstanceType(instance_type);
- const TNode<IntPtrT> string_data_offset = to_direct.offset();
-
- TVARIABLE(IntPtrT, var_start, IntPtrConstant(0));
- TVARIABLE(IntPtrT, var_end, IntPtrSub(string_length, IntPtrConstant(1)));
-
- if (mode == String::kTrimStart || mode == String::kTrim) {
- ScanForNonWhiteSpaceOrLineTerminator(string_data, string_data_offset,
- is_stringonebyte, &var_start,
- string_length, 1, &return_emptystring);
- }
- if (mode == String::kTrimEnd || mode == String::kTrim) {
- ScanForNonWhiteSpaceOrLineTerminator(
- string_data, string_data_offset, is_stringonebyte, &var_end,
- IntPtrConstant(-1), -1, &return_emptystring);
- }
-
- arguments.PopAndReturn(
- SubString(string, var_start.value(),
- IntPtrAdd(var_end.value(), IntPtrConstant(1))));
-
- BIND(&if_runtime);
- arguments.PopAndReturn(
- CallRuntime(Runtime::kStringTrim, context, string, SmiConstant(mode)));
-
- BIND(&return_emptystring);
- arguments.PopAndReturn(EmptyStringConstant());
-}
-
-void StringTrimAssembler::ScanForNonWhiteSpaceOrLineTerminator(
- const TNode<RawPtrT> string_data, const TNode<IntPtrT> string_data_offset,
- const TNode<BoolT> is_stringonebyte, TVariable<IntPtrT>* const var_index,
- const TNode<IntPtrT> end, int increment, Label* const if_none_found) {
- Label if_stringisonebyte(this), out(this);
-
- GotoIf(is_stringonebyte, &if_stringisonebyte);
-
- // Two Byte String
- BuildLoop<Uint16T>(
- var_index, end, increment, if_none_found, &out,
- [&](const TNode<IntPtrT> index) {
- return Load<Uint16T>(
- string_data,
- WordShl(IntPtrAdd(index, string_data_offset), IntPtrConstant(1)));
- });
-
- BIND(&if_stringisonebyte);
- BuildLoop<Uint8T>(var_index, end, increment, if_none_found, &out,
- [&](const TNode<IntPtrT> index) {
- return Load<Uint8T>(string_data,
- IntPtrAdd(index, string_data_offset));
- });
-
- BIND(&out);
-}
-
-template <typename T>
-void StringTrimAssembler::BuildLoop(
- TVariable<IntPtrT>* const var_index, const TNode<IntPtrT> end,
- int increment, Label* const if_none_found, Label* const out,
- const std::function<TNode<T>(const TNode<IntPtrT>)>& get_character) {
- Label loop(this, var_index);
- Goto(&loop);
- BIND(&loop);
- {
- TNode<IntPtrT> index = var_index->value();
- GotoIf(IntPtrEqual(index, end), if_none_found);
- GotoIfNotWhiteSpaceOrLineTerminator(
- UncheckedCast<Uint32T>(get_character(index)), out);
- Increment(var_index, increment);
- Goto(&loop);
- }
-}
-
-void StringTrimAssembler::GotoIfNotWhiteSpaceOrLineTerminator(
- const TNode<Word32T> char_code, Label* const if_not_whitespace) {
- Label out(this);
-
- // 0x0020 - SPACE (Intentionally out of order to fast path a common case)
- GotoIf(Word32Equal(char_code, Int32Constant(0x0020)), &out);
-
- // 0x0009 - HORIZONTAL TAB
- GotoIf(Uint32LessThan(char_code, Int32Constant(0x0009)), if_not_whitespace);
- // 0x000A - LINE FEED OR NEW LINE
- // 0x000B - VERTICAL TAB
- // 0x000C - FORMFEED
- // 0x000D - CARRIAGE RETURN
- GotoIf(Uint32LessThanOrEqual(char_code, Int32Constant(0x000D)), &out);
-
- // Common Non-whitespace characters
- GotoIf(Uint32LessThan(char_code, Int32Constant(0x00A0)), if_not_whitespace);
-
- // 0x00A0 - NO-BREAK SPACE
- GotoIf(Word32Equal(char_code, Int32Constant(0x00A0)), &out);
-
- // 0x1680 - Ogham Space Mark
- GotoIf(Word32Equal(char_code, Int32Constant(0x1680)), &out);
-
- // 0x2000 - EN QUAD
- GotoIf(Uint32LessThan(char_code, Int32Constant(0x2000)), if_not_whitespace);
- // 0x2001 - EM QUAD
- // 0x2002 - EN SPACE
- // 0x2003 - EM SPACE
- // 0x2004 - THREE-PER-EM SPACE
- // 0x2005 - FOUR-PER-EM SPACE
- // 0x2006 - SIX-PER-EM SPACE
- // 0x2007 - FIGURE SPACE
- // 0x2008 - PUNCTUATION SPACE
- // 0x2009 - THIN SPACE
- // 0x200A - HAIR SPACE
- GotoIf(Uint32LessThanOrEqual(char_code, Int32Constant(0x200A)), &out);
-
- // 0x2028 - LINE SEPARATOR
- GotoIf(Word32Equal(char_code, Int32Constant(0x2028)), &out);
- // 0x2029 - PARAGRAPH SEPARATOR
- GotoIf(Word32Equal(char_code, Int32Constant(0x2029)), &out);
- // 0x202F - NARROW NO-BREAK SPACE
- GotoIf(Word32Equal(char_code, Int32Constant(0x202F)), &out);
- // 0x205F - MEDIUM MATHEMATICAL SPACE
- GotoIf(Word32Equal(char_code, Int32Constant(0x205F)), &out);
- // 0xFEFF - BYTE ORDER MARK
- GotoIf(Word32Equal(char_code, Int32Constant(0xFEFF)), &out);
- // 0x3000 - IDEOGRAPHIC SPACE
- Branch(Word32Equal(char_code, Int32Constant(0x3000)), &out,
- if_not_whitespace);
-
- BIND(&out);
-}
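
The trim builtins and their whitespace scanner leave this file wholesale in this commit (the replacement is not part of this hunk). For reference, the deleted classifier's code-point logic translates directly into plain C++:

    #include <cstdint>

    bool IsWhiteSpaceOrLineTerminatorSketch(uint32_t c) {
      if (c == 0x0020) return true;                 // SPACE, fast-pathed first
      if (c >= 0x0009 && c <= 0x000D) return true;  // TAB, LF, VT, FF, CR
      if (c < 0x00A0) return false;                 // common non-whitespace
      if (c >= 0x2000 && c <= 0x200A) return true;  // EN QUAD .. HAIR SPACE
      switch (c) {
        case 0x00A0:  // NO-BREAK SPACE
        case 0x1680:  // OGHAM SPACE MARK
        case 0x2028:  // LINE SEPARATOR
        case 0x2029:  // PARAGRAPH SEPARATOR
        case 0x202F:  // NARROW NO-BREAK SPACE
        case 0x205F:  // MEDIUM MATHEMATICAL SPACE
        case 0xFEFF:  // BYTE ORDER MARK
        case 0x3000:  // IDEOGRAPHIC SPACE
          return true;
        default:
          return false;
      }
    }
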
// Return the |word32| codepoint at {index}. Supports SeqStrings and
// ExternalStrings.
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index 2b4dadbbb0..5e3ee93f17 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -113,8 +113,8 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
TNode<String> AllocateConsString(TNode<Uint32T> length, TNode<String> left,
TNode<String> right);
- TNode<String> StringAdd(SloppyTNode<Context> context, TNode<String> left,
- TNode<String> right);
+ TNode<String> StringAdd(TNode<ContextOrEmptyContext> context,
+ TNode<String> left, TNode<String> right);
// Check if |string| is an indirect (thin or flat cons) string type that can
// be dereferenced by DerefIndirectString.
@@ -184,30 +184,6 @@ class StringIncludesIndexOfAssembler : public StringBuiltinsAssembler {
TNode<Context> context);
};
-class StringTrimAssembler : public StringBuiltinsAssembler {
- public:
- explicit StringTrimAssembler(compiler::CodeAssemblerState* state)
- : StringBuiltinsAssembler(state) {}
-
- V8_EXPORT_PRIVATE void GotoIfNotWhiteSpaceOrLineTerminator(
- const TNode<Word32T> char_code, Label* const if_not_whitespace);
-
- protected:
- void Generate(String::TrimMode mode, const char* method, TNode<IntPtrT> argc,
- TNode<Context> context);
-
- void ScanForNonWhiteSpaceOrLineTerminator(
- const TNode<RawPtrT> string_data, const TNode<IntPtrT> string_data_offset,
- const TNode<BoolT> is_stringonebyte, TVariable<IntPtrT>* const var_index,
- const TNode<IntPtrT> end, int increment, Label* const if_none_found);
-
- template <typename T>
- void BuildLoop(
- TVariable<IntPtrT>* const var_index, const TNode<IntPtrT> end,
- int increment, Label* const if_none_found, Label* const out,
- const std::function<TNode<T>(const TNode<IntPtrT>)>& get_character);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index df5ba93a59..8994211756 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -150,7 +150,7 @@ BUILTIN(StringPrototypeLocaleCompare) {
isolate, str1, str2, args.atOrUndefined(isolate, 2),
args.atOrUndefined(isolate, 3), method));
#else
- DCHECK_EQ(2, args.length());
+ DCHECK_LE(2, args.length());
TO_THIS_STRING(str1, method);
Handle<String> str2;
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 26c67cfc12..3bf0c6e73e 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -25,6 +25,7 @@ void TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields(
TNode<JSTypedArray> holder) {
for (int offset = JSTypedArray::kHeaderSize;
offset < JSTypedArray::kSizeWithEmbedderFields; offset += kTaggedSize) {
+ // TODO(v8:10391, saelo): Handle external pointers in EmbedderDataSlot
StoreObjectField(holder, offset, SmiConstant(0));
}
}
@@ -65,31 +66,32 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kByteLengthOffset,
byte_length);
- StoreJSArrayBufferBackingStore(
- buffer,
- EncodeExternalPointer(ReinterpretCast<RawPtrT>(IntPtrConstant(0))));
- StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kExtensionOffset,
- IntPtrConstant(0));
+ InitializeExternalPointerField(buffer, JSArrayBuffer::kBackingStoreOffset,
+ PointerConstant(nullptr),
+ kArrayBufferBackingStoreTag);
+ StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kExtensionOffset,
+ IntPtrConstant(0));
for (int offset = JSArrayBuffer::kHeaderSize;
offset < JSArrayBuffer::kSizeWithEmbedderFields; offset += kTaggedSize) {
+ // TODO(v8:10391, saelo): Handle external pointers in EmbedderDataSlot
StoreObjectFieldNoWriteBarrier(buffer, offset, SmiConstant(0));
}
return buffer;
}
TF_BUILTIN(TypedArrayBaseConstructor, TypedArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto context = Parameter<Context>(Descriptor::kContext);
ThrowTypeError(context, MessageTemplate::kConstructAbstractClass,
"TypedArray");
}
// ES #sec-typedarray-constructors
TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<JSFunction> target = CAST(Parameter(Descriptor::kJSTarget));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto target = Parameter<JSFunction>(Descriptor::kJSTarget);
+ auto new_target = Parameter<Object>(Descriptor::kJSNewTarget);
TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Object> arg1 = args.GetOptionalArgumentValue(0);
TNode<Object> arg2 = args.GetOptionalArgumentValue(1);
@@ -116,8 +118,8 @@ TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
// ES6 #sec-get-%typedarray%.prototype.bytelength
TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) {
const char* const kMethodName = "get TypedArray.prototype.byteLength";
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
@@ -134,8 +136,8 @@ TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) {
// ES6 #sec-get-%typedarray%.prototype.byteoffset
TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) {
const char* const kMethodName = "get TypedArray.prototype.byteOffset";
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
@@ -152,8 +154,8 @@ TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) {
// ES6 #sec-get-%typedarray%.prototype.length
TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) {
const char* const kMethodName = "get TypedArray.prototype.length";
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
@@ -355,6 +357,12 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
BIND(&next);
}
+void TypedArrayBuiltinsAssembler::AllocateJSTypedArrayExternalPointerEntry(
+ TNode<JSTypedArray> holder) {
+ InitializeExternalPointerField(
+ holder, IntPtrConstant(JSTypedArray::kExternalPointerOffset));
+}
+
void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
TNode<JSTypedArray> holder, TNode<ByteArray> base, TNode<UintPtrT> offset) {
offset = UintPtrAdd(UintPtrConstant(ByteArray::kHeaderSize - kHeapObjectTag),
@@ -373,9 +381,8 @@ void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
offset = Unsigned(IntPtrAdd(offset, isolate_root));
}
- StoreObjectField(holder, JSTypedArray::kBasePointerOffset, base);
- StoreJSTypedArrayExternalPointer(
- holder, EncodeExternalPointer(ReinterpretCast<RawPtrT>(offset)));
+ StoreJSTypedArrayBasePointer(holder, base);
+ StoreJSTypedArrayExternalPointerPtr(holder, ReinterpretCast<RawPtrT>(offset));
}
void TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
@@ -384,7 +391,7 @@ void TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
SmiConstant(0));
base = RawPtrAdd(base, Signed(offset));
- StoreJSTypedArrayExternalPointer(holder, EncodeExternalPointer(base));
+ StoreJSTypedArrayExternalPointerPtr(holder, base);
}
void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromNumeric(
@@ -441,7 +448,7 @@ void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged(
// ES #sec-get-%typedarray%.prototype-@@tostringtag
TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) {
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
Label if_receiverisheapobject(this), return_undefined(this);
Branch(TaggedIsSmi(receiver), &return_undefined, &if_receiverisheapobject);
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index 780c36123e..fcaa2f2a65 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -77,6 +77,7 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
void DispatchTypedArrayByElementsKind(
TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function);
+ void AllocateJSTypedArrayExternalPointerEntry(TNode<JSTypedArray> holder);
void SetJSTypedArrayOnHeapDataPtr(TNode<JSTypedArray> holder,
TNode<ByteArray> base,
TNode<UintPtrT> offset);
diff --git a/deps/v8/src/builtins/builtins-utils-gen.h b/deps/v8/src/builtins/builtins-utils-gen.h
index f9e2ba74fa..3a812b62b8 100644
--- a/deps/v8/src/builtins/builtins-utils-gen.h
+++ b/deps/v8/src/builtins/builtins-utils-gen.h
@@ -5,6 +5,7 @@
#ifndef V8_BUILTINS_BUILTINS_UTILS_GEN_H_
#define V8_BUILTINS_BUILTINS_UTILS_GEN_H_
+#include "include/cppgc/source-location.h"
#include "src/builtins/builtins-descriptors.h"
namespace v8 {
@@ -26,27 +27,35 @@ class CodeAssemblerState;
//
// In the body of the builtin function the arguments can be accessed
// as "Parameter(n)".
-#define TF_BUILTIN(Name, AssemblerBase) \
- class Name##Assembler : public AssemblerBase { \
- public: \
- using Descriptor = Builtin_##Name##_InterfaceDescriptor; \
- \
- explicit Name##Assembler(compiler::CodeAssemblerState* state) \
- : AssemblerBase(state) {} \
- void Generate##Name##Impl(); \
- \
- Node* Parameter(Descriptor::ParameterIndices index) { \
- return CodeAssembler::Parameter(static_cast<int>(index)); \
- } \
- }; \
- void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \
- Name##Assembler assembler(state); \
- state->SetInitialDebugInformation(#Name, __FILE__, __LINE__); \
- if (Builtins::KindOf(Builtins::k##Name) == Builtins::TFJ) { \
- assembler.PerformStackCheck(assembler.GetJSContextParameter()); \
- } \
- assembler.Generate##Name##Impl(); \
- } \
+#define TF_BUILTIN(Name, AssemblerBase) \
+ class Name##Assembler : public AssemblerBase { \
+ public: \
+ using Descriptor = Builtin_##Name##_InterfaceDescriptor; \
+ \
+ explicit Name##Assembler(compiler::CodeAssemblerState* state) \
+ : AssemblerBase(state) {} \
+ void Generate##Name##Impl(); \
+ \
+ template <class T> \
+ TNode<T> Parameter( \
+ Descriptor::ParameterIndices index, \
+ cppgc::SourceLocation loc = cppgc::SourceLocation::Current()) { \
+ return CodeAssembler::Parameter<T>(static_cast<int>(index), loc); \
+ } \
+ \
+ template <class T> \
+ TNode<T> UncheckedParameter(Descriptor::ParameterIndices index) { \
+ return CodeAssembler::UncheckedParameter<T>(static_cast<int>(index)); \
+ } \
+ }; \
+ void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \
+ Name##Assembler assembler(state); \
+ state->SetInitialDebugInformation(#Name, __FILE__, __LINE__); \
+ if (Builtins::KindOf(Builtins::k##Name) == Builtins::TFJ) { \
+ assembler.PerformStackCheck(assembler.GetJSContextParameter()); \
+ } \
+ assembler.Generate##Name##Impl(); \
+ } \
void Name##Assembler::Generate##Name##Impl()
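
The macro grows a templated Parameter<T> that forwards a cppgc::SourceLocation (hence the new include), so a failing checked cast reports the builtin's own call site rather than a line inside CodeAssembler. The same default-argument trick in C++20, with std::source_location as the analog:

    #include <cstdio>
    #include <source_location>

    void CheckedCastSketch(bool ok, std::source_location loc =
                                        std::source_location::current()) {
      if (!ok) {
        std::printf("cast failed at %s:%u\n", loc.file_name(),
                    static_cast<unsigned>(loc.line()));
      }
    }
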
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-utils-inl.h b/deps/v8/src/builtins/builtins-utils-inl.h
index 82d5fe2873..10f03a3d91 100644
--- a/deps/v8/src/builtins/builtins-utils-inl.h
+++ b/deps/v8/src/builtins/builtins-utils-inl.h
@@ -23,20 +23,12 @@ Handle<Object> BuiltinArguments::atOrUndefined(Isolate* isolate,
Handle<Object> BuiltinArguments::receiver() const { return at<Object>(0); }
Handle<JSFunction> BuiltinArguments::target() const {
-#ifdef V8_REVERSE_JSARGS
int index = kTargetOffset;
-#else
- int index = Arguments::length() - 1 - kTargetOffset;
-#endif
return Handle<JSFunction>(address_of_arg_at(index));
}
Handle<HeapObject> BuiltinArguments::new_target() const {
-#ifdef V8_REVERSE_JSARGS
int index = kNewTargetOffset;
-#else
- int index = Arguments::length() - 1 - kNewTargetOffset;
-#endif
return Handle<JSFunction>(address_of_arg_at(index));
}
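
With V8_REVERSE_JSARGS now the only layout, target and new_target live at fixed offsets near the frame base, and the deleted fallback that counted slots from the end (and so depended on the dynamic argument count) goes away. The two index schemes side by side, constants illustrative:

    #include <cstddef>

    constexpr std::size_t kTargetOffsetSketch = 0;

    // Old conditional scheme: the position moved with the argument count.
    constexpr std::size_t TargetIndexOld(std::size_t length) {
      return length - 1 - kTargetOffsetSketch;
    }

    // Now-unconditional scheme: a fixed slot, count-independent.
    constexpr std::size_t TargetIndexNew() { return kTargetOffsetSketch; }
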
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 3bed3bc651..e5f420a20d 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -52,12 +52,7 @@ class BuiltinArguments : public JavaScriptArguments {
static constexpr int kNumExtraArgs = 4;
static constexpr int kNumExtraArgsWithReceiver = 5;
-
-#ifdef V8_REVERSE_JSARGS
static constexpr int kArgsOffset = 4;
-#else
- static constexpr int kArgsOffset = 0;
-#endif
inline Handle<Object> atOrUndefined(Isolate* isolate, int index) const;
inline Handle<Object> receiver() const;
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index d4e92d165d..a996161e2f 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -44,12 +44,12 @@ TNode<FixedArray> WasmBuiltinsAssembler::LoadManagedObjectMapsFromInstance(
}
TF_BUILTIN(WasmFloat32ToNumber, WasmBuiltinsAssembler) {
- TNode<Float32T> val = UncheckedCast<Float32T>(Parameter(Descriptor::kValue));
+ auto val = UncheckedParameter<Float32T>(Descriptor::kValue);
Return(ChangeFloat32ToTagged(val));
}
TF_BUILTIN(WasmFloat64ToNumber, WasmBuiltinsAssembler) {
- TNode<Float64T> val = UncheckedCast<Float64T>(Parameter(Descriptor::kValue));
+ auto val = UncheckedParameter<Float64T>(Descriptor::kValue);
Return(ChangeFloat64ToTagged(val));
}
@@ -59,18 +59,14 @@ TF_BUILTIN(WasmI32AtomicWait32, WasmBuiltinsAssembler) {
return;
}
- TNode<Uint32T> address =
- UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
+ auto address = UncheckedParameter<Uint32T>(Descriptor::kAddress);
TNode<Number> address_number = ChangeUint32ToTagged(address);
- TNode<Int32T> expected_value =
- UncheckedCast<Int32T>(Parameter(Descriptor::kExpectedValue));
+ auto expected_value = UncheckedParameter<Int32T>(Descriptor::kExpectedValue);
TNode<Number> expected_value_number = ChangeInt32ToTagged(expected_value);
- TNode<IntPtrT> timeout_low =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutLow));
- TNode<IntPtrT> timeout_high =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutHigh));
+ auto timeout_low = UncheckedParameter<IntPtrT>(Descriptor::kTimeoutLow);
+ auto timeout_high = UncheckedParameter<IntPtrT>(Descriptor::kTimeoutHigh);
TNode<BigInt> timeout = BigIntFromInt32Pair(timeout_low, timeout_high);
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
@@ -88,21 +84,18 @@ TF_BUILTIN(WasmI64AtomicWait32, WasmBuiltinsAssembler) {
return;
}
- TNode<Uint32T> address =
- UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
+ auto address = UncheckedParameter<Uint32T>(Descriptor::kAddress);
TNode<Number> address_number = ChangeUint32ToTagged(address);
- TNode<IntPtrT> expected_value_low =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kExpectedValueLow));
- TNode<IntPtrT> expected_value_high =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kExpectedValueHigh));
+ auto expected_value_low =
+ UncheckedParameter<IntPtrT>(Descriptor::kExpectedValueLow);
+ auto expected_value_high =
+ UncheckedParameter<IntPtrT>(Descriptor::kExpectedValueHigh);
TNode<BigInt> expected_value =
BigIntFromInt32Pair(expected_value_low, expected_value_high);
- TNode<IntPtrT> timeout_low =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutLow));
- TNode<IntPtrT> timeout_high =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutHigh));
+ auto timeout_low = UncheckedParameter<IntPtrT>(Descriptor::kTimeoutLow);
+ auto timeout_high = UncheckedParameter<IntPtrT>(Descriptor::kTimeoutHigh);
TNode<BigInt> timeout = BigIntFromInt32Pair(timeout_low, timeout_high);
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
@@ -115,9 +108,9 @@ TF_BUILTIN(WasmI64AtomicWait32, WasmBuiltinsAssembler) {
}
TF_BUILTIN(WasmAllocateArrayWithRtt, WasmBuiltinsAssembler) {
- TNode<Map> map = CAST(Parameter(Descriptor::kMap));
- TNode<Smi> length = CAST(Parameter(Descriptor::kLength));
- TNode<Smi> element_size = CAST(Parameter(Descriptor::kElementSize));
+ auto map = Parameter<Map>(Descriptor::kMap);
+ auto length = Parameter<Smi>(Descriptor::kLength);
+ auto element_size = Parameter<Smi>(Descriptor::kElementSize);
TNode<IntPtrT> untagged_length = SmiUntag(length);
// instance_size = WasmArray::kHeaderSize
// + RoundUp(element_size * length, kObjectAlignment)
@@ -134,5 +127,27 @@ TF_BUILTIN(WasmAllocateArrayWithRtt, WasmBuiltinsAssembler) {
Return(result);
}
+TF_BUILTIN(WasmAllocatePair, WasmBuiltinsAssembler) {
+ TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
+ TNode<HeapObject> value1 = Parameter<HeapObject>(Descriptor::kValue1);
+ TNode<HeapObject> value2 = Parameter<HeapObject>(Descriptor::kValue2);
+
+ TNode<IntPtrT> roots = LoadObjectField<IntPtrT>(
+ instance, WasmInstanceObject::kIsolateRootOffset);
+ TNode<Map> map = CAST(Load(
+ MachineType::AnyTagged(), roots,
+ IntPtrConstant(IsolateData::root_slot_offset(RootIndex::kTuple2Map))));
+
+ TNode<IntPtrT> instance_size =
+ TimesTaggedSize(LoadMapInstanceSizeInWords(map));
+ TNode<Tuple2> result = UncheckedCast<Tuple2>(Allocate(instance_size));
+
+ StoreMap(result, map);
+ StoreObjectField(result, Tuple2::kValue1Offset, value1);
+ StoreObjectField(result, Tuple2::kValue2Offset, value2);
+
+ Return(result);
+}
+
} // namespace internal
} // namespace v8
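
The Parameter changes above are mechanical: `UncheckedCast<T>(Parameter(i))` becomes `UncheckedParameter<T>(i)` and `CAST(Parameter(i))` becomes `Parameter<T>(i)`. Both are thin typed wrappers over the untyped parameter load; a self-contained sketch of the pattern with stand-in types (not the real CSA classes):

    // Minimal stand-ins for the CSA node types (assumptions for illustration).
    struct Node { int id; };
    template <typename T>
    struct TNode { Node* node; };

    struct AssemblerSketch {
      Node* Parameter(int index) { return &slots_[index]; }  // untyped load

      template <typename T>
      TNode<T> UncheckedCast(Node* n) { return TNode<T>{n}; }  // no type check

      // The wrapper the hunks above switch to: folding the cast into the
      // accessor lets call sites write `auto x = UncheckedParameter<T>(i);`
      // without losing the static TNode<T> type.
      template <typename T>
      TNode<T> UncheckedParameter(int index) {
        return UncheckedCast<T>(Parameter(index));
      }

      Node slots_[8] = {};
    };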
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 31682f3974..541f9ffac9 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -335,7 +335,8 @@ class OffHeapTrampolineGenerator {
public:
explicit OffHeapTrampolineGenerator(Isolate* isolate)
: isolate_(isolate),
- masm_(isolate, CodeObjectRequired::kYes,
+ masm_(isolate, AssemblerOptions::DefaultForOffHeapTrampoline(isolate),
+ CodeObjectRequired::kYes,
ExternalAssemblerBuffer(buffer_, kBufferSize)) {}
CodeDesc Generate(Address off_heap_entry, TrampolineType type) {
@@ -347,6 +348,7 @@ class OffHeapTrampolineGenerator {
masm_.CodeEntry();
masm_.JumpToInstructionStream(off_heap_entry);
} else {
+ DCHECK_EQ(type, TrampolineType::kAbort);
masm_.Trap();
}
}
@@ -484,6 +486,7 @@ bool Builtins::CodeObjectIsExecutable(int builtin_index) {
case Builtins::kArgumentsAdaptorTrampoline:
case Builtins::kHandleApiCall:
case Builtins::kInstantiateAsmJs:
+ case Builtins::kGenericJSToWasmWrapper:
// TODO(delphick): Remove this when calls to it have the trampoline inlined
// or are converted to use kCallBuiltinPointer.
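
The added DCHECK records that the generator emits exactly two stub flavors. What an emitted trampoline does when executed, as illustrative C++ rather than the generated machine code:

    #include <cassert>
    #include <cstdlib>

    enum class TrampolineType { kJump, kAbort };

    void TrampolineBehavior(TrampolineType type, void (*off_heap_entry)()) {
      if (type == TrampolineType::kJump) {
        off_heap_entry();  // masm_.JumpToInstructionStream(off_heap_entry)
      } else {
        assert(type == TrampolineType::kAbort);  // the DCHECK added above
        std::abort();                            // masm_.Trap()
      }
    }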
diff --git a/deps/v8/src/builtins/cast.tq b/deps/v8/src/builtins/cast.tq
index 1562b7b4dd..362086f879 100644
--- a/deps/v8/src/builtins/cast.tq
+++ b/deps/v8/src/builtins/cast.tq
@@ -158,6 +158,7 @@ macro Cast<A : type extends Object>(implicit context: Context)(o: Object): A
otherwise CastError;
}
+// This is required for casting MaybeObject to Object.
Cast<Smi>(o: Object): Smi
labels CastError {
return TaggedToSmi(o) otherwise CastError;
@@ -667,7 +668,19 @@ UnsafeCast<RegExpMatchInfo>(implicit context: Context)(o: Object):
return %RawDownCast<RegExpMatchInfo>(o);
}
-macro CastOrDefault<T: type, Arg: type, Default: type>(
- implicit context: Context)(x: Arg, default: Default): T|Default {
+macro UnsafeCast<A : type extends WeakHeapObject>(o: A|Object): A {
+ assert(IsWeakOrCleared(o));
+ return %RawDownCast<A>(o);
+}
+
+macro
+CastOrDefault<T: type, Arg: type, Default: type>(implicit context: Context)(
+ x: Arg, default: Default): T|Default {
return Cast<T>(x) otherwise return default;
}
+
+// This is required for casting MaybeObject to Object.
+Cast<Object>(o: Object): Object
+labels _CastError {
+ return o;
+}
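
A C++ analogy (not Torque) for the reformatted CastOrDefault macro: attempt a checked downcast and fall back to the supplied default on failure. Types here are placeholders:

    #include <variant>

    struct Object { virtual ~Object() = default; };
    struct Smi : Object {};

    // Returns the downcast pointer when it succeeds, otherwise the default
    // (assumes Default is not T*, so the variant alternatives stay distinct).
    template <typename T, typename Default>
    std::variant<T*, Default> CastOrDefault(Object* x, Default default_value) {
      if (auto* t = dynamic_cast<T*>(x)) return t;  // Cast<T>(x) succeeded
      return default_value;                         // otherwise return default
    }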
diff --git a/deps/v8/src/builtins/constants-table-builder.cc b/deps/v8/src/builtins/constants-table-builder.cc
index 97565f2e37..25fa878634 100644
--- a/deps/v8/src/builtins/constants-table-builder.cc
+++ b/deps/v8/src/builtins/constants-table-builder.cc
@@ -46,15 +46,12 @@ uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
DCHECK(!object->IsCode());
#endif
- uint32_t* maybe_key = map_.Find(object);
- if (maybe_key == nullptr) {
+ auto find_result = map_.FindOrInsert(object);
+ if (!find_result.already_exists) {
DCHECK(object->IsHeapObject());
- uint32_t index = map_.size();
- map_.Set(object, index);
- return index;
- } else {
- return *maybe_key;
+ *find_result.entry = map_.size() - 1;
}
+ return *find_result.entry;
}
namespace {
@@ -85,7 +82,7 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference(
uint32_t key;
if (map_.Delete(self_reference, &key)) {
DCHECK(code_object->IsCode());
- map_.Set(code_object, key);
+ map_.Insert(code_object, key);
}
}
@@ -96,7 +93,7 @@ void BuiltinsConstantsTableBuilder::PatchBasicBlockCountersReference(
uint32_t key;
if (map_.Delete(ReadOnlyRoots(isolate_).basic_block_counters_marker(),
&key)) {
- map_.Set(counters, key);
+ map_.Insert(counters, key);
}
}
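
FindOrInsert collapses the old Find-then-Set pair into a single hash-table probe. The same pattern with std::unordered_map standing in for V8's IdentityMap (a sketch, not the map_ API):

    #include <cstdint>
    #include <unordered_map>

    uint32_t AddObject(std::unordered_map<const void*, uint32_t>& map,
                       const void* object) {
      // try_emplace probes the table once; on a miss the freshly inserted
      // entry is assigned the next index, mirroring `map_.size() - 1` above.
      auto [it, inserted] = map.try_emplace(object, 0);
      if (inserted) it->second = static_cast<uint32_t>(map.size()) - 1;
      return it->second;
    }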
diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq
index 6ac9901028..6f6cbb1f68 100644
--- a/deps/v8/src/builtins/convert.tq
+++ b/deps/v8/src/builtins/convert.tq
@@ -115,6 +115,11 @@ FromConstexpr<IterationKind, constexpr IterationKind>(
return %RawDownCast<IterationKind>(Unsigned(%FromConstexpr<int32>(c)));
}
+FromConstexpr<string::TrimMode, string::constexpr TrimMode>(
+ c: string::constexpr TrimMode): string::TrimMode {
+ return %RawDownCast<string::TrimMode>(Unsigned(%FromConstexpr<int32>(c)));
+}
+
macro Convert<To: type, From: type>(i: From): To {
return i;
}
diff --git a/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc b/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc
index 8266807b43..7317402fd5 100644
--- a/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc
+++ b/deps/v8/src/builtins/generate-bytecodes-builtins-list.cc
@@ -11,6 +11,9 @@ namespace v8 {
namespace internal {
namespace interpreter {
+const int kIllegalBytecodeHandler = -1;
+const int kIllegalBytecodeHandlerEncoding = 255;
+
void WriteBytecode(std::ofstream& out, Bytecode bytecode,
OperandScale operand_scale, int* count, int offset_table[],
int table_index) {
@@ -22,7 +25,7 @@ void WriteBytecode(std::ofstream& out, Bytecode bytecode,
offset_table[table_index] = *count;
(*count)++;
} else {
- offset_table[table_index] = -1;
+ offset_table[table_index] = kIllegalBytecodeHandler;
}
}
@@ -32,6 +35,7 @@ void WriteHeader(const char* header_filename) {
out << "// Automatically generated from interpreter/bytecodes.h\n"
<< "// The following list macro is used to populate the builtins list\n"
<< "// with the bytecode handlers\n\n"
+ << "#include <stdint.h>\n\n"
<< "#ifndef V8_BUILTINS_GENERATED_BYTECODES_BUILTINS_LIST\n"
<< "#define V8_BUILTINS_GENERATED_BYTECODES_BUILTINS_LIST\n\n"
<< "namespace v8 {\n"
@@ -60,19 +64,25 @@ void WriteHeader(const char* header_filename) {
CHECK_GT(single_count, wide_count);
CHECK_EQ(single_count, Bytecodes::kBytecodeCount);
CHECK_EQ(wide_count, extra_wide_count);
- out << "\n\nconst int kNumberOfBytecodeHandlers = " << single_count << ";\n"
- << "const int kNumberOfWideBytecodeHandlers = " << wide_count << ";\n\n"
- << "// Mapping from (Bytecode + OperandScaleAsIndex * |Bytecodes|) to\n"
- << "// a dense form with all the illegal Bytecode/OperandScale\n"
- << "// combinations removed. Used to index into the builtins table.\n"
- << "constexpr int kBytecodeToBuiltinsMapping[" << kTableSize << "] = {\n"
- << " ";
+ out << "\n\nconstexpr int kNumberOfBytecodeHandlers = " << single_count
+ << ";\n"
+ << "constexpr int kNumberOfWideBytecodeHandlers = " << wide_count
+ << ";\n\n"
+ << "constexpr uint8_t kIllegalBytecodeHandlerEncoding = "
+ << kIllegalBytecodeHandlerEncoding << ";\n\n"
+ << "// Mapping from Bytecode to a dense form with all the illegal\n"
+ << "// wide Bytecodes removed. Used to index into the builtins table.\n"
+ << "constexpr uint8_t kWideBytecodeToBuiltinsMapping["
+ << "kNumberOfBytecodeHandlers] = { \n";
- for (int i = 0; i < kTableSize; ++i) {
- if (i == single_count || i == 2 * single_count) {
- out << "\n ";
+ for (int i = single_count; i < 2 * single_count; ++i) {
+ int offset = offset_table[i];
+ if (offset == kIllegalBytecodeHandler) {
+ offset = kIllegalBytecodeHandlerEncoding;
+ } else {
+ offset -= single_count;
}
- out << offset_table[i] << ", ";
+ out << offset << ", ";
}
out << "};\n\n"
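
The table shrinks from 3 * |Bytecodes| ints to |Bytecodes| bytes because only the wide and extra-wide rows contain holes, and every dense offset fits in a byte once 255 is reserved for the illegal entries. How a consumer of the generated header might decode it (names illustrative, not the header's exact contents):

    #include <cstdint>

    constexpr uint8_t kIllegalEncoding = 255;  // kIllegalBytecodeHandlerEncoding

    // Maps a bytecode to its wide-handler builtin, or -1 if the wide form is
    // illegal; offsets in the table are relative to the single-width handlers.
    int WideHandlerBuiltinIndex(const uint8_t* wide_mapping, int bytecode,
                                int first_wide_builtin) {
      uint8_t encoded = wide_mapping[bytecode];
      if (encoded == kIllegalEncoding) return -1;
      return first_wide_builtin + encoded;
    }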
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 41181410b5..d5f82cd3d9 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -72,48 +72,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
-
-void CompareStackLimit(MacroAssembler* masm, Register with,
- StackLimitKind kind) {
- DCHECK(masm->root_array_available());
- Isolate* isolate = masm->isolate();
- // Address through the root register. No load is needed.
- ExternalReference limit =
- kind == StackLimitKind::kRealStackLimit
- ? ExternalReference::address_of_real_jslimit(isolate)
- : ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
-
- intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
- __ cmp(with, Operand(kRootRegister, offset));
-}
-
-void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch, Label* stack_overflow,
- bool include_receiver = false) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_jslimit(masm->isolate());
- // Compute the space that is left as a negative number in scratch. If
- // we already overflowed, this will be a positive number.
- __ mov(scratch, __ ExternalReferenceAsOperand(real_stack_limit, scratch));
- __ sub(scratch, esp);
- // Add the size of the arguments.
- static_assert(kSystemPointerSize == 4,
- "The next instruction assumes kSystemPointerSize == 4");
- __ lea(scratch, Operand(scratch, num_args, times_system_pointer_size, 0));
- if (include_receiver) {
- __ add(scratch, Immediate(kSystemPointerSize));
- }
- // See if we overflowed, i.e. scratch is positive.
- __ cmp(scratch, Immediate(0));
- __ j(greater, stack_overflow); // Signed comparison.
-}
-
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax: number of arguments
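
Both helpers deleted here were promoted into MacroAssembler so other builtins and ports can share them; the test itself is unchanged. The overflow computation in plain C++ (a sketch; registers and the root-register addressing elided):

    #include <cstdint>

    bool WouldOverflow(uintptr_t sp, uintptr_t real_stack_limit, int num_args,
                       bool include_receiver) {
      // limit - sp is negative while space remains below the limit.
      intptr_t headroom = static_cast<intptr_t>(real_stack_limit) -
                          static_cast<intptr_t>(sp);
      headroom += (num_args + (include_receiver ? 1 : 0)) *
                  static_cast<intptr_t>(sizeof(uintptr_t));
      return headroom > 0;  // positive: the pushes would cross the limit
    }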
@@ -124,7 +82,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
Label stack_overflow;
- Generate_StackOverflowCheck(masm, eax, ecx, &stack_overflow);
+ __ StackOverflowCheck(eax, ecx, &stack_overflow);
// Enter a construct frame.
{
@@ -136,7 +94,11 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ push(eax);
__ SmiUntag(eax);
-#ifdef V8_REVERSE_JSARGS
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+  // should get the formal parameter count and copy the arguments into their
+  // correct positions (including any undefined), instead of delaying this to
+ // InvokeFunction.
+
// Set up pointer to first argument (skip receiver).
__ lea(esi, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize));
@@ -144,14 +106,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ PushArray(esi, eax, ecx);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-#else
- // The receiver for the builtin/api call.
- __ PushRoot(RootIndex::kTheHoleValue);
- // Set up pointer to last argument. We are using esi as scratch register.
- __ lea(esi, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
- // Copy arguments to the expression stack.
- __ PushArray(esi, eax, ecx);
-#endif
// Call the function.
// eax: number of arguments (untagged)
@@ -196,168 +150,133 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
+ FrameScope scope(masm, StackFrame::MANUAL);
// Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
- Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+ __ EnterFrame(StackFrame::CONSTRUCT);
- // Preserve the incoming parameters on the stack.
- __ mov(ecx, eax);
- __ SmiTag(ecx);
- __ Push(esi);
- __ Push(ecx);
- __ Push(edi);
- __ PushRoot(RootIndex::kTheHoleValue);
- __ Push(edx);
-
- // ----------- S t a t e -------------
- // -- sp[0*kSystemPointerSize]: new target
- // -- sp[1*kSystemPointerSize]: padding
- // -- edi and sp[2*kSystemPointerSize]: constructor function
- // -- sp[3*kSystemPointerSize]: argument count
- // -- sp[4*kSystemPointerSize]: context
- // -----------------------------------
-
- __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kFlagsOffset));
- __ DecodeField<SharedFunctionInfo::FunctionKindBits>(eax);
- __ JumpIfIsInRange(eax, kDefaultDerivedConstructor, kDerivedConstructor,
- ecx, &not_create_implicit_receiver, Label::kNear);
-
- // If not derived class constructor: Allocate the new receiver object.
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
- eax);
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ jmp(&post_instantiation_deopt_entry, Label::kNear);
-
- // Else: use TheHoleValue as receiver for constructor call
- __ bind(&not_create_implicit_receiver);
- __ LoadRoot(eax, RootIndex::kTheHoleValue);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
- // ----------- S t a t e -------------
- // -- eax: implicit receiver
- // -- Slot 4 / sp[0*kSystemPointerSize]: new target
- // -- Slot 3 / sp[1*kSystemPointerSize]: padding
- // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
- // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
- // -- Slot 0 / sp[4*kSystemPointerSize]: context
- // -----------------------------------
- // Deoptimizer enters here.
- masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
- masm->pc_offset());
- __ bind(&post_instantiation_deopt_entry);
+ // Preserve the incoming parameters on the stack.
+ __ mov(ecx, eax);
+ __ SmiTag(ecx);
+ __ Push(esi);
+ __ Push(ecx);
+ __ Push(edi);
+ __ PushRoot(RootIndex::kTheHoleValue);
+ __ Push(edx);
- // Restore new target.
- __ Pop(edx);
+ // ----------- S t a t e -------------
+ // -- sp[0*kSystemPointerSize]: new target
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- edi and sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: argument count
+ // -- sp[4*kSystemPointerSize]: context
+ // -----------------------------------
- // Push the allocated receiver to the stack.
- __ Push(eax);
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kFlagsOffset));
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(eax);
+ __ JumpIfIsInRange(eax, kDefaultDerivedConstructor, kDerivedConstructor, ecx,
+ &not_create_implicit_receiver, Label::kNear);
-#ifdef V8_REVERSE_JSARGS
- // We need two copies because we may have to return the original one
- // and the calling conventions dictate that the called function pops the
- // receiver. The second copy is pushed after the arguments, we saved in r8
- // since rax needs to store the number of arguments before
- // InvokingFunction.
- __ movd(xmm0, eax);
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
+ eax);
+ __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
+ __ jmp(&post_instantiation_deopt_entry, Label::kNear);
- // Set up pointer to first argument (skip receiver).
- __ lea(edi, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
- kSystemPointerSize));
-#else
- // We need two copies because we may have to return the original one
- // and the calling conventions dictate that the called function pops the
- // receiver.
- __ Push(eax);
+ // Else: use TheHoleValue as receiver for constructor call
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(eax, RootIndex::kTheHoleValue);
- // Set up pointer to last argument.
- __ lea(edi, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
-#endif
-
- // Restore argument count.
- __ mov(eax, Operand(ebp, ConstructFrameConstants::kLengthOffset));
- __ SmiUntag(eax);
+ // ----------- S t a t e -------------
+ // -- eax: implicit receiver
+ // -- Slot 4 / sp[0*kSystemPointerSize]: new target
+ // -- Slot 3 / sp[1*kSystemPointerSize]: padding
+ // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kSystemPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
- // Check if we have enough stack space to push all arguments.
- // Argument count in eax. Clobbers ecx.
- Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, eax, ecx, &stack_overflow);
- __ jmp(&enough_stack_space);
+ // Restore new target.
+ __ Pop(edx);
- __ bind(&stack_overflow);
- // Restore context from the frame.
- __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kThrowStackOverflow);
- // This should be unreachable.
- __ int3();
+ // Push the allocated receiver to the stack.
+ __ Push(eax);
- __ bind(&enough_stack_space);
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+  // receiver. The second copy is pushed after the arguments; we save it in
+  // xmm0 since eax needs to hold the number of arguments until
+  // InvokeFunction.
+ __ movd(xmm0, eax);
- // Copy arguments to the expression stack.
- __ PushArray(edi, eax, ecx);
+ // Set up pointer to first argument (skip receiver).
+ __ lea(edi, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
+ kSystemPointerSize));
-#ifdef V8_REVERSE_JSARGS
- // Push implicit receiver.
- __ movd(ecx, xmm0);
- __ Push(ecx);
-#endif
+ // Restore argument count.
+ __ mov(eax, Operand(ebp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(eax);
- // Restore and and call the constructor function.
- __ mov(edi, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
- __ InvokeFunction(edi, edx, eax, CALL_FUNCTION);
+ // Check if we have enough stack space to push all arguments.
+ // Argument count in eax. Clobbers ecx.
+ Label stack_overflow;
+ __ StackOverflowCheck(eax, ecx, &stack_overflow);
- // ----------- S t a t e -------------
- // -- eax: constructor result
- // -- sp[0*kSystemPointerSize]: implicit receiver
- // -- sp[1*kSystemPointerSize]: padding
- // -- sp[2*kSystemPointerSize]: constructor function
- // -- sp[3*kSystemPointerSize]: number of arguments
- // -- sp[4*kSystemPointerSize]: context
- // -----------------------------------
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+  // should get the formal parameter count and copy the arguments into their
+  // correct positions (including any undefined), instead of delaying this to
+ // InvokeFunction.
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
+ // Copy arguments to the expression stack.
+ __ PushArray(edi, eax, ecx);
- // Restore context from the frame.
- __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
+ // Push implicit receiver.
+ __ movd(ecx, xmm0);
+ __ Push(ecx);
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, do_throw, leave_frame;
+  // Restore and call the constructor function.
+ __ mov(edi, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
+ __ InvokeFunction(edi, edx, eax, CALL_FUNCTION);
- // If the result is undefined, we jump out to using the implicit receiver.
- __ JumpIfRoot(eax, RootIndex::kUndefinedValue, &use_receiver, Label::kNear);
+ // ----------- S t a t e -------------
+ // -- eax: constructor result
+ // -- sp[0*kSystemPointerSize]: implicit receiver
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: number of arguments
+ // -- sp[4*kSystemPointerSize]: context
+ // -----------------------------------
- // Otherwise we do a smi check and fall through to check if the return value
- // is a valid receiver.
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(eax, &use_receiver, Label::kNear);
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
- __ j(above_equal, &leave_frame, Label::kNear);
- __ jmp(&use_receiver, Label::kNear);
+ Label check_result, use_receiver, do_throw, leave_and_return;
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ JumpIfNotRoot(eax, RootIndex::kUndefinedValue, &check_result,
+ Label::kNear);
- __ bind(&do_throw);
- __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ mov(eax, Operand(esp, 0 * kSystemPointerSize));
+ __ JumpIfRoot(eax, RootIndex::kTheHoleValue, &do_throw);
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ mov(eax, Operand(esp, 0 * kSystemPointerSize));
- __ JumpIfRoot(eax, RootIndex::kTheHoleValue, &do_throw);
+ __ bind(&leave_and_return);
+ // Restore smi-tagged arguments count from the frame.
+ __ mov(edx, Operand(ebp, ConstructFrameConstants::kLengthOffset));
+ __ LeaveFrame(StackFrame::CONSTRUCT);
- __ bind(&leave_frame);
- // Restore smi-tagged arguments count from the frame.
- __ mov(edx, Operand(ebp, ConstructFrameConstants::kLengthOffset));
- // Leave construct frame.
- }
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ pop(ecx);
@@ -365,6 +284,34 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
1 * kSystemPointerSize)); // 1 ~ receiver
__ push(ecx);
__ ret(0);
+
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+ __ bind(&check_result);
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(eax, &use_receiver, Label::kNear);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+ __ j(above_equal, &leave_and_return, Label::kNear);
+ __ jmp(&use_receiver, Label::kNear);
+
+ __ bind(&do_throw);
+ // Restore context from the frame.
+ __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ // This should be unreachable.
+ __ int3();
+
+ __ bind(&stack_overflow);
+ // Restore context from the frame.
+ __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // This should be unreachable.
+ __ int3();
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
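
Moving the do_throw and stack_overflow blocks past the return sequence only reorders the code; the selection rule the stub implements is unchanged. Roughly (a simplification, not every ECMA-262 step):

    #include <stdexcept>

    enum class Tag { kUndefined, kSmi, kJSReceiver, kOther, kTheHole };

    Tag SelectConstructResult(Tag result, Tag receiver_slot) {
      if (result == Tag::kJSReceiver) return result;  // leave_and_return
      // use_receiver: any non-object result falls back to the implicit
      // receiver; a hole there means a derived constructor returned a
      // non-object, which must throw (do_throw).
      if (receiver_slot == Tag::kTheHole)
        throw std::runtime_error("constructor returned non-object");
      return receiver_slot;
    }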
@@ -528,11 +475,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Push the function.
__ push(Operand(scratch1, EntryFrameConstants::kFunctionArgOffset));
-#ifndef V8_REVERSE_JSARGS
- // And the receiver onto the stack.
- __ push(Operand(scratch1, EntryFrameConstants::kReceiverArgOffset));
-#endif
-
// Load the number of arguments and setup pointer to the arguments.
__ mov(eax, Operand(scratch1, EntryFrameConstants::kArgcOffset));
__ mov(scratch1, Operand(scratch1, EntryFrameConstants::kArgvOffset));
@@ -540,7 +482,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments.
// Argument count in eax. Clobbers ecx.
Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, eax, ecx, &stack_overflow);
+ __ StackOverflowCheck(eax, ecx, &stack_overflow);
__ jmp(&enough_stack_space);
__ bind(&stack_overflow);
@@ -551,7 +493,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&enough_stack_space);
// Copy arguments to the stack in a loop.
-#ifdef V8_REVERSE_JSARGS
Label loop, entry;
__ Move(ecx, eax);
__ jmp(&entry, Label::kNear);
@@ -562,27 +503,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&entry);
__ dec(ecx);
__ j(greater_equal, &loop);
-#else
- Label loop, entry;
- __ Move(ecx, Immediate(0));
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- // Push the parameter from argv.
- __ mov(scratch2, Operand(scratch1, ecx, times_system_pointer_size, 0));
- __ push(Operand(scratch2, 0)); // dereference handle
- __ inc(ecx);
- __ bind(&entry);
- __ cmp(ecx, eax);
- __ j(not_equal, &loop);
-#endif
// Load the previous frame pointer to access C arguments
__ mov(scratch2, Operand(ebp, 0));
-#ifdef V8_REVERSE_JSARGS
// Push the receiver onto the stack.
__ push(Operand(scratch2, EntryFrameConstants::kReceiverArgOffset));
-#endif
// Get the new.target and function from the frame.
__ mov(edx, Operand(scratch2, EntryFrameConstants::kNewTargetArgOffset));
@@ -667,23 +593,17 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- CompareStackLimit(masm, esp, StackLimitKind::kRealStackLimit);
+ __ CompareStackLimit(esp, StackLimitKind::kRealStackLimit);
__ j(below, &stack_overflow);
// Pop return address.
__ PopReturnAddressTo(eax);
-#ifndef V8_REVERSE_JSARGS
- // Push receiver.
- __ Push(FieldOperand(edx, JSGeneratorObject::kReceiverOffset));
-#endif
-
// ----------- S t a t e -------------
// -- eax : return address
// -- edx : the JSGeneratorObject to resume
// -- edi : generator function
// -- esi : generator context
- // -- esp[0] : generator receiver, if V8_REVERSE_JSARGS is not set
// -----------------------------------
{
@@ -695,7 +615,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
ecx, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(ebx,
FieldOperand(edx, JSGeneratorObject::kParametersAndRegistersOffset));
-#ifdef V8_REVERSE_JSARGS
{
Label done_loop, loop;
__ mov(edi, ecx);
@@ -712,22 +631,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Push receiver.
__ Push(FieldOperand(edx, JSGeneratorObject::kReceiverOffset));
-#else
- {
- Label done_loop, loop;
- __ Set(edi, 0);
-
- __ bind(&loop);
- __ cmp(edi, ecx);
- __ j(greater_equal, &done_loop);
- __ Push(
- FieldOperand(ebx, edi, times_tagged_size, FixedArray::kHeaderSize));
- __ add(edi, Immediate(1));
- __ jmp(&loop);
-
- __ bind(&done_loop);
- }
-#endif
// Restore registers.
__ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
@@ -804,31 +707,47 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
Register scratch2) {
- Register args_count = scratch1;
- Register return_pc = scratch2;
-
- // Get the arguments + receiver count.
- __ mov(args_count,
+ Register params_size = scratch1;
+ // Get the size of the formal parameters + receiver (in bytes).
+ __ mov(params_size,
Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ mov(args_count,
- FieldOperand(args_count, BytecodeArray::kParameterSizeOffset));
+ __ mov(params_size,
+ FieldOperand(params_size, BytecodeArray::kParameterSizeOffset));
+
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ Register actual_params_size = scratch2;
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ mov(actual_params_size, Operand(ebp, StandardFrameConstants::kArgCOffset));
+ __ lea(actual_params_size,
+ Operand(actual_params_size, times_system_pointer_size,
+ kSystemPointerSize));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ Label corrected_args_count;
+ __ cmp(params_size, actual_params_size);
+ __ j(greater_equal, &corrected_args_count, Label::kNear);
+ __ mov(params_size, actual_params_size);
+ __ bind(&corrected_args_count);
+#endif
// Leave the frame (also dropping the register file).
__ leave();
// Drop receiver + arguments.
- __ pop(return_pc);
- __ add(esp, args_count);
- __ push(return_pc);
+ Register return_pc = scratch2;
+ __ PopReturnAddressTo(return_pc);
+ __ add(esp, params_size);
+ __ PushReturnAddressFrom(return_pc);
}
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register smi_entry,
- OptimizationMarker marker,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ cmp(smi_entry, Immediate(Smi::FromEnum(marker)));
+ __ cmp(actual_marker, expected_marker);
__ j(not_equal, &no_match, Label::kNear);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
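
The V8_NO_ARGUMENTS_ADAPTOR branch has to pop whichever of the formal and actual argument areas is larger: with no adaptor frame, over-application leaves the extra actual arguments in this frame's argument area. The arithmetic (a sketch; the real code compares byte sizes loaded from the frame):

    #include <algorithm>
    #include <cstddef>

    size_t BytesToDrop(size_t formal_param_count, size_t actual_arg_count) {
      const size_t kPointerSize = sizeof(void*);
      size_t params_size = (formal_param_count + 1) * kPointerSize;  // +receiver
      size_t actual_size = (actual_arg_count + 1) * kPointerSize;    // +receiver
      // The corrected_args_count path above: keep the larger of the two.
      return std::max(params_size, actual_size);
    }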
@@ -844,17 +763,22 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(!AreAliased(edx, edi, optimized_code_entry));
Register closure = edi;
+ __ movd(xmm0, eax);
+ __ movd(xmm1, edx);
- __ push(edx);
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, &heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, bailout to a
// given label.
- Label found_deoptimized_code;
__ mov(eax,
FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
- __ j(not_zero, &found_deoptimized_code);
+ __ j(not_zero, &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure
// into the optimized functions list, then tail call the optimized code.
@@ -862,14 +786,17 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
eax);
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ LoadCodeObjectEntry(ecx, optimized_code_entry);
- __ pop(edx);
+ __ movd(edx, xmm1);
+ __ movd(eax, xmm0);
__ jmp(ecx);
- // Optimized code slot contains deoptimized code, evict it and re-enter
- // the closure's code.
- __ bind(&found_deoptimized_code);
- __ pop(edx);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ // Optimized code slot contains deoptimized code or code is cleared and
+ // optimized code marker isn't updated. Evict the code, update the marker
+ // and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ __ movd(edx, xmm1);
+ __ movd(eax, xmm0);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}
static void MaybeOptimizeCode(MacroAssembler* masm,
@@ -895,15 +822,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
- {
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
- if (FLAG_debug_code) {
- __ cmp(
- optimization_marker,
- Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
- __ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
- }
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
+ if (FLAG_debug_code) {
+ __ int3();
}
}
@@ -1031,18 +954,22 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpInstanceType(eax, FEEDBACK_VECTOR_TYPE);
__ j(not_equal, &push_stack_frame);
- // Read off the optimized code slot in the feedback vector.
- // Load the optimized code from the feedback vector and re-use the register.
- Register optimized_code_entry = ecx;
- __ mov(optimized_code_entry,
- FieldOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
-
- // Check if the optimized code slot is not empty.
- Label optimized_code_slot_not_empty;
- __ cmp(optimized_code_entry,
- Immediate(Smi::FromEnum(OptimizationMarker::kNone)));
- __ j(not_equal, &optimized_code_slot_not_empty);
+ // Load the optimization state from the feedback vector and re-use the
+ // register.
+ Register optimization_state = ecx;
+  // Store feedback_vector. We may need it later to load the optimized code
+  // slot entry.
+ __ movd(xmm1, feedback_vector);
+ __ mov(optimization_state,
+ FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+
+  // Check if there is optimized code or an optimization marker that needs to
+  // be processed.
+ Label has_optimized_code_or_marker;
+ __ test(
+ optimization_state,
+ Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ j(not_zero, &has_optimized_code_or_marker);
Label not_optimized;
__ bind(&not_optimized);
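
The rewrite replaces the old single weak slot (code-or-marker) with one 32-bit flags word: a single load and test answers "is there optimized code or a pending marker?", and the marker is a bit field inside the same word. A sketch of the decode (masks are assumptions, not FeedbackVector's real layout):

    #include <cstdint>

    constexpr uint32_t kMarkerMask = 0x7;               // OptimizationMarkerBits
    constexpr uint32_t kHasOptimizedCodeBit = 1u << 3;  // assumed bit position

    bool HasOptimizedCodeOrMarker(uint32_t optimization_state) {
      return (optimization_state & (kMarkerMask | kHasOptimizedCodeBit)) != 0;
    }

    uint32_t DecodeMarker(uint32_t optimization_state) {
      // DecodeField<FeedbackVector::OptimizationMarkerBits> in the code above.
      return optimization_state & kMarkerMask;
    }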
@@ -1108,7 +1035,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ mov(eax, esp);
__ sub(eax, frame_size);
- CompareStackLimit(masm, eax, StackLimitKind::kRealStackLimit);
+ __ CompareStackLimit(eax, StackLimitKind::kRealStackLimit);
__ j(below, &stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
@@ -1139,7 +1066,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Perform interrupt stack check.
// TODO(solanes): Merge with the real stack limit check above.
Label stack_check_interrupt, after_stack_check_interrupt;
- CompareStackLimit(masm, esp, StackLimitKind::kInterruptStackLimit);
+ __ CompareStackLimit(esp, StackLimitKind::kInterruptStackLimit);
__ j(below, &stack_check_interrupt);
__ bind(&after_stack_check_interrupt);
@@ -1214,21 +1141,29 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&optimized_code_slot_not_empty);
+ __ bind(&has_optimized_code_or_marker);
Label maybe_has_optimized_code;
// Restore actual argument count.
__ movd(eax, xmm0);
- // Check if optimized code marker is actually a weak reference to the
- // optimized code as opposed to an optimization marker.
- __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
- MaybeOptimizeCode(masm, optimized_code_entry);
+
+  // Check if optimized code is available.
+ __ test(
+ optimization_state,
+ Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ j(zero, &maybe_has_optimized_code);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
- // Load code entry from the weak reference, if it was cleared, resume
- // execution of unoptimized code.
- __ LoadWeakValue(optimized_code_entry, &not_optimized);
+ Register optimized_code_entry = optimization_marker;
+ __ movd(optimized_code_entry, xmm1);
+ __ mov(
+ optimized_code_entry,
+ FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry);
__ bind(&compile_lazy);
@@ -1253,19 +1188,11 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Label loop_header, loop_check;
__ jmp(&loop_check);
__ bind(&loop_header);
-#ifdef V8_REVERSE_JSARGS
__ Push(Operand(array_limit, 0));
__ bind(&loop_check);
__ add(array_limit, Immediate(kSystemPointerSize));
__ cmp(array_limit, start_address);
__ j(below_equal, &loop_header, Label::kNear);
-#else
- __ Push(Operand(start_address, 0));
- __ sub(start_address, Immediate(kSystemPointerSize));
- __ bind(&loop_check);
- __ cmp(start_address, array_limit);
- __ j(above, &loop_header, Label::kNear);
-#endif
}
// static
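
The surviving loop walks the argument array upward from just above array_limit to start_address, pushing each slot; the slot nearest array_limit therefore lands deepest on the machine stack. As a container-level model:

    #include <cstdint>
    #include <vector>

    void PushArgs(std::vector<uintptr_t>& machine_stack,
                  const uintptr_t* array_limit,
                  const uintptr_t* start_address) {
      // Mirrors the jmp-to-loop_check structure: advance first, then test.
      for (const uintptr_t* slot = array_limit + 1; slot <= start_address;
           ++slot) {
        machine_stack.push_back(*slot);
      }
    }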
@@ -1285,16 +1212,13 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
const Register argv = ecx;
Label stack_overflow;
-
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ dec(eax);
}
-#endif
// Add a stack check before pushing the arguments.
- Generate_StackOverflowCheck(masm, eax, scratch, &stack_overflow, true);
+ __ StackOverflowCheck(eax, scratch, &stack_overflow, true);
__ movd(xmm0, eax); // Spill number of arguments.
@@ -1304,7 +1228,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(eax);
-#ifdef V8_REVERSE_JSARGS
if (receiver_mode != ConvertReceiverMode::kNullOrUndefined) {
__ add(scratch, Immediate(1)); // Add one for receiver.
}
@@ -1328,34 +1251,12 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
__ PushRoot(RootIndex::kUndefinedValue);
}
-#else
- __ add(scratch, Immediate(1)); // Add one for receiver.
-
- // Push "undefined" as the receiver arg if we need to.
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(RootIndex::kUndefinedValue);
- __ sub(scratch, Immediate(1)); // Subtract one for receiver.
- }
-
- // Find the address of the last argument.
- __ shl(scratch, kSystemPointerSizeLog2);
- __ neg(scratch);
- __ add(scratch, argv);
- Generate_InterpreterPushArgs(masm, scratch, argv);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(ecx); // Pass the spread in a register
- }
-#endif
__ PushReturnAddressFrom(eax);
__ movd(eax, xmm0); // Restore number of arguments.
// Call the target.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
-#ifndef V8_REVERSE_JSARGS
- __ sub(eax, Immediate(1)); // Subtract one for spread
-#endif
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1395,7 +1296,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// | addtl. slot | | receiver slot |
// Check for stack overflow before we increment the stack pointer.
- Generate_StackOverflowCheck(masm, num_args, scratch1, stack_overflow, true);
+ __ StackOverflowCheck(num_args, scratch1, stack_overflow, true);
// Step 1 - Update the stack pointer.
@@ -1416,7 +1317,6 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// Step 3 copy arguments to correct locations.
// Slot meant for receiver contains return address. Reset it so that
// we will not incorrectly interpret return address as an object.
-#ifdef V8_REVERSE_JSARGS
__ mov(Operand(esp, (num_slots_to_move + 1) * kSystemPointerSize),
Immediate(0));
__ mov(scratch1, Immediate(0));
@@ -1433,26 +1333,6 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
__ inc(scratch1);
__ cmp(scratch1, eax);
__ j(less_equal, &loop_header, Label::kNear);
-
-#else
- __ mov(Operand(esp, num_args, times_system_pointer_size,
- (num_slots_to_move + 1) * kSystemPointerSize),
- Immediate(0));
- __ mov(scratch1, num_args);
-
- Label loop_header, loop_check;
- __ jmp(&loop_check);
- __ bind(&loop_header);
- __ mov(scratch2, Operand(start_addr, 0));
- __ mov(Operand(esp, scratch1, times_system_pointer_size,
- num_slots_to_move * kSystemPointerSize),
- scratch2);
- __ sub(start_addr, Immediate(kSystemPointerSize));
- __ sub(scratch1, Immediate(1));
- __ bind(&loop_check);
- __ cmp(scratch1, Immediate(0));
- __ j(greater, &loop_header, Label::kNear);
-#endif
}
} // anonymous namespace
@@ -1472,12 +1352,10 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// -----------------------------------
Label stack_overflow;
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ dec(eax);
}
-#endif
// Push arguments and move return address and stack spill slots to the top of
// stack. The eax register is readonly. The ecx register will be modified. edx
@@ -1513,17 +1391,10 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ Drop(1); // The allocation site is unused.
__ Pop(kJavaScriptCallNewTargetRegister);
__ Pop(kJavaScriptCallTargetRegister);
-#ifdef V8_REVERSE_JSARGS
// Pass the spread in the register ecx, overwriting ecx.
__ mov(ecx, Operand(ecx, 0));
__ PushReturnAddressFrom(eax);
__ movd(eax, xmm0); // Reload number of arguments.
-#else
- __ Pop(ecx); // Pop the spread (i.e. the first argument), overwriting ecx.
- __ PushReturnAddressFrom(eax);
- __ movd(eax, xmm0); // Reload number of arguments.
- __ sub(eax, Immediate(1)); // The actual argc thus decrements by one.
-#endif
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1680,7 +1551,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
const RegisterConfiguration* config(RegisterConfiguration::Default());
int allocatable_register_count = config->num_allocatable_general_registers();
if (with_result) {
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin) {
// xmm0 is not included in the allocateable registers.
__ movd(xmm0, eax);
@@ -1693,14 +1563,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
BuiltinContinuationFrameConstants::kFixedFrameSize),
eax);
}
-#else
- // Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point.
- __ mov(Operand(esp, config->num_allocatable_general_registers() *
- kSystemPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize),
- eax);
-#endif
}
// Replace the builtin index Smi on the stack with the start address of the
@@ -1718,7 +1580,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ SmiUntag(Register::from_code(code));
}
}
-#ifdef V8_REVERSE_JSARGS
if (with_result && java_script_builtin) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. eax contains the arguments count, the return value
@@ -1727,7 +1588,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
BuiltinContinuationFrameConstants::kFixedFrameSize),
xmm0);
}
-#endif
__ mov(
ebp,
Operand(esp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
@@ -1775,10 +1635,9 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
- // The order of args depends on V8_REVERSE_JSARGS
- // -- args[0] : receiver
- // -- args[1] : thisArg
- // -- args[2] : argArray
+  //  -- esp[4]  : receiver
+  //  -- esp[8]  : thisArg
+  //  -- esp[12] : argArray
// -----------------------------------
// 1. Load receiver into xmm0, argArray into edx (if present), remove all
@@ -1845,15 +1704,13 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Stack Layout:
// esp[0] : Return address
- // esp[8] : Argument n
- // esp[16] : Argument n-1
+ // esp[8] : Argument 0 (receiver: callable to call)
+ // esp[16] : Argument 1
// ...
- // esp[8 * n] : Argument 1
- // esp[8 * (n + 1)] : Receiver (callable to call)
- // NOTE: The order of args are reversed if V8_REVERSE_JSARGS
+ // esp[8 * n] : Argument n-1
+ // esp[8 * (n + 1)] : Argument n
// eax contains the number of arguments, n, not counting the receiver.
-#ifdef V8_REVERSE_JSARGS
// 1. Get the callable to call (passed as receiver) from the stack.
{
StackArgumentsAccessor args(eax);
@@ -1878,43 +1735,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// original callable), making the original first argument the new receiver.
__ PushReturnAddressFrom(edx);
__ dec(eax); // One fewer argument (first argument is new receiver).
-#else
- // 1. Make sure we have at least one argument.
- {
- Label done;
- __ test(eax, eax);
- __ j(not_zero, &done, Label::kNear);
- __ PopReturnAddressTo(edx);
- __ PushRoot(RootIndex::kUndefinedValue);
- __ PushReturnAddressFrom(edx);
- __ inc(eax);
- __ bind(&done);
- }
-
- // 2. Get the callable to call (passed as receiver) from the stack.
- {
- StackArgumentsAccessor args(eax);
- __ mov(edi, args.GetReceiverOperand());
- }
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- {
- Label loop;
- __ mov(ecx, eax);
- __ bind(&loop);
- __ mov(edx, Operand(esp, ecx, times_system_pointer_size, 0));
- __ mov(Operand(esp, ecx, times_system_pointer_size, kSystemPointerSize),
- edx);
- __ dec(ecx);
- __ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(edx); // Discard copy of return address.
- __ dec(eax); // One fewer argument (first argument is new receiver).
- }
-#endif
-
- // 4. Call the callable.
+ // 5. Call the callable.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
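
With the non-reversed branch gone, Function.prototype.call reduces to: take the callable out of the receiver slot, let the old first argument become the new receiver, and call with one fewer argument. As a container-level model (hypothetical helper, not a V8 API):

    #include <cstdint>
    #include <vector>

    using Value = uintptr_t;

    // args holds the receiver first, then the JS arguments; the stub
    // guarantees at least the receiver slot is present.
    Value PopCallableAndShift(std::vector<Value>& args) {
      Value callable = args.front();  // the callable was passed as receiver
      args.erase(args.begin());       // old arg 0 (thisArg) is now the receiver
      return callable;                // call this with one fewer argument
    }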
@@ -1922,11 +1744,10 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
- // The order of args depends on V8_REVERSE_JSARGS
- // -- args[0] : receiver
- // -- args[1] : target
- // -- args[2] : thisArgument
- // -- args[3] : argumentsList
+ // -- esp[4] : receiver
+ // -- esp[8] : target (if argc >= 1)
+ // -- esp[12] : thisArgument (if argc >= 2)
+ // -- esp[16] : argumentsList (if argc == 3)
// -----------------------------------
// 1. Load target into edi (if present), argumentsList into edx (if present),
@@ -1981,11 +1802,10 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
// -- esp[0] : return address
- // The order of args depends on V8_REVERSE_JSARGS
- // -- args[0] : receiver
- // -- args[1] : target
- // -- args[2] : argumentsList
- // -- args[3] : new.target (optional)
+ // -- esp[4] : receiver
+ // -- esp[8] : target
+ // -- esp[12] : argumentsList
+ // -- esp[16] : new.target (optional)
// -----------------------------------
// 1. Load target into edi (if present), argumentsList into ecx (if present),
@@ -2126,9 +1946,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- Generate_StackOverflowCheck(masm, kArgumentsLength, edx, &stack_overflow);
+ __ StackOverflowCheck(kArgumentsLength, edx, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
__ movd(xmm4, kArgumentsList); // Spill the arguments list.
// Move the arguments already in the stack,
@@ -2177,29 +1996,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ jmp(&loop);
__ bind(&done);
}
-#else // !V8_REVERSE_JSARGS
- // Push additional arguments onto the stack.
- {
- __ PopReturnAddressTo(edx);
- __ Move(eax, Immediate(0));
- Label done, push, loop;
- __ bind(&loop);
- __ cmp(eax, kArgumentsLength);
- __ j(equal, &done, Label::kNear);
- // Turn the hole into undefined as we go.
- __ mov(edi, FieldOperand(kArgumentsList, eax, times_tagged_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(edi, RootIndex::kTheHoleValue);
- __ j(not_equal, &push, Label::kNear);
- __ LoadRoot(edi, RootIndex::kUndefinedValue);
- __ bind(&push);
- __ Push(edi);
- __ inc(eax);
- __ jmp(&loop);
- __ bind(&done);
- __ PushReturnAddressFrom(edx);
- }
-#endif // !V8_REVERSE_JSARGS
// Restore eax, edi and edx.
__ movd(esi, xmm3); // Restore the context.
@@ -2255,6 +2051,12 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ movd(xmm1, edx); // Preserve new.target (in case of [[Construct]]).
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
+ // code is erased.
+ __ mov(scratch, ebp);
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kArgCOffset));
+#else
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ mov(scratch, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -2277,6 +2079,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ SmiUntag(edx);
}
__ bind(&arguments_done);
+#endif
Label stack_done, stack_overflow;
__ sub(edx, ecx);
@@ -2294,9 +2097,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -----------------------------------
// Forward the arguments from the caller frame.
-#ifdef V8_REVERSE_JSARGS
__ movd(xmm2, edi); // Preserve the target to call.
- Generate_StackOverflowCheck(masm, edx, edi, &stack_overflow);
+ __ StackOverflowCheck(edx, edi, &stack_overflow);
__ movd(xmm3, ebx); // Preserve root register.
Register scratch = ebx;
@@ -2350,20 +2152,6 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ movd(ebx, xmm3); // Restore root register.
__ movd(edi, xmm2); // Restore the target to call.
-#else
- Generate_StackOverflowCheck(masm, edx, ecx, &stack_overflow);
- Label loop;
- __ add(eax, edx);
- __ PopReturnAddressTo(ecx);
- __ bind(&loop);
- {
- __ dec(edx);
- __ Push(Operand(scratch, edx, times_system_pointer_size,
- kFPOnStackSize + kPCOnStackSize));
- __ j(not_zero, &loop);
- }
- __ PushReturnAddressFrom(ecx);
-#endif
}
__ bind(&stack_done);
@@ -2374,9 +2162,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
__ bind(&stack_overflow);
-#ifdef V8_REVERSE_JSARGS
__ movd(edi, xmm2); // Restore the target to call.
-#endif
__ movd(esi, xmm0); // Restore the context.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
}
@@ -2504,7 +2290,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ SmiUntag(edx);
__ test(edx, edx);
__ j(zero, &no_bound_arguments);
-#ifdef V8_REVERSE_JSARGS
{
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
@@ -2517,7 +2302,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Check the stack for overflow.
{
Label done, stack_overflow;
- Generate_StackOverflowCheck(masm, edx, ecx, &stack_overflow);
+ __ StackOverflowCheck(edx, ecx, &stack_overflow);
__ jmp(&done);
__ bind(&stack_overflow);
{
@@ -2564,85 +2349,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Restore context.
__ movd(esi, xmm3);
}
-#else // !V8_REVERSE_JSARGS
- {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- xmm0 : new.target (only in case of [[Construct]])
- // -- edi : target (checked to be a JSBoundFunction)
- // -- ecx : the [[BoundArguments]] (implemented as FixedArray)
- // -- edx : the number of [[BoundArguments]]
- // -----------------------------------
-
- // Reserve stack space for the [[BoundArguments]].
- {
- Label done;
- __ lea(ecx, Operand(edx, times_system_pointer_size, 0));
- __ sub(esp, ecx); // Not Windows-friendly, but corrected below.
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack
- // limit".
- CompareStackLimit(masm, esp, StackLimitKind::kRealStackLimit);
- __ j(above_equal, &done, Label::kNear);
- // Restore the stack pointer.
- __ lea(esp, Operand(esp, edx, times_system_pointer_size, 0));
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowStackOverflow);
- }
- __ bind(&done);
- }
-#if V8_OS_WIN
- // Correctly allocate the stack space that was checked above.
- {
- Label win_done;
- __ cmp(ecx, TurboAssemblerBase::kStackPageSize);
- __ j(less_equal, &win_done, Label::kNear);
- // Reset esp and walk through the range touching every page.
- __ lea(esp, Operand(esp, edx, times_system_pointer_size, 0));
- __ AllocateStackSpace(ecx);
- __ bind(&win_done);
- }
-#endif
-
- // Adjust effective number of arguments to include return address.
- __ inc(eax);
-
- // Relocate arguments and return address down the stack.
- {
- Label loop;
- __ Set(ecx, 0);
- __ lea(edx, Operand(esp, edx, times_system_pointer_size, 0));
- __ bind(&loop);
- __ movd(xmm1, Operand(edx, ecx, times_system_pointer_size, 0));
- __ movd(Operand(esp, ecx, times_system_pointer_size, 0), xmm1);
- __ inc(ecx);
- __ cmp(ecx, eax);
- __ j(less, &loop);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop;
- __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
- __ mov(edx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ SmiUntag(edx);
- __ bind(&loop);
- __ dec(edx);
- __ movd(xmm1, FieldOperand(ecx, edx, times_tagged_size,
- FixedArray::kHeaderSize));
- __ movd(Operand(esp, eax, times_system_pointer_size, 0), xmm1);
- __ lea(eax, Operand(eax, 1));
- __ j(greater, &loop);
- }
-
- // Adjust effective number of arguments (eax contains the number of
- // arguments from the call plus return address plus the number of
- // [[BoundArguments]]), so we need to subtract one for the return address.
- __ dec(eax);
- }
-#endif // !V8_REVERSE_JSARGS
__ bind(&no_bound_arguments);
__ movd(edx, xmm0); // Reload edx.
@@ -2865,16 +2571,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
EnterArgumentsAdaptorFrame(masm);
// edi is used as a scratch register. It should be restored from the frame
// when needed.
- Generate_StackOverflowCheck(masm, kExpectedNumberOfArgumentsRegister, edi,
- &stack_overflow);
+ __ StackOverflowCheck(kExpectedNumberOfArgumentsRegister, edi,
+ &stack_overflow);
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
-#ifdef V8_REVERSE_JSARGS
__ lea(edi, Operand(ebp, ecx, times_system_pointer_size, offset));
-#else
- __ lea(edi, Operand(ebp, eax, times_system_pointer_size, offset));
-#endif
__ mov(eax, -1); // account for receiver
Label copy;
@@ -2893,13 +2595,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
EnterArgumentsAdaptorFrame(masm);
// edi is used as a scratch register. It should be restored from the frame
// when needed.
- Generate_StackOverflowCheck(masm, kExpectedNumberOfArgumentsRegister, edi,
- &stack_overflow);
+ __ StackOverflowCheck(kExpectedNumberOfArgumentsRegister, edi,
+ &stack_overflow);
// Remember expected arguments in xmm0.
__ movd(xmm0, kExpectedNumberOfArgumentsRegister);
-#ifdef V8_REVERSE_JSARGS
// Remember new target.
__ movd(xmm1, edx);
@@ -2927,32 +2628,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Restore new.target
__ movd(edx, xmm1);
-#else // !V8_REVERSE_JSARGS
- // Copy receiver and all actual arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(edi, Operand(ebp, eax, times_system_pointer_size, offset));
- // ecx = expected - actual.
- __ sub(kExpectedNumberOfArgumentsRegister, eax);
- // eax = -actual - 1
- __ neg(eax);
- __ sub(eax, Immediate(1));
-
- Label copy;
- __ bind(&copy);
- __ inc(eax);
- __ push(Operand(edi, 0));
- __ sub(edi, Immediate(kSystemPointerSize));
- __ test(eax, eax);
- __ j(not_zero, &copy);
-
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ bind(&fill);
- __ inc(eax);
- __ Push(Immediate(masm->isolate()->factory()->undefined_value()));
- __ cmp(eax, kExpectedNumberOfArgumentsRegister);
- __ j(less, &fill);
-#endif // !V8_REVERSE_JSARGS
// Restore expected arguments.
__ movd(eax, xmm0);
@@ -3539,12 +3214,12 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- eax : call data
// -- edi : holder
// -- esp[0] : return address
- // -- esp[4] : last argument
+ // -- esp[4] : argument 0 (receiver)
+ // -- esp[8] : argument 1
// -- ...
- // -- esp[argc * 4] : first argument
- // -- esp[(argc + 1) * 4] : receiver
+ // -- esp[argc * 4] : argument (argc - 1)
+ // -- esp[(argc + 1) * 4] : argument argc
// -----------------------------------
- // NOTE: The order of args are reversed if V8_REVERSE_JSARGS
Register api_function_address = edx;
Register argc = ecx;
@@ -3614,13 +3289,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
-#ifdef V8_REVERSE_JSARGS
__ lea(scratch,
Operand(scratch, (FCA::kArgsLength + 1) * kSystemPointerSize));
-#else
- __ lea(scratch, Operand(scratch, argc, times_system_pointer_size,
- (FCA::kArgsLength - 1) * kSystemPointerSize));
-#endif
__ mov(ApiParameterOperand(kApiArgc + 1), scratch);
// FunctionCallbackInfo::length_.
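The values_ computation above no longer depends on argc: with the reversed
argument order, the first JS argument sits at a fixed offset past the implicit
FunctionCallbackInfo slots. A minimal C++ sketch of the new lea, where the
kArgsLength value of 6 is an illustrative assumption, not the authoritative
FCA definition:

#include <cstdint>

// Assumed stack layout (ia32, 4-byte slots):
//   [implicit FCA slots: kArgsLength][receiver == argument 0][argument 1]...
// so values_ is a constant offset from the implicit-args base.
uintptr_t ValuesPointer(uintptr_t implicit_args_base) {
  constexpr int kFCAArgsLength = 6;      // illustrative stand-in
  constexpr int kSystemPointerSize = 4;  // ia32
  return implicit_args_base + (kFCAArgsLength + 1) * kSystemPointerSize;
}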
@@ -4118,6 +3788,205 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
MemMoveEmitPopAndReturn(masm);
}
+namespace {
+
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // Save all general purpose registers before messing with them.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
+ __ AllocateStackSpace(kDoubleRegsSize);
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ XMMRegister xmm_reg = XMMRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ movsd(Operand(esp, offset), xmm_reg);
+ }
+
+ __ pushad();
+
+ ExternalReference c_entry_fp_address =
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate);
+ __ mov(masm->ExternalReferenceAsOperand(c_entry_fp_address, esi), ebp);
+
+ const int kSavedRegistersAreaSize =
+ kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;
+
+ // Get the address of the location in the code object
+ // and compute the fp-to-sp delta in register edx.
+ __ mov(ecx, Operand(esp, kSavedRegistersAreaSize));
+ __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kSystemPointerSize));
+
+ __ sub(edx, ebp);
+ __ neg(edx);
+
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(6, eax);
+ __ mov(eax, Immediate(0));
+ Label context_check;
+ __ mov(edi, Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(edi, &context_check);
+ __ mov(eax, Operand(ebp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ mov(Operand(esp, 0 * kSystemPointerSize), eax); // Function.
+ __ mov(Operand(esp, 1 * kSystemPointerSize),
+ Immediate(static_cast<int>(deopt_kind)));
+ __ mov(Operand(esp, 2 * kSystemPointerSize),
+ Immediate(Deoptimizer::kFixedExitSizeMarker)); // Bailout id.
+ __ mov(Operand(esp, 3 * kSystemPointerSize), ecx); // Code address or 0.
+ __ mov(Operand(esp, 4 * kSystemPointerSize), edx); // Fp-to-sp delta.
+ __ Move(Operand(esp, 5 * kSystemPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+ // Preserve deoptimizer object in register eax and get the input
+ // frame descriptor pointer.
+ __ mov(esi, Operand(eax, Deoptimizer::input_offset()));
+
+ // Fill in the input registers.
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ __ pop(Operand(esi, offset));
+ }
+
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ // Fill in the double input registers.
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize;
+ __ movsd(xmm0, Operand(esp, src_offset));
+ __ movsd(Operand(esi, dst_offset), xmm0);
+ }
+
+ // Clear all FPU exceptions.
+ // TODO(ulan): Find out why the TOP register is not zero here in some cases,
+ // and check that the generated code never deoptimizes with unbalanced stack.
+ __ fnclex();
+
+ // Mark the stack as not iterable for the CPU profiler, which won't be able to
+ // walk the stack without the return address.
+ __ mov_b(__ ExternalReferenceAsOperand(
+ ExternalReference::stack_is_iterable_address(isolate), edx),
+ Immediate(0));
+
+ // Remove the return address and the double registers.
+ __ add(esp, Immediate(kDoubleRegsSize + 1 * kSystemPointerSize));
+
+ // Compute a pointer to the unwinding limit in register ecx; that is
+ // the first stack slot not part of the input frame.
+ __ mov(ecx, Operand(esi, FrameDescription::frame_size_offset()));
+ __ add(ecx, esp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ lea(edx, Operand(esi, FrameDescription::frame_content_offset()));
+ Label pop_loop_header;
+ __ jmp(&pop_loop_header);
+ Label pop_loop;
+ __ bind(&pop_loop);
+ __ pop(Operand(edx, 0));
+ __ add(edx, Immediate(sizeof(uint32_t)));
+ __ bind(&pop_loop_header);
+ __ cmp(ecx, esp);
+ __ j(not_equal, &pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ push(eax);
+ __ PrepareCallCFunction(1, esi);
+ __ mov(Operand(esp, 0 * kSystemPointerSize), eax);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ pop(eax);
+
+ __ mov(esp, Operand(eax, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: eax = current FrameDescription**, edx = one
+ // past the last FrameDescription**.
+ __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
+ __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
+ __ lea(edx, Operand(eax, edx, times_system_pointer_size, 0));
+ __ jmp(&outer_loop_header);
+ __ bind(&outer_push_loop);
+ // Inner loop state: esi = current FrameDescription*, ecx = loop
+ // index.
+ __ mov(esi, Operand(eax, 0));
+ __ mov(ecx, Operand(esi, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
+ __ bind(&inner_push_loop);
+ __ sub(ecx, Immediate(sizeof(uint32_t)));
+ __ push(Operand(esi, ecx, times_1, FrameDescription::frame_content_offset()));
+ __ bind(&inner_loop_header);
+ __ test(ecx, ecx);
+ __ j(not_zero, &inner_push_loop);
+ __ add(eax, Immediate(kSystemPointerSize));
+ __ bind(&outer_loop_header);
+ __ cmp(eax, edx);
+ __ j(below, &outer_push_loop);
+
+ // In case of a failed STUB, we have to restore the XMM registers.
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ XMMRegister xmm_reg = XMMRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ movsd(xmm_reg, Operand(esi, src_offset));
+ }
+
+ // Push pc and continuation from the last output frame.
+ __ push(Operand(esi, FrameDescription::pc_offset()));
+ __ push(Operand(esi, FrameDescription::continuation_offset()));
+
+ // Push the registers from the last output frame.
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ __ push(Operand(esi, offset));
+ }
+
+ __ mov_b(__ ExternalReferenceAsOperand(
+ ExternalReference::stack_is_iterable_address(isolate), edx),
+ Immediate(1));
+
+ // Restore the registers from the stack.
+ __ popad();
+
+ __ InitializeRootRegister();
+
+ // Return to the continuation point.
+ __ ret(0);
+}
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
+
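At a high level, Generate_DeoptimizationEntry and the four kind-specific
wrappers implement the flow sketched below in C++. The FrameDescription and
Deoptimizer types here are simplified stand-ins, not V8's actual API:

#include <cstddef>
#include <cstdint>
#include <vector>

struct FrameDescription {
  std::vector<uint32_t> registers;       // GP registers (pushad order)
  std::vector<double> double_registers;  // XMM registers (8 on ia32)
  std::vector<uint32_t> content;         // unwound stack slots
};

struct Deoptimizer {
  FrameDescription input;
  std::vector<FrameDescription> outputs;  // filled by ComputeOutputFrames
};

// Mirrors the generated assembly: capture the live machine state into the
// input frame, ask the runtime to compute the output frames, then
// materialize each output frame on the stack and resume at the
// continuation of the last one.
void DeoptimizationEntry(Deoptimizer* d, const uint32_t* regs,
                         const double* xmms, const uint32_t* stack,
                         size_t frame_slots) {
  d->input.registers.assign(regs, regs + 8);
  d->input.double_registers.assign(xmms, xmms + 8);
  d->input.content.assign(stack, stack + frame_slots);
  // ComputeOutputFrames(d);  // the C call made by the generated code
  for (const FrameDescription& out : d->outputs) {
    (void)out;  // push out.content, then pc, continuation and registers
  }
}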
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/ic-dynamic-map-checks.tq b/deps/v8/src/builtins/ic-dynamic-map-checks.tq
new file mode 100644
index 0000000000..745ab711c1
--- /dev/null
+++ b/deps/v8/src/builtins/ic-dynamic-map-checks.tq
@@ -0,0 +1,155 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace ic {
+
+const kSuccess: constexpr int32
+ generates 'static_cast<int>(DynamicMapChecksStatus::kSuccess)';
+const kBailout: constexpr int32
+ generates 'static_cast<int>(DynamicMapChecksStatus::kBailout)';
+const kDeopt: constexpr int32
+ generates 'static_cast<int>(DynamicMapChecksStatus::kDeopt)';
+extern runtime TryMigrateInstance(implicit context: Context)(Object): Object;
+extern macro LoadFeedbackVectorForStub(): FeedbackVector;
+
+macro PerformMapAndHandlerCheck(
+ entry: constexpr int32, polymorphicArray: WeakFixedArray,
+ weakActualMap: WeakHeapObject,
+ actualHandler: Smi|DataHandler): void labels Next,
+ Deopt {
+ const mapIndex = FeedbackIteratorMapIndexForEntry(entry);
+ assert(mapIndex < polymorphicArray.length_intptr);
+
+ const maybeCachedMap = UnsafeCast<WeakHeapObject>(polymorphicArray[mapIndex]);
+ if (maybeCachedMap != weakActualMap) {
+ goto Next;
+ }
+
+ const handlerIndex = FeedbackIteratorHandlerIndexForEntry(entry);
+ assert(handlerIndex < polymorphicArray.length_intptr);
+ const maybeHandler =
+ Cast<Object>(polymorphicArray[handlerIndex]) otherwise unreachable;
+ if (TaggedNotEqual(maybeHandler, actualHandler)) {
+ goto Deopt;
+ }
+}
+
+macro PerformPolymorphicCheck(
+ expectedPolymorphicArray: HeapObject, actualMap: Map,
+ actualHandler: Smi|DataHandler): int32 {
+ if (!Is<WeakFixedArray>(expectedPolymorphicArray)) {
+ return kDeopt;
+ }
+ try {
+ const polymorphicArray =
+ UnsafeCast<WeakFixedArray>(expectedPolymorphicArray);
+ const weakActualMap = MakeWeak(actualMap);
+ const length = polymorphicArray.length_intptr;
+ assert(length > 0);
+
+ try {
+ if (length >= FeedbackIteratorSizeFor(4)) goto Len4;
+ if (length == FeedbackIteratorSizeFor(3)) goto Len3;
+ if (length == FeedbackIteratorSizeFor(2)) goto Len2;
+ if (length == FeedbackIteratorSizeFor(1)) goto Len1;
+
+ unreachable;
+ } label Len4 {
+ PerformMapAndHandlerCheck(
+ 3, polymorphicArray, weakActualMap, actualHandler) otherwise Len3,
+ Deopt;
+ return kSuccess;
+ } label Len3 {
+ PerformMapAndHandlerCheck(
+ 2, polymorphicArray, weakActualMap, actualHandler) otherwise Len2,
+ Deopt;
+ return kSuccess;
+ } label Len2 {
+ PerformMapAndHandlerCheck(
+ 1, polymorphicArray, weakActualMap, actualHandler) otherwise Len1,
+ Deopt;
+ return kSuccess;
+ } label Len1 {
+ PerformMapAndHandlerCheck(
+ 0, polymorphicArray, weakActualMap, actualHandler)
+ otherwise Bailout, Deopt;
+ return kSuccess;
+ }
+ } label Bailout {
+ return kBailout;
+ } label Deopt {
+ return kDeopt;
+ }
+}
+
+macro PerformMonomorphicCheck(
+ feedbackVector: FeedbackVector, slotIndex: intptr, expectedMap: HeapObject,
+ actualMap: Map, actualHandler: Smi|DataHandler): int32 {
+ if (TaggedEqual(expectedMap, actualMap)) {
+ const handlerIndex = slotIndex + 1;
+ assert(handlerIndex < feedbackVector.length_intptr);
+ const maybeHandler =
+ Cast<Object>(feedbackVector[handlerIndex]) otherwise unreachable;
+ if (TaggedEqual(actualHandler, maybeHandler)) {
+ return kSuccess;
+ }
+
+ return kDeopt;
+ }
+
+ return kBailout;
+}
+
+// This builtin performs map checks by dynamically looking at the
+// feedback in the feedback vector.
+//
+// There are two major cases handled by this builtin:
+// (a) Monomorphic check
+// (b) Polymorphic check
+//
+// For the monomorphic check, the incoming map is migrated and checked
+// against the map and handler in the feedback vector. Otherwise, we
+// bail out to the runtime.
+//
+// For the polymorphic check, the feedback vector is iterated over and
+// each of the maps & handlers is compared against the incoming map and
+// handler.
+//
+// If a map check and its associated handler check both pass, we return
+// kSuccess status.
+//
+// If a map check passes but the associated handler check fails, we
+// return kDeopt status.
+//
+// For other cases, we bail out to the runtime.
+builtin DynamicMapChecks(implicit context: Context)(
+ slotIndex: intptr, actualValue: HeapObject,
+ actualHandler: Smi|DataHandler): int32 {
+ const feedbackVector = LoadFeedbackVectorForStub();
+ let actualMap = actualValue.map;
+ const feedback = feedbackVector[slotIndex];
+ try {
+ const maybePolymorphicArray =
+ GetHeapObjectIfStrong(feedback) otherwise MigrateAndDoMonomorphicCheck;
+ return PerformPolymorphicCheck(
+ maybePolymorphicArray, actualMap, actualHandler);
+ } label MigrateAndDoMonomorphicCheck {
+ const expectedMap = GetHeapObjectAssumeWeak(feedback) otherwise Deopt;
+ if (IsDeprecatedMap(actualMap)) {
+ // TODO(gsathya): Should this migration happen before the
+ // polymorphic check?
+ const result = TryMigrateInstance(actualValue);
+ if (TaggedIsSmi(result)) {
+ return kDeopt;
+ }
+ actualMap = actualValue.map;
+ }
+ return PerformMonomorphicCheck(
+ feedbackVector, slotIndex, expectedMap, actualMap, actualHandler);
+ } label Deopt {
+ return kDeopt;
+ }
+}
+
+} // namespace ic
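The unrolled Len1..Len4 dispatch above relies on polymorphic feedback arrays
storing (weak map, handler) pairs back to back. The constants below are a
hedged C++ restatement of that layout, inferred from the Torque; the real
definitions live in V8's FeedbackIterator:

#include <cstddef>

// Inferred layout: entry i occupies slots [2i] (weak map) and [2i + 1]
// (handler), so an array caching N maps has length 2 * N.
constexpr size_t kEntrySize = 2;
constexpr size_t FeedbackIteratorSizeFor(size_t entries) {
  return entries * kEntrySize;
}
constexpr size_t FeedbackIteratorMapIndexForEntry(size_t entry) {
  return entry * kEntrySize;
}
constexpr size_t FeedbackIteratorHandlerIndexForEntry(size_t entry) {
  return entry * kEntrySize + 1;
}

static_assert(FeedbackIteratorSizeFor(4) == 8, "Len4 arrays hold 8 slots");
static_assert(FeedbackIteratorMapIndexForEntry(3) == 6 &&
                  FeedbackIteratorHandlerIndexForEntry(3) == 7,
              "entry 3 is the pair checked first under the Len4 label");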
diff --git a/deps/v8/src/builtins/ic.tq b/deps/v8/src/builtins/ic.tq
index f6fecc557f..848d7aad58 100644
--- a/deps/v8/src/builtins/ic.tq
+++ b/deps/v8/src/builtins/ic.tq
@@ -50,10 +50,14 @@ macro IsUninitialized(feedback: MaybeObject): bool {
}
extern macro LoadFeedbackVectorSlot(FeedbackVector, uintptr): MaybeObject;
+extern operator '[]' macro LoadFeedbackVectorSlot(
+ FeedbackVector, intptr): MaybeObject;
extern macro StoreFeedbackVectorSlot(
FeedbackVector, uintptr, MaybeObject): void;
extern macro StoreWeakReferenceInFeedbackVector(
FeedbackVector, uintptr, HeapObject): MaybeObject;
extern macro ReportFeedbackUpdate(FeedbackVector, uintptr, constexpr string);
+extern operator '.length_intptr' macro LoadFeedbackVectorLength(FeedbackVector):
+ intptr;
} // namespace ic
diff --git a/deps/v8/src/builtins/internal.tq b/deps/v8/src/builtins/internal.tq
index c377a2a179..7830cffb30 100644
--- a/deps/v8/src/builtins/internal.tq
+++ b/deps/v8/src/builtins/internal.tq
@@ -47,4 +47,47 @@ builtin BytecodeBudgetInterruptFromCode(implicit context: Context)(
tail runtime::BytecodeBudgetInterruptFromCode(feedbackCell);
}
+extern transitioning macro ForInPrepareForTorque(
+ Map | FixedArray, uintptr, Undefined | FeedbackVector): FixedArray;
+
+transitioning builtin ForInPrepare(implicit _context: Context)(
+ enumerator: Map|FixedArray, slot: uintptr,
+ maybeFeedbackVector: Undefined|FeedbackVector): FixedArray {
+ return ForInPrepareForTorque(enumerator, slot, maybeFeedbackVector);
+}
+
+extern transitioning builtin ForInFilter(implicit context: Context)(
+ JSAny, HeapObject): JSAny;
+extern enum ForInFeedback extends uint31 { kAny, ...}
+extern macro UpdateFeedback(
+ SmiTagged<ForInFeedback>, Undefined | FeedbackVector, uintptr);
+
+@export
+transitioning macro ForInNextSlow(
+ context: Context, slot: uintptr, receiver: JSAnyNotSmi, key: JSAny,
+ cacheType: Object, maybeFeedbackVector: Undefined|FeedbackVector): JSAny {
+ assert(receiver.map != cacheType); // Handled on the fast path.
+ UpdateFeedback(
+ SmiTag<ForInFeedback>(ForInFeedback::kAny), maybeFeedbackVector, slot);
+ return ForInFilter(key, receiver);
+}
+
+// Note: the untagged {slot} parameter must be in the first couple of args to
+// guarantee it's allocated in a register.
+transitioning builtin ForInNext(
+ context: Context, slot: uintptr, receiver: JSAnyNotSmi,
+ cacheArray: FixedArray, cacheType: Object, cacheIndex: Smi,
+ maybeFeedbackVector: Undefined|FeedbackVector): JSAny {
+ // Load the next key from the enumeration array.
+ const key = UnsafeCast<JSAny>(cacheArray.objects[cacheIndex]);
+
+ if (receiver.map == cacheType) {
+ // The enum cache is in use for {receiver}, the {key} is definitely valid.
+ return key;
+ }
+
+ return ForInNextSlow(
+ context, slot, receiver, key, cacheType, maybeFeedbackVector);
+}
+
} // namespace internal
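ForInNext's fast path hinges on the enum cache staying valid: while the
receiver's map still equals cacheType, every cached key is known to be
present. A minimal C++ sketch with stand-in types (the real slow path also
records generic feedback via UpdateFeedback):

#include <cstddef>
#include <vector>

struct Map {};                      // stand-in for an object's map
struct Object { const Map* map; };  // stand-in heap object

const char* ForInNextSketch(const Object& receiver, const Map* cache_type,
                            const std::vector<const char*>& cache_array,
                            size_t cache_index, bool* needs_filter) {
  const char* key = cache_array[cache_index];
  if (receiver.map == cache_type) {
    *needs_filter = false;  // enum cache valid; key definitely present
    return key;
  }
  // The map changed since the cache was built, so the key must be
  // re-validated (ForInFilter can yield undefined for deleted properties).
  *needs_filter = true;
  return key;
}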
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 66700a7119..cba65817a4 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -68,23 +68,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
-
-void LoadStackLimit(MacroAssembler* masm, Register destination,
- StackLimitKind kind) {
- DCHECK(masm->root_array_available());
- Isolate* isolate = masm->isolate();
- ExternalReference limit =
- kind == StackLimitKind::kRealStackLimit
- ? ExternalReference::address_of_real_jslimit(isolate)
- : ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
-
- intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
- __ Lw(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
-}
-
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -103,7 +86,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ SmiTag(a0);
__ Push(cp, a0);
__ SmiUntag(a0);
-#ifdef V8_REVERSE_JSARGS
// Set up pointer to last argument (skip receiver).
__ Addu(
t2, fp,
@@ -112,15 +94,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ PushArray(t2, a0, t3, t0);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-#else
- // The receiver for the builtin/api call.
- __ PushRoot(RootIndex::kTheHoleValue);
- // Set up pointer to last argument.
- __ Addu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- __ PushArray(t2, a0, t3, t0);
-#endif
// Call the function.
// a0: number of arguments (untagged)
@@ -141,22 +114,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Ret();
}
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch1, Register scratch2,
- Label* stack_overflow) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
- // Make scratch1 the space we have left. The stack might already be overflowed
- // here which will cause scratch1 to become negative.
- __ subu(scratch1, sp, scratch1);
- // Check if the arguments will overflow the stack.
- __ sll(scratch2, num_args, kPointerSizeLog2);
- // Signed comparison.
- __ Branch(stack_overflow, le, scratch1, Operand(scratch2));
-}
-
} // namespace
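The deleted helpers above now live behind MacroAssembler::LoadStackLimit and
MacroAssembler::StackOverflowCheck. For reference, the check itself is simple;
a C-level sketch under MIPS32 assumptions (4-byte slots, signed comparison
because sp may already sit below the limit):

#include <cstdint>

bool WouldOverflowStack(uintptr_t sp, uintptr_t real_stack_limit,
                        uint32_t num_args) {
  constexpr int kPointerSizeLog2 = 2;  // 4-byte slots on MIPS32
  // The stack may already be overflowed, so the difference must be
  // treated as signed, exactly like the subu/sll/branch sequence above.
  intptr_t space_left = static_cast<intptr_t>(sp - real_stack_limit);
  intptr_t needed = static_cast<intptr_t>(num_args) << kPointerSizeLog2;
  return space_left <= needed;
}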
// The construct stub for ES5 constructor functions and ES6 class constructors.
@@ -222,7 +179,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore new target.
__ Pop(a3);
-#ifdef V8_REVERSE_JSARGS
// Push the allocated receiver to the stack.
__ Push(v0);
// We need two copies because we may have to return the original one
@@ -235,15 +191,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Addu(
t2, fp,
Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
-#else
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(v0, v0);
-
- // Set up pointer to last argument.
- __ Addu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-#endif
// ----------- S t a t e -------------
// -- r3: new target
@@ -261,7 +208,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ SmiUntag(a0);
Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, a0, t0, t1, &stack_overflow);
+ __ StackOverflowCheck(a0, t0, t1, &stack_overflow);
__ Branch(&enough_stack_space);
__ bind(&stack_overflow);
@@ -273,14 +220,17 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ bind(&enough_stack_space);
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+ // should get the formal parameter count and copy the arguments into their
+ // correct positions (including any undefined), instead of delaying this to
+ // InvokeFunction.
+
// Copy arguments and receiver to the expression stack.
__ PushArray(t2, a0, t0, t1);
-#ifdef V8_REVERSE_JSARGS
// We need two copies because we may have to return the original one
// and the calling conventions dictate that the called function pops the
// receiver. The second copy is pushed after the arguments.
__ Push(s0);
-#endif
// Call the function.
__ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
@@ -359,7 +309,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
// Make a2 the space we have left. The stack might already be overflowed
// here which will cause a2 to become negative.
__ Subu(scratch1, sp, scratch1);
@@ -592,7 +542,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Copy arguments to the stack in a loop.
// a0: argc
// s0: argv, i.e. points to first arg
-#ifdef V8_REVERSE_JSARGS
Label loop, entry;
__ Lsa(t2, s0, a0, kPointerSizeLog2);
__ b(&entry);
@@ -608,23 +557,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Push the receiver.
__ Push(a3);
-#else
- // Push the receiver.
- __ Push(a3);
-
- Label loop, entry;
- __ Lsa(t2, s0, a0, kPointerSizeLog2);
- __ b(&entry);
- __ nop(); // Branch delay slot nop.
- // t2 points past last arg.
- __ bind(&loop);
- __ lw(t0, MemOperand(s0)); // Read next parameter.
- __ addiu(s0, s0, kPointerSize);
- __ lw(t0, MemOperand(t0)); // Dereference handle.
- __ push(t0); // Push parameter.
- __ bind(&entry);
- __ Branch(&loop, ne, s0, Operand(t2));
-#endif
// a0: argc
// a1: function
@@ -722,21 +654,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
__ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
-#ifndef V8_REVERSE_JSARGS
- // Push receiver.
- __ lw(t1, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
- __ Push(t1);
-#endif
-
// ----------- S t a t e -------------
// -- a1 : the JSGeneratorObject to resume
// -- t0 : generator function
// -- cp : generator context
// -- ra : return address
- // -- sp[0] : generator receiver
// -----------------------------------
// Copy the function arguments from the generator object's register file.
@@ -747,7 +673,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ lw(t1,
FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
{
-#ifdef V8_REVERSE_JSARGS
Label done_loop, loop;
__ bind(&loop);
__ Subu(a3, a3, Operand(1));
@@ -760,19 +685,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Push receiver.
__ Lw(kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
__ Push(kScratchReg);
-#else
- Label done_loop, loop;
- __ Move(t2, zero_reg);
- __ bind(&loop);
- __ Subu(a3, a3, Operand(1));
- __ Branch(&done_loop, lt, a3, Operand(zero_reg));
- __ Lsa(kScratchReg, t1, t2, kPointerSizeLog2);
- __ lw(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
- __ Push(kScratchReg);
- __ Addu(t2, t2, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
-#endif
}
// Underlying function needs to have bytecode available.
@@ -844,29 +756,44 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
OMIT_SMI_CHECK);
}
-static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
- Register args_count = scratch;
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+ Register scratch2) {
+ Register params_size = scratch1;
- // Get the arguments + receiver count.
- __ lw(args_count,
+ // Get the size of the formal parameters + receiver (in bytes).
+ __ lw(params_size,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ lw(args_count,
- FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+ __ lw(params_size,
+ FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
+
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ Register actual_params_size = scratch2;
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ Lw(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ sll(actual_params_size, actual_params_size, kPointerSizeLog2);
+ __ Addu(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+
+ // If the actual size is bigger than the formal size, use it to drop the
+ // stack arguments.
+ __ slt(t2, params_size, actual_params_size);
+ __ movn(params_size, actual_params_size, t2);
+#endif
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
- __ Addu(sp, sp, args_count);
+ __ Addu(sp, sp, params_size);
}
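With V8_NO_ARGUMENTS_ADAPTOR, any extra actual arguments live directly in this
frame's argument area, so the exit sequence must drop whichever of the two
sizes is larger. A sketch of that computation, assuming both byte sizes
already include the receiver slot as the code above arranges:

#include <algorithm>
#include <cstdint>

uint32_t BytesToDrop(uint32_t formal_params_bytes,
                     uint32_t actual_params_bytes) {
  // Mirrors the slt/movn pair above: keep the larger of the formal
  // parameter area and the actual argument area.
  return std::max(formal_params_bytes, actual_params_bytes);
}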
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register smi_entry,
- OptimizationMarker marker,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ Branch(&no_match, ne, smi_entry, Operand(Smi::FromEnum(marker)));
+ __ Branch(&no_match, ne, actual_marker, Operand(expected_marker));
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
@@ -882,16 +809,21 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));
Register closure = a1;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+ &heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- Label found_deoptimized_code;
__ Lw(scratch1,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ Lw(scratch1,
FieldMemOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset));
__ And(scratch1, scratch1, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ Branch(&found_deoptimized_code, ne, scratch1, Operand(zero_reg));
+ __ Branch(&heal_optimized_code_slot, ne, scratch1, Operand(zero_reg));
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
@@ -903,10 +835,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ Addu(a2, optimized_code_entry, Code::kHeaderSize - kHeapObjectTag);
__ Jump(a2);
- // Optimized code slot contains deoptimized code, evict it and re-enter the
- // closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ // Optimized code slot contains deoptimized code or code is cleared and
+ // optimized code marker isn't updated. Evict the code, update the marker
+ // and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}
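The new control flow treats a GC-cleared weak code reference and code marked
for deoptimization the same way: both fall through to a single runtime call
that heals the feedback slot. A compact sketch of that decision, with
simplified names:

enum class Action { kTailCallCode, kHealOptimizedCodeSlot };

struct OptimizedCodeRef {
  bool cleared;                    // weak reference was GC-cleared
  bool marked_for_deoptimization;  // kMarkedForDeoptimizationBit set
};

Action Dispatch(const OptimizedCodeRef& code) {
  if (code.cleared || code.marked_for_deoptimization) {
    return Action::kHealOptimizedCodeSlot;  // Runtime::kHealOptimizedCodeSlot
  }
  return Action::kTailCallCode;  // install in closure, jump to entry
}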
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
@@ -916,7 +849,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -- optimization_marker : an int32 containing a non-zero optimization
+ // marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
@@ -933,12 +867,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
if (FLAG_debug_code) {
- __ Assert(eq, AbortReason::kExpectedOptimizationSentinel,
- optimization_marker,
- Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+ __ stop();
}
}
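The hunks that follow read a packed int32 flags word from the feedback vector
instead of a tagged slot. The bit positions below are illustrative stand-ins,
not the authoritative FeedbackVector bitfields, but they show the two-step
test the trampoline performs:

#include <cstdint>

// Illustrative bit layout; the real fields are FeedbackVector bitfields.
constexpr uint32_t kOptimizationMarkerMask = 0x7;  // OptimizationMarkerBits
constexpr uint32_t kHasOptimizedCodeBit = 1u << 3;
constexpr uint32_t kHasOptimizedCodeOrCompileOptimizedMarkerMask =
    kHasOptimizedCodeBit | kOptimizationMarkerMask;

bool HasOptimizedCodeOrMarker(uint32_t optimization_state) {
  // First test in the trampoline: anything set means leave the fast path.
  return (optimization_state &
          kHasOptimizedCodeOrCompileOptimizedMarkerMask) != 0;
}

uint32_t DecodeOptimizationMarker(uint32_t optimization_state) {
  // Second step: DecodeField<OptimizationMarkerBits> extracts the marker.
  return optimization_state & kOptimizationMarkerMask;
}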
@@ -1066,18 +999,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
__ Branch(&push_stack_frame, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
- // Read off the optimized code slot in the feedback vector, and if there
+ // Read off the optimization state in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
- Register optimized_code_entry = t0;
- __ Lw(optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+ Register optimization_state = t0;
+ __ Lw(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if the optimized code slot is not empty.
- Label optimized_code_slot_not_empty;
+ // Check if the optimized code slot is not empty or has an optimization marker.
+ Label has_optimized_code_or_marker;
- __ Branch(&optimized_code_slot_not_empty, ne, optimized_code_entry,
- Operand(Smi::FromEnum(OptimizationMarker::kNone)));
+ __ andi(t1, optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask);
+ __ Branch(&has_optimized_code_or_marker, ne, t1, Operand(zero_reg));
Label not_optimized;
__ bind(&not_optimized);
@@ -1122,7 +1055,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ Subu(t1, sp, Operand(t0));
- LoadStackLimit(masm, a2, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
__ Branch(&stack_overflow, lo, t1, Operand(a2));
// If ok, push undefined as the initial value for all register file entries.
@@ -1154,7 +1087,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Perform interrupt stack check.
// TODO(solanes): Merge with the real stack limit check above.
Label stack_check_interrupt, after_stack_check_interrupt;
- LoadStackLimit(masm, a2, StackLimitKind::kInterruptStackLimit);
+ __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kInterruptStackLimit);
__ Branch(&stack_check_interrupt, lo, sp, Operand(a2));
__ bind(&after_stack_check_interrupt);
@@ -1196,7 +1129,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&do_return);
// The return value is in v0.
- LeaveInterpreterFrame(masm, t0);
+ LeaveInterpreterFrame(masm, t0, t1);
__ Jump(ra);
__ bind(&stack_check_interrupt);
@@ -1223,19 +1156,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&optimized_code_slot_not_empty);
+ __ bind(&has_optimized_code_or_marker);
+
Label maybe_has_optimized_code;
- // Check if optimized code marker is actually a weak reference to the
- // optimized code as opposed to an optimization marker.
- __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
- MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+ // Check if an optimization marker is available.
+ __ andi(t1, optimization_state,
+ FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
+ __ Branch(&maybe_has_optimized_code, eq, t1, Operand(zero_reg));
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
- // Load code entry from the weak reference, if it was cleared, resume
- // execution of unoptimized code.
- __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ Register optimized_code_entry = optimization_state;
+ __ Lw(optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
+
TailCallOptimizedCodeSlot(masm, optimized_code_entry, t1, t3);
__ bind(&compile_lazy);
@@ -1259,12 +1199,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ Subu(start_address, start_address, scratch);
// Push the arguments.
-#ifdef V8_REVERSE_JSARGS
__ PushArray(start_address, num_args, scratch, scratch2,
TurboAssembler::PushArrayOrder::kReverse);
-#else
- __ PushArray(start_address, num_args, scratch, scratch2);
-#endif
}
// static
@@ -1280,19 +1216,15 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// -- a1 : the target to call (can be any Object).
// -----------------------------------
Label stack_overflow;
-
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ Subu(a0, a0, Operand(1));
}
-#endif
__ Addu(t0, a0, Operand(1)); // Add one for receiver.
- Generate_StackOverflowCheck(masm, t0, t4, t1, &stack_overflow);
+ __ StackOverflowCheck(t0, t4, t1, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Don't copy receiver.
__ mov(t0, a0);
@@ -1311,21 +1243,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// is below that.
__ Lw(a2, MemOperand(a2, -kSystemPointerSize));
}
-#else
- // Push "undefined" as the receiver arg if we need to.
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(RootIndex::kUndefinedValue);
- __ mov(t0, a0); // No receiver.
- }
-
- // This function modifies a2, t4 and t1.
- Generate_InterpreterPushArgs(masm, t0, a2, t4, t1);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(a2); // Pass the spread in a register
- __ Subu(a0, a0, Operand(1)); // Subtract one for spread
- }
-#endif
// Call the target.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
@@ -1356,9 +1273,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// -----------------------------------
Label stack_overflow;
__ addiu(t2, a0, 1);
- Generate_StackOverflowCheck(masm, t2, t1, t0, &stack_overflow);
+ __ StackOverflowCheck(t2, t1, t0, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ Subu(a0, a0, Operand(1));
@@ -1378,20 +1294,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
} else {
__ AssertUndefinedOrAllocationSite(a2, t0);
}
-#else
- // Push a slot for the receiver.
- __ push(zero_reg);
-
- // This function modified t4, t1 and t0.
- Generate_InterpreterPushArgs(masm, a0, t4, t1, t0);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(a2); // Pass the spread in a register
- __ Subu(a0, a0, Operand(1)); // Subtract one for spread
- } else {
- __ AssertUndefinedOrAllocationSite(a2, t0);
- }
-#endif
if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(a1);
@@ -1555,7 +1457,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
Register scratch = temps.Acquire(); // Temp register is not allocatable.
// Register scratch = t3;
if (with_result) {
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin) {
__ mov(scratch, v0);
} else {
@@ -1566,15 +1467,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
sp, config->num_allocatable_general_registers() * kPointerSize +
BuiltinContinuationFrameConstants::kFixedFrameSize));
}
-#else
- // Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point.
- __ sw(v0,
- MemOperand(
- sp, config->num_allocatable_general_registers() * kPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize));
- USE(scratch);
-#endif
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
@@ -1584,7 +1476,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
}
}
-#ifdef V8_REVERSE_JSARGS
if (with_result && java_script_builtin) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. t0 contains the arguments count, the return value
@@ -1597,7 +1488,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ Subu(a0, a0,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
}
-#endif
__ lw(fp, MemOperand(
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
@@ -1680,9 +1570,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
- // -- sp[0] : argArray
+ // -- sp[0] : receiver
// -- sp[4] : thisArg
- // -- sp[8] : receiver
+ // -- sp[8] : argArray
// -----------------------------------
// 1. Load receiver into a1, argArray into a2 (if present), remove all
@@ -1693,7 +1583,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadRoot(a2, RootIndex::kUndefinedValue);
__ mov(a3, a2);
// Lsa() cannot be used here as the scratch value is used later.
-#ifdef V8_REVERSE_JSARGS
__ lw(a1, MemOperand(sp)); // receiver
__ Branch(&no_arg, eq, a0, Operand(zero_reg));
__ lw(a3, MemOperand(sp, kSystemPointerSize)); // thisArg
@@ -1702,22 +1591,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ bind(&no_arg);
__ Lsa(sp, sp, a0, kPointerSizeLog2);
__ sw(a3, MemOperand(sp));
-#else
- Register scratch = t0;
- __ sll(scratch, a0, kPointerSizeLog2);
- __ Addu(a0, sp, Operand(scratch));
- __ lw(a1, MemOperand(a0)); // receiver
- __ Subu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ lw(a2, MemOperand(a0)); // thisArg
- __ Subu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ lw(a3, MemOperand(a0)); // argArray
- __ bind(&no_arg);
- __ Addu(sp, sp, Operand(scratch));
- __ sw(a2, MemOperand(sp));
- __ mov(a2, a3);
-#endif
}
// ----------- S t a t e -------------
@@ -1750,7 +1623,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
-#ifdef V8_REVERSE_JSARGS
// 1. Get the callable to call (passed as receiver) from the stack.
__ Pop(a1);
@@ -1766,42 +1638,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Adjust the actual number of arguments.
__ addiu(a0, a0, -1);
-#else
- // 1. Make sure we have at least one argument.
- // a0: actual number of arguments
- {
- Label done;
- __ Branch(&done, ne, a0, Operand(zero_reg));
- __ PushRoot(RootIndex::kUndefinedValue);
- __ Addu(a0, a0, Operand(1));
- __ bind(&done);
- }
-
- // 2. Get the function to call (passed as receiver) from the stack.
- // a0: actual number of arguments
- __ LoadReceiver(a1, a0);
-
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- // a0: actual number of arguments
- // a1: function
- {
- Label loop;
- // Calculate the copy start address (destination). Copy end address is sp.
- __ Lsa(a2, sp, a0, kPointerSizeLog2);
-
- __ bind(&loop);
- __ lw(kScratchReg, MemOperand(a2, -kPointerSize));
- __ sw(kScratchReg, MemOperand(a2));
- __ Subu(a2, a2, Operand(kPointerSize));
- __ Branch(&loop, ne, a2, Operand(sp));
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ Subu(a0, a0, Operand(1));
- __ Pop();
- }
-#endif
// 4. Call the callable.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -1810,10 +1646,10 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
- // -- sp[0] : argumentsList
- // -- sp[4] : thisArgument
- // -- sp[8] : target
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[4] : target (if argc >= 1)
+ // -- sp[8] : thisArgument (if argc >= 2)
+ // -- sp[12] : argumentsList (if argc == 3)
// -----------------------------------
// 1. Load target into a1 (if present), argumentsList into a0 (if present),
@@ -1824,7 +1660,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadRoot(a1, RootIndex::kUndefinedValue);
__ mov(a2, a1);
__ mov(a3, a1);
-#ifdef V8_REVERSE_JSARGS
__ Branch(&no_arg, eq, a0, Operand(zero_reg));
__ lw(a1, MemOperand(sp, kSystemPointerSize)); // target
__ Branch(&no_arg, eq, a0, Operand(1));
@@ -1834,25 +1669,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ bind(&no_arg);
__ Lsa(sp, sp, a0, kPointerSizeLog2);
__ sw(a3, MemOperand(sp));
-#else
- Register scratch = t0;
- __ sll(scratch, a0, kPointerSizeLog2);
- __ mov(a0, scratch);
- __ Subu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(zero_reg));
- __ Addu(a0, sp, Operand(a0));
- __ lw(a1, MemOperand(a0)); // target
- __ Subu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ lw(a2, MemOperand(a0)); // thisArgument
- __ Subu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ lw(a3, MemOperand(a0)); // argumentsList
- __ bind(&no_arg);
- __ Addu(sp, sp, Operand(scratch));
- __ sw(a2, MemOperand(sp));
- __ mov(a2, a3);
-#endif
}
// ----------- S t a t e -------------
@@ -1873,12 +1689,11 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
- // -- sp[0] : new.target (optional)
- // -- sp[4] : argumentsList
- // -- sp[8] : target
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[4] : target
+ // -- sp[8] : argumentsList
+ // -- sp[12] : new.target (optional)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
// 1. Load target into a1 (if present), argumentsList into a2 (if present),
// new.target into a3 (if present, otherwise use target), remove all
@@ -1888,7 +1703,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Label no_arg;
__ LoadRoot(a1, RootIndex::kUndefinedValue);
__ mov(a2, a1);
-#ifdef V8_REVERSE_JSARGS
__ mov(t0, a1);
__ Branch(&no_arg, eq, a0, Operand(zero_reg));
__ lw(a1, MemOperand(sp, kSystemPointerSize)); // target
@@ -1900,25 +1714,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ bind(&no_arg);
__ Lsa(sp, sp, a0, kPointerSizeLog2);
__ sw(t0, MemOperand(sp)); // set undefined to the receiver
-#else
- Register scratch = t0;
- // Lsa() cannot be used hare as scratch value used later.
- __ sll(scratch, a0, kPointerSizeLog2);
- __ Addu(a0, sp, Operand(scratch));
- __ sw(a2, MemOperand(a0)); // receiver
- __ Subu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ lw(a1, MemOperand(a0)); // target
- __ mov(a3, a1); // new.target defaults to target
- __ Subu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ lw(a2, MemOperand(a0)); // argumentsList
- __ Subu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ lw(a3, MemOperand(a0)); // new.target
- __ bind(&no_arg);
- __ Addu(sp, sp, Operand(scratch));
-#endif
}
// ----------- S t a t e -------------
@@ -1991,9 +1786,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Check for stack overflow.
Label stack_overflow;
- Generate_StackOverflowCheck(masm, t0, kScratchReg, t1, &stack_overflow);
+ __ StackOverflowCheck(t0, kScratchReg, t1, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Move the arguments already in the stack,
// including the receiver and the return address.
{
@@ -2014,7 +1808,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Addu(dest, dest, Operand(kSystemPointerSize));
__ Branch(&copy, ge, t1, Operand(zero_reg));
}
-#endif
// Push arguments onto the stack (thisArgument is already on the stack).
{
@@ -2029,12 +1822,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Branch(&push, ne, t1, Operand(kScratchReg));
__ LoadRoot(kScratchReg, RootIndex::kUndefinedValue);
__ bind(&push);
-#ifdef V8_REVERSE_JSARGS
__ Sw(kScratchReg, MemOperand(t4, 0));
__ Addu(t4, t4, Operand(kSystemPointerSize));
-#else
- __ Push(kScratchReg);
-#endif
__ Branch(&loop);
__ bind(&done);
__ Addu(a0, a0, t2);
@@ -2076,6 +1865,13 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&new_target_constructor);
}
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
+ // code is erased.
+ __ mov(t3, fp);
+ __ Lw(t2, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+#else
+
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ lw(t3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -2097,17 +1893,16 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ SmiUntag(t2);
}
__ bind(&arguments_done);
+#endif
Label stack_done, stack_overflow;
__ Subu(t2, t2, a2);
__ Branch(&stack_done, le, t2, Operand(zero_reg));
{
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, t2, t0, t1, &stack_overflow);
+ __ StackOverflowCheck(t2, t0, t1, &stack_overflow);
// Forward the arguments from the caller frame.
-
-#ifdef V8_REVERSE_JSARGS
// Point to the first argument to copy (skipping the receiver).
__ Addu(t3, t3,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
@@ -2134,28 +1929,20 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ Addu(dest, dest, Operand(kSystemPointerSize));
__ Branch(&copy, ge, t7, Operand(zero_reg));
}
-#endif
// Copy arguments from the caller frame.
// TODO(victorgomes): Consider using forward order as potentially more cache
// friendly.
{
Label loop;
-#ifndef V8_REVERSE_JSARGS
- __ Addu(t3, t3, Operand(CommonFrameConstants::kFixedFrameSizeAboveFp));
-#endif
__ Addu(a0, a0, t2);
__ bind(&loop);
{
__ Subu(t2, t2, Operand(1));
__ Lsa(kScratchReg, t3, t2, kPointerSizeLog2);
__ lw(kScratchReg, MemOperand(kScratchReg));
-#ifdef V8_REVERSE_JSARGS
__ Lsa(t0, a2, t2, kPointerSizeLog2);
__ Sw(kScratchReg, MemOperand(t0));
-#else
- __ push(kScratchReg);
-#endif
__ Branch(&loop, ne, t2, Operand(zero_reg));
}
}
@@ -2304,7 +2091,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ Subu(t1, sp, Operand(t1));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
__ Branch(&done, hs, t1, Operand(kScratchReg));
{
FrameScope scope(masm, StackFrame::MANUAL);
@@ -2314,7 +2102,6 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
// Pop receiver.
__ Pop(t1);
@@ -2335,42 +2122,6 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Push receiver.
__ Push(t1);
-#else
- __ mov(sp, t1);
- // Relocate arguments down the stack.
- {
- Label loop, done_loop;
- __ mov(t1, zero_reg);
- __ bind(&loop);
- __ Branch(&done_loop, gt, t1, Operand(a0));
- __ Lsa(t2, sp, t0, kPointerSizeLog2);
- __ lw(kScratchReg, MemOperand(t2));
- __ Lsa(t2, sp, t1, kPointerSizeLog2);
- __ sw(kScratchReg, MemOperand(t2));
- __ Addu(t0, t0, Operand(1));
- __ Addu(t1, t1, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop, done_loop;
- __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ SmiUntag(t0);
- __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- __ Subu(t0, t0, Operand(1));
- __ Branch(&done_loop, lt, t0, Operand(zero_reg));
- __ Lsa(t1, a2, t0, kPointerSizeLog2);
- __ lw(kScratchReg, MemOperand(t1));
- __ Lsa(t1, sp, a0, kPointerSizeLog2);
- __ sw(kScratchReg, MemOperand(t1));
- __ Addu(a0, a0, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-#endif
// Call the [[BoundTargetFunction]] via the Call builtin.
__ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
@@ -2482,7 +2233,8 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Subu(t1, sp, Operand(t1));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
__ Branch(&done, hs, t1, Operand(kScratchReg));
{
FrameScope scope(masm, StackFrame::MANUAL);
@@ -2492,7 +2244,6 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
// Pop receiver
__ Pop(t1);
@@ -2513,42 +2264,6 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Push receiver.
__ Push(t1);
-#else
- __ mov(sp, t1);
- // Relocate arguments down the stack.
- {
- Label loop, done_loop;
- __ mov(t1, zero_reg);
- __ bind(&loop);
- __ Branch(&done_loop, ge, t1, Operand(a0));
- __ Lsa(t2, sp, t0, kPointerSizeLog2);
- __ lw(kScratchReg, MemOperand(t2));
- __ Lsa(t2, sp, t1, kPointerSizeLog2);
- __ sw(kScratchReg, MemOperand(t2));
- __ Addu(t0, t0, Operand(1));
- __ Addu(t1, t1, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop, done_loop;
- __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ SmiUntag(t0);
- __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- __ Subu(t0, t0, Operand(1));
- __ Branch(&done_loop, lt, t0, Operand(zero_reg));
- __ Lsa(t1, a2, t0, kPointerSizeLog2);
- __ lw(kScratchReg, MemOperand(t1));
- __ Lsa(t1, sp, a0, kPointerSizeLog2);
- __ sw(kScratchReg, MemOperand(t1));
- __ Addu(a0, a0, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-#endif
// Patch new.target to [[BoundTargetFunction]] if new.target equals target.
{
@@ -2639,14 +2354,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a3: new target (passed through to callee)
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, a2, t1, kScratchReg, &stack_overflow);
+ __ StackOverflowCheck(a2, t1, kScratchReg, &stack_overflow);
// Calculate copy start address into a0 and copy end address into t1.
-#ifdef V8_REVERSE_JSARGS
__ Lsa(a0, fp, a2, kPointerSizeLog2);
-#else
- __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
-#endif
// Adjust for return address and receiver.
__ Addu(a0, a0, Operand(2 * kPointerSize));
// Compute copy end address.
@@ -2673,9 +2384,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, a2, t1, kScratchReg, &stack_overflow);
+ __ StackOverflowCheck(a2, t1, kScratchReg, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Fill the remaining expected arguments with undefined.
__ LoadRoot(t0, RootIndex::kUndefinedValue);
__ SmiUntag(t2, a0);
@@ -2705,50 +2415,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(fp));
__ Subu(a0, a0, Operand(kSystemPointerSize));
-#else
- // Calculate copy start address into a0 and copy end address into t3.
- // a0: actual number of arguments as a smi
- // a1: function
- // a2: expected number of arguments
- // a3: new target (passed through to callee)
- __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
- // Adjust for return address and receiver.
- __ Addu(a0, a0, Operand(2 * kPointerSize));
- // Compute copy end address. Also adjust for return address.
- __ Addu(t3, fp, kPointerSize);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // a0: copy start address
- // a1: function
- // a2: expected number of arguments
- // a3: new target (passed through to callee)
- // t3: copy end address
- Label copy;
- __ bind(&copy);
- __ lw(t0, MemOperand(a0)); // Adjusted above for return addr and receiver.
- __ Subu(sp, sp, kPointerSize);
- __ Subu(a0, a0, kPointerSize);
- __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(t3));
- __ sw(t0, MemOperand(sp)); // In the delay slot.
-
- // Fill the remaining expected arguments with undefined.
- // a1: function
- // a2: expected number of arguments
- // a3: new target (passed through to callee)
- __ LoadRoot(t0, RootIndex::kUndefinedValue);
- __ sll(t2, a2, kPointerSizeLog2);
- __ Subu(t1, fp, Operand(t2));
- // Adjust for frame.
- __ Subu(t1, t1,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ Subu(sp, sp, kPointerSize);
- __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(t1));
- __ sw(t0, MemOperand(sp));
-#endif
}
// Call the entry point.
@@ -3243,11 +2909,10 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- a2 : arguments count (not including the receiver)
// -- a3 : call data
// -- a0 : holder
- // --
- // -- sp[0] : last argument
+ // -- sp[0] : receiver
+ // -- sp[4] : first argument
// -- ...
- // -- sp[(argc - 1) * 4] : first argument
- // -- sp[(argc + 0) * 4] : receiver
+ // -- sp[(argc) * 4] : last argument
// -----------------------------------
Register api_function_address = a1;
@@ -3322,15 +2987,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
-#ifdef V8_REVERSE_JSARGS
__ Addu(scratch, scratch,
Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
-#else
- __ Addu(scratch, scratch,
- Operand((FCA::kArgsLength - 1) * kSystemPointerSize));
- __ sll(t2, argc, kSystemPointerSizeLog2);
- __ Addu(scratch, scratch, t2);
-#endif
__ sw(scratch, MemOperand(sp, 2 * kPointerSize));
// FunctionCallbackInfo::length_.
@@ -3977,6 +3635,219 @@ void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
}
}
+namespace {
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+  // Unlike on ARM, we don't save all the registers, just the useful ones.
+ // For the rest, there are gaps on the stack, so the offsets remain the same.
+ static constexpr int kNumberOfRegisters = Register::kNumRegisters;
+
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+ RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+
+ static constexpr int kDoubleRegsSize =
+ kDoubleSize * DoubleRegister::kNumRegisters;
+
+ // Save all FPU registers before messing with them.
+ __ Subu(sp, sp, Operand(kDoubleRegsSize));
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ Sdc1(fpu_reg, MemOperand(sp, offset));
+ }
+
+ // Push saved_regs (needed to populate FrameDescription::registers_).
+ // Leave gaps for other registers.
+ __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
+ for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+ if ((saved_regs & (1 << i)) != 0) {
+ __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
+ }
+ }
+
+ __ li(a2,
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
+ __ sw(fp, MemOperand(a2));
+
+ static constexpr int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+ __ li(a2, Operand(Deoptimizer::kFixedExitSizeMarker));
+ // Get the address of the location in the code object (a3) (return
+ // address for lazy deoptimization) and compute the fp-to-sp delta in
+ // register t0.
+ __ mov(a3, ra);
+ __ Addu(t0, sp, Operand(kSavedRegistersAreaSize));
+ __ Subu(t0, fp, t0);
+
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(6, t1);
+  // Pass four arguments in a0 to a3; the fifth and sixth go on the stack.
+ __ mov(a0, zero_reg);
+ Label context_check;
+ __ lw(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(a1, &context_check);
+ __ lw(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ li(a1, Operand(static_cast<int>(deopt_kind)));
+ // a2: bailout id already loaded.
+ // a3: code address or 0 already loaded.
+ __ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
+ __ li(t1, ExternalReference::isolate_address(isolate));
+ __ sw(t1, CFunctionArgumentOperand(6)); // Isolate.
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+ // Preserve "deoptimizer" object in register v0 and get the input
+  // frame descriptor pointer into a1 (deoptimizer->input_).
+ // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
+ __ mov(a0, v0);
+ __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((saved_regs & (1 << i)) != 0) {
+ __ lw(a2, MemOperand(sp, i * kPointerSize));
+ __ sw(a2, MemOperand(a1, offset));
+ } else if (FLAG_debug_code) {
+ __ li(a2, kDebugZapValue);
+ __ sw(a2, MemOperand(a1, offset));
+ }
+ }
+
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ // Copy FPU registers to
+ // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ Ldc1(f0, MemOperand(sp, src_offset));
+ __ Sdc1(f0, MemOperand(a1, dst_offset));
+ }
+
+ // Remove the saved registers from the stack.
+ __ Addu(sp, sp, Operand(kSavedRegistersAreaSize));
+
+ // Compute a pointer to the unwinding limit in register a2; that is
+ // the first stack slot not part of the input frame.
+ __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
+ __ Addu(a2, a2, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ Label pop_loop_header;
+ __ BranchShort(&pop_loop_header);
+ __ bind(&pop_loop);
+ __ pop(t0);
+ __ sw(t0, MemOperand(a3, 0));
+ __ addiu(a3, a3, sizeof(uint32_t));
+ __ bind(&pop_loop_header);
+ __ BranchShort(&pop_loop, ne, a2, Operand(sp));
+
+ // Compute the output frame in the deoptimizer.
+ __ push(a0); // Preserve deoptimizer object across call.
+ // a0: deoptimizer object; a1: scratch.
+ __ PrepareCallCFunction(1, a1);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
+
+ __ lw(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: t0 = current "FrameDescription** output_",
+ // a1 = one past the last FrameDescription**.
+ __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
+ __ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_.
+ __ Lsa(a1, t0, a1, kPointerSizeLog2);
+ __ BranchShort(&outer_loop_header);
+ __ bind(&outer_push_loop);
+ // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
+ __ lw(a2, MemOperand(t0, 0)); // output_[ix]
+ __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+ __ BranchShort(&inner_loop_header);
+ __ bind(&inner_push_loop);
+ __ Subu(a3, a3, Operand(sizeof(uint32_t)));
+ __ Addu(t2, a2, Operand(a3));
+ __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
+ __ push(t3);
+ __ bind(&inner_loop_header);
+ __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
+
+ __ Addu(t0, t0, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ BranchShort(&outer_push_loop, lt, t0, Operand(a1));
+
+ __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
+ }
+
+ // Push pc and continuation from the last output frame.
+ __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
+ __ push(t2);
+ __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
+ __ push(t2);
+
+  // Technically restoring 'at' should work unless zero_reg is also restored,
+ // but it's safer to check for this.
+ DCHECK(!(at.bit() & restored_regs));
+ // Restore the registers from the last output frame.
+ __ mov(at, a2);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ lw(ToRegister(i), MemOperand(at, offset));
+ }
+ }
+
+ __ pop(at); // Get continuation, leave pc on stack.
+ __ pop(ra);
+ __ Jump(at);
+ __ stop();
+}
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
+
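The sequence above transcribes the full machine state into a FrameDescription
for the C++ side of the deoptimizer. As a rough illustration of what the
core-register copy loop achieves (a hypothetical helper for exposition only;
the real work is done by the assembly above and by class Deoptimizer):

void CopySavedRegisters(const intptr_t* stack_save_area,
                        FrameDescription* input) {
  // Mirror of the assembly loop that fills FrameDescription::registers_.
  // Slots for unsaved registers keep whatever the stack held there
  // (kDebugZapValue in debug builds), exactly as in the assembly.
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    input->SetRegister(i, stack_save_area[i]);
  }
}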
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 04fce6b2a1..1027ec35e5 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -67,24 +67,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
-
-void LoadStackLimit(MacroAssembler* masm, Register destination,
- StackLimitKind kind) {
- DCHECK(masm->root_array_available());
- Isolate* isolate = masm->isolate();
- ExternalReference limit =
- kind == StackLimitKind::kRealStackLimit
- ? ExternalReference::address_of_real_jslimit(isolate)
- : ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
-
- intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
- CHECK(is_int32(offset));
- __ Ld(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
-}
-
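The static helper deleted here moves onto MacroAssembler, as the
__ LoadStackLimit(...) call sites later in this file suggest. A minimal sketch
of the member version, assuming it mirrors the removed code (treating
StackLimitKind as a member enum is an assumption):

// Sketch only, not the actual src/codegen implementation.
void MacroAssembler::LoadStackLimit(Register destination,
                                    StackLimitKind kind) {
  DCHECK(root_array_available());
  Isolate* isolate = this->isolate();
  ExternalReference limit =
      kind == StackLimitKind::kRealStackLimit
          ? ExternalReference::address_of_real_jslimit(isolate)
          : ExternalReference::address_of_jslimit(isolate);
  DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));

  intptr_t offset =
      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
  CHECK(is_int32(offset));
  // Load through the root register, as the deleted helper did.
  Ld(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
}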
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -104,7 +86,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Push(cp, a0);
__ SmiUntag(a0);
-#ifdef V8_REVERSE_JSARGS
// Set up pointer to last argument (skip receiver).
__ Daddu(
t2, fp,
@@ -113,15 +94,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ PushArray(t2, a0, t3, t0);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-#else
- // The receiver for the builtin/api call.
- __ PushRoot(RootIndex::kTheHoleValue);
- // Set up pointer to last argument.
- __ Daddu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- __ PushArray(t2, a0, t3, t0);
-#endif
// Call the function.
// a0: number of arguments (untagged)
@@ -143,22 +115,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Ret();
}
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch1, Register scratch2,
- Label* stack_overflow) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
- // Make scratch1 the space we have left. The stack might already be overflowed
- // here which will cause scratch1 to become negative.
- __ dsubu(scratch1, sp, scratch1);
- // Check if the arguments will overflow the stack.
- __ dsll(scratch2, num_args, kPointerSizeLog2);
- // Signed comparison.
- __ Branch(stack_overflow, le, scratch1, Operand(scratch2));
-}
-
} // namespace
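Generate_StackOverflowCheck is likewise folded into the macro assembler
throughout this file. A minimal sketch of __ StackOverflowCheck, assuming it
keeps the deleted logic verbatim (mips64 flavour shown):

// Sketch only, mirroring the removed static helper.
void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
                                        Register scratch2,
                                        Label* stack_overflow) {
  // Check the stack for overflow. We are not trying to catch interruptions
  // (e.g. debug break and preemption) here, so the "real stack limit" is
  // checked.
  LoadStackLimit(scratch1, StackLimitKind::kRealStackLimit);
  // Make scratch1 the space we have left. The stack might already be
  // overflowed here, which will cause scratch1 to become negative.
  dsubu(scratch1, sp, scratch1);
  // Check if the arguments will overflow the stack.
  dsll(scratch2, num_args, kPointerSizeLog2);
  // Signed comparison.
  Branch(stack_overflow, le, scratch1, Operand(scratch2));
}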
// The construct stub for ES5 constructor functions and ES6 class constructors.
@@ -224,7 +180,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore new target.
__ Pop(a3);
-#ifdef V8_REVERSE_JSARGS
// Push the allocated receiver to the stack.
__ Push(v0);
@@ -237,15 +192,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Set up pointer to last argument.
__ Daddu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize));
-#else
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(v0, v0);
-
- // Set up pointer to last argument.
- __ Daddu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-#endif
// ----------- S t a t e -------------
// -- r3: new target
@@ -263,7 +209,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ SmiUntag(a0);
Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, a0, t0, t1, &stack_overflow);
+ __ StackOverflowCheck(a0, t0, t1, &stack_overflow);
__ Branch(&enough_stack_space);
__ bind(&stack_overflow);
@@ -275,14 +221,17 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ bind(&enough_stack_space);
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+  // should get the formal parameter count and copy the arguments into their
+  // correct positions (including any undefined), instead of delaying this to
+ // InvokeFunction.
+
// Copy arguments and receiver to the expression stack.
__ PushArray(t2, a0, t0, t1);
-#ifdef V8_REVERSE_JSARGS
// We need two copies because we may have to return the original one
// and the calling conventions dictate that the called function pops the
   // receiver. The second copy is pushed after the arguments.
__ Push(a6);
-#endif
// Call the function.
__ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
@@ -400,21 +349,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
__ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
-#ifndef V8_REVERSE_JSARGS
- // Push receiver.
- __ Ld(a5, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
- __ Push(a5);
-#endif
-
// ----------- S t a t e -------------
// -- a1 : the JSGeneratorObject to resume
// -- a4 : generator function
// -- cp : generator context
// -- ra : return address
- // -- sp[0] : generator receiver
// -----------------------------------
// Push holes for arguments to generator function. Since the parser forced
@@ -427,7 +370,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Ld(t1,
FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
{
-#ifdef V8_REVERSE_JSARGS
Label done_loop, loop;
__ bind(&loop);
__ Dsubu(a3, a3, Operand(1));
@@ -440,19 +382,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Push receiver.
__ Ld(kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
__ Push(kScratchReg);
-#else
- Label done_loop, loop;
- __ Move(t2, zero_reg);
- __ bind(&loop);
- __ Dsubu(a3, a3, Operand(1));
- __ Branch(&done_loop, lt, a3, Operand(zero_reg));
- __ Dlsa(kScratchReg, t1, t2, kPointerSizeLog2);
- __ Ld(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
- __ Push(kScratchReg);
- __ Daddu(t2, t2, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
-#endif
}
// Underlying function needs to have bytecode available.
@@ -524,7 +453,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
// Make a2 the space we have left. The stack might already be overflowed
// here which will cause r2 to become negative.
__ dsubu(scratch1, sp, scratch1);
@@ -771,7 +700,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Copy arguments to the stack in a loop.
// a4: argc
// a5: argv, i.e. points to first arg
-#ifdef V8_REVERSE_JSARGS
Label loop, entry;
__ Dlsa(s1, a5, a4, kPointerSizeLog2);
__ b(&entry);
@@ -787,24 +715,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   // Push the receiver.
__ Push(a3);
-#else
- // Push the receive.
- __ Push(a3);
-
- Label loop, entry;
- __ Dlsa(s1, a5, a4, kPointerSizeLog2);
- __ b(&entry);
- __ nop(); // Branch delay slot nop.
- // s1 points past last arg.
- __ bind(&loop);
- __ Ld(s2, MemOperand(a5)); // Read next parameter.
- __ daddiu(a5, a5, kPointerSize);
- __ Ld(s2, MemOperand(s2)); // Dereference handle.
- __ push(s2); // Push parameter.
- __ bind(&entry);
- __ Branch(&loop, ne, a5, Operand(s1));
-
-#endif
// a0: argc
// a1: function
@@ -863,28 +773,44 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
OMIT_SMI_CHECK);
}
-static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
- Register args_count = scratch;
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+ Register scratch2) {
+ Register params_size = scratch1;
- // Get the arguments + receiver count.
- __ Ld(args_count,
+ // Get the size of the formal parameters + receiver (in bytes).
+ __ Ld(params_size,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ Lw(t0, FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+ __ Lw(params_size,
+ FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
+
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ Register actual_params_size = scratch2;
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ Ld(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ dsll(actual_params_size, actual_params_size, kPointerSizeLog2);
+ __ Daddu(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+
+  // If the actual count is bigger than the formal count, use the actual size
+  // so that all pushed arguments are freed from the stack.
+ __ slt(t2, params_size, actual_params_size);
+ __ movn(params_size, actual_params_size, t2);
+#endif
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
- __ Daddu(sp, sp, args_count);
+ __ Daddu(sp, sp, params_size);
}
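The slt/movn pair above is a branchless max: params_size ends up as the larger
of the formal and actual parameter areas, so the frame teardown frees every
pushed argument. In plain C++ (illustration only, hypothetical helper):

#include <cstdint>

// What the slt/movn pair computes: the number of bytes to drop.
uint64_t BytesToDrop(uint64_t formal_params_size,
                     uint64_t actual_params_size) {
  return formal_params_size < actual_params_size ? actual_params_size
                                                 : formal_params_size;
}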
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|.
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register smi_entry,
- OptimizationMarker marker,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ Branch(&no_match, ne, smi_entry, Operand(Smi::FromEnum(marker)));
+ __ Branch(&no_match, ne, actual_marker, Operand(expected_marker));
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
@@ -900,15 +826,20 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));
Register closure = a1;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+ &heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- Label found_deoptimized_code;
__ Ld(a5,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ Lw(a5, FieldMemOperand(a5, CodeDataContainer::kKindSpecificFlagsOffset));
__ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ Branch(&found_deoptimized_code, ne, a5, Operand(zero_reg));
+ __ Branch(&heal_optimized_code_slot, ne, a5, Operand(zero_reg));
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
@@ -922,10 +853,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(a2);
- // Optimized code slot contains deoptimized code, evict it and re-enter the
- // closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+  // The optimized code slot contains deoptimized code, or the code has been
+  // cleared while the optimization marker was not updated. Evict the code,
+  // update the marker, and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
@@ -935,7 +867,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a Smi containing a non-zero optimization marker.
+  // -- optimization_marker : an int32 containing a non-zero optimization
+ // marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
@@ -952,12 +885,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
if (FLAG_debug_code) {
- __ Assert(eq, AbortReason::kExpectedOptimizationSentinel,
- optimization_marker,
- Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
+ __ stop();
}
}
@@ -1085,18 +1017,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
__ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
- // Read off the optimized code slot in the feedback vector, and if there
+ // Read off the optimization state in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
- Register optimized_code_entry = a4;
- __ Ld(optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+ Register optimization_state = a4;
+ __ Lw(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if the optimized code slot is not empty.
- Label optimized_code_slot_not_empty;
+  // Check if the optimized code slot is not empty or has an optimization marker.
+ Label has_optimized_code_or_marker;
- __ Branch(&optimized_code_slot_not_empty, ne, optimized_code_entry,
- Operand(Smi::FromEnum(OptimizationMarker::kNone)));
+ __ andi(t0, optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask);
+ __ Branch(&has_optimized_code_or_marker, ne, t0, Operand(zero_reg));
Label not_optimized;
__ bind(&not_optimized);
@@ -1141,7 +1073,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ Dsubu(a5, sp, Operand(a4));
- LoadStackLimit(masm, a2, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
__ Branch(&stack_overflow, lo, a5, Operand(a2));
// If ok, push undefined as the initial value for all register file entries.
@@ -1173,7 +1105,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Perform interrupt stack check.
// TODO(solanes): Merge with the real stack limit check above.
Label stack_check_interrupt, after_stack_check_interrupt;
- LoadStackLimit(masm, a5, StackLimitKind::kInterruptStackLimit);
+ __ LoadStackLimit(a5, MacroAssembler::StackLimitKind::kInterruptStackLimit);
__ Branch(&stack_check_interrupt, lo, sp, Operand(a5));
__ bind(&after_stack_check_interrupt);
@@ -1216,7 +1148,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&do_return);
// The return value is in v0.
- LeaveInterpreterFrame(masm, t0);
+ LeaveInterpreterFrame(masm, t0, t1);
__ Jump(ra);
__ bind(&stack_check_interrupt);
@@ -1243,19 +1175,25 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&after_stack_check_interrupt);
- __ bind(&optimized_code_slot_not_empty);
+ __ bind(&has_optimized_code_or_marker);
Label maybe_has_optimized_code;
- // Check if optimized code marker is actually a weak reference to the
- // optimized code as opposed to an optimization marker.
- __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
- MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+  // Check if an optimization marker is available.
+ __ andi(t0, optimization_state,
+ FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
+ __ Branch(&maybe_has_optimized_code, eq, t0, Operand(zero_reg));
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
- // Load code entry from the weak reference, if it was cleared, resume
- // execution of unoptimized code.
- __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ Register optimized_code_entry = optimization_state;
+ __ Ld(optimization_marker,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
+
TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
__ bind(&compile_lazy);
@@ -1280,12 +1218,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ Dsubu(start_address, start_address, scratch);
// Push the arguments.
-#ifdef V8_REVERSE_JSARGS
- __ PushArray(start_address, num_args, scratch, scratch2,
- TurboAssembler::PushArrayOrder::kReverse);
-#else
- __ PushArray(start_address, num_args, scratch, scratch2);
-#endif
+ __ PushArray(start_address, num_args, scratch, scratch2,
+ TurboAssembler::PushArrayOrder::kReverse);
}
// static
@@ -1301,19 +1235,15 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// -- a1 : the target to call (can be any Object).
// -----------------------------------
Label stack_overflow;
-
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ Dsubu(a0, a0, Operand(1));
}
-#endif
__ Daddu(a3, a0, Operand(1)); // Add one for receiver.
- Generate_StackOverflowCheck(masm, a3, a4, t0, &stack_overflow);
+ __ StackOverflowCheck(a3, a4, t0, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Don't copy receiver.
__ mov(a3, a0);
@@ -1332,21 +1262,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// is below that.
__ Ld(a2, MemOperand(a2, -kSystemPointerSize));
}
-#else
- // Push "undefined" as the receiver arg if we need to.
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(RootIndex::kUndefinedValue);
- __ mov(a3, a0);
- }
-
- // This function modifies a2, t0 and a4.
- Generate_InterpreterPushArgs(masm, a3, a2, a4, t0);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(a2); // Pass the spread in a register
- __ Dsubu(a0, a0, Operand(1)); // Subtract one for spread
- }
-#endif
// Call the target.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
@@ -1377,9 +1292,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// -----------------------------------
Label stack_overflow;
__ daddiu(a6, a0, 1);
- Generate_StackOverflowCheck(masm, a6, a5, t0, &stack_overflow);
+ __ StackOverflowCheck(a6, a5, t0, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ Dsubu(a0, a0, Operand(1));
@@ -1399,20 +1313,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
} else {
__ AssertUndefinedOrAllocationSite(a2, t0);
}
-#else
- // Push a slot for the receiver.
- __ push(zero_reg);
-
- // This function modifies t0, a4 and a5.
- Generate_InterpreterPushArgs(masm, a0, a4, a5, t0);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(a2); // Pass the spread in a register
- __ Dsubu(a0, a0, Operand(1)); // Subtract one for spread
- } else {
- __ AssertUndefinedOrAllocationSite(a2, t0);
- }
-#endif
if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(a1);
@@ -1573,7 +1473,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
int allocatable_register_count = config->num_allocatable_general_registers();
Register scratch = t3;
if (with_result) {
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin) {
__ mov(scratch, v0);
} else {
@@ -1584,15 +1483,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
sp, config->num_allocatable_general_registers() * kPointerSize +
BuiltinContinuationFrameConstants::kFixedFrameSize));
}
-#else
- // Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point.
- __ Sd(v0,
- MemOperand(
- sp, config->num_allocatable_general_registers() * kPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize));
- USE(scratch);
-#endif
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
@@ -1602,7 +1492,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
}
}
-#ifdef V8_REVERSE_JSARGS
if (with_result && java_script_builtin) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. t0 contains the arguments count, the return value
@@ -1615,7 +1504,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ Dsubu(a0, a0,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
}
-#endif
__ Ld(fp, MemOperand(
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
@@ -1697,9 +1585,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
- // -- sp[0] : argArray
+ // -- sp[0] : receiver
// -- sp[4] : thisArg
- // -- sp[8] : receiver
+ // -- sp[8] : argArray
// -----------------------------------
Register argc = a0;
@@ -1718,7 +1606,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
// consistent state for a simple pop operation.
-#ifdef V8_REVERSE_JSARGS
__ mov(scratch, argc);
__ Ld(this_arg, MemOperand(sp, kPointerSize));
__ Ld(arg_array, MemOperand(sp, 2 * kPointerSize));
@@ -1729,18 +1616,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Ld(receiver, MemOperand(sp));
__ Dlsa(sp, sp, argc, kSystemPointerSizeLog2);
__ Sd(this_arg, MemOperand(sp));
-#else
- __ Dsubu(sp, sp, Operand(2 * kPointerSize));
- __ Dlsa(sp, sp, argc, kPointerSizeLog2);
- __ mov(scratch, argc);
- __ Pop(this_arg, arg_array); // Overwrite argc
- __ Movz(arg_array, undefined_value, scratch); // if argc == 0
- __ Movz(this_arg, undefined_value, scratch); // if argc == 0
- __ Dsubu(scratch, scratch, Operand(1));
- __ Movz(arg_array, undefined_value, scratch); // if argc == 1
- __ Ld(receiver, MemOperand(sp));
- __ Sd(this_arg, MemOperand(sp));
-#endif
}
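Both the kept path and the removed one default missing arguments with
conditional moves (Movz) instead of branches: scratch starts at argc, so the
first moves fire for argc == 0 and, after the decrement, the last one fires
for argc == 1. A hypothetical C++ rendering of that defaulting (illustration
only, not V8 code):

// Mirrors the Movz chain: this_arg defaults when argc == 0,
// arg_array when argc <= 1.
void DefaultApplyArguments(int argc, Object undefined_value,
                           Object* this_arg, Object* arg_array) {
  if (argc == 0) *this_arg = undefined_value;
  if (argc <= 1) *arg_array = undefined_value;
}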
// ----------- S t a t e -------------
@@ -1775,7 +1650,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
-#ifdef V8_REVERSE_JSARGS
// 1. Get the callable to call (passed as receiver) from the stack.
{
__ Pop(a1);
@@ -1793,42 +1667,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Adjust the actual number of arguments.
__ daddiu(a0, a0, -1);
-#else
- // 1. Make sure we have at least one argument.
- // a0: actual number of arguments
- {
- Label done;
- __ Branch(&done, ne, a0, Operand(zero_reg));
- __ PushRoot(RootIndex::kUndefinedValue);
- __ Daddu(a0, a0, Operand(1));
- __ bind(&done);
- }
-
- // 2. Get the function to call (passed as receiver) from the stack.
- // a0: actual number of arguments
- __ LoadReceiver(a1, a0);
-
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- // a0: actual number of arguments
- // a1: function
- {
- Label loop;
- // Calculate the copy start address (destination). Copy end address is sp.
- __ Dlsa(a2, sp, a0, kPointerSizeLog2);
-
- __ bind(&loop);
- __ Ld(kScratchReg, MemOperand(a2, -kPointerSize));
- __ Sd(kScratchReg, MemOperand(a2));
- __ Dsubu(a2, a2, Operand(kPointerSize));
- __ Branch(&loop, ne, a2, Operand(sp));
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ Dsubu(a0, a0, Operand(1));
- __ Pop();
- }
-#endif
// 4. Call the callable.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -1837,10 +1675,10 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
- // -- sp[0] : argumentsList (if argc ==3)
- // -- sp[4] : thisArgument (if argc >=2)
- // -- sp[8] : target (if argc >=1)
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[8] : target (if argc >= 1)
+ // -- sp[16] : thisArgument (if argc >= 2)
+ // -- sp[24] : argumentsList (if argc == 3)
// -----------------------------------
Register argc = a0;
@@ -1859,7 +1697,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
   // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
// consistent state for a simple pop operation.
-#ifdef V8_REVERSE_JSARGS
__ mov(scratch, argc);
__ Ld(target, MemOperand(sp, kPointerSize));
__ Ld(this_argument, MemOperand(sp, 2 * kPointerSize));
@@ -1875,22 +1712,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Dlsa(sp, sp, argc, kSystemPointerSizeLog2);
__ Sd(this_argument, MemOperand(sp, 0)); // Overwrite receiver
-#else
- __ Dsubu(sp, sp, Operand(3 * kPointerSize));
- __ Dlsa(sp, sp, argc, kPointerSizeLog2);
- __ mov(scratch, argc);
- __ Pop(target, this_argument, arguments_list);
- __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
- __ Movz(this_argument, undefined_value, scratch); // if argc == 0
- __ Movz(target, undefined_value, scratch); // if argc == 0
- __ Dsubu(scratch, scratch, Operand(1));
- __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
- __ Movz(this_argument, undefined_value, scratch); // if argc == 1
- __ Dsubu(scratch, scratch, Operand(1));
- __ Movz(arguments_list, undefined_value, scratch); // if argc == 2
-
- __ Sd(this_argument, MemOperand(sp, 0)); // Overwrite receiver
-#endif
}
// ----------- S t a t e -------------
@@ -1912,12 +1733,11 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
- // -- sp[0] : new.target (optional) (dummy value if argc <= 2)
- // -- sp[4] : argumentsList (dummy value if argc <= 1)
- // -- sp[8] : target (dummy value if argc == 0)
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[8] : target
+ // -- sp[16] : argumentsList
+ // -- sp[24] : new.target (optional)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
Register argc = a0;
Register arguments_list = a2;
@@ -1936,7 +1756,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
   // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
// consistent state for a simple pop operation.
-#ifdef V8_REVERSE_JSARGS
__ mov(scratch, argc);
__ Ld(target, MemOperand(sp, kPointerSize));
__ Ld(arguments_list, MemOperand(sp, 2 * kPointerSize));
@@ -1952,22 +1771,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Dlsa(sp, sp, argc, kSystemPointerSizeLog2);
__ Sd(undefined_value, MemOperand(sp, 0)); // Overwrite receiver
-#else
- __ Dsubu(sp, sp, Operand(3 * kPointerSize));
- __ Dlsa(sp, sp, argc, kPointerSizeLog2);
- __ mov(scratch, argc);
- __ Pop(target, arguments_list, new_target);
- __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
- __ Movz(new_target, undefined_value, scratch); // if argc == 0
- __ Movz(target, undefined_value, scratch); // if argc == 0
- __ Dsubu(scratch, scratch, Operand(1));
- __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
- __ Movz(new_target, target, scratch); // if argc == 1
- __ Dsubu(scratch, scratch, Operand(1));
- __ Movz(new_target, target, scratch); // if argc == 2
-
- __ Sd(undefined_value, MemOperand(sp, 0)); // Overwrite receiver
-#endif
}
// ----------- S t a t e -------------
@@ -2044,9 +1847,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Check for stack overflow.
Label stack_overflow;
- Generate_StackOverflowCheck(masm, len, kScratchReg, a5, &stack_overflow);
+ __ StackOverflowCheck(len, kScratchReg, a5, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
   // Move the arguments already on the stack,
// including the receiver and the return address.
{
@@ -2067,7 +1869,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Daddu(dest, dest, Operand(kSystemPointerSize));
__ Branch(&copy, ge, t0, Operand(zero_reg));
}
-#endif
// Push arguments onto the stack (thisArgument is already on the stack).
{
@@ -2087,13 +1888,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ Branch(&push, ne, a5, Operand(t1));
__ LoadRoot(a5, RootIndex::kUndefinedValue);
__ bind(&push);
-#ifdef V8_REVERSE_JSARGS
__ Sd(a5, MemOperand(a7, 0));
__ Daddu(a7, a7, Operand(kSystemPointerSize));
__ Daddu(scratch, scratch, Operand(kSystemPointerSize));
-#else
- __ Push(a5);
-#endif
__ Branch(&loop, ne, scratch, Operand(sp));
__ bind(&done);
}
@@ -2134,6 +1931,13 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&new_target_constructor);
}
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
+ // code is erased.
+ __ mov(a6, fp);
+ __ Ld(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+#else
+
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ Ld(a6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -2155,17 +1959,17 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
MemOperand(a6, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ bind(&arguments_done);
+#endif
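Under V8_NO_ARGUMENTS_ADAPTOR the caller's argument count sits at a fixed slot
in every standard frame, which is why the adaptor-frame probe kept under #else
collapses to a single load. Conceptually (hypothetical helper, assuming the
usual Memory<> accessor):

// Sketch only: read argc straight off the caller's standard frame.
intptr_t CallerArgCount(Address caller_fp) {
  return Memory<intptr_t>(caller_fp + StandardFrameConstants::kArgCOffset);
}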
Label stack_done, stack_overflow;
__ Subu(a7, a7, a2);
__ Branch(&stack_done, le, a7, Operand(zero_reg));
{
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, a7, a4, a5, &stack_overflow);
+ __ StackOverflowCheck(a7, a4, a5, &stack_overflow);
// Forward the arguments from the caller frame.
-#ifdef V8_REVERSE_JSARGS
// Point to the first argument to copy (skipping the receiver).
__ Daddu(a6, a6,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
@@ -2192,28 +1996,20 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ Daddu(dest, dest, Operand(kSystemPointerSize));
__ Branch(&copy, ge, t2, Operand(zero_reg));
}
-#endif
// Copy arguments from the caller frame.
// TODO(victorgomes): Consider using forward order as potentially more cache
// friendly.
{
Label loop;
-#ifndef V8_REVERSE_JSARGS
- __ Daddu(a6, a6, Operand(CommonFrameConstants::kFixedFrameSizeAboveFp));
-#endif
__ Daddu(a0, a0, a7);
__ bind(&loop);
{
__ Subu(a7, a7, Operand(1));
__ Dlsa(t0, a6, a7, kPointerSizeLog2);
__ Ld(kScratchReg, MemOperand(t0));
-#ifdef V8_REVERSE_JSARGS
__ Dlsa(t0, a2, a7, kPointerSizeLog2);
__ Sd(kScratchReg, MemOperand(t0));
-#else
- __ push(kScratchReg);
-#endif
__ Branch(&loop, ne, a7, Operand(zero_reg));
}
}
@@ -2361,7 +2157,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ Dsubu(t0, sp, Operand(a5));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
__ Branch(&done, hs, t0, Operand(kScratchReg));
{
FrameScope scope(masm, StackFrame::MANUAL);
@@ -2371,7 +2168,6 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
// Pop receiver.
__ Pop(t0);
@@ -2393,41 +2189,6 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Push receiver.
__ Push(t0);
-#else
- __ mov(sp, t0);
- // Relocate arguments down the stack.
- {
- Label loop, done_loop;
- __ mov(a5, zero_reg);
- __ bind(&loop);
- __ Branch(&done_loop, gt, a5, Operand(a0));
- __ Dlsa(a6, sp, a4, kPointerSizeLog2);
- __ Ld(kScratchReg, MemOperand(a6));
- __ Dlsa(a6, sp, a5, kPointerSizeLog2);
- __ Sd(kScratchReg, MemOperand(a6));
- __ Daddu(a4, a4, Operand(1));
- __ Daddu(a5, a5, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop, done_loop;
- __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- __ Dsubu(a4, a4, Operand(1));
- __ Branch(&done_loop, lt, a4, Operand(zero_reg));
- __ Dlsa(a5, a2, a4, kPointerSizeLog2);
- __ Ld(kScratchReg, MemOperand(a5));
- __ Dlsa(a5, sp, a0, kPointerSizeLog2);
- __ Sd(kScratchReg, MemOperand(a5));
- __ Daddu(a0, a0, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-#endif
// Call the [[BoundTargetFunction]] via the Call builtin.
__ Ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
@@ -2536,7 +2297,8 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Dsubu(t0, sp, Operand(a5));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
- LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
__ Branch(&done, hs, t0, Operand(kScratchReg));
{
FrameScope scope(masm, StackFrame::MANUAL);
@@ -2546,7 +2308,6 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
// Pop receiver.
__ Pop(t0);
@@ -2568,41 +2329,6 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Push receiver.
__ Push(t0);
-#else
- __ mov(sp, t0);
- // Relocate arguments down the stack.
- {
- Label loop, done_loop;
- __ mov(a5, zero_reg);
- __ bind(&loop);
- __ Branch(&done_loop, ge, a5, Operand(a0));
- __ Dlsa(a6, sp, a4, kPointerSizeLog2);
- __ Ld(kScratchReg, MemOperand(a6));
- __ Dlsa(a6, sp, a5, kPointerSizeLog2);
- __ Sd(kScratchReg, MemOperand(a6));
- __ Daddu(a4, a4, Operand(1));
- __ Daddu(a5, a5, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop, done_loop;
- __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- __ Dsubu(a4, a4, Operand(1));
- __ Branch(&done_loop, lt, a4, Operand(zero_reg));
- __ Dlsa(a5, a2, a4, kPointerSizeLog2);
- __ Ld(kScratchReg, MemOperand(a5));
- __ Dlsa(a5, sp, a0, kPointerSizeLog2);
- __ Sd(kScratchReg, MemOperand(a5));
- __ Daddu(a0, a0, Operand(1));
- __ Branch(&loop);
- __ bind(&done_loop);
- }
-#endif
// Patch new.target to [[BoundTargetFunction]] if new.target equals target.
{
@@ -2693,16 +2419,11 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a3: new target (passed through to callee)
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, a2, a5, kScratchReg, &stack_overflow);
+ __ StackOverflowCheck(a2, a5, kScratchReg, &stack_overflow);
// Calculate copy start address into a0 and copy end address into a4.
-#ifdef V8_REVERSE_JSARGS
__ dsll(a0, a2, kPointerSizeLog2);
__ Daddu(a0, fp, a0);
-#else
- __ SmiScale(a0, a0, kPointerSizeLog2);
- __ Daddu(a0, fp, a0);
-#endif
// Adjust for return address and receiver.
__ Daddu(a0, a0, Operand(2 * kPointerSize));
@@ -2730,9 +2451,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, a2, a5, kScratchReg, &stack_overflow);
+ __ StackOverflowCheck(a2, a5, kScratchReg, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Fill the remaining expected arguments with undefined.
__ LoadRoot(t0, RootIndex::kUndefinedValue);
__ SmiUntag(t1, a0);
@@ -2763,51 +2483,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(fp));
__ Dsubu(a0, a0, Operand(kSystemPointerSize));
-#else
- // Calculate copy start address into a0 and copy end address into a7.
- // a0: actual number of arguments as a smi
- // a1: function
- // a2: expected number of arguments
- // a3: new target (passed through to callee)
- __ SmiScale(a0, a0, kPointerSizeLog2);
- __ Daddu(a0, fp, a0);
- // Adjust for return address and receiver.
- __ Daddu(a0, a0, Operand(2 * kPointerSize));
- // Compute copy end address. Also adjust for return address.
- __ Daddu(a7, fp, kPointerSize);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // a0: copy start address
- // a1: function
- // a2: expected number of arguments
- // a3: new target (passed through to callee)
- // a7: copy end address
- Label copy;
- __ bind(&copy);
- __ Ld(a4, MemOperand(a0)); // Adjusted above for return addr and receiver.
- __ Dsubu(sp, sp, kPointerSize);
- __ Dsubu(a0, a0, kPointerSize);
- __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a7));
- __ Sd(a4, MemOperand(sp)); // In the delay slot.
-
- // Fill the remaining expected arguments with undefined.
- // a1: function
- // a2: expected number of arguments
- // a3: new target (passed through to callee)
- __ LoadRoot(a5, RootIndex::kUndefinedValue);
- __ dsll(a6, a2, kPointerSizeLog2);
- __ Dsubu(a4, fp, Operand(a6));
- // Adjust for frame.
- __ Dsubu(a4, a4,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ Dsubu(sp, sp, kPointerSize);
- __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a4));
- __ Sd(a5, MemOperand(sp));
-#endif
}
// Call the entry point.
@@ -3304,11 +2979,10 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- a2 : arguments count (not including the receiver)
// -- a3 : call data
// -- a0 : holder
- // --
- // -- sp[0] : last argument
+ // -- sp[0] : receiver
+ // -- sp[8] : first argument
// -- ...
- // -- sp[(argc - 1) * 8] : first argument
- // -- sp[(argc + 0) * 8] : receiver
+ // -- sp[(argc) * 8] : last argument
// -----------------------------------
Register api_function_address = a1;
@@ -3385,15 +3059,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
-#ifdef V8_REVERSE_JSARGS
__ Daddu(scratch, scratch,
Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
-#else
- __ Daddu(scratch, scratch,
- Operand((FCA::kArgsLength - 1) * kSystemPointerSize));
- __ dsll(t2, argc, kSystemPointerSizeLog2);
- __ Daddu(scratch, scratch, t2);
-#endif
__ Sd(scratch, MemOperand(sp, 2 * kPointerSize));
@@ -3533,6 +3200,218 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
__ Jump(t9);
}
+namespace {
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+  // Unlike on ARM, we don't save all the registers, just the useful ones.
+ // For the rest, there are gaps on the stack, so the offsets remain the same.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+ RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
+
+ // Save all double FPU registers before messing with them.
+ __ Dsubu(sp, sp, Operand(kDoubleRegsSize));
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ Sdc1(fpu_reg, MemOperand(sp, offset));
+ }
+
+ // Push saved_regs (needed to populate FrameDescription::registers_).
+ // Leave gaps for other registers.
+ __ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize);
+ for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+ if ((saved_regs & (1 << i)) != 0) {
+ __ Sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
+ }
+ }
+
+ __ li(a2,
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
+ __ Sd(fp, MemOperand(a2));
+
+ const int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+ __ li(a2, Operand(Deoptimizer::kFixedExitSizeMarker));
+ // Get the address of the location in the code object (a3) (return
+ // address for lazy deoptimization) and compute the fp-to-sp delta in
+ // register a4.
+ __ mov(a3, ra);
+ __ Daddu(a4, sp, Operand(kSavedRegistersAreaSize));
+
+ __ Dsubu(a4, fp, a4);
+
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(6, a5);
+  // Pass six arguments, according to the n64 ABI.
+ __ mov(a0, zero_reg);
+ Label context_check;
+ __ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(a1, &context_check);
+ __ Ld(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ li(a1, Operand(static_cast<int>(deopt_kind)));
+ // a2: bailout id already loaded.
+ // a3: code address or 0 already loaded.
+ // a4: already has fp-to-sp delta.
+ __ li(a5, ExternalReference::isolate_address(isolate));
+
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+ // Preserve "deoptimizer" object in register v0 and get the input
+  // frame descriptor pointer into a1 (deoptimizer->input_).
+ // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
+ __ mov(a0, v0);
+ __ Ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((saved_regs & (1 << i)) != 0) {
+ __ Ld(a2, MemOperand(sp, i * kPointerSize));
+ __ Sd(a2, MemOperand(a1, offset));
+ } else if (FLAG_debug_code) {
+ __ li(a2, kDebugZapValue);
+ __ Sd(a2, MemOperand(a1, offset));
+ }
+ }
+
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ // Copy FPU registers to
+ // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ Ldc1(f0, MemOperand(sp, src_offset));
+ __ Sdc1(f0, MemOperand(a1, dst_offset));
+ }
+
+ // Remove the saved registers from the stack.
+ __ Daddu(sp, sp, Operand(kSavedRegistersAreaSize));
+
+ // Compute a pointer to the unwinding limit in register a2; that is
+ // the first stack slot not part of the input frame.
+ __ Ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
+ __ Daddu(a2, a2, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ Daddu(a3, a1, Operand(FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ Label pop_loop_header;
+ __ BranchShort(&pop_loop_header);
+ __ bind(&pop_loop);
+ __ pop(a4);
+ __ Sd(a4, MemOperand(a3, 0));
+ __ daddiu(a3, a3, sizeof(uint64_t));
+ __ bind(&pop_loop_header);
+ __ BranchShort(&pop_loop, ne, a2, Operand(sp));
+ // Compute the output frame in the deoptimizer.
+ __ push(a0); // Preserve deoptimizer object across call.
+ // a0: deoptimizer object; a1: scratch.
+ __ PrepareCallCFunction(1, a1);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
+
+ __ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: a4 = current "FrameDescription** output_",
+ // a1 = one past the last FrameDescription**.
+ __ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
+ __ Ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
+ __ Dlsa(a1, a4, a1, kPointerSizeLog2);
+ __ BranchShort(&outer_loop_header);
+ __ bind(&outer_push_loop);
+ // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
+ __ Ld(a2, MemOperand(a4, 0)); // output_[ix]
+ __ Ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+ __ BranchShort(&inner_loop_header);
+ __ bind(&inner_push_loop);
+ __ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
+ __ Daddu(a6, a2, Operand(a3));
+ __ Ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
+ __ push(a7);
+ __ bind(&inner_loop_header);
+ __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
+
+ __ Daddu(a4, a4, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
+
+ __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
+ }
+
+ // Push pc and continuation from the last output frame.
+ __ Ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
+ __ push(a6);
+ __ Ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
+ __ push(a6);
+
+  // Technically restoring 'at' should work unless zero_reg is also restored,
+ // but it's safer to check for this.
+ DCHECK(!(at.bit() & restored_regs));
+ // Restore the registers from the last output frame.
+ __ mov(at, a2);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ Ld(ToRegister(i), MemOperand(at, offset));
+ }
+ }
+
+ __ pop(at); // Get continuation, leave pc on stack.
+ __ pop(ra);
+ __ Jump(at);
+ __ stop();
+}
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 8f262818ab..efd65e2971 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -123,7 +123,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Push(cp, r3);
__ SmiUntag(r3, SetRC);
-#ifdef V8_REVERSE_JSARGS
// Set up pointer to last argument (skip receiver).
__ addi(
r7, fp,
@@ -132,15 +131,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ PushArray(r7, r3, r8, r0);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-#else
- // The receiver for the builtin/api call.
- __ PushRoot(RootIndex::kTheHoleValue);
- // Set up pointer to last argument.
- __ addi(r7, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- __ PushArray(r7, r3, r8, r0);
-#endif
// Call the function.
// r3: number of arguments (untagged)
@@ -240,7 +230,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore new target.
__ Pop(r6);
-#ifdef V8_REVERSE_JSARGS
// Push the allocated receiver to the stack.
__ Push(r3);
// We need two copies because we may have to return the original one
@@ -254,15 +243,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ addi(
r7, fp,
Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
-#else
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(r3, r3);
-
- // Set up pointer to last argument.
- __ addi(r7, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-#endif
// ----------- S t a t e -------------
// -- r6: new target
@@ -295,10 +275,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Copy arguments and receiver to the expression stack.
__ PushArray(r7, r3, r8, r0);
-#ifdef V8_REVERSE_JSARGS
// Push implicit receiver.
__ Push(r9);
-#endif
// Call the function.
{
@@ -435,19 +413,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ cmpl(sp, scratch);
__ blt(&stack_overflow);
-#ifndef V8_REVERSE_JSARGS
- // Push receiver.
- __ LoadTaggedPointerField(
- scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
- __ Push(scratch);
-#endif
-
// ----------- S t a t e -------------
// -- r4 : the JSGeneratorObject to resume
// -- r7 : generator function
// -- cp : generator context
// -- lr : return address
- // -- sp[0] : generator receiver
// -----------------------------------
// Copy the function arguments from the generator object's register file.
@@ -459,9 +429,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
r5,
FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset));
{
-#ifdef V8_REVERSE_JSARGS
Label done_loop, loop;
-
__ mr(r9, r6);
__ bind(&loop);
@@ -481,24 +449,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ LoadAnyTaggedField(
scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
__ Push(scratch);
-#else
- Label loop, done_loop;
- __ cmpi(r6, Operand::Zero());
- __ ble(&done_loop);
-
- // setup r9 to first element address - kTaggedSize
- __ addi(r9, r5,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
-
- __ mtctr(r6);
- __ bind(&loop);
- __ LoadAnyTaggedField(scratch, MemOperand(r9, kTaggedSize));
- __ addi(r9, r9, Operand(kTaggedSize));
- __ push(scratch);
- __ bdnz(&loop);
-
- __ bind(&done_loop);
-#endif
}
// Underlying function needs to have bytecode available.
@@ -514,6 +464,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
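+    // Load the formal parameter count; it is used as the argument count
+    // (r3) for the resume call.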
+ __ LoadP(r3, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadHalfWord(
+ r3,
+ FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@@ -804,7 +758,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r4: function
// r7: argc
// r8: argv, i.e. points to first arg
-#ifdef V8_REVERSE_JSARGS
Label loop, done;
__ cmpi(r7, Operand::Zero());
__ beq(&done);
@@ -823,24 +776,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Push the receiver.
__ Push(r6);
-#else
- // Push the receiver.
- __ Push(r6);
-
- Label loop, done;
- __ cmpi(r7, Operand::Zero());
- __ beq(&done);
-
- __ mtctr(r7);
- __ subi(r8, r8, Operand(kSystemPointerSize));
- __ bind(&loop);
- __ LoadPU(r9, MemOperand(r8, kSystemPointerSize)); // read next parameter
- __ LoadP(r0, MemOperand(r9)); // dereference handle
- __ push(r0); // push parameter
- __ bdnz(&loop);
- __ bind(&done);
-#endif
-
// r3: argc
// r4: function
// r6: new.target
@@ -918,13 +853,13 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ add(sp, sp, args_count);
}
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register smi_entry,
- OptimizationMarker marker,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ CmpSmiLiteral(smi_entry, Smi::FromEnum(marker), r0);
+ __ cmpi(actual_marker, Operand(expected_marker));
__ bne(&no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
@@ -941,10 +876,15 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(!AreAliased(r4, r6, optimized_code_entry, scratch));
Register closure = r4;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+ &heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- Label found_deoptimized_code;
__ LoadTaggedPointerField(
scratch,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
@@ -952,7 +892,7 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
scratch,
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
- __ bne(&found_deoptimized_code, cr0);
+ __ bne(&heal_optimized_code_slot, cr0);
// Optimized code is good, get it into the closure and link the closure
// into the optimized functions list, then tail call the optimized code.
@@ -962,10 +902,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ LoadCodeObjectEntry(r5, optimized_code_entry);
__ Jump(r5);
- // Optimized code slot contains deoptimized code, evict it and re-enter
- // the closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+  // The optimized code slot contains deoptimized code, or the code was
+  // cleared without the optimization marker being updated. Evict the code,
+  // update the marker and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
@@ -975,7 +916,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -- r6 : new target (preserved for callee if needed, and caller)
// -- r4 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a Smi containing a non-zero optimization marker.
+  //  -- optimization_marker : an int32 containing a non-zero optimization
+  //                           marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, r4, r6, optimization_marker));
@@ -992,13 +934,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
if (FLAG_debug_code) {
- __ CmpSmiLiteral(optimization_marker,
- Smi::FromEnum(OptimizationMarker::kInOptimizationQueue),
- r0);
- __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
+ __ stop();
}
}
@@ -1135,18 +1075,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE));
__ bne(&push_stack_frame);
- Register optimized_code_entry = r7;
+ Register optimization_state = r7;
- // Read off the optimized code slot in the feedback vector.
- __ LoadAnyTaggedField(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
- // Check if the optimized code slot is not empty.
- Label optimized_code_slot_not_empty;
- __ CmpSmiLiteral(optimized_code_entry,
- Smi::FromEnum(OptimizationMarker::kNone), r0);
- __ bne(&optimized_code_slot_not_empty);
+ // Read off the optimization state in the feedback vector.
+ __ LoadWord(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset),
+ r0);
+
+  // Check if the optimized code slot is not empty or has an optimization
+  // marker.
+ Label has_optimized_code_or_marker;
+ __ TestBitMask(optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask,
+ r0);
+ __ bne(&has_optimized_code_or_marker, cr0);
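+  // The flags word packs the optimization marker bits together with the
+  // "maybe has optimized code" bit, so a single masked test covers both.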
Label not_optimized;
__ bind(&not_optimized);
@@ -1233,8 +1174,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Perform interrupt stack check.
// TODO(solanes): Merge with the real stack limit check above.
Label stack_check_interrupt, after_stack_check_interrupt;
- LoadStackLimit(masm, r6, StackLimitKind::kInterruptStackLimit);
- __ cmpl(sp, r6);
+ LoadStackLimit(masm, r0, StackLimitKind::kInterruptStackLimit);
+ __ cmpl(sp, r0);
__ blt(&stack_check_interrupt);
__ bind(&after_stack_check_interrupt);
@@ -1299,25 +1240,33 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
- __ SmiTag(r6, kInterpreterBytecodeOffsetRegister);
- __ StoreP(r6,
+ __ SmiTag(r0, kInterpreterBytecodeOffsetRegister);
+ __ StoreP(r0,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ jmp(&after_stack_check_interrupt);
- __ bind(&optimized_code_slot_not_empty);
+ __ bind(&has_optimized_code_or_marker);
Label maybe_has_optimized_code;
- // Check if optimized code marker is actually a weak reference to the
- // optimized code.
- __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
- MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+
+  // Check if optimized code is available.
+ __ TestBitMask(optimization_state,
+ FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
+ r0);
+ __ beq(&maybe_has_optimized_code, cr0);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
- // Load code entry from the weak reference, if it was cleared, resume
- // execution of unoptimized code.
- __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ Register optimized_code_entry = optimization_state;
+ __ LoadAnyTaggedField(
+      optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r9);
__ bind(&compile_lazy);
@@ -1336,12 +1285,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ ShiftLeftImm(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ sub(start_address, start_address, scratch);
// Push the arguments.
-#ifdef V8_REVERSE_JSARGS
__ PushArray(start_address, num_args, scratch, r0,
TurboAssembler::PushArrayOrder::kReverse);
-#else
- __ PushArray(start_address, num_args, scratch, r0);
-#endif
}
// static
@@ -1358,19 +1303,15 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// -----------------------------------
Label stack_overflow;
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ subi(r3, r3, Operand(1));
}
-#endif
// Calculate number of arguments (add one for receiver).
__ addi(r6, r3, Operand(1));
-
Generate_StackOverflowCheck(masm, r6, ip, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Don't copy receiver. Argument count is correct.
__ mr(r6, r3);
@@ -1389,21 +1330,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// lies in the next interpreter register.
__ LoadP(r5, MemOperand(r5, -kSystemPointerSize));
}
-#else
- // Push "undefined" as the receiver arg if we need to.
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(RootIndex::kUndefinedValue);
- __ mr(r6, r3); // Argument count is correct.
- }
-
- // Push the arguments. r5, r6, r7 will be modified.
- Generate_InterpreterPushArgs(masm, r6, r5, r7);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(r5); // Pass the spread in a register
- __ subi(r3, r3, Operand(1)); // Subtract one for spread
- }
-#endif
// Call the target.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
@@ -1436,7 +1362,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ addi(r8, r3, Operand(1));
Generate_StackOverflowCheck(masm, r8, ip, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ subi(r3, r3, Operand(1));
@@ -1458,22 +1383,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
} else {
__ AssertUndefinedOrAllocationSite(r5, r8);
}
-#else
-
- // Push a slot for the receiver to be constructed.
- __ li(r0, Operand::Zero());
- __ push(r0);
-
- // Push the arguments (skip if none).
- Generate_InterpreterPushArgs(masm, r3, r7, r8);
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(r5); // Pass the spread in a register
- __ subi(r3, r3, Operand(1)); // Subtract one for spread
- } else {
- __ AssertUndefinedOrAllocationSite(r5, r8);
- }
-
-#endif
if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(r4);
@@ -1642,7 +1551,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
int allocatable_register_count = config->num_allocatable_general_registers();
Register scratch = ip;
if (with_result) {
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin) {
__ mr(scratch, r3);
} else {
@@ -1654,16 +1562,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
kSystemPointerSize +
BuiltinContinuationFrameConstants::kFixedFrameSize));
}
-#else
- // Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point.
- __ StoreP(
- r3,
- MemOperand(sp, config->num_allocatable_general_registers() *
- kSystemPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize));
- USE(scratch);
-#endif
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
@@ -1672,7 +1570,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ SmiUntag(Register::from_code(code));
}
}
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin && with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. r0 contains the arguments count, the return value
@@ -1685,7 +1582,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ subi(r3, r3,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
}
-#endif
__ LoadP(
fp,
MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
@@ -1783,9 +1679,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argc
- // -- sp[0] : argArray
+ // -- sp[0] : receiver
// -- sp[4] : thisArg
- // -- sp[8] : receiver
+ // -- sp[8] : argArray
// -----------------------------------
// 1. Load receiver into r4, argArray into r5 (if present), remove all
@@ -1795,9 +1691,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadRoot(r8, RootIndex::kUndefinedValue);
__ mr(r5, r8);
-#ifdef V8_REVERSE_JSARGS
Label done;
-
__ LoadP(r4, MemOperand(sp)); // receiver
__ cmpi(r3, Operand(1));
__ blt(&done);
@@ -1807,24 +1701,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadP(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
-#else
- Label done;
- __ ShiftLeftImm(r4, r3, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r4, MemOperand(sp, r4)); // receiver
-
- __ li(r0, Operand(1));
- __ sub(r7, r3, r0, LeaveOE, SetRC);
- __ blt(&done, cr0);
- __ ShiftLeftImm(r8, r7, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r8, MemOperand(sp, r8));
-
- __ sub(r7, r7, r0, LeaveOE, SetRC);
- __ blt(&done, cr0);
- __ ShiftLeftImm(r5, r7, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r5, MemOperand(sp, r5));
-
- __ bind(&done);
-#endif
__ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2));
__ add(sp, sp, ip);
__ StoreP(r8, MemOperand(sp));
@@ -1860,7 +1736,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
-#ifdef V8_REVERSE_JSARGS
// 1. Get the callable to call (passed as receiver) from the stack.
__ Pop(r4);
@@ -1877,46 +1752,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Adjust the actual number of arguments.
__ subi(r3, r3, Operand(1));
-#else
- // 1. Make sure we have at least one argument.
- // r3: actual number of arguments
- {
- Label done;
- __ cmpi(r3, Operand::Zero());
- __ bne(&done);
- __ PushRoot(RootIndex::kUndefinedValue);
- __ addi(r3, r3, Operand(1));
- __ bind(&done);
- }
-
- // 2. Get the callable to call (passed as receiver) from the stack.
- // r3: actual number of arguments
- __ LoadReceiver(r4, r3);
-
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- // r3: actual number of arguments
- // r4: callable
- {
- Register scratch = r6;
- Label loop;
- // Calculate the copy start address (destination). Copy end address is sp.
- __ ShiftLeftImm(r5, r3, Operand(kSystemPointerSizeLog2));
- __ add(r5, sp, r5);
-
- __ mtctr(r3);
- __ bind(&loop);
- __ LoadP(scratch, MemOperand(r5, -kSystemPointerSize));
- __ StoreP(scratch, MemOperand(r5));
- __ subi(r5, r5, Operand(kSystemPointerSize));
- __ bdnz(&loop);
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ subi(r3, r3, Operand(1));
- __ pop();
- }
-#endif
// 4. Call the callable.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -1925,10 +1760,10 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argc
- // -- sp[0] : argumentsList
- // -- sp[4] : thisArgument
- // -- sp[8] : target
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[4] : target (if argc >= 1)
+ // -- sp[8] : thisArgument (if argc >= 2)
+ // -- sp[12] : argumentsList (if argc == 3)
// -----------------------------------
// 1. Load target into r4 (if present), argumentsList into r5 (if present),
@@ -1939,9 +1774,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ mr(r8, r4);
__ mr(r5, r4);
-#ifdef V8_REVERSE_JSARGS
Label done;
-
__ cmpi(r3, Operand(1));
__ blt(&done);
__ LoadP(r4, MemOperand(sp, kSystemPointerSize)); // thisArg
@@ -1953,26 +1786,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadP(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
-#else
- Label done;
- __ li(r0, Operand(1));
- __ sub(r7, r3, r0, LeaveOE, SetRC);
- __ blt(&done, cr0);
- __ ShiftLeftImm(r4, r7, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r4, MemOperand(sp, r4)); // receiver
-
- __ sub(r7, r7, r0, LeaveOE, SetRC);
- __ blt(&done, cr0);
- __ ShiftLeftImm(r8, r7, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r8, MemOperand(sp, r8));
-
- __ sub(r7, r7, r0, LeaveOE, SetRC);
- __ blt(&done, cr0);
- __ ShiftLeftImm(r5, r7, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r5, MemOperand(sp, r5));
-
- __ bind(&done);
-#endif
__ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2));
__ add(sp, sp, ip);
__ StoreP(r8, MemOperand(sp));
@@ -1996,12 +1809,11 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argc
- // -- sp[0] : new.target (optional)
- // -- sp[4] : argumentsList
- // -- sp[8] : target
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[4] : target
+ // -- sp[8] : argumentsList
+ // -- sp[12] : new.target (optional)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
// 1. Load target into r4 (if present), argumentsList into r5 (if present),
// new.target into r6 (if present, otherwise use target), remove all
@@ -2011,9 +1823,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadRoot(r4, RootIndex::kUndefinedValue);
__ mr(r5, r4);
-#ifdef V8_REVERSE_JSARGS
Label done;
-
__ mr(r7, r4);
__ cmpi(r3, Operand(1));
__ blt(&done);
@@ -2029,31 +1839,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2));
__ add(sp, sp, r0);
__ StoreP(r7, MemOperand(sp));
-#else
- Label done;
- __ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2));
- __ StorePX(r5, MemOperand(sp, ip));
- __ li(r0, Operand(1));
- __ sub(r7, r3, r0, LeaveOE, SetRC);
- __ blt(&done, cr0);
- __ ShiftLeftImm(r4, r7, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r4, MemOperand(sp, r4)); // receiver
-
- __ mr(r6, r4);
- __ sub(r7, r7, r0, LeaveOE, SetRC);
- __ blt(&done, cr0);
- __ ShiftLeftImm(r5, r7, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r5, MemOperand(sp, r5));
-
- __ sub(r7, r7, r0, LeaveOE, SetRC);
- __ blt(&done, cr0);
- __ ShiftLeftImm(r6, r7, Operand(kSystemPointerSizeLog2));
- __ LoadPX(r6, MemOperand(sp, r6));
-
- __ bind(&done);
- __ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2));
- __ add(sp, sp, r0);
-#endif
}
// ----------- S t a t e -------------
@@ -2142,7 +1927,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label stack_overflow;
Generate_StackOverflowCheck(masm, r7, scratch, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Move the arguments already in the stack,
// including the receiver and the return address.
{
@@ -2161,7 +1945,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ StorePU(r0, MemOperand(dest, kSystemPointerSize));
__ bdnz(&copy);
}
-#endif
// Push arguments onto the stack (thisArgument is already on the stack).
{
@@ -2178,11 +1961,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bne(&skip);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ bind(&skip);
-#ifdef V8_REVERSE_JSARGS
__ StorePU(scratch, MemOperand(r8, kSystemPointerSize));
-#else
- __ push(scratch);
-#endif
__ bdnz(&loop);
__ bind(&no_args);
__ add(r3, r3, r7);
@@ -2270,7 +2049,6 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Generate_StackOverflowCheck(masm, r8, scratch, &stack_overflow);
// Forward the arguments from the caller frame.
-#ifdef V8_REVERSE_JSARGS
// Point to the first argument to copy (skipping the receiver).
__ addi(r7, r7,
Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
@@ -2296,15 +2074,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ StorePU(r0, MemOperand(dest, kSystemPointerSize));
__ bdnz(&copy);
}
-#endif
// Copy arguments from the caller frame.
// TODO(victorgomes): Consider using forward order as potentially more cache
// friendly.
{
Label loop;
-#ifndef V8_REVERSE_JSARGS
- __ addi(r7, r7, Operand(CommonFrameConstants::kFixedFrameSizeAboveFp));
-#endif
__ add(r3, r3, r8);
__ addi(r5, r5, Operand(kSystemPointerSize));
__ bind(&loop);
@@ -2312,11 +2086,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ subi(r8, r8, Operand(1));
__ ShiftLeftImm(scratch, r8, Operand(kSystemPointerSizeLog2));
__ LoadPX(r0, MemOperand(r7, scratch));
-#ifdef V8_REVERSE_JSARGS
__ StorePX(r0, MemOperand(r5, scratch));
-#else
- __ push(r0);
-#endif
__ cmpi(r8, Operand::Zero());
__ bne(&loop);
}
@@ -2480,7 +2250,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
// Pop receiver.
__ Pop(r8);
@@ -2503,44 +2272,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Push receiver.
__ Push(r8);
-#else
- __ mr(scratch, sp);
- __ mr(sp, r0);
-
- // Relocate arguments down the stack.
- // -- r3 : the number of arguments (not including the receiver)
- // -- r9 : the previous stack pointer
- // -- r10: the size of the [[BoundArguments]]
- {
- Label skip, loop;
- __ li(r8, Operand::Zero());
- __ cmpi(r3, Operand::Zero());
- __ beq(&skip);
- __ mtctr(r3);
- __ bind(&loop);
- __ LoadPX(r0, MemOperand(scratch, r8));
- __ StorePX(r0, MemOperand(sp, r8));
- __ addi(r8, r8, Operand(kSystemPointerSize));
- __ bdnz(&loop);
- __ bind(&skip);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop;
- __ ShiftLeftImm(r10, r7, Operand(kTaggedSizeLog2));
- __ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, r10);
- __ mtctr(r7);
- __ bind(&loop);
- __ LoadAnyTaggedField(ip, MemOperand(r5, -kTaggedSize), r0);
- __ StorePX(ip, MemOperand(sp, r8));
- __ addi(r8, r8, Operand(kSystemPointerSize));
- __ addi(r5, r5, Operand(-kTaggedSize));
- __ bdnz(&loop);
- __ add(r3, r3, r7);
- }
-#endif
}
__ bind(&no_bound_arguments);
}
@@ -2736,17 +2467,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r6 : new target (passed through to callee)
// -----------------------------------
- Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
+ Label dont_adapt_arguments, stack_overflow;
__ cmpli(r5, Operand(kDontAdaptArgumentsSentinel));
__ beq(&dont_adapt_arguments);
__ LoadTaggedPointerField(
r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
-#ifndef V8_REVERSE_JSARGS
- __ TestBitMask(r7, SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask,
- r0);
- __ bne(&skip_adapt_arguments, cr0);
-#endif
// -------------------------------------------
// Adapt arguments.
@@ -2767,13 +2493,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4: function
// r5: expected number of arguments
// r6: new target (passed through to callee)
-#ifdef V8_REVERSE_JSARGS
__ ShiftLeftImm(r3, r5, Operand(kSystemPointerSizeLog2));
__ add(r3, r3, fp);
-#else
- __ SmiToPtrArrayOffset(r3, r3);
- __ add(r3, r3, fp);
-#endif
// adjust for return address and receiver
__ addi(r3, r3, Operand(2 * kSystemPointerSize));
__ ShiftLeftImm(r7, r5, Operand(kSystemPointerSizeLog2));
@@ -2803,7 +2524,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
EnterArgumentsAdaptorFrame(masm);
Generate_StackOverflowCheck(masm, r5, r8, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Fill the remaining expected arguments with undefined.
// r0: actual number of arguments as a smi
// r1: function
@@ -2848,47 +2568,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ cmp(r3, fp); // Compare before moving to next argument.
__ subi(r3, r3, Operand(kSystemPointerSize));
__ b(ne, &copy);
-#else
- // Calculate copy start address into r0 and copy end address is fp.
- // r3: actual number of arguments as a smi
- // r4: function
- // r5: expected number of arguments
- // r6: new target (passed through to callee)
- __ SmiToPtrArrayOffset(r3, r3);
- __ add(r3, r3, fp);
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r3: copy start address
- // r4: function
- // r5: expected number of arguments
- // r6: new target (passed through to callee)
- Label copy;
- __ bind(&copy);
- // Adjust load for return address and receiver.
- __ LoadP(r0, MemOperand(r3, 2 * kSystemPointerSize));
- __ push(r0);
- __ cmp(r3, fp); // Compare before moving to next argument.
- __ subi(r3, r3, Operand(kSystemPointerSize));
- __ bne(&copy);
-
- // Fill the remaining expected arguments with undefined.
- // r4: function
- // r5: expected number of arguments
- // r6: new target (passed through to callee)
- __ LoadRoot(r0, RootIndex::kUndefinedValue);
- __ ShiftLeftImm(r7, r5, Operand(kSystemPointerSizeLog2));
- __ sub(r7, fp, r7);
- // Adjust for frame.
- __ subi(r7, r7,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kSystemPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ push(r0);
- __ cmp(sp, r7);
- __ bne(&fill);
-#endif
}
// Call the entry point.
@@ -2911,42 +2590,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
// -------------------------------------------
- // Skip adapt arguments.
- // -------------------------------------------
- __ bind(&skip_adapt_arguments);
- {
- // The callee cannot observe the actual arguments, so it's safe to just
- // pass the expected arguments by massaging the stack appropriately. See
- // http://bit.ly/v8-faster-calls-with-arguments-mismatch for details.
- Label under_application, over_application;
- __ cmp(r3, r5);
- __ blt(&under_application);
-
- __ bind(&over_application);
- {
- // Remove superfluous parameters from the stack.
- __ sub(r7, r3, r5);
- __ mr(r3, r5);
- __ ShiftLeftImm(r7, r7, Operand(kSystemPointerSizeLog2));
- __ add(sp, sp, r7);
- __ b(&dont_adapt_arguments);
- }
-
- __ bind(&under_application);
- {
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ LoadRoot(r7, RootIndex::kUndefinedValue);
- __ bind(&fill);
- __ addi(r3, r3, Operand(1));
- __ push(r7);
- __ cmp(r3, r5);
- __ blt(&fill);
- __ b(&dont_adapt_arguments);
- }
- }
-
- // -------------------------------------------
// Dont adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
@@ -3451,12 +3094,11 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- r5 : arguments count (not including the receiver)
// -- r6 : call data
// -- r3 : holder
- // -- sp[0] : last argument
+ // -- sp[0] : receiver
+ // -- sp[8] : first argument
// -- ...
- // -- sp[(argc - 1)* 4] : first argument
- // -- sp[(argc + 0) * 4] : receiver
+ // -- sp[(argc) * 8] : last argument
// -----------------------------------
- // NOTE: The order of args are reversed if V8_REVERSE_JSARGS
Register api_function_address = r4;
Register argc = r5;
@@ -3531,15 +3173,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
-#ifdef V8_REVERSE_JSARGS
__ addi(scratch, scratch,
Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
-#else
- __ addi(scratch, scratch,
- Operand((FCA::kArgsLength - 1) * kSystemPointerSize));
- __ ShiftLeftImm(ip, argc, Operand(kSystemPointerSizeLog2));
- __ add(scratch, scratch, ip);
-#endif
__ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
kSystemPointerSize));
@@ -3698,6 +3333,252 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
__ blr();
}
+namespace {
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // Unlike on ARM we don't save all the registers, just the useful ones.
+ // For the rest, there are gaps on the stack, so the offsets remain the same.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+ RegList saved_regs = restored_regs | sp.bit();
+
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
+
+ // Save all double registers before messing with them.
+ __ subi(sp, sp, Operand(kDoubleRegsSize));
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister dreg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ stfd(dreg, MemOperand(sp, offset));
+ }
+
+ // Push saved_regs (needed to populate FrameDescription::registers_).
+ // Leave gaps for other registers.
+ __ subi(sp, sp, Operand(kNumberOfRegisters * kSystemPointerSize));
+ for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+ if ((saved_regs & (1 << i)) != 0) {
+ __ StoreP(ToRegister(i), MemOperand(sp, kSystemPointerSize * i));
+ }
+ }
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
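+    // Publish fp as the C entry FP so that stack walkers can find the
+    // topmost JS frame while the deoptimizer runs C++ code below.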
+ __ Move(scratch, ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, isolate));
+ __ StoreP(fp, MemOperand(scratch));
+ }
+ const int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
+
+  // The bailout id was passed in r29 by the caller.
+ __ mr(r5, r29);
+
+ __ mov(r5, Operand(Deoptimizer::kFixedExitSizeMarker));
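+  // Note: r5 is immediately overwritten. Deopt exits have a fixed size, so
+  // the marker is passed as the bailout id and the actual id is recovered
+  // from the return address in Deoptimizer::New().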
+ // Get the address of the location in the code object (r6) (return
+ // address for lazy deoptimization) and compute the fp-to-sp delta in
+ // register r7.
+ __ mflr(r6);
+ __ addi(r7, sp, Operand(kSavedRegistersAreaSize));
+ __ sub(r7, fp, r7);
+
+ // Allocate a new deoptimizer object.
+ // Pass six arguments in r3 to r8.
+ __ PrepareCallCFunction(6, r8);
+ __ li(r3, Operand::Zero());
+ Label context_check;
+ __ LoadP(r4, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(r4, &context_check);
+ __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ li(r4, Operand(static_cast<int>(deopt_kind)));
+ // r5: bailout id already loaded.
+ // r6: code address or 0 already loaded.
+ // r7: Fp-to-sp delta.
+ __ Move(r8, ExternalReference::isolate_address(isolate));
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+ // Preserve "deoptimizer" object in register r3 and get the input
+ // frame descriptor pointer to r4 (deoptimizer->input_);
+ __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ __ LoadP(r5, MemOperand(sp, i * kSystemPointerSize));
+ __ StoreP(r5, MemOperand(r4, offset));
+ }
+
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ // Copy double registers to
+ // double_registers_[DoubleRegister::kNumRegisters]
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset =
+ code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
+ __ lfd(d0, MemOperand(sp, src_offset));
+ __ stfd(d0, MemOperand(r4, dst_offset));
+ }
+
+ // Mark the stack as not iterable for the CPU profiler which won't be able to
+ // walk the stack without the return address.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register zero = r7;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ li(zero, Operand(0));
+ __ stb(zero, MemOperand(is_iterable));
+ }
+
+ // Remove the saved registers from the stack.
+ __ addi(sp, sp, Operand(kSavedRegistersAreaSize));
+
+ // Compute a pointer to the unwinding limit in register r5; that is
+ // the first stack slot not part of the input frame.
+ __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
+ __ add(r5, r5, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ addi(r6, r4, Operand(FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ Label pop_loop_header;
+ __ b(&pop_loop_header);
+ __ bind(&pop_loop);
+ __ pop(r7);
+ __ StoreP(r7, MemOperand(r6, 0));
+ __ addi(r6, r6, Operand(kSystemPointerSize));
+ __ bind(&pop_loop_header);
+ __ cmp(r5, sp);
+ __ bne(&pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ push(r3); // Preserve deoptimizer object across call.
+ // r3: deoptimizer object; r4: scratch.
+ __ PrepareCallCFunction(1, r4);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ pop(r3); // Restore deoptimizer object (class Deoptimizer).
+
+ __ LoadP(sp, MemOperand(r3, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: r7 = current "FrameDescription** output_",
+ // r4 = one past the last FrameDescription**.
+ __ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
+ __ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_.
+ __ ShiftLeftImm(r4, r4, Operand(kSystemPointerSizeLog2));
+ __ add(r4, r7, r4);
+ __ b(&outer_loop_header);
+
+ __ bind(&outer_push_loop);
+ // Inner loop state: r5 = current FrameDescription*, r6 = loop index.
+ __ LoadP(r5, MemOperand(r7, 0)); // output_[ix]
+ __ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset()));
+ __ b(&inner_loop_header);
+
+ __ bind(&inner_push_loop);
+ __ addi(r6, r6, Operand(-sizeof(intptr_t)));
+ __ add(r9, r5, r6);
+ __ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset()));
+ __ push(r9);
+
+ __ bind(&inner_loop_header);
+ __ cmpi(r6, Operand::Zero());
+ __ bne(&inner_push_loop); // test for gt?
+
+ __ addi(r7, r7, Operand(kSystemPointerSize));
+ __ bind(&outer_loop_header);
+ __ cmp(r7, r4);
+ __ blt(&outer_push_loop);
+
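+  // Restore the allocatable double registers from the input frame
+  // description.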
+ __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister dreg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ lfd(dreg, MemOperand(r4, src_offset));
+ }
+
+  // Push pc and continuation from the last output frame.
+ __ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset()));
+ __ push(r9);
+ __ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset()));
+ __ push(r9);
+
+ // Restore the registers from the last output frame.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ DCHECK(!(scratch.bit() & restored_regs));
+ __ mr(scratch, r5);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ LoadP(ToRegister(i), MemOperand(scratch, offset));
+ }
+ }
+ }
+
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register one = r7;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ li(one, Operand(1));
+ __ stb(one, MemOperand(is_iterable));
+ }
+
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+    __ pop(scratch);  // Pop the continuation.
+    __ pop(r0);       // Pop the pc ...
+    __ mtlr(r0);      // ... and move it into the link register.
+ __ Jump(scratch);
+ }
+
+ __ stop();
+}
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
#undef __
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/regexp.tq b/deps/v8/src/builtins/regexp.tq
index e09ddf3d7c..7c043efa55 100644
--- a/deps/v8/src/builtins/regexp.tq
+++ b/deps/v8/src/builtins/regexp.tq
@@ -184,7 +184,8 @@ extern enum Flag constexpr 'JSRegExp::Flag' {
kMultiline,
kSticky,
kUnicode,
- kDotAll
+ kDotAll,
+ kLinear
}
const kRegExpPrototypeOldFlagGetter: constexpr int31
@@ -244,6 +245,13 @@ transitioning javascript builtin RegExpPrototypeMultilineGetter(
'RegExp.prototype.multiline');
}
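+// Not part of ECMA-262: getter for the experimental linear-time regexp
+// engine's 'linear' flag (--enable-experimental-regexp-engine).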
+transitioning javascript builtin RegExpPrototypeLinearGetter(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ return FlagGetter(
+ receiver, Flag::kLinear, kRegExpPrototypeOldFlagGetter,
+ 'RegExp.prototype.linear');
+}
+
// ES #sec-get-regexp.prototype.dotAll
transitioning javascript builtin RegExpPrototypeDotAllGetter(
js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 3743df4ddb..8cc3a949c3 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -122,7 +122,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Push(cp, r2);
__ SmiUntag(r2);
-#ifdef V8_REVERSE_JSARGS
// Set up pointer to last argument (skip receiver).
__ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize));
@@ -130,15 +129,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ PushArray(r6, r2, r1, r0);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-#else
- // The receiver for the builtin/api call.
- __ PushRoot(RootIndex::kTheHoleValue);
- // Set up pointer to last argument.
- __ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- __ PushArray(r6, r2, r1, r0);
-#endif
// Call the function.
// r2: number of arguments
@@ -236,7 +226,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore new target.
__ Pop(r5);
-#ifdef V8_REVERSE_JSARGS
// Push the allocated receiver to the stack.
__ Push(r2);
// We need two copies because we may have to return the original one
@@ -249,15 +238,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Set up pointer to first argument (skip receiver).
__ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize));
-#else
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(r2, r2);
-
- // Set up pointer to last argument.
- __ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
-#endif
// ----------- S t a t e -------------
// -- r5: new target
@@ -290,10 +270,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Copy arguments and receiver to the expression stack.
__ PushArray(r6, r2, r1, r0);
-#ifdef V8_REVERSE_JSARGS
// Push implicit receiver.
__ Push(r8);
-#endif
// Call the function.
__ InvokeFunctionWithNewTarget(r3, r5, r2, CALL_FUNCTION);
@@ -428,19 +406,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ CmpLogicalP(sp, scratch);
__ blt(&stack_overflow);
-#ifndef V8_REVERSE_JSARGS
- // Push receiver.
- __ LoadTaggedPointerField(
- scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
- __ Push(scratch);
-#endif
-
// ----------- S t a t e -------------
// -- r3 : the JSGeneratorObject to resume
// -- r6 : generator function
// -- cp : generator context
// -- lr : return address
- // -- sp[0] : generator receiver
// -----------------------------------
// Copy the function arguments from the generator object's register file.
@@ -452,7 +422,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
r4,
FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset));
{
-#ifdef V8_REVERSE_JSARGS
Label done_loop, loop;
__ LoadRR(r8, r5);
@@ -472,34 +441,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ LoadAnyTaggedField(
scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
__ Push(scratch);
-#else
- Label loop, done_loop;
- __ ShiftLeftP(r1, r5, Operand(kSystemPointerSizeLog2));
- __ SubP(sp, r1);
-
- __ ShiftLeftP(r5, r5, Operand(kTaggedSizeLog2));
-
- // ip = stack offset
- // r5 = parameter array offset
- __ LoadImmP(ip, Operand::Zero());
- __ SubP(r5, Operand(kTaggedSize));
- __ blt(&done_loop);
-
- __ lghi(r1, Operand(-kTaggedSize));
-
- __ bind(&loop);
-
- // parameter copy loop
- __ LoadAnyTaggedField(r0, FieldMemOperand(r4, r5, FixedArray::kHeaderSize));
- __ StoreP(r0, MemOperand(sp, ip));
-
- // update offsets
- __ lay(ip, MemOperand(ip, kSystemPointerSize));
-
- __ BranchRelativeOnIdxHighP(r5, r1, &loop);
-
- __ bind(&done_loop);
-#endif
}
// Underlying function needs to have bytecode available.
@@ -515,6 +456,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
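+  // Load the formal parameter count; it is used as the argument count
+  // (r2) for the resume call.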
+ __ LoadP(r2, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadHalfWordP(
+ r2,
+ FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@@ -867,8 +812,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r7: scratch reg to hold scaled argc
// r8: scratch reg to hold arg handle
// r9: scratch reg to hold index into argv
-
-#ifdef V8_REVERSE_JSARGS
Label argLoop, argExit;
__ ShiftLeftP(r9, r2, Operand(kSystemPointerSizeLog2));
@@ -891,28 +834,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Push the receiver.
__ Push(r5);
-#else
- // Push the receiver.
- __ Push(r5);
-
- Label argLoop, argExit;
-
- __ LoadRR(r9, r6);
- __ ltgr(r7, r2);
- __ beq(&argExit, Label::kNear);
- __ bind(&argLoop);
-
- __ LoadP(r8, MemOperand(r9)); // read next parameter
- __ LoadP(r0, MemOperand(r8)); // dereference handle
- __ Push(r0);
- __ la(r9, MemOperand(r9, kSystemPointerSize)); // r9++;
- // __ lay(r7, MemOperand(r7, -kSystemPointerSize));
- __ SubP(r7, r7, Operand(1));
- __ bgt(&argLoop);
-
- __ bind(&argExit);
-#endif
-
// Setup new.target, argc and function.
__ LoadRR(r5, r3);
__ LoadRR(r3, r4);
@@ -990,13 +911,13 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
__ AddP(sp, sp, args_count);
}
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register smi_entry,
- OptimizationMarker marker,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ CmpSmiLiteral(smi_entry, Smi::FromEnum(marker), r0);
+ __ CmpP(actual_marker, Operand(expected_marker));
__ bne(&no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
@@ -1013,17 +934,22 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(!AreAliased(r3, r5, optimized_code_entry, scratch));
Register closure = r3;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+ &heal_optimized_code_slot);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- Label found_deoptimized_code;
__ LoadTaggedPointerField(
scratch,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ LoadW(scratch, FieldMemOperand(
scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
- __ bne(&found_deoptimized_code);
+ __ bne(&heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure
// into the optimized functions list, then tail call the optimized code.
@@ -1033,10 +959,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ LoadCodeObjectEntry(r4, optimized_code_entry);
__ Jump(r4);
- // Optimized code slot contains deoptimized code, evict it and re-enter
- // the closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+  // The optimized code slot contains deoptimized code, or the code was
+  // cleared without the optimization marker being updated. Evict the code,
+  // update the marker and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
@@ -1046,7 +973,8 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -- r5 : new target (preserved for callee if needed, and caller)
// -- r3 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
- // -- optimization_marker : a Smi containing a non-zero optimization marker.
+  //  -- optimization_marker : an int32 containing a non-zero optimization
+  //                           marker.
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, r3, r5, optimization_marker));
@@ -1063,13 +991,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
if (FLAG_debug_code) {
- __ CmpSmiLiteral(optimization_marker,
- Smi::FromEnum(OptimizationMarker::kInOptimizationQueue),
- r0);
- __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
+ __ stop();
}
}
@@ -1207,19 +1133,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpP(r6, Operand(FEEDBACK_VECTOR_TYPE));
__ bne(&push_stack_frame);
- Register optimized_code_entry = r6;
+ Register optimization_state = r6;
- // Read off the optimized code slot in the feedback vector.
- __ LoadAnyTaggedField(
- optimized_code_entry,
- FieldMemOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+ // Read off the optimization state in the feedback vector.
+ __ LoadW(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if the optimized code slot is not empty.
- Label optimized_code_slot_not_empty;
- __ CmpSmiLiteral(optimized_code_entry,
- Smi::FromEnum(OptimizationMarker::kNone), r0);
- __ bne(&optimized_code_slot_not_empty);
+  // Check if the optimized code slot is not empty or has an optimization
+  // marker.
+ Label has_optimized_code_or_marker;
+ __ TestBitMask(optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask,
+ r0);
+ __ bne(&has_optimized_code_or_marker);
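+  // The flags word packs the optimization marker bits together with the
+  // "maybe has optimized code" bit, so a single masked test covers both.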
Label not_optimized;
__ bind(&not_optimized);
@@ -1287,7 +1212,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
}
// If the bytecode array has a valid incoming new target or generator object
- // register, initialize it with incoming value which was passed in r6.
+  // register, initialize it with the incoming value that was passed in r5.
Label no_incoming_new_target_or_generator_register;
__ LoadW(r8, FieldMemOperand(
kInterpreterBytecodeArrayRegister,
@@ -1301,9 +1226,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Perform interrupt stack check.
// TODO(solanes): Merge with the real stack limit check above.
Label stack_check_interrupt, after_stack_check_interrupt;
- __ LoadP(r5,
+ __ LoadP(r0,
StackLimitAsMemOperand(masm, StackLimitKind::kInterruptStackLimit));
- __ CmpLogicalP(sp, r5);
+ __ CmpLogicalP(sp, r0);
__ blt(&stack_check_interrupt);
__ bind(&after_stack_check_interrupt);
@@ -1350,15 +1275,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
LeaveInterpreterFrame(masm, r4);
__ Ret();
- __ bind(&optimized_code_slot_not_empty);
- Label maybe_has_optimized_code;
- // Check if optimized code marker is actually a weak reference to the
- // optimized code.
- __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
- MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
- // Fall through if there's no runnable optimized code.
- __ jmp(&not_optimized);
-
__ bind(&stack_check_interrupt);
// Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
// for the call to the StackGuard.
@@ -1378,16 +1294,33 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
- __ SmiTag(r5, kInterpreterBytecodeOffsetRegister);
- __ StoreP(r5,
+ __ SmiTag(r0, kInterpreterBytecodeOffsetRegister);
+ __ StoreP(r0,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ jmp(&after_stack_check_interrupt);
+ __ bind(&has_optimized_code_or_marker);
+ Label maybe_has_optimized_code;
+
+  // Check if optimized code is available.
+ __ TestBitMask(optimization_state,
+ FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker,
+ r0);
+ __ beq(&maybe_has_optimized_code);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+ // Fall through if there's no runnable optimized code.
+ __ jmp(&not_optimized);
+
__ bind(&maybe_has_optimized_code);
- // Load code entry from the weak reference, if it was cleared, resume
- // execution of unoptimized code.
- __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+ Register optimized_code_entry = optimization_state;
+ __ LoadAnyTaggedField(
+      optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8);
__ bind(&compile_lazy);
@@ -1406,12 +1339,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ ShiftLeftP(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ SubP(start_address, start_address, scratch);
// Push the arguments.
-#ifdef V8_REVERSE_JSARGS
__ PushArray(start_address, num_args, r1, scratch,
TurboAssembler::PushArrayOrder::kReverse);
-#else
- __ PushArray(start_address, num_args, r1, scratch);
-#endif
}
// static
@@ -1427,19 +1356,15 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// -- r3 : the target to call (can be any Object).
// -----------------------------------
Label stack_overflow;
-
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ SubP(r2, r2, Operand(1));
}
-#endif
   // Calculate number of arguments (add one for receiver).
__ AddP(r5, r2, Operand(1));
Generate_StackOverflowCheck(masm, r5, ip, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Don't copy receiver. Argument count is correct.
__ LoadRR(r5, r2);
@@ -1458,20 +1383,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// lies in the next interpreter register.
__ LoadP(r4, MemOperand(r4, -kSystemPointerSize));
}
-#else
- // Push "undefined" as the receiver arg if we need to.
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(RootIndex::kUndefinedValue);
- __ LoadRR(r5, r2); // Argument count is correct.
- }
-
- // Push the arguments.
- Generate_InterpreterPushArgs(masm, r5, r4, r6);
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(r4); // Pass the spread in a register
- __ SubP(r2, r2, Operand(1)); // Subtract one for spread
- }
-#endif
// Call the target.
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
@@ -1504,7 +1415,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ AddP(r7, r2, Operand(1));
Generate_StackOverflowCheck(masm, r7, ip, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ SubP(r2, r2, Operand(1));
@@ -1526,22 +1436,6 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
} else {
__ AssertUndefinedOrAllocationSite(r4, r7);
}
-#else
- // Push a slot for the receiver to be constructed.
- __ LoadImmP(r0, Operand::Zero());
- __ push(r0);
-
- // Push the arguments (skip if none).
- Generate_InterpreterPushArgs(masm, r2, r6, r7);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(r4); // Pass the spread in a register
- __ SubP(r2, r2, Operand(1)); // Subtract one for spread
- } else {
- __ AssertUndefinedOrAllocationSite(r4, r7);
- }
-
-#endif
if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(r3);
@@ -1707,7 +1601,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
int allocatable_register_count = config->num_allocatable_general_registers();
Register scratch = ip;
if (with_result) {
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin) {
__ LoadRR(scratch, r2);
} else {
@@ -1719,16 +1612,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
kSystemPointerSize +
BuiltinContinuationFrameConstants::kFixedFrameSize));
}
-#else
- // Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point.
- __ StoreP(
- r2,
- MemOperand(sp, config->num_allocatable_general_registers() *
- kSystemPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize));
- USE(scratch);
-#endif
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
@@ -1737,7 +1620,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ SmiUntag(Register::from_code(code));
}
}
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin && with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. r0 contains the arguments count, the return value
@@ -1750,7 +1632,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ SubP(r2, r2,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
}
-#endif
__ LoadP(
fp,
MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
@@ -1840,9 +1721,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argc
- // -- sp[0] : argArray
+ // -- sp[0] : receiver
// -- sp[4] : thisArg
- // -- sp[8] : receiver
+ // -- sp[8] : argArray
// -----------------------------------
// 1. Load receiver into r3, argArray into r4 (if present), remove all
@@ -1851,8 +1732,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
{
__ LoadRoot(r7, RootIndex::kUndefinedValue);
__ LoadRR(r4, r7);
-
-#ifdef V8_REVERSE_JSARGS
Label done;
__ LoadP(r3, MemOperand(sp)); // receiver
@@ -1864,23 +1743,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadP(r4, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
-#else
- Label done;
- __ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
- __ LoadP(r3, MemOperand(sp, r1)); // receiver
-
- __ SubP(r6, r2, Operand(1));
- __ blt(&done);
- __ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
- __ LoadP(r7, MemOperand(sp, r1));
-
- __ SubP(r6, r6, Operand(1));
- __ blt(&done);
- __ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
- __ LoadP(r4, MemOperand(sp, r1));
-
- __ bind(&done);
-#endif
__ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ lay(sp, MemOperand(sp, r1));
__ StoreP(r7, MemOperand(sp));
@@ -1916,7 +1778,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
-#ifdef V8_REVERSE_JSARGS
// 1. Get the callable to call (passed as receiver) from the stack.
__ Pop(r3);
@@ -1933,46 +1794,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Adjust the actual number of arguments.
__ SubP(r2, r2, Operand(1));
-#else
- // 1. Make sure we have at least one argument.
- // r2: actual number of arguments
- {
- Label done;
- __ CmpP(r2, Operand::Zero());
- __ bne(&done, Label::kNear);
- __ PushRoot(RootIndex::kUndefinedValue);
- __ AddP(r2, Operand(1));
- __ bind(&done);
- }
-
- // r2: actual number of arguments
- // 2. Get the callable to call (passed as receiver) from the stack.
- __ LoadReceiver(r3, r2);
-
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- // r2: actual number of arguments
- // r3: callable
- {
- Register scratch = r5;
- Label loop;
- // Calculate the copy start address (destination). Copy end address is sp.
- __ ShiftLeftP(r4, r2, Operand(kSystemPointerSizeLog2));
- __ lay(r4, MemOperand(sp, r4));
-
- __ bind(&loop);
- __ LoadP(scratch, MemOperand(r4, -kSystemPointerSize));
- __ StoreP(scratch, MemOperand(r4));
- __ SubP(r4, Operand(kSystemPointerSize));
- __ CmpP(r4, sp);
- __ bne(&loop);
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ SubP(r2, Operand(1));
- __ pop();
- }
-#endif
// 4. Call the callable.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -1981,10 +1802,10 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argc
- // -- sp[0] : argumentsList
- // -- sp[4] : thisArgument
- // -- sp[8] : target
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[4] : target (if argc >= 1)
+ // -- sp[8] : thisArgument (if argc >= 2)
+ // -- sp[12] : argumentsList (if argc == 3)
// -----------------------------------
// 1. Load target into r3 (if present), argumentsList into r4 (if present),
@@ -1995,7 +1816,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadRR(r7, r3);
__ LoadRR(r4, r3);
-#ifdef V8_REVERSE_JSARGS
Label done;
__ cghi(r2, Operand(1));
@@ -2009,25 +1829,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadP(r4, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
-#else
- Label done;
- __ SubP(r6, r2, Operand(1));
- __ blt(&done);
- __ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
- __ LoadP(r3, MemOperand(sp, r1)); // receiver
-
- __ SubP(r6, r6, Operand(1));
- __ blt(&done);
- __ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
- __ LoadP(r7, MemOperand(sp, r1));
-
- __ SubP(r6, r6, Operand(1));
- __ blt(&done);
- __ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
- __ LoadP(r4, MemOperand(sp, r1));
-
- __ bind(&done);
-#endif
__ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ lay(sp, MemOperand(sp, r1));
__ StoreP(r7, MemOperand(sp));
@@ -2051,12 +1852,11 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argc
- // -- sp[0] : new.target (optional)
- // -- sp[4] : argumentsList
- // -- sp[8] : target
- // -- sp[12] : receiver
+ // -- sp[0] : receiver
+ // -- sp[4] : target
+ // -- sp[8] : argumentsList
+ // -- sp[12] : new.target (optional)
// -----------------------------------
- // NOTE: The order of args in the stack are reversed if V8_REVERSE_JSARGS
// 1. Load target into r3 (if present), argumentsList into r4 (if present),
// new.target into r5 (if present, otherwise use target), remove all
@@ -2066,7 +1866,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadRoot(r3, RootIndex::kUndefinedValue);
__ LoadRR(r4, r3);
-#ifdef V8_REVERSE_JSARGS
Label done;
__ LoadRR(r6, r3);
@@ -2084,30 +1883,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
__ lay(sp, MemOperand(sp, r1));
__ StoreP(r6, MemOperand(sp));
-#else
- Label done;
- __ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
- __ StoreP(r4, MemOperand(sp, r1));
- __ SubP(r6, r2, Operand(1));
- __ blt(&done);
- __ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
- __ LoadP(r3, MemOperand(sp, r1)); // receiver
-
- __ LoadRR(r5, r3);
- __ SubP(r6, r6, Operand(1));
- __ blt(&done);
- __ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
- __ LoadP(r4, MemOperand(sp, r1));
-
- __ SubP(r6, r6, Operand(1));
- __ blt(&done);
- __ ShiftLeftP(r1, r6, Operand(kSystemPointerSizeLog2));
- __ LoadP(r5, MemOperand(sp, r1));
-
- __ bind(&done);
- __ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2));
- __ lay(sp, MemOperand(sp, r1));
-#endif
}
// ----------- S t a t e -------------
@@ -2206,7 +1981,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label stack_overflow;
Generate_StackOverflowCheck(masm, r6, scratch, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Move the arguments already in the stack,
// including the receiver and the return address.
{
@@ -2228,7 +2002,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bind(&check);
__ b(ge, &copy);
}
-#endif
// Push arguments onto the stack (thisArgument is already on the stack).
{
@@ -2245,12 +2018,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bne(&skip, Label::kNear);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ bind(&skip);
-#ifdef V8_REVERSE_JSARGS
__ StoreP(scratch, MemOperand(r7));
__ lay(r7, MemOperand(r7, kSystemPointerSize));
-#else
- __ Push(scratch);
-#endif
__ BranchOnCount(r1, &loop);
__ bind(&no_args);
__ AddP(r2, r2, r6);
@@ -2338,7 +2107,6 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Generate_StackOverflowCheck(masm, r7, scratch, &stack_overflow);
// Forward the arguments from the caller frame.
-#ifdef V8_REVERSE_JSARGS
__ LoadRR(r5, r5);
// Point to the first argument to copy (skipping the receiver).
__ AddP(r6, r6,
@@ -2369,26 +2137,19 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ bind(&check);
__ b(ge, &copy);
}
-#endif
+
// Copy arguments from the caller frame.
// TODO(victorgomes): Consider using forward order as potentially more cache
// friendly.
{
Label loop;
-#ifndef V8_REVERSE_JSARGS
- __ AddP(r6, r6, Operand(CommonFrameConstants::kFixedFrameSizeAboveFp));
-#endif
__ AddP(r2, r2, r7);
__ bind(&loop);
{
__ SubP(r7, r7, Operand(1));
__ ShiftLeftP(r1, r7, Operand(kSystemPointerSizeLog2));
__ LoadP(scratch, MemOperand(r6, r1));
-#ifdef V8_REVERSE_JSARGS
__ StoreP(scratch, MemOperand(r4, r1));
-#else
- __ push(scratch);
-#endif
__ CmpP(r7, Operand::Zero());
__ bne(&loop);
}
@@ -2552,7 +2313,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
// Pop receiver.
__ Pop(r7);
@@ -2574,42 +2334,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Push receiver.
__ Push(r7);
-#else
- __ LoadRR(scratch, sp);
- __ LoadRR(sp, r1);
-
- // Relocate arguments down the stack.
- // -- r2 : the number of arguments (not including the receiver)
- // -- r8 : the previous stack pointer
- {
- Label skip, loop;
- __ LoadImmP(r7, Operand::Zero());
- __ CmpP(r2, Operand::Zero());
- __ beq(&skip);
- __ LoadRR(r1, r2);
- __ bind(&loop);
- __ LoadP(r0, MemOperand(scratch, r7));
- __ StoreP(r0, MemOperand(sp, r7));
- __ lay(r7, MemOperand(r7, kSystemPointerSize));
- __ BranchOnCount(r1, &loop);
- __ bind(&skip);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop;
- __ ShiftLeftP(r9, r6, Operand(kTaggedSizeLog2));
- __ lay(r4, MemOperand(r4, r9, FixedArray::kHeaderSize - kHeapObjectTag));
- __ LoadRR(r1, r6);
- __ bind(&loop);
- __ LoadAnyTaggedField(ip, MemOperand(r4, -kTaggedSize), r0);
- __ lay(r4, MemOperand(r4, -kTaggedSize));
- __ StoreP(ip, MemOperand(sp, r7));
- __ lay(r7, MemOperand(r7, kSystemPointerSize));
- __ BranchOnCount(r1, &loop);
- __ AddP(r2, r2, r6);
- }
-#endif
}
__ bind(&no_bound_arguments);
}
@@ -2804,18 +2528,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r5 : new target (passed through to callee)
// -----------------------------------
- Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
+ Label dont_adapt_arguments, stack_overflow;
__ tmll(r4, Operand(kDontAdaptArgumentsSentinel));
__ b(Condition(1), &dont_adapt_arguments);
__ LoadTaggedPointerField(
r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
-#ifndef V8_REVERSE_JSARGS
- __ tmlh(r6,
- Operand(SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask >>
- 16));
- __ bne(&skip_adapt_arguments);
-#endif
// -------------------------------------------
// Adapt arguments.
@@ -2836,13 +2554,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: function
// r4: expected number of arguments
// r5: new target (passed through to callee)
-#ifdef V8_REVERSE_JSARGS
__ ShiftLeftP(r2, r4, Operand(kSystemPointerSizeLog2));
__ AddP(r2, fp);
-#else
- __ SmiToPtrArrayOffset(r2, r2);
- __ AddP(r2, fp);
-#endif
// adjust for return address and receiver
__ AddP(r2, r2, Operand(2 * kSystemPointerSize));
__ ShiftLeftP(r6, r4, Operand(kSystemPointerSizeLog2));
@@ -2872,7 +2585,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
EnterArgumentsAdaptorFrame(masm);
Generate_StackOverflowCheck(masm, r4, r7, &stack_overflow);
-#ifdef V8_REVERSE_JSARGS
// Fill the remaining expected arguments with undefined.
// r0: actual number of arguments as a smi
// r1: function
@@ -2917,46 +2629,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ CmpP(r2, fp); // Compare before moving to next argument.
__ lay(r2, MemOperand(r2, -kSystemPointerSize));
__ b(ne, &copy);
-#else
- // Calculate copy start address into r0 and copy end address is fp.
- // r2: actual number of arguments as a smi
- // r3: function
- // r4: expected number of arguments
- // r5: new target (passed through to callee)
- __ SmiToPtrArrayOffset(r2, r2);
- __ lay(r2, MemOperand(r2, fp));
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r2: copy start address
- // r3: function
- // r4: expected number of arguments
- // r5: new target (passed through to callee)
- Label copy;
- __ bind(&copy);
- // Adjust load for return address and receiver.
- __ LoadP(r0, MemOperand(r2, 2 * kSystemPointerSize));
- __ push(r0);
- __ CmpP(r2, fp); // Compare before moving to next argument.
- __ lay(r2, MemOperand(r2, -kSystemPointerSize));
- __ bne(&copy);
-
- // Fill the remaining expected arguments with undefined.
- // r3: function
- // r4: expected number of arguments
- __ LoadRoot(r0, RootIndex::kUndefinedValue);
- __ ShiftLeftP(r6, r4, Operand(kSystemPointerSizeLog2));
- __ SubP(r6, fp, r6);
- // Adjust for frame.
- __ SubP(r6, r6,
- Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
- kSystemPointerSize));
-
- Label fill;
- __ bind(&fill);
- __ push(r0);
- __ CmpP(sp, r6);
- __ bne(&fill);
-#endif
}
// Call the entry point.
@@ -2979,42 +2651,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
// -------------------------------------------
- // Skip adapt arguments.
- // -------------------------------------------
- __ bind(&skip_adapt_arguments);
- {
- // The callee cannot observe the actual arguments, so it's safe to just
- // pass the expected arguments by massaging the stack appropriately. See
- // http://bit.ly/v8-faster-calls-with-arguments-mismatch for details.
- Label under_application, over_application;
- __ CmpP(r2, r4);
- __ blt(&under_application);
-
- __ bind(&over_application);
- {
- // Remove superfluous parameters from the stack.
- __ SubP(r6, r2, r4);
- __ lgr(r2, r4);
- __ ShiftLeftP(r6, r6, Operand(kSystemPointerSizeLog2));
- __ lay(sp, MemOperand(sp, r6));
- __ b(&dont_adapt_arguments);
- }
-
- __ bind(&under_application);
- {
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ LoadRoot(r6, RootIndex::kUndefinedValue);
- __ bind(&fill);
- __ AddP(r2, r2, Operand(1));
- __ push(r6);
- __ CmpP(r2, r4);
- __ blt(&fill);
- __ b(&dont_adapt_arguments);
- }
- }
-
- // -------------------------------------------
// Dont adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
@@ -3507,12 +3143,11 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- r4 : arguments count (not including the receiver)
// -- r5 : call data
// -- r2 : holder
- // -- sp[0] : last argument
+ // -- sp[0] : receiver
+ // -- sp[8] : first argument
// -- ...
- // -- sp[(argc - 1) * 4] : first argument
- // -- sp[(argc + 0) * 4] : receiver
+ // -- sp[(argc) * 8] : last argument
// -----------------------------------
- // NOTE: The order of args are reversed if V8_REVERSE_JSARGS
Register api_function_address = r3;
Register argc = r4;
@@ -3587,15 +3222,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
-#ifdef V8_REVERSE_JSARGS
__ AddP(scratch, scratch,
Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
-#else
- __ AddP(scratch, scratch,
- Operand((FCA::kArgsLength - 1) * kSystemPointerSize));
- __ ShiftLeftP(r1, argc, Operand(kSystemPointerSizeLog2));
- __ AddP(scratch, scratch, r1);
-#endif
__ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
kSystemPointerSize));
@@ -3737,6 +3365,242 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
__ stop();
}
+namespace {
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // Save all the registers onto the stack
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
+
+ // Save all double registers before messing with them.
+ __ lay(sp, MemOperand(sp, -kDoubleRegsSize));
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister dreg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ StoreDouble(dreg, MemOperand(sp, offset));
+ }
+
+ // Push all GPRs onto the stack
+ __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kSystemPointerSize));
+ __ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers
+
+ __ Move(r1, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ isolate));
+ __ StoreP(fp, MemOperand(r1));
+
+ static constexpr int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
+
+ __ lgfi(r4, Operand(Deoptimizer::kFixedExitSizeMarker));
+ // Cleanse the return address for 31-bit mode.
+ __ CleanseP(r14);
+ // Get the address of the location in the code object (r5)(return
+ // address for lazy deoptimization) and compute the fp-to-sp delta in
+ // register r6.
+ __ LoadRR(r5, r14);
+ __ la(r6, MemOperand(sp, kSavedRegistersAreaSize));
+ __ SubP(r6, fp, r6);
+
+ // Allocate a new deoptimizer object.
+ // Pass six arguments in r2 to r7.
+ __ PrepareCallCFunction(6, r7);
+ __ LoadImmP(r2, Operand::Zero());
+ Label context_check;
+ __ LoadP(r3, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(r3, &context_check);
+ __ LoadP(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ LoadImmP(r3, Operand(static_cast<int>(deopt_kind)));
+ // r4: bailout id already loaded.
+ // r5: code address or 0 already loaded.
+ // r6: Fp-to-sp delta.
+ // Param 6: the isolate is passed on the stack.
+ __ Move(r7, ExternalReference::isolate_address(isolate));
+ __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
+
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+ // Preserve "deoptimizer" object in register r2 and get the input
+ // frame descriptor pointer to r3 (deoptimizer->input_);
+ __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ // TODO(john.yan): optimize the following code by using the mvc instruction.
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ __ LoadP(r4, MemOperand(sp, i * kSystemPointerSize));
+ __ StoreP(r4, MemOperand(r3, offset));
+ }
+
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ // Copy double registers to
+ // double_registers_[DoubleRegister::kNumRegisters]
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset =
+ code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
+ // TODO(joransiu): MVC opportunity
+ __ LoadDouble(d0, MemOperand(sp, src_offset));
+ __ StoreDouble(d0, MemOperand(r3, dst_offset));
+ }
+
+ // Mark the stack as not iterable for the CPU profiler, which won't be able to
+ // walk the stack without the return address.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register zero = r6;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ lhi(zero, Operand(0));
+ __ StoreByte(zero, MemOperand(is_iterable));
+ }
+
+ // Remove the saved registers from the stack.
+ __ la(sp, MemOperand(sp, kSavedRegistersAreaSize));
+
+ // Compute a pointer to the unwinding limit in register r4; that is
+ // the first stack slot not part of the input frame.
+ __ LoadP(r4, MemOperand(r3, FrameDescription::frame_size_offset()));
+ __ AddP(r4, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ la(r5, MemOperand(r3, FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ Label pop_loop_header;
+ __ b(&pop_loop_header, Label::kNear);
+ __ bind(&pop_loop);
+ __ pop(r6);
+ __ StoreP(r6, MemOperand(r5, 0));
+ __ la(r5, MemOperand(r5, kSystemPointerSize));
+ __ bind(&pop_loop_header);
+ __ CmpP(r4, sp);
+ __ bne(&pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ push(r2); // Preserve deoptimizer object across call.
+ // r2: deoptimizer object; r3: scratch.
+ __ PrepareCallCFunction(1, r3);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ pop(r2); // Restore deoptimizer object (class Deoptimizer).
+
+ __ LoadP(sp, MemOperand(r2, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: r6 = current "FrameDescription** output_",
+ // r3 = one past the last FrameDescription**.
+ __ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
+ __ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_.
+ __ ShiftLeftP(r3, r3, Operand(kSystemPointerSizeLog2));
+ __ AddP(r3, r6, r3);
+ __ b(&outer_loop_header, Label::kNear);
+
+ __ bind(&outer_push_loop);
+ // Inner loop state: r4 = current FrameDescription*, r5 = loop index.
+ __ LoadP(r4, MemOperand(r6, 0)); // output_[ix]
+ __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
+ __ b(&inner_loop_header, Label::kNear);
+
+ __ bind(&inner_push_loop);
+ __ SubP(r5, Operand(sizeof(intptr_t)));
+ __ AddP(r8, r4, r5);
+ __ LoadP(r8, MemOperand(r8, FrameDescription::frame_content_offset()));
+ __ push(r8);
+
+ __ bind(&inner_loop_header);
+ __ CmpP(r5, Operand::Zero());
+ __ bne(&inner_push_loop); // test for gt?
+
+ __ AddP(r6, r6, Operand(kSystemPointerSize));
+ __ bind(&outer_loop_header);
+ __ CmpP(r6, r3);
+ __ blt(&outer_push_loop);
+
+ __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister dreg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ ld(dreg, MemOperand(r3, src_offset));
+ }
+
+ // Push pc and continuation from the last output frame.
+ __ LoadP(r8, MemOperand(r4, FrameDescription::pc_offset()));
+ __ push(r8);
+ __ LoadP(r8, MemOperand(r4, FrameDescription::continuation_offset()));
+ __ push(r8);
+
+ // Restore the registers from the last output frame.
+ __ LoadRR(r1, r4);
+ for (int i = kNumberOfRegisters - 1; i > 0; i--) {
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ LoadP(ToRegister(i), MemOperand(r1, offset));
+ }
+ }
+
+ {
+ UseScratchRegisterScope temps(masm);
+ Register is_iterable = temps.Acquire();
+ Register one = r6;
+ __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
+ __ lhi(one, Operand(1));
+ __ StoreByte(one, MemOperand(is_iterable));
+ }
+
+ __ pop(ip); // get continuation, leave pc on stack
+ __ pop(r14);
+ __ Jump(ip);
+
+ __ stop();
+}
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
+
#undef __
} // namespace internal
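The outer and inner push loops in Generate_DeoptimizationEntry above walk the
deoptimizer's output_ array and copy each FrameDescription onto the machine
stack, highest offset first within a frame. A rough stand-alone C++ sketch of
that traversal (the struct below is a hypothetical stand-in for the real
FrameDescription layout, not V8 code):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for the deoptimizer's FrameDescription.
    struct FrameDescription {
      size_t frame_size;               // bytes; a multiple of sizeof(intptr_t)
      std::vector<intptr_t> content;   // the frame_content_offset() payload
    };

    // Mirrors outer_push_loop / inner_push_loop: frames in order, and within
    // each frame, slots from the highest offset down to offset zero.
    void ReplayOutputFrames(FrameDescription* const* output, int output_count,
                            std::vector<intptr_t>* stack) {
      for (int i = 0; i < output_count; ++i) {
        const FrameDescription* frame = output[i];
        for (size_t offset = frame->frame_size; offset > 0;) {
          offset -= sizeof(intptr_t);
          stack->push_back(frame->content[offset / sizeof(intptr_t)]);
        }
      }
    }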
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index 2d7e93c9bb..baf64f7fa7 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -38,8 +38,7 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate,
CHECK(!options.use_pc_relative_calls_and_jumps);
CHECK(!options.collect_win64_unwind_info);
- if (!isolate->IsGeneratingEmbeddedBuiltins() ||
- !Builtins::IsIsolateIndependent(builtin_index)) {
+ if (!isolate->IsGeneratingEmbeddedBuiltins()) {
return options;
}
diff --git a/deps/v8/src/builtins/string-trim.tq b/deps/v8/src/builtins/string-trim.tq
new file mode 100644
index 0000000000..eef0ccd84f
--- /dev/null
+++ b/deps/v8/src/builtins/string-trim.tq
@@ -0,0 +1,168 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-string-gen.h'
+
+namespace string {
+
+extern enum TrimMode extends uint31 constexpr 'String::TrimMode' {
+ kTrim,
+ kTrimStart,
+ kTrimEnd
+}
+
+@export
+macro IsWhiteSpaceOrLineTerminator(charCode: int32): bool {
+ // 0x0020 - SPACE (intentionally out of order to fast-path a common case)
+ if (charCode == Int32Constant(0x0020)) {
+ return true;
+ }
+
+ // 0x0009 - HORIZONTAL TAB
+ if (charCode < Int32Constant(0x0009)) {
+ return false;
+ }
+ // 0x000A - LINE FEED OR NEW LINE
+ // 0x000B - VERTICAL TAB
+ // 0x000C - FORMFEED
+ // 0x000D - CARRIAGE RETURN
+ if (charCode <= Int32Constant(0x000D)) {
+ return true;
+ }
+
+ // Common Non-whitespace characters
+ if (charCode < Int32Constant(0x00A0)) {
+ return false;
+ }
+
+ // 0x00A0 - NO-BREAK SPACE
+ if (charCode == Int32Constant(0x00A0)) {
+ return true;
+ }
+
+ // 0x1680 - Ogham Space Mark
+ if (charCode == Int32Constant(0x1680)) {
+ return true;
+ }
+
+ // 0x2000 - EN QUAD
+ if (charCode < Int32Constant(0x2000)) {
+ return false;
+ }
+ // 0x2001 - EM QUAD
+ // 0x2002 - EN SPACE
+ // 0x2003 - EM SPACE
+ // 0x2004 - THREE-PER-EM SPACE
+ // 0x2005 - FOUR-PER-EM SPACE
+ // 0x2006 - SIX-PER-EM SPACE
+ // 0x2007 - FIGURE SPACE
+ // 0x2008 - PUNCTUATION SPACE
+ // 0x2009 - THIN SPACE
+ // 0x200A - HAIR SPACE
+ if (charCode <= Int32Constant(0x200A)) {
+ return true;
+ }
+
+ // 0x2028 - LINE SEPARATOR
+ if (charCode == Int32Constant(0x2028)) {
+ return true;
+ }
+ // 0x2029 - PARAGRAPH SEPARATOR
+ if (charCode == Int32Constant(0x2029)) {
+ return true;
+ }
+ // 0x202F - NARROW NO-BREAK SPACE
+ if (charCode == Int32Constant(0x202F)) {
+ return true;
+ }
+ // 0x205F - MEDIUM MATHEMATICAL SPACE
+ if (charCode == Int32Constant(0x205F)) {
+ return true;
+ }
+ // 0xFEFF - BYTE ORDER MARK
+ if (charCode == Int32Constant(0xFEFF)) {
+ return true;
+ }
+ // 0x3000 - IDEOGRAPHIC SPACE
+ if (charCode == Int32Constant(0x3000)) {
+ return true;
+ }
+
+ return false;
+}
+
+transitioning macro StringTrim(implicit context: Context)(
+ receiver: JSAny, _arguments: Arguments, methodName: constexpr string,
+ variant: constexpr TrimMode): String {
+ const receiverString: String = ToThisString(receiver, methodName);
+ const stringLength: intptr = receiverString.length_intptr;
+
+ const directString = Cast<DirectString>(receiverString)
+ otherwise return runtime::StringTrim(
+ receiverString, SmiTag<TrimMode>(variant));
+
+ let startIndex: intptr = 0;
+ let endIndex: intptr = stringLength - 1;
+
+ // TODO(duongn): It would probably be more efficient to turn StringTrim into a
+ // template for the different string types and specialize the loop for them.
+ if (variant == TrimMode::kTrim || variant == TrimMode::kTrimStart) {
+ while (true) {
+ if (startIndex == stringLength) {
+ return EmptyStringConstant();
+ }
+ if (!IsWhiteSpaceOrLineTerminator(
+ StringCharCodeAt(directString, Unsigned(startIndex)))) {
+ break;
+ }
+ startIndex++;
+ }
+ }
+
+ if (variant == TrimMode::kTrim || variant == TrimMode::kTrimEnd) {
+ while (true) {
+ if (endIndex == -1) {
+ return EmptyStringConstant();
+ }
+ if (!IsWhiteSpaceOrLineTerminator(
+ StringCharCodeAt(directString, Unsigned(endIndex)))) {
+ break;
+ }
+ endIndex--;
+ }
+ }
+
+ return SubString(
+ receiverString, Unsigned(startIndex), Unsigned(endIndex + 1));
+}
+
+// ES6 #sec-string.prototype.trim
+transitioning javascript builtin
+StringPrototypeTrim(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ const methodName: constexpr string = 'String.prototype.trim';
+ return StringTrim(receiver, arguments, methodName, TrimMode::kTrim);
+}
+
+// https://github.com/tc39/proposal-string-left-right-trim
+transitioning javascript builtin
+StringPrototypeTrimStart(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ const methodName: constexpr string = 'String.prototype.trimLeft';
+ return StringTrim(receiver, arguments, methodName, TrimMode::kTrimStart);
+}
+
+// https://github.com/tc39/proposal-string-left-right-trim
+transitioning javascript builtin
+StringPrototypeTrimEnd(
+ js-implicit context: NativeContext, receiver: JSAny)(...arguments): String {
+ const methodName: constexpr string = 'String.prototype.trimRight';
+ return StringTrim(receiver, arguments, methodName, TrimMode::kTrimEnd);
+}
+}
+
+namespace runtime {
+extern runtime StringTrim(implicit context: Context)(
+ String, SmiTagged<string::TrimMode>): String;
+}
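The classification cascade in IsWhiteSpaceOrLineTerminator reduces to a small
predicate over code points; a C++ mirror of the same set (illustrative only,
not V8 code):

    #include <cstdint>

    bool IsJSWhiteSpaceOrLineTerminator(int32_t c) {
      if (c == 0x0020) return true;                 // SPACE (fast path)
      if (c >= 0x0009 && c <= 0x000D) return true;  // TAB .. CARRIAGE RETURN
      if (c < 0x00A0) return false;                 // common non-whitespace
      if (c >= 0x2000 && c <= 0x200A) return true;  // EN QUAD .. HAIR SPACE
      switch (c) {
        case 0x00A0: case 0x1680: case 0x2028: case 0x2029:
        case 0x202F: case 0x205F: case 0x3000: case 0xFEFF:
          return true;
        default:
          return false;
      }
    }

StringTrim itself is a two-pointer scan; the same shape in plain C++,
byte-wise for simplicity and reusing the predicate above as the whitespace
test:

    #include <cstddef>
    #include <string>

    std::string Trim(const std::string& s, bool trim_start, bool trim_end) {
      std::ptrdiff_t start = 0;
      std::ptrdiff_t end = static_cast<std::ptrdiff_t>(s.size()) - 1;
      while (trim_start && start <= end &&
             IsJSWhiteSpaceOrLineTerminator(
                 static_cast<unsigned char>(s[start]))) {
        ++start;  // skip leading whitespace
      }
      while (trim_end && end >= start &&
             IsJSWhiteSpaceOrLineTerminator(
                 static_cast<unsigned char>(s[end]))) {
        --end;    // skip trailing whitespace
      }
      return s.substr(start, end - start + 1);  // "" when fully trimmed
    }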
diff --git a/deps/v8/src/builtins/torque-internal.tq b/deps/v8/src/builtins/torque-internal.tq
index 254663039c..28636bdbfe 100644
--- a/deps/v8/src/builtins/torque-internal.tq
+++ b/deps/v8/src/builtins/torque-internal.tq
@@ -226,6 +226,11 @@ macro DownCastForTorqueClass<T : type extends HeapObject>(o: HeapObject):
extern macro StaticAssert(bool, constexpr string);
+// This is for the implementation of the dot operator. In any context where the
+// dot operator is available, the correct way to get the length of an indexed
+// field x from object o is `(&o.x).length`.
+intrinsic %IndexedFieldLength<T: type>(o: T, f: constexpr string);
+
} // namespace torque_internal
// Indicates that an array-field should not be initialized.
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index ec51efc00a..6e416ddd98 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -53,6 +53,7 @@ transitioning macro AllocateTypedArray(implicit context: Context)(
typedArray.byte_offset = byteOffset;
typedArray.byte_length = byteLength;
typedArray.length = length;
+ typed_array::AllocateJSTypedArrayExternalPointerEntry(typedArray);
if constexpr (isOnHeap) {
typed_array::SetJSTypedArrayOnHeapDataPtr(typedArray, elements, byteOffset);
} else {
diff --git a/deps/v8/src/builtins/typed-array-sort.tq b/deps/v8/src/builtins/typed-array-sort.tq
index c32808038d..614852f444 100644
--- a/deps/v8/src/builtins/typed-array-sort.tq
+++ b/deps/v8/src/builtins/typed-array-sort.tq
@@ -114,6 +114,14 @@ transitioning javascript builtin TypedArrayPrototypeSort(
return TypedArraySortFast(context, obj);
}
+ // Throw rather than crash if the TypedArray's size exceeds the maximum
+ // FixedArray size (a FixedArray of that size is needed below).
+ // TODO(4153): Consider redesigning the sort implementation such that we
+ // don't have such a limit.
+ if (len > kFixedArrayMaxLength) {
+ ThrowTypeError(MessageTemplate::kTypedArrayTooLargeToSort);
+ }
+
const comparefn: Callable =
Cast<Callable>(comparefnObj) otherwise unreachable;
const accessor: TypedArrayAccessor =
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index ca18b432ab..d8fc788dfb 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -157,6 +157,10 @@ macro GetTypedArrayAccessor(elementsKind: ElementsKind): TypedArrayAccessor {
unreachable;
}
+extern macro
+TypedArrayBuiltinsAssembler::AllocateJSTypedArrayExternalPointerEntry(
+ JSTypedArray): void;
+
extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
JSTypedArray, ByteArray, uintptr): void;
extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq
index fda048518a..411bb0c41e 100644
--- a/deps/v8/src/builtins/wasm.tq
+++ b/deps/v8/src/builtins/wasm.tq
@@ -282,24 +282,32 @@ builtin WasmUint32ToNumber(value: uint32): Number {
return ChangeUint32ToTagged(value);
}
+builtin UintPtr53ToNumber(value: uintptr): Number {
+ if (value <= kSmiMaxValue) return Convert<Smi>(Convert<intptr>(value));
+ const valueFloat = ChangeUintPtrToFloat64(value);
+ // Values need to be within [0..2^53] so that they can be represented
+ // exactly as a float64.
+ assert(ChangeFloat64ToUintPtr(valueFloat) == value);
+ return AllocateHeapNumberWithValue(valueFloat);
+}
+
extern builtin I64ToBigInt(intptr): BigInt;
-builtin WasmAtomicNotify(address: uint32, count: uint32): uint32 {
+builtin WasmAtomicNotify(offset: uintptr, count: uint32): uint32 {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
const result: Smi = runtime::WasmAtomicNotify(
- LoadContextFromInstance(instance), instance, WasmUint32ToNumber(address),
+ LoadContextFromInstance(instance), instance, UintPtr53ToNumber(offset),
WasmUint32ToNumber(count));
return Unsigned(SmiToInt32(result));
}
builtin WasmI32AtomicWait64(
- address: uint32, expectedValue: int32, timeout: intptr): uint32 {
+ offset: uintptr, expectedValue: int32, timeout: intptr): uint32 {
if constexpr (Is64()) {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
const result: Smi = runtime::WasmI32AtomicWait(
- LoadContextFromInstance(instance), instance,
- WasmUint32ToNumber(address), WasmInt32ToNumber(expectedValue),
- I64ToBigInt(timeout));
+ LoadContextFromInstance(instance), instance, UintPtr53ToNumber(offset),
+ WasmInt32ToNumber(expectedValue), I64ToBigInt(timeout));
return Unsigned(SmiToInt32(result));
} else {
unreachable;
@@ -307,13 +315,12 @@ builtin WasmI32AtomicWait64(
}
builtin WasmI64AtomicWait64(
- address: uint32, expectedValue: intptr, timeout: intptr): uint32 {
+ offset: uintptr, expectedValue: intptr, timeout: intptr): uint32 {
if constexpr (Is64()) {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
const result: Smi = runtime::WasmI64AtomicWait(
- LoadContextFromInstance(instance), instance,
- WasmUint32ToNumber(address), I64ToBigInt(expectedValue),
- I64ToBigInt(timeout));
+ LoadContextFromInstance(instance), instance, UintPtr53ToNumber(offset),
+ I64ToBigInt(expectedValue), I64ToBigInt(timeout));
return Unsigned(SmiToInt32(result));
} else {
unreachable;
@@ -385,10 +392,6 @@ builtin ThrowWasmTrapFloatUnrepresentable(): JSAny {
tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapFloatUnrepresentable));
}
-builtin ThrowWasmTrapFuncInvalid(): JSAny {
- tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapFuncInvalid));
-}
-
builtin ThrowWasmTrapFuncSigMismatch(): JSAny {
tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapFuncSigMismatch));
}
@@ -424,8 +427,4 @@ builtin ThrowWasmTrapIllegalCast(): JSAny {
builtin ThrowWasmTrapArrayOutOfBounds(): JSAny {
tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapArrayOutOfBounds));
}
-
-builtin ThrowWasmTrapWasmJSFunction(): JSAny {
- tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapWasmJSFunction));
-}
}
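UintPtr53ToNumber above leans on float64 representing every integer up to
2^53 exactly, which is what its assert checks; 2^53 + 1 is the first integer
that fails the round-trip. A minimal stand-alone demonstration:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t limit = uint64_t{1} << 53;  // 9007199254740992
      // The exact round-trip holds for every integer up to 2^53 ...
      assert(static_cast<uint64_t>(static_cast<double>(limit)) == limit);
      // ... but 2^53 + 1 rounds to 2^53 as a double, so it no longer does.
      assert(static_cast<uint64_t>(static_cast<double>(limit + 1)) != limit + 1);
      return 0;
    }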
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index b94817f6f5..f7eb4658d5 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -26,6 +26,7 @@
#include "src/objects/smi.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/object-access.h"
+#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
@@ -74,43 +75,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
-enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
-
-Operand StackLimitAsOperand(MacroAssembler* masm, StackLimitKind kind) {
- DCHECK(masm->root_array_available());
- Isolate* isolate = masm->isolate();
- ExternalReference limit =
- kind == StackLimitKind::kRealStackLimit
- ? ExternalReference::address_of_real_jslimit(isolate)
- : ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
-
- intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
- CHECK(is_int32(offset));
- return Operand(kRootRegister, static_cast<int32_t>(offset));
-}
-
-void Generate_StackOverflowCheck(
- MacroAssembler* masm, Register num_args, Register scratch,
- Label* stack_overflow,
- Label::Distance stack_overflow_distance = Label::kFar) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ movq(kScratchRegister,
- StackLimitAsOperand(masm, StackLimitKind::kRealStackLimit));
- __ movq(scratch, rsp);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- __ subq(scratch, kScratchRegister);
- __ sarq(scratch, Immediate(kSystemPointerSizeLog2));
- // Check if the arguments will overflow the stack.
- __ cmpq(scratch, num_args);
- // Signed comparison.
- __ j(less_equal, stack_overflow, stack_overflow_distance);
-}
-
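The helper removed here lives on as the TurboAssembler StackOverflowCheck
macro used throughout this file; its arithmetic, sketched as stand-alone C++
(names assumed; kSystemPointerSizeLog2 is 3 on x64):

    #include <cstdint>

    // True when pushing num_args pointer-sized slots would cross the real JS
    // stack limit. The signed comparison also catches a stack that has
    // already overflowed: sp below the limit yields a negative slot count.
    bool WouldOverflowStack(uintptr_t sp, uintptr_t real_jslimit,
                            int64_t num_args) {
      int64_t slots_left =
          (static_cast<int64_t>(sp) - static_cast<int64_t>(real_jslimit)) >> 3;
      return slots_left <= num_args;
    }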
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax: number of arguments
@@ -120,7 +84,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -----------------------------------
Label stack_overflow;
- Generate_StackOverflowCheck(masm, rax, rcx, &stack_overflow, Label::kFar);
+ __ StackOverflowCheck(rax, rcx, &stack_overflow, Label::kFar);
// Enter a construct frame.
{
@@ -136,7 +100,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// correct position (including any undefined), instead of delaying this to
// InvokeFunction.
-#ifdef V8_REVERSE_JSARGS
// Set up pointer to first argument (skip receiver).
__ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize));
@@ -144,14 +107,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ PushArray(rbx, rax, rcx);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
-#else
- // The receiver for the builtin/api call.
- __ PushRoot(RootIndex::kTheHoleValue);
- // Set up pointer to last argument.
- __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
- // Copy arguments to the expression stack.
- __ PushArray(rbx, rax, rcx);
-#endif
// Call the function.
// rax: number of arguments (untagged)
@@ -159,8 +114,6 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// rdx: new target
__ InvokeFunction(rdi, rdx, rax, CALL_FUNCTION);
- // Restore context from the frame.
- __ movq(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
// Restore smi-tagged arguments count from the frame.
__ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
@@ -195,176 +148,161 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
+ FrameScope scope(masm, StackFrame::MANUAL);
// Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
- Label post_instantiation_deopt_entry, not_create_implicit_receiver;
-
- // Preserve the incoming parameters on the stack.
- __ SmiTag(rcx, rax);
- __ Push(rsi);
- __ Push(rcx);
- __ Push(rdi);
- __ PushRoot(RootIndex::kTheHoleValue);
- __ Push(rdx);
-
- // ----------- S t a t e -------------
- // -- sp[0*kSystemPointerSize]: new target
- // -- sp[1*kSystemPointerSize]: padding
- // -- rdi and sp[2*kSystemPointerSize]: constructor function
- // -- sp[3*kSystemPointerSize]: argument count
- // -- sp[4*kSystemPointerSize]: context
- // -----------------------------------
-
- __ LoadTaggedPointerField(
- rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movl(rbx, FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset));
- __ DecodeField<SharedFunctionInfo::FunctionKindBits>(rbx);
- __ JumpIfIsInRange(rbx, kDefaultDerivedConstructor, kDerivedConstructor,
- &not_create_implicit_receiver, Label::kNear);
-
- // If not derived class constructor: Allocate the new receiver object.
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
- __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
- RelocInfo::CODE_TARGET);
- __ jmp(&post_instantiation_deopt_entry, Label::kNear);
-
- // Else: use TheHoleValue as receiver for constructor call
- __ bind(&not_create_implicit_receiver);
- __ LoadRoot(rax, RootIndex::kTheHoleValue);
-
- // ----------- S t a t e -------------
- // -- rax implicit receiver
- // -- Slot 4 / sp[0*kSystemPointerSize] new target
- // -- Slot 3 / sp[1*kSystemPointerSize] padding
- // -- Slot 2 / sp[2*kSystemPointerSize] constructor function
- // -- Slot 1 / sp[3*kSystemPointerSize] number of arguments (tagged)
- // -- Slot 0 / sp[4*kSystemPointerSize] context
- // -----------------------------------
- // Deoptimizer enters here.
- masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
- masm->pc_offset());
- __ bind(&post_instantiation_deopt_entry);
-
- // Restore new target.
- __ Pop(rdx);
-
- // Push the allocated receiver to the stack.
- __ Push(rax);
-
-#ifdef V8_REVERSE_JSARGS
- // We need two copies because we may have to return the original one
- // and the calling conventions dictate that the called function pops the
- // receiver. The second copy is pushed after the arguments, we saved in r8
- // since rax needs to store the number of arguments before
- // InvokingFunction.
- __ movq(r8, rax);
-
- // Set up pointer to first argument (skip receiver).
- __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
- kSystemPointerSize));
-#else
- // We need two copies because we may have to return the original one
- // and the calling conventions dictate that the called function pops the
- // receiver.
- __ Push(rax);
-
- // Set up pointer to last argument.
- __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
-#endif
-
- // Restore constructor function and argument count.
- __ movq(rdi, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
- __ SmiUntag(rax, Operand(rbp, ConstructFrameConstants::kLengthOffset));
-
- // Check if we have enough stack space to push all arguments.
- // Argument count in rax. Clobbers rcx.
- Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, rax, rcx, &stack_overflow, Label::kNear);
- __ jmp(&enough_stack_space, Label::kNear);
-
- __ bind(&stack_overflow);
- // Restore context from the frame.
- __ movq(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kThrowStackOverflow);
- // This should be unreachable.
- __ int3();
-
- __ bind(&enough_stack_space);
-
- // TODO(victorgomes): When the arguments adaptor is completely removed, we
- // should get the formal parameter count and copy the arguments in its
- // correct position (including any undefined), instead of delaying this to
- // InvokeFunction.
-
- // Copy arguments to the expression stack.
- __ PushArray(rbx, rax, rcx);
+ __ EnterFrame(StackFrame::CONSTRUCT);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
-#ifdef V8_REVERSE_JSARGS
- // Push implicit receiver.
- __ Push(r8);
-#endif
-
- // Call the function.
- __ InvokeFunction(rdi, rdx, rax, CALL_FUNCTION);
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(rcx, rax);
+ __ Push(rsi);
+ __ Push(rcx);
+ __ Push(rdi);
+ __ PushRoot(RootIndex::kTheHoleValue);
+ __ Push(rdx);
- // ----------- S t a t e -------------
- // -- rax constructor result
- // -- sp[0*kSystemPointerSize] implicit receiver
- // -- sp[1*kSystemPointerSize] padding
- // -- sp[2*kSystemPointerSize] constructor function
- // -- sp[3*kSystemPointerSize] number of arguments
- // -- sp[4*kSystemPointerSize] context
- // -----------------------------------
+ // ----------- S t a t e -------------
+ // -- sp[0*kSystemPointerSize]: new target
+ // -- sp[1*kSystemPointerSize]: padding
+ // -- rdi and sp[2*kSystemPointerSize]: constructor function
+ // -- sp[3*kSystemPointerSize]: argument count
+ // -- sp[4*kSystemPointerSize]: context
+ // -----------------------------------
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
+ __ LoadTaggedPointerField(
+ rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movl(rbx, FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset));
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(rbx);
+ __ JumpIfIsInRange(rbx, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver, Label::kNear);
- // Restore context from the frame.
- __ movq(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+ __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
+ __ jmp(&post_instantiation_deopt_entry, Label::kNear);
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, do_throw, leave_frame;
+ // Else: use TheHoleValue as receiver for constructor call
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(rax, RootIndex::kTheHoleValue);
- // If the result is undefined, we jump out to using the implicit receiver.
- __ JumpIfRoot(rax, RootIndex::kUndefinedValue, &use_receiver, Label::kNear);
+ // ----------- S t a t e -------------
+ // -- rax implicit receiver
+ // -- Slot 4 / sp[0*kSystemPointerSize] new target
+ // -- Slot 3 / sp[1*kSystemPointerSize] padding
+ // -- Slot 2 / sp[2*kSystemPointerSize] constructor function
+ // -- Slot 1 / sp[3*kSystemPointerSize] number of arguments (tagged)
+ // -- Slot 0 / sp[4*kSystemPointerSize] context
+ // -----------------------------------
+ // Deoptimizer enters here.
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
+
+ // Restore new target.
+ __ Pop(rdx);
+
+ // Push the allocated receiver to the stack.
+ __ Push(rax);
+
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+ // receiver. The second copy is pushed after the arguments; we keep it in r8
+ // since rax needs to hold the number of arguments before InvokeFunction.
+ __ movq(r8, rax);
+
+ // Set up pointer to first argument (skip receiver).
+ __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
+ kSystemPointerSize));
+
+ // Restore constructor function and argument count.
+ __ movq(rdi, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
+ __ SmiUntag(rax, Operand(rbp, ConstructFrameConstants::kLengthOffset));
+
+ // Check if we have enough stack space to push all arguments.
+ // Argument count in rax. Clobbers rcx.
+ Label stack_overflow;
+ __ StackOverflowCheck(rax, rcx, &stack_overflow);
- // Otherwise we do a smi check and fall through to check if the return value
- // is a valid receiver.
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+ // should get the formal parameter count and copy the arguments in its
+ // correct position (including any undefined), instead of delaying this to
+ // InvokeFunction.
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(rax, &use_receiver, Label::kNear);
+ // Copy arguments to the expression stack.
+ __ PushArray(rbx, rax, rcx);
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
- __ j(above_equal, &leave_frame, Label::kNear);
- __ jmp(&use_receiver, Label::kNear);
+ // Push implicit receiver.
+ __ Push(r8);
- __ bind(&do_throw);
- __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ // Call the function.
+ __ InvokeFunction(rdi, rdx, rax, CALL_FUNCTION);
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ movq(rax, Operand(rsp, 0 * kSystemPointerSize));
- __ JumpIfRoot(rax, RootIndex::kTheHoleValue, &do_throw, Label::kNear);
+ // ----------- S t a t e -------------
+ // -- rax constructor result
+ // -- sp[0*kSystemPointerSize] implicit receiver
+ // -- sp[1*kSystemPointerSize] padding
+ // -- sp[2*kSystemPointerSize] constructor function
+ // -- sp[3*kSystemPointerSize] number of arguments
+ // -- sp[4*kSystemPointerSize] context
+ // -----------------------------------
- __ bind(&leave_frame);
- // Restore the arguments count.
- __ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
- // Leave construct frame.
- }
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, leave_and_return, check_result;
+
+ // If the result is undefined, we'll use the implicit receiver. Otherwise we
+ // do a smi check and fall through to check if the return value is a valid
+ // receiver.
+ __ JumpIfNotRoot(rax, RootIndex::kUndefinedValue, &check_result,
+ Label::kNear);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ movq(rax, Operand(rsp, 0 * kSystemPointerSize));
+ __ JumpIfRoot(rax, RootIndex::kTheHoleValue, &do_throw, Label::kNear);
+
+ __ bind(&leave_and_return);
+ // Restore the arguments count.
+ __ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
+ __ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2);
__ leaq(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize));
__ PushReturnAddressFrom(rcx);
__ ret(0);
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ bind(&check_result);
+ __ JumpIfSmi(rax, &use_receiver, Label::kNear);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
+ __ j(above_equal, &leave_and_return, Label::kNear);
+ __ jmp(&use_receiver);
+
+ __ bind(&do_throw);
+ // Restore context from the frame.
+ __ movq(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ // We don't return here.
+ __ int3();
+
+ __ bind(&stack_overflow);
+ // Restore the context from the frame.
+ __ movq(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // This should be unreachable.
+ __ int3();
}
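The check_result / use_receiver / do_throw blocks above encode the ECMA-262
rule for constructor return values (section 13.2.2-7); reduced to a hedged
C++ sketch, with the Kind enum as a hypothetical stand-in for the real
tagged-value checks:

    #include <stdexcept>

    enum class Kind { kUndefined, kSmi, kJSReceiver, kTheHole, kOtherPrimitive };

    // A constructor's result replaces the implicit receiver only when it is
    // an object in the ECMA sense; otherwise the on-stack receiver is used,
    // and a hole receiver (a derived constructor that returned a non-object)
    // throws a TypeError.
    Kind ConstructResult(Kind result, Kind receiver) {
      if (result == Kind::kJSReceiver) return result;  // leave_and_return
      if (receiver == Kind::kTheHole) {                // do_throw
        throw std::runtime_error("constructor returned non-object");
      }
      return receiver;                                 // use_receiver
    }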
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
@@ -617,11 +555,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Push the function onto the stack.
__ Push(rdi);
-#ifndef V8_REVERSE_JSARGS
- // Push the receiver onto the stack.
- __ Push(arg_reg_4);
-#endif
-
#ifdef V8_TARGET_OS_WIN
// Load the previous frame pointer to access C arguments on stack
__ movq(kScratchRegister, Operand(rbp, 0));
@@ -632,30 +565,24 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Load the number of arguments and setup pointer to the arguments.
__ movq(rax, r8);
__ movq(rbx, r9);
-#ifdef V8_REVERSE_JSARGS
__ movq(r9, arg_reg_4); // Temporarily saving the receiver.
-#endif
#endif // V8_TARGET_OS_WIN
- // Current stack contents if V8_REVERSE_JSARGS:
+ // Current stack contents:
// [rsp + kSystemPointerSize] : Internal frame
// [rsp] : function
- // Current stack contents if not V8_REVERSE_JSARGS:
- // [rsp + 2 * kSystemPointerSize] : Internal frame
- // [rsp + kSystemPointerSize] : function
- // [rsp] : receiver
// Current register contents:
// rax : argc
// rbx : argv
// rsi : context
// rdi : function
// rdx : new.target
- // r9 : receiver, if V8_REVERSE_JSARGS
+ // r9 : receiver
// Check if we have enough stack space to push all arguments.
// Argument count in rax. Clobbers rcx.
Label enough_stack_space, stack_overflow;
- Generate_StackOverflowCheck(masm, rax, rcx, &stack_overflow, Label::kNear);
+ __ StackOverflowCheck(rax, rcx, &stack_overflow, Label::kNear);
__ jmp(&enough_stack_space, Label::kNear);
__ bind(&stack_overflow);
@@ -668,7 +595,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Copy arguments to the stack in a loop.
// Register rbx points to array of pointers to handle locations.
// Push the values of these handles.
-#ifdef V8_REVERSE_JSARGS
Label loop, entry;
__ movq(rcx, rax);
__ jmp(&entry, Label::kNear);
@@ -681,18 +607,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Push the receiver.
__ Push(r9);
-#else
- Label loop, entry;
- __ Set(rcx, 0); // Set loop variable to 0.
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(rbx, rcx, times_system_pointer_size, 0));
- __ Push(Operand(kScratchRegister, 0)); // dereference handle
- __ addq(rcx, Immediate(1));
- __ bind(&entry);
- __ cmpq(rcx, rax);
- __ j(not_equal, &loop, Label::kNear);
-#endif
// Invoke the builtin code.
Handle<Code> builtin = is_construct
@@ -779,24 +693,17 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
- __ cmpq(rsp, StackLimitAsOperand(masm, StackLimitKind::kRealStackLimit));
+ __ cmpq(rsp, __ StackLimitAsOperand(StackLimitKind::kRealStackLimit));
__ j(below, &stack_overflow);
// Pop return address.
__ PopReturnAddressTo(rax);
-#ifndef V8_REVERSE_JSARGS
- // Push receiver.
- __ PushTaggedPointerField(
- FieldOperand(rdx, JSGeneratorObject::kReceiverOffset), decompr_scratch1);
-#endif
-
// ----------- S t a t e -------------
// -- rax : return address
// -- rdx : the JSGeneratorObject to resume
// -- rdi : generator function
// -- rsi : generator context
- // -- rsp[0] : generator receiver, if V8_REVERSE_JSARGS is not set
// -----------------------------------
// Copy the function arguments from the generator object's register file.
@@ -809,7 +716,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
{
-#ifdef V8_REVERSE_JSARGS
{
Label done_loop, loop;
__ movq(r9, rcx);
@@ -829,21 +735,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PushTaggedPointerField(
FieldOperand(rdx, JSGeneratorObject::kReceiverOffset),
decompr_scratch1);
-#else
- Label done_loop, loop;
- __ Set(r9, 0);
-
- __ bind(&loop);
- __ cmpl(r9, rcx);
- __ j(greater_equal, &done_loop, Label::kNear);
- __ PushTaggedAnyField(
- FieldOperand(rbx, r9, times_tagged_size, FixedArray::kHeaderSize),
- decompr_scratch1);
- __ addl(r9, Immediate(1));
- __ jmp(&loop);
-
- __ bind(&done_loop);
-#endif
}
// Underlying function needs to have bytecode available.
@@ -957,13 +848,13 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ PushReturnAddressFrom(return_pc);
}
-// Tail-call |function_id| if |smi_entry| == |marker|
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
- Register smi_entry,
- OptimizationMarker marker,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ SmiCompare(smi_entry, Smi::FromEnum(marker));
+ __ Cmp(actual_marker, expected_marker);
__ j(not_equal, &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
@@ -994,12 +885,11 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
OptimizationMarker::kCompileOptimizedConcurrent,
Runtime::kCompileOptimized_Concurrent);
- // Otherwise, the marker is InOptimizationQueue, so fall through hoping
- // that an interrupt will eventually update the slot with optimized code.
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
if (FLAG_debug_code) {
- __ SmiCompare(optimization_marker,
- Smi::FromEnum(OptimizationMarker::kInOptimizationQueue));
- __ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
+ __ int3();
}
}
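// A plain-C++ sketch of MaybeOptimizeCode's overall shape (enum values
// illustrative): a chain of marker comparisons, each tail-calling one runtime
// function, ending in a trap because the caller's mask test guarantees that
// one of the three markers must match.
#include <cstdlib>
enum class Marker { kLogFirstExecution, kCompileOptimized, kCompileOptimizedConcurrent };
void MaybeOptimizeCodeSketch(Marker marker) {
  if (marker == Marker::kLogFirstExecution) return;           // logging runtime
  if (marker == Marker::kCompileOptimized) return;            // Runtime::kCompileOptimized_NotConcurrent
  if (marker == Marker::kCompileOptimizedConcurrent) return;  // Runtime::kCompileOptimized_Concurrent
  std::abort();  // the FLAG_debug_code int3 above: nothing else reaches here
}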
@@ -1014,15 +904,20 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
Register closure = rdi;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, &heal_optimized_code_slot);
+
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
- Label found_deoptimized_code;
__ LoadTaggedPointerField(
scratch1,
FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ testl(FieldOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
- __ j(not_zero, &found_deoptimized_code);
+ __ j(not_zero, &heal_optimized_code_slot);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
@@ -1032,10 +927,11 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ Move(rcx, optimized_code_entry);
__ JumpCodeObject(rcx);
- // Optimized code slot contains deoptimized code, evict it and re-enter the
- // closure's code.
- __ bind(&found_deoptimized_code);
- GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
+ // The optimized code slot contains deoptimized code, or the code has been
+ // cleared while the optimization marker was not updated. Evict the code,
+ // update the marker and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}
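// The two failure paths above, sketched in standalone C++ (the struct is
// illustrative): a weak slot cleared by GC and code that was since marked for
// deoptimization are now handled by the same healing runtime call.
struct CodeObj { bool marked_for_deoptimization; };
const CodeObj* ResolveOptimizedCode(const CodeObj* weak_slot /* null once cleared */) {
  // nullptr stands in for the Runtime::kHealOptimizedCodeSlot path.
  if (weak_slot == nullptr) return nullptr;                  // LoadWeakValue
  if (weak_slot->marked_for_deoptimization) return nullptr;  // kMarkedForDeoptimizationBit
  return weak_slot;  // healthy: link into the closure and tail-call it
}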
// Advance the current bytecode offset. This simulates what all bytecode
@@ -1169,20 +1065,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
__ j(not_equal, &push_stack_frame);
- // Read off the optimized code slot in the feedback vector, and if there
- // is optimized code or an optimization marker, call that instead.
-
- Register optimized_code_entry = rcx;
-
- __ LoadAnyTaggedField(
- optimized_code_entry,
- FieldOperand(feedback_vector,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+ // Read off the optimization state in the feedback vector.
+ Register optimization_state = rcx;
+ __ movl(optimization_state,
+ FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- // Check if the optimized code slot is not empty.
- Label optimized_code_slot_not_empty;
- __ Cmp(optimized_code_entry, Smi::FromEnum(OptimizationMarker::kNone));
- __ j(not_equal, &optimized_code_slot_not_empty);
+ // Check if there is optimized code or an optimization marker that needs to be
+ // processed.
+ Label has_optimized_code_or_marker;
+ __ testl(
+ optimization_state,
+ Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ j(not_zero, &has_optimized_code_or_marker);
Label not_optimized;
__ bind(&not_optimized);
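// Bit-level sketch of the new flags-word protocol; the mask and shift values
// below are made up for illustration, only the shape mirrors the assembly.
#include <cstdint>
constexpr uint32_t kOptimizationMarkerMask = 0x7;  // OptimizationMarkerBits
constexpr uint32_t kHasOptimizedCodeBit = 1u << 3;
bool HasOptimizedCodeOrMarker(uint32_t flags) {
  return (flags & (kOptimizationMarkerMask | kHasOptimizedCodeBit)) != 0;
}
uint32_t DecodeMarker(uint32_t flags) {
  return flags & kOptimizationMarkerMask;  // DecodeField<OptimizationMarkerBits>
}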
@@ -1231,7 +1125,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
__ movq(rax, rsp);
__ subq(rax, rcx);
- __ cmpq(rax, StackLimitAsOperand(masm, StackLimitKind::kRealStackLimit));
+ __ cmpq(rax, __ StackLimitAsOperand(StackLimitKind::kRealStackLimit));
__ j(below, &stack_overflow);
// If ok, push undefined as the initial value for all register file entries.
@@ -1263,7 +1157,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Perform interrupt stack check.
// TODO(solanes): Merge with the real stack limit check above.
Label stack_check_interrupt, after_stack_check_interrupt;
- __ cmpq(rsp, StackLimitAsOperand(masm, StackLimitKind::kInterruptStackLimit));
+ __ cmpq(rsp, __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
__ j(below, &stack_check_interrupt);
__ bind(&after_stack_check_interrupt);
@@ -1333,19 +1227,25 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ int3(); // Should not return.
- __ bind(&optimized_code_slot_not_empty);
+ __ bind(&has_optimized_code_or_marker);
Label maybe_has_optimized_code;
- // Check if optimized code marker is actually a weak reference to the
- // optimized code as opposed to an optimization marker.
- __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
- MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+
+ __ testl(
+ optimization_state,
+ Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ j(zero, &maybe_has_optimized_code);
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
// Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
__ bind(&maybe_has_optimized_code);
- // Load code entry from the weak reference, if it was cleared, resume
- // execution of unoptimized code.
- __ LoadWeakValue(optimized_code_entry, &not_optimized);
+ Register optimized_code_entry = optimization_state;
+ __ LoadAnyTaggedField(
+ optimized_code_entry,
+ FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15);
__ bind(&stack_overflow);
@@ -1364,12 +1264,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Operand(start_address, scratch, times_system_pointer_size,
kSystemPointerSize));
// Push the arguments.
-#ifdef V8_REVERSE_JSARGS
__ PushArray(start_address, num_args, scratch,
TurboAssembler::PushArrayOrder::kReverse);
-#else
- __ PushArray(start_address, num_args, scratch);
-#endif
}
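// PushArrayOrder::kReverse restated as a standalone C++ loop: the arguments
// are visited from last to first, so argument 0 ends up on top of the stack.
#include <vector>
void PushArgsReversed(const long* args, int count, std::vector<long>* stack) {
  for (int i = count - 1; i >= 0; --i) stack->push_back(args[i]);
}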
// static
@@ -1386,22 +1282,19 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// -----------------------------------
Label stack_overflow;
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ decl(rax);
}
-#endif
__ leal(rcx, Operand(rax, 1)); // Add one for receiver.
// Add a stack check before pushing arguments.
- Generate_StackOverflowCheck(masm, rcx, rdx, &stack_overflow);
+ __ StackOverflowCheck(rcx, rdx, &stack_overflow);
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
-#ifdef V8_REVERSE_JSARGS
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// Don't copy receiver.
__ decq(rcx);
@@ -1421,21 +1314,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// is below that.
__ movq(rbx, Operand(rbx, -kSystemPointerSize));
}
-#else
- // Push "undefined" as the receiver arg if we need to.
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ PushRoot(RootIndex::kUndefinedValue);
- __ decl(rcx); // Subtract one for receiver.
- }
-
- // rbx and rdx will be modified.
- Generate_InterpreterPushArgs(masm, rcx, rbx, rdx);
-
- if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
- __ Pop(rbx); // Pass the spread in a register
- __ decl(rax); // Subtract one for spread
- }
-#endif
// Call the target.
__ PushReturnAddressFrom(kScratchRegister); // Re-push return address.
@@ -1473,12 +1351,11 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
Label stack_overflow;
// Add a stack check before pushing arguments.
- Generate_StackOverflowCheck(masm, rax, r8, &stack_overflow);
+ __ StackOverflowCheck(rax, r8, &stack_overflow);
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
-#ifdef V8_REVERSE_JSARGS
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ decl(rax);
@@ -1489,22 +1366,10 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Push slot for the receiver to be constructed.
__ Push(Immediate(0));
-#else
- // Push slot for the receiver to be constructed.
- __ Push(Immediate(0));
-
- // rcx and r8 will be modified.
- Generate_InterpreterPushArgs(masm, rax, rcx, r8);
-#endif
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
-#ifdef V8_REVERSE_JSARGS
// Pass the spread in the register rbx.
__ movq(rbx, Operand(rcx, -kSystemPointerSize));
-#else
- __ Pop(rbx); // Pass the spread in a register
- __ decl(rax); // Subtract one for spread
-#endif
// Push return address in preparation for the tail-call.
__ PushReturnAddressFrom(kScratchRegister);
} else {
@@ -1673,7 +1538,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
const RegisterConfiguration* config(RegisterConfiguration::Default());
int allocatable_register_count = config->num_allocatable_general_registers();
if (with_result) {
-#ifdef V8_REVERSE_JSARGS
if (java_script_builtin) {
// kScratchRegister is not included in the allocatable registers.
__ movq(kScratchRegister, rax);
@@ -1686,15 +1550,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
BuiltinContinuationFrameConstants::kFixedFrameSize),
rax);
}
-#else
- // Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point.
- __ movq(
- Operand(rsp, config->num_allocatable_general_registers() *
- kSystemPointerSize +
- BuiltinContinuationFrameConstants::kFixedFrameSize),
- rax);
-#endif
}
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
@@ -1703,7 +1558,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ SmiUntag(Register::from_code(code));
}
}
-#ifdef V8_REVERSE_JSARGS
if (with_result && java_script_builtin) {
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. rax contains the arguments count, the return value
// from LAZY is always the last argument.
__ movq(Operand(rsp, rax, times_system_pointer_size,
                BuiltinContinuationFrameConstants::kFixedFrameSize),
BuiltinContinuationFrameConstants::kFixedFrameSize),
kScratchRegister);
}
-#endif
__ movq(
rbp,
Operand(rsp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
@@ -1770,10 +1623,9 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
// -- rsp[0] : return address
- // The order of args depends on V8_REVERSE_JSARGS
- // -- args[0] : receiver
- // -- args[1] : thisArg
- // -- args[2] : argArray
+ // -- rsp[8]  : receiver
+ // -- rsp[16] : thisArg
+ // -- rsp[24] : argArray
// -----------------------------------
// 1. Load receiver into rdi, argArray into rbx (if present), remove all
@@ -1836,15 +1688,13 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Stack Layout:
// rsp[0] : Return address
- // rsp[8] : Argument n
- // rsp[16] : Argument n-1
+ // rsp[8] : Argument 0 (receiver: callable to call)
+ // rsp[16] : Argument 1
// ...
- // rsp[8 * n] : Argument 1
- // rsp[8 * (n + 1)] : Argument 0 (receiver: callable to call)
- // NOTE: The order of args are reversed if V8_REVERSE_JSARGS
+ // rsp[8 * n] : Argument n-1
+ // rsp[8 * (n + 1)] : Argument n
// rax contains the number of arguments, n, not counting the receiver.
-#ifdef V8_REVERSE_JSARGS
// 1. Get the callable to call (passed as receiver) from the stack.
{
StackArgumentsAccessor args(rax);
@@ -1870,43 +1720,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ PushReturnAddressFrom(rbx);
__ decq(rax); // One fewer argument (first argument is new receiver).
-#else
- // 1. Make sure we have at least one argument.
- {
- Label done;
- __ testq(rax, rax);
- __ j(not_zero, &done, Label::kNear);
- __ PopReturnAddressTo(rbx);
- __ PushRoot(RootIndex::kUndefinedValue);
- __ PushReturnAddressFrom(rbx);
- __ incq(rax);
- __ bind(&done);
- }
-
- // 2. Get the callable to call (passed as receiver) from the stack.
- {
- StackArgumentsAccessor args(rax);
- __ movq(rdi, args.GetReceiverOperand());
- }
-
- // 3. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- {
- Label loop;
- __ movq(rcx, rax);
- StackArgumentsAccessor args(rcx);
- __ bind(&loop);
- __ movq(rbx, args[1]);
- __ movq(args[0], rbx);
- __ decq(rcx);
- __ j(not_zero, &loop); // While non-zero.
- __ DropUnderReturnAddress(1, rbx); // Drop one slot under return address.
- __ decq(rax); // One fewer argument (first argument is new receiver).
- }
-#endif
-
- // 4. Call the callable.
+ // 5. Call the callable.
// Since we did not create a frame for Function.prototype.call() yet,
// we use a normal Call builtin here.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -1916,11 +1730,10 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
// -- rsp[0] : return address
- // The order of args depends on V8_REVERSE_JSARGS
- // -- args[0] : receiver
- // -- args[1] : target
- // -- args[2] : thisArgument
- // -- args[3] : argumentsList
+ // -- rsp[8] : receiver
+ // -- rsp[16] : target (if argc >= 1)
+ // -- rsp[24] : thisArgument (if argc >= 2)
+ // -- rsp[32] : argumentsList (if argc == 3)
// -----------------------------------
// 1. Load target into rdi (if present), argumentsList into rbx (if present),
@@ -1968,11 +1781,10 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
// -- rsp[0] : return address
- // The order of args depends on V8_REVERSE_JSARGS
- // -- args[0] : receiver
- // -- args[1] : target
- // -- args[2] : argumentsList
- // -- args[3] : new.target (optional)
+ // -- rsp[8] : receiver
+ // -- rsp[16] : target
+ // -- rsp[24] : argumentsList
+ // -- rsp[32] : new.target (optional)
// -----------------------------------
// 1. Load target into rdi (if present), argumentsList into rbx (if present),
@@ -2065,26 +1877,18 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- rdi : function (passed through to callee)
// -----------------------------------
- Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
+ Label dont_adapt_arguments, stack_overflow;
__ cmpq(rbx, Immediate(kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
__ LoadTaggedPointerField(
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-#ifndef V8_REVERSE_JSARGS
- // This optimization is disabled when the arguments are reversed.
- __ testl(
- FieldOperand(rcx, SharedFunctionInfo::kFlagsOffset),
- Immediate(SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask));
- __ j(not_zero, &skip_adapt_arguments);
-#endif
-
// -------------------------------------------
// Adapt arguments.
// -------------------------------------------
{
EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, rbx, rcx, &stack_overflow);
+ __ StackOverflowCheck(rbx, rcx, &stack_overflow);
Label under_application, over_application, invoke;
__ cmpq(rax, rbx);
@@ -2095,11 +1899,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
-#ifdef V8_REVERSE_JSARGS
__ leaq(r8, Operand(rbp, rbx, times_system_pointer_size, offset));
-#else
- __ leaq(r8, Operand(rbp, rax, times_system_pointer_size, offset));
-#endif
__ Set(rax, -1); // account for receiver
Label copy;
@@ -2115,7 +1915,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Too few parameters: Actual < expected.
__ bind(&under_application);
{
-#ifdef V8_REVERSE_JSARGS
// Fill remaining expected arguments with undefined values.
Label fill;
__ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
@@ -2141,29 +1940,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Update actual number of arguments.
__ movq(rax, rbx);
-#else // !V8_REVERSE_JSARGS
- // Copy receiver and all actual arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ leaq(r9, Operand(rbp, rax, times_system_pointer_size, offset));
- __ Set(r8, -1); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ incq(r8);
- __ Push(Operand(r9, 0));
- __ subq(r9, Immediate(kSystemPointerSize));
- __ cmpq(r8, rax);
- __ j(less, &copy);
-
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
- __ bind(&fill);
- __ incq(rax);
- __ Push(kScratchRegister);
- __ cmpq(rax, rbx);
- __ j(less, &fill);
-#endif // !V8_REVERSE_JSARGS
}
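// Standalone C++ sketch of the adaptation above: copy the receiver and up to
// |expected| arguments, then pad with undefined when too few were passed.
// kUndefined is an illustrative stand-in.
#include <algorithm>
#include <cstddef>
#include <vector>
constexpr long kUndefined = -1;
std::vector<long> AdaptArguments(const std::vector<long>& actual, std::size_t expected) {
  std::vector<long> adapted(actual.begin(),
                            actual.begin() + std::min(actual.size(), expected));
  adapted.resize(expected, kUndefined);  // no-op on over-application
  return adapted;
}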
// Call the entry point.
@@ -2185,44 +1961,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
// -------------------------------------------
- // Skip adapt arguments.
- // -------------------------------------------
- __ bind(&skip_adapt_arguments);
- {
- // The callee cannot observe the actual arguments, so it's safe to just
- // pass the expected arguments by massaging the stack appropriately. See
- // http://bit.ly/v8-faster-calls-with-arguments-mismatch for details.
- Label under_application, over_application, invoke;
- __ PopReturnAddressTo(rcx);
- __ cmpq(rax, rbx);
- __ j(less, &under_application, Label::kNear);
-
- __ bind(&over_application);
- {
- // Remove superfluous parameters from the stack.
- __ xchgq(rax, rbx);
- __ subq(rbx, rax);
- __ leaq(rsp, Operand(rsp, rbx, times_system_pointer_size, 0));
- __ jmp(&invoke, Label::kNear);
- }
-
- __ bind(&under_application);
- {
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
- __ bind(&fill);
- __ incq(rax);
- __ Push(kScratchRegister);
- __ cmpq(rax, rbx);
- __ j(less, &fill);
- }
-
- __ bind(&invoke);
- __ PushReturnAddressFrom(rcx);
- }
-
- // -------------------------------------------
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
@@ -2261,7 +1999,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ j(equal, &ok);
__ CmpInstanceType(map, FIXED_DOUBLE_ARRAY_TYPE);
__ j(not_equal, &fail);
- __ cmpl(rcx, Immediate(0));
+ __ Cmp(rcx, 0);
__ j(equal, &ok);
// Fall through.
__ bind(&fail);
@@ -2271,10 +2009,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
}
Label stack_overflow;
- Generate_StackOverflowCheck(masm, rcx, r8, &stack_overflow, Label::kNear);
+ __ StackOverflowCheck(rcx, r8, &stack_overflow, Label::kNear);
// Push additional arguments onto the stack.
-#ifdef V8_REVERSE_JSARGS
// Move the arguments already in the stack,
// including the receiver and the return address.
{
@@ -2321,30 +2058,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ bind(&done);
__ addq(rax, current);
}
-#else // !V8_REVERSE_JSARGS
- {
- Register value = scratch;
- __ PopReturnAddressTo(r8);
- __ Set(r9, 0);
- Label done, push, loop;
- __ bind(&loop);
- __ cmpl(r9, rcx);
- __ j(equal, &done, Label::kNear);
- // Turn the hole into undefined as we go.
- __ LoadAnyTaggedField(value, FieldOperand(rbx, r9, times_tagged_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(value, RootIndex::kTheHoleValue);
- __ j(not_equal, &push, Label::kNear);
- __ LoadRoot(value, RootIndex::kUndefinedValue);
- __ bind(&push);
- __ Push(value);
- __ incl(r9);
- __ jmp(&loop);
- __ bind(&done);
- __ PushReturnAddressFrom(r8);
- __ addq(rax, r9);
- }
-#endif
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
@@ -2426,10 +2139,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -----------------------------------
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, r8, r12, &stack_overflow, Label::kNear);
+ __ StackOverflowCheck(r8, r12, &stack_overflow, Label::kNear);
// Forward the arguments from the caller frame.
-#ifdef V8_REVERSE_JSARGS
// Move the arguments already in the stack,
// including the receiver and the return address.
{
@@ -2476,21 +2188,6 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
kScratchRegister);
__ j(not_zero, &loop);
}
-#else
- {
- Label loop;
- __ addl(rax, r8);
- __ PopReturnAddressTo(rcx);
- __ bind(&loop);
- {
- __ decl(r8);
- __ Push(Operand(rbx, r8, times_system_pointer_size,
- kFPOnStackSize + kPCOnStackSize));
- __ j(not_zero, &loop);
- }
- __ PushReturnAddressFrom(rcx);
- }
-#endif
}
__ jmp(&stack_done, Label::kNear);
__ bind(&stack_overflow);
@@ -2652,7 +2349,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// We are not trying to catch interruptions (i.e. debug break and
// preemption) here, so check the "real stack limit".
__ cmpq(kScratchRegister,
- StackLimitAsOperand(masm, StackLimitKind::kRealStackLimit));
+ __ StackLimitAsOperand(StackLimitKind::kRealStackLimit));
__ j(above_equal, &done, Label::kNear);
{
FrameScope scope(masm, StackFrame::MANUAL);
@@ -2662,7 +2359,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
-#ifdef V8_REVERSE_JSARGS
// Save Return Address and Receiver into registers.
__ Pop(r8);
__ Pop(r10);
@@ -2690,54 +2386,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Recover Receiver and Return Address.
__ Push(r10);
__ Push(r8);
-#else // !V8_REVERSE_JSARGS
- // Reserve stack space for the [[BoundArguments]].
- __ movq(kScratchRegister, rbx);
- __ AllocateStackSpace(kScratchRegister);
-
- // Adjust effective number of arguments to include return address.
- __ incl(rax);
-
- // Relocate arguments and return address down the stack.
- {
- Label loop;
- __ Set(rcx, 0);
- __ addq(rbx, rsp);
- __ bind(&loop);
- __ movq(kScratchRegister,
- Operand(rbx, rcx, times_system_pointer_size, 0));
- __ movq(Operand(rsp, rcx, times_system_pointer_size, 0),
- kScratchRegister);
- __ incl(rcx);
- __ cmpl(rcx, rax);
- __ j(less, &loop);
- }
-
- // Copy [[BoundArguments]] to the stack (below the arguments).
- {
- Label loop;
- __ LoadTaggedPointerField(
- rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
- __ SmiUntagField(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ bind(&loop);
- // Instead of doing decl(rbx) here subtract kTaggedSize from the header
- // offset in order be able to move decl(rbx) right before the loop
- // condition. This is necessary in order to avoid flags corruption by
- // pointer decompression code.
- __ LoadAnyTaggedField(
- r12, FieldOperand(rcx, rbx, times_tagged_size,
- FixedArray::kHeaderSize - kTaggedSize));
- __ movq(Operand(rsp, rax, times_system_pointer_size, 0), r12);
- __ leal(rax, Operand(rax, 1));
- __ decl(rbx);
- __ j(greater, &loop);
- }
-
- // Adjust effective number of arguments (rax contains the number of
- // arguments from the call plus return address plus the number of
- // [[BoundArguments]]), so we need to subtract one for the return address.
- __ decl(rax);
-#endif // !V8_REVERSE_JSARGS
}
__ bind(&no_bound_arguments);
}
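// What the pop/push dance above achieves, in standalone C++: the bound
// arguments slot in between the receiver and the call-site arguments, exactly
// as Function.prototype.bind requires.
#include <vector>
std::vector<long> InsertBoundArgs(const std::vector<long>& args,  // [receiver, a0, a1, ...]
                                  const std::vector<long>& bound) {
  std::vector<long> out;
  out.push_back(args.front());                          // receiver (saved in r10)
  out.insert(out.end(), bound.begin(), bound.end());    // [[BoundArguments]]
  out.insert(out.end(), args.begin() + 1, args.end());  // original arguments
  return out;
}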
@@ -3339,6 +2987,27 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ EnterFrame(StackFrame::JS_TO_WASM);
// -------------------------------------------
+ // Compute offsets and prepare for GC.
+ // -------------------------------------------
+ // We will have to save a value indicating to the GC the number of values
+ // on the top of the stack that have to be scanned before calling the Wasm
+ // function.
+ constexpr int kFrameMarkerOffset = -kSystemPointerSize;
+ constexpr int kGCScanSlotCountOffset =
+ kFrameMarkerOffset - kSystemPointerSize;
+ constexpr int kParamCountOffset = kGCScanSlotCountOffset - kSystemPointerSize;
+ constexpr int kReturnCountOffset = kParamCountOffset - kSystemPointerSize;
+ constexpr int kValueTypesArrayStartOffset =
+ kReturnCountOffset - kSystemPointerSize;
+ // We set and use this slot only when moving parameters into the parameter
+ // registers (so no GC scan is needed).
+ constexpr int kFunctionDataOffset =
+ kValueTypesArrayStartOffset - kSystemPointerSize;
+ constexpr int kLastSpillOffset = kFunctionDataOffset;
+ constexpr int kNumSpillSlots = 5;
+ __ subq(rsp, Immediate(kNumSpillSlots * kSystemPointerSize));
+
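// The offsets above descend one pointer at a time from the frame marker, so
// the slot count can be checked mechanically; the same constants restated as
// a standalone compile-time check:
constexpr int kPtr = 8;  // kSystemPointerSize on x64
constexpr int kMarkerOfs = -kPtr;                     // kFrameMarkerOffset
constexpr int kLastSpillOfs = kMarkerOfs - 5 * kPtr;  // kFunctionDataOffset
static_assert((kMarkerOfs - kLastSpillOfs) / kPtr == 5, "kNumSpillSlots == 5");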
+ // -------------------------------------------
// Load the Wasm exported function data and the Wasm instance.
// -------------------------------------------
Register closure = rdi;
@@ -3363,6 +3032,25 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
WasmExportedFunctionData::kInstanceOffset - kHeapObjectTag));
// -------------------------------------------
+ // Increment the call count in function data.
+ // -------------------------------------------
+ __ SmiAddConstant(
+ MemOperand(function_data,
+ WasmExportedFunctionData::kCallCountOffset - kHeapObjectTag),
+ Smi::FromInt(1));
+
+ // -------------------------------------------
+ // Check if the call count reached the threshold.
+ // -------------------------------------------
+ Label compile_wrapper, compile_wrapper_done;
+ __ SmiCompare(
+ MemOperand(function_data,
+ WasmExportedFunctionData::kCallCountOffset - kHeapObjectTag),
+ Smi::FromInt(wasm::kGenericWrapperThreshold));
+ __ j(greater_equal, &compile_wrapper);
+ __ bind(&compile_wrapper_done);
+
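// The tier-up decision above, restated in standalone C++; the threshold value
// here is a stand-in for wasm::kGenericWrapperThreshold.
constexpr int kWrapperThreshold = 6;  // illustrative
bool ShouldCompileSpecificWrapper(int* call_count) {
  *call_count += 1;                         // SmiAddConstant on the function data
  return *call_count >= kWrapperThreshold;  // SmiCompare + j(greater_equal)
}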
+ // -------------------------------------------
// Load values from the signature.
// -------------------------------------------
Register foreign_signature = r11;
@@ -3371,9 +3059,10 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
MemOperand(function_data,
WasmExportedFunctionData::kSignatureOffset - kHeapObjectTag));
Register signature = foreign_signature;
- __ movq(signature,
- MemOperand(foreign_signature, wasm::ObjectAccess::ToTagged(
- Foreign::kForeignAddressOffset)));
+ __ LoadExternalPointerField(
+ signature,
+ FieldOperand(foreign_signature, Foreign::kForeignAddressOffset),
+ kForeignForeignAddressTag);
foreign_signature = no_reg;
Register return_count = r8;
__ movq(return_count,
@@ -3387,29 +3076,12 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
signature = no_reg;
// -------------------------------------------
- // Set up the stack.
+ // Store signature-related values to the stack.
// -------------------------------------------
// We store values on the stack to restore them after function calls.
// We cannot push values onto the stack right before the wasm call. The wasm
// function expects the parameters that didn't fit into the registers on the
// top of the stack.
- // We will have to save a value indicating the GC the number
- // of values on the top of the stack that have to be scanned before calling
- // the Wasm function.
- constexpr int kFrameMarkerOffset = -kSystemPointerSize;
- constexpr int kGCScanSlotCountOffset =
- kFrameMarkerOffset - kSystemPointerSize;
- constexpr int kParamCountOffset = kGCScanSlotCountOffset - kSystemPointerSize;
- constexpr int kReturnCountOffset = kParamCountOffset - kSystemPointerSize;
- constexpr int kValueTypesArrayStartOffset =
- kReturnCountOffset - kSystemPointerSize;
- // We set and use this slot only when moving parameters into the parameter
- // registers (so no GC scan is needed).
- constexpr int kFunctionDataOffset =
- kValueTypesArrayStartOffset - kSystemPointerSize;
- constexpr int kLastSpillOffset = kFunctionDataOffset;
- constexpr int kNumSpillSlots = 5;
- __ subq(rsp, Immediate(kNumSpillSlots * kSystemPointerSize));
__ movq(MemOperand(rbp, kParamCountOffset), param_count);
__ movq(MemOperand(rbp, kReturnCountOffset), return_count);
__ movq(MemOperand(rbp, kValueTypesArrayStartOffset), valuetypes_array_ptr);
@@ -3418,17 +3090,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// Parameter handling.
// -------------------------------------------
Label prepare_for_wasm_call;
- __ cmpl(param_count, Immediate(0));
+ __ Cmp(param_count, 0);
// If we have 0 params, jump over the parameter handling.
__ j(equal, &prepare_for_wasm_call);
- // ELSE:
- // Make sure we have the same number of arguments in order to be able to load
- // the arguments using static offsets below.
- __ cmpl(kJavaScriptCallArgCountRegister, param_count);
- __ Check(equal, AbortReason::kInvalidNumberOfJsArgs);
-
// -------------------------------------------
// Create 2 sections for integer and float params.
// -------------------------------------------
@@ -3514,7 +3180,6 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
Register current_param = rbx;
Register param_limit = rdx;
-#ifdef V8_REVERSE_JSARGS
constexpr int kReceiverOnStackSize = kSystemPointerSize;
__ movq(current_param,
Immediate(kFPOnStackSize + kPCOnStackSize + kReceiverOnStackSize));
@@ -3523,13 +3188,6 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ addq(param_limit,
Immediate(kFPOnStackSize + kPCOnStackSize + kReceiverOnStackSize));
const int increment = kSystemPointerSize;
-#else
- __ movq(current_param, param_count);
- __ shlq(current_param, Immediate(kSystemPointerSizeLog2));
- __ addq(current_param, Immediate(kFPOnStackSize));
- __ movq(param_limit, Immediate(kFPOnStackSize));
- const int increment = -kSystemPointerSize;
-#endif
Register param = rax;
// We have to check the types of the params. The ValueType array contains
// the return types first, then the param types.
@@ -3981,6 +3639,30 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ Call(BUILTIN_CODE(masm->isolate(), WasmFloat64ToNumber),
RelocInfo::CODE_TARGET);
__ jmp(&return_done);
+
+ // -------------------------------------------
+ // Kick off compilation.
+ // -------------------------------------------
+ __ bind(&compile_wrapper);
+ // Enable GC.
+ MemOperand GCScanSlotPlace = MemOperand(rbp, kGCScanSlotCountOffset);
+ __ movq(GCScanSlotPlace, Immediate(4));
+ // Save registers to the stack.
+ __ pushq(wasm_instance);
+ __ pushq(function_data);
+ // Push the arguments for the runtime call.
+ __ Push(wasm_instance); // first argument
+ __ Push(function_data); // second argument
+ // Set up context.
+ __ Move(kContextRegister, Smi::zero());
+ // Call the runtime function that kicks off compilation.
+ __ CallRuntime(Runtime::kWasmCompileWrapper, 2);
+ // Pop the result.
+ __ movq(r9, kReturnRegister0);
+ // Restore registers from the stack.
+ __ popq(function_data);
+ __ popq(wasm_instance);
+ __ jmp(&compile_wrapper_done);
}
namespace {
@@ -4156,12 +3838,12 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// -- rbx : call data
// -- rdi : holder
// -- rsp[0] : return address
- // -- rsp[8] : argument argc
+ // -- rsp[8] : argument 0 (receiver)
+ // -- rsp[16] : argument 1
// -- ...
- // -- rsp[argc * 8] : argument 1
- // -- rsp[(argc + 1) * 8] : argument 0 (receiver)
+ // -- rsp[argc * 8] : argument (argc - 1)
+ // -- rsp[(argc + 1) * 8] : argument argc
// -----------------------------------
- // NOTE: The order of args are reversed if V8_REVERSE_JSARGS
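// Address arithmetic for the layout above, standalone: with the reversed
// order, argument i sits at a fixed offset that no longer depends on argc.
constexpr int kSlotSize = 8;
constexpr int ArgSlotOffset(int i) { return (i + 1) * kSlotSize; }
static_assert(ArgSlotOffset(0) == 8,
              "argument 0 (receiver) sits just above the return address");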
Register api_function_address = rdx;
Register argc = rcx;
@@ -4220,13 +3902,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// FunctionCallbackInfo::values_ (points at the first varargs argument passed
// on the stack).
-#ifdef V8_REVERSE_JSARGS
__ leaq(scratch,
Operand(scratch, (FCA::kArgsLength + 1) * kSystemPointerSize));
-#else
- __ leaq(scratch, Operand(scratch, argc, times_system_pointer_size,
- (FCA::kArgsLength - 1) * kSystemPointerSize));
-#endif
__ movq(StackSpaceOperand(1), scratch);
// FunctionCallbackInfo::length_.
@@ -4336,7 +4013,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
__ LoadExternalPointerField(
api_function_address,
- FieldOperand(scratch, Foreign::kForeignAddressOffset));
+ FieldOperand(scratch, Foreign::kForeignAddressOffset),
+ kForeignForeignAddressTag);
// +3 is to skip prolog, return address and name handle.
Operand return_value_operand(
@@ -4352,6 +4030,223 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
__ int3(); // Unused on this architecture.
}
+namespace {
+
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // Save all double registers, they will later be copied to the deoptimizer's
+ // FrameDescription.
+ static constexpr int kDoubleRegsSize =
+ kDoubleSize * XMMRegister::kNumRegisters;
+ __ AllocateStackSpace(kDoubleRegsSize);
+
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ XMMRegister xmm_reg = XMMRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ Movsd(Operand(rsp, offset), xmm_reg);
+ }
+
+ // Save all general purpose registers, they will later be copied to the
+ // deoptimizer's FrameDescription.
+ static constexpr int kNumberOfRegisters = Register::kNumRegisters;
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ __ pushq(Register::from_code(i));
+ }
+
+ static constexpr int kSavedRegistersAreaSize =
+ kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;
+ static constexpr int kCurrentOffsetToReturnAddress = kSavedRegistersAreaSize;
+ static constexpr int kCurrentOffsetToParentSP =
+ kCurrentOffsetToReturnAddress + kPCOnStackSize;
+
+ __ Store(
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate),
+ rbp);
+
+ // We use this to keep the value of the fifth argument temporarily.
+ // Unfortunately we can't store it directly in r8 (used for passing it on
+ // Linux), since r8 is also a parameter passing register on Windows.
+ Register arg5 = r11;
+
+ __ movq(arg_reg_3, Immediate(Deoptimizer::kFixedExitSizeMarker));
+ // Get the address of the location in the code object
+ // and compute the fp-to-sp delta in register arg5.
+ __ movq(arg_reg_4, Operand(rsp, kCurrentOffsetToReturnAddress));
+ // Load the fp-to-sp-delta.
+ __ leaq(arg5, Operand(rsp, kCurrentOffsetToParentSP));
+ __ subq(arg5, rbp);
+ __ negq(arg5);
+
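// The leaq/subq/negq sequence above, restated in standalone C++: the fifth
// argument is the fp-to-sp delta, i.e. how far the optimized frame's stack
// pointer at the deopt point sits below its frame pointer.
#include <cstdint>
intptr_t FpToSpDelta(intptr_t fp, intptr_t sp_at_deopt_point) {
  return -(sp_at_deopt_point - fp);  // subq then negq, yielding fp - sp
}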
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(6);
+ __ movq(rax, Immediate(0));
+ Label context_check;
+ __ movq(rdi, Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(rdi, &context_check);
+ __ movq(rax, Operand(rbp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ movq(arg_reg_1, rax);
+ __ Set(arg_reg_2, static_cast<int>(deopt_kind));
+ // Args 3 and 4 are already in the right registers.
+
+ // On Windows, put the arguments on the stack (PrepareCallCFunction
+ // has created space for this). On Linux, pass the arguments in r8 and r9.
+#ifdef V8_TARGET_OS_WIN
+ __ movq(Operand(rsp, 4 * kSystemPointerSize), arg5);
+ __ LoadAddress(arg5, ExternalReference::isolate_address(isolate));
+ __ movq(Operand(rsp, 5 * kSystemPointerSize), arg5);
+#else
+ __ movq(r8, arg5);
+ __ LoadAddress(r9, ExternalReference::isolate_address(isolate));
+#endif
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+ // Preserve deoptimizer object in register rax and get the input
+ // frame descriptor pointer.
+ __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
+
+ // Fill in the input registers.
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ __ PopQuad(Operand(rbx, offset));
+ }
+
+ // Fill in the double input registers.
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ __ popq(Operand(rbx, dst_offset));
+ }
+
+ // Mark the stack as not iterable for the CPU profiler which won't be able to
+ // walk the stack without the return address.
+ __ movb(__ ExternalReferenceAsOperand(
+ ExternalReference::stack_is_iterable_address(isolate)),
+ Immediate(0));
+
+ // Remove the return address from the stack.
+ __ addq(rsp, Immediate(kPCOnStackSize));
+
+ // Compute a pointer to the unwinding limit in register rcx; that is
+ // the first stack slot not part of the input frame.
+ __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ addq(rcx, rsp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ leaq(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+ Label pop_loop_header;
+ __ jmp(&pop_loop_header);
+ Label pop_loop;
+ __ bind(&pop_loop);
+ __ Pop(Operand(rdx, 0));
+ __ addq(rdx, Immediate(sizeof(intptr_t)));
+ __ bind(&pop_loop_header);
+ __ cmpq(rcx, rsp);
+ __ j(not_equal, &pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ pushq(rax);
+ __ PrepareCallCFunction(2);
+ __ movq(arg_reg_1, rax);
+ __ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate));
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 2);
+ }
+ __ popq(rax);
+
+ __ movq(rsp, Operand(rax, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: rax = current FrameDescription**, rdx = one past the
+ // last FrameDescription**.
+ __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
+ __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
+ __ leaq(rdx, Operand(rax, rdx, times_system_pointer_size, 0));
+ __ jmp(&outer_loop_header);
+ __ bind(&outer_push_loop);
+ // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
+ __ movq(rbx, Operand(rax, 0));
+ __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
+ __ bind(&inner_push_loop);
+ __ subq(rcx, Immediate(sizeof(intptr_t)));
+ __ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
+ __ bind(&inner_loop_header);
+ __ testq(rcx, rcx);
+ __ j(not_zero, &inner_push_loop);
+ __ addq(rax, Immediate(kSystemPointerSize));
+ __ bind(&outer_loop_header);
+ __ cmpq(rax, rdx);
+ __ j(below, &outer_push_loop);
+
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ XMMRegister xmm_reg = XMMRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ Movsd(xmm_reg, Operand(rbx, src_offset));
+ }
+
+ // Push pc and continuation from the last output frame.
+ __ PushQuad(Operand(rbx, FrameDescription::pc_offset()));
+ __ PushQuad(Operand(rbx, FrameDescription::continuation_offset()));
+
+ // Push the registers from the last output frame.
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset =
+ (i * kSystemPointerSize) + FrameDescription::registers_offset();
+ __ PushQuad(Operand(rbx, offset));
+ }
+
+ // Restore the registers from the stack.
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ Register r = Register::from_code(i);
+ // Do not restore rsp, simply pop the value into the next register
+ // and overwrite this afterwards.
+ if (r == rsp) {
+ DCHECK_GT(i, 0);
+ r = Register::from_code(i - 1);
+ }
+ __ popq(r);
+ }
+
+ __ movb(__ ExternalReferenceAsOperand(
+ ExternalReference::stack_is_iterable_address(isolate)),
+ Immediate(1));
+
+ // Return to the continuation point.
+ __ ret(0);
+}
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/src/codegen/DIR_METADATA b/deps/v8/src/codegen/DIR_METADATA
new file mode 100644
index 0000000000..fc018666b1
--- /dev/null
+++ b/deps/v8/src/codegen/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Compiler"
+}
\ No newline at end of file
diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS
index 7b3ad8d1e0..332c1705b5 100644
--- a/deps/v8/src/codegen/OWNERS
+++ b/deps/v8/src/codegen/OWNERS
@@ -17,5 +17,3 @@ solanes@chromium.org
tebbi@chromium.org
titzer@chromium.org
mythria@chromium.org
-
-# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index 00d0644f73..cc5d6299f5 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -552,6 +552,15 @@ Assembler::~Assembler() { DCHECK_EQ(const_pool_blocked_nesting_, 0); }
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+ // (larger) kCodeAlignment anyway.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
// Emit constant pool if necessary.
CheckConstPool(true, false);
DCHECK(pending_32_bit_constants_.empty());
@@ -2649,12 +2658,30 @@ static bool FitsVmovIntImm(uint64_t imm, uint32_t* encoding, uint8_t* cmode) {
return false;
}
+void Assembler::vmov(const DwVfpRegister dst, uint64_t imm) {
+ uint32_t enc;
+ uint8_t cmode;
+ uint8_t op = 0;
+ if (CpuFeatures::IsSupported(NEON) && FitsVmovIntImm(imm, &enc, &cmode)) {
+ CpuFeatureScope scope(this, NEON);
+ // Instruction details available in ARM DDI 0406C.b, A8-937.
+ // 001i1(27-23) | D(22) | 000(21-19) | imm3(18-16) | Vd(15-12) | cmode(11-8)
+ // | 0(7) | 0(6) | op(5) | 1(4) | imm4(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ emit(kSpecialCondition | 0x05 * B23 | d * B22 | vd * B12 | cmode * B8 |
+ op * B5 | 0x1 * B4 | enc);
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
void Assembler::vmov(const QwNeonRegister dst, uint64_t imm) {
uint32_t enc;
uint8_t cmode;
uint8_t op = 0;
- if (CpuFeatures::IsSupported(VFPv3) && FitsVmovIntImm(imm, &enc, &cmode)) {
- CpuFeatureScope scope(this, VFPv3);
+ if (CpuFeatures::IsSupported(NEON) && FitsVmovIntImm(imm, &enc, &cmode)) {
+ CpuFeatureScope scope(this, NEON);
// Instruction details available in ARM DDI 0406C.b, A8-937.
// 001i1(27-23) | D(22) | 000(21-19) | imm3(18-16) | Vd(15-12) | cmode(11-8)
// | 0(7) | Q(6) | op(5) | 1(4) | imm4(3-0)
@@ -3677,6 +3704,28 @@ void Assembler::vld1(NeonSize size, const NeonListOperand& dst,
src.rm().code());
}
+// vld1s(ingle element to one lane).
+void Assembler::vld1s(NeonSize size, const NeonListOperand& dst, uint8_t index,
+ const NeonMemOperand& src) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.322.
+ // 1111(31-28) | 01001(27-23) | D(22) | 10(21-20) | Rn(19-16) |
+ // Vd(15-12) | size(11-10) | index_align(7-4) | Rm(3-0)
+ // See vld1 (single element to all lanes) if size == 0x3, implemented as
+ // vld1r(eplicate).
+ DCHECK_NE(size, 0x3);
+ // Check for valid lane indices.
+ DCHECK_GT(1 << (3 - size), index);
+ // Specifying alignment not supported, use standard alignment.
+ uint8_t index_align = index << (size + 1);
+
+ DCHECK(IsEnabled(NEON));
+ int vd, d;
+ dst.base().split_code(&vd, &d);
+ emit(0xFU * B28 | 4 * B24 | 1 * B23 | d * B22 | 2 * B20 |
+ src.rn().code() * B16 | vd * B12 | size * B10 | index_align * B4 |
+ src.rm().code());
+}
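// The index_align arithmetic above, standalone: the lane index must fit the
// element size (8 lanes of 8 bits, 4 of 16, 2 of 32 per D register), and the
// low bits stay zero to request standard alignment.
#include <cassert>
#include <cstdint>
uint8_t IndexAlign(int size, uint8_t index) {
  assert(size != 0x3);                // handled by vld1r instead
  assert(index < (1 << (3 - size)));  // lane count check from the DCHECK
  return static_cast<uint8_t>(index << (size + 1));
}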
+
// vld1r(eplicate)
void Assembler::vld1r(NeonSize size, const NeonListOperand& dst,
const NeonMemOperand& src) {
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index 18631e2ece..cb8b7628f5 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -839,6 +839,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// All these APIs support D0 to D31 and Q0 to Q15.
void vld1(NeonSize size, const NeonListOperand& dst,
const NeonMemOperand& src);
+ // vld1s(ingle element to one lane).
+ void vld1s(NeonSize size, const NeonListOperand& dst, uint8_t index,
+ const NeonMemOperand& src);
void vld1r(NeonSize size, const NeonListOperand& dst,
const NeonMemOperand& src);
void vst1(NeonSize size, const NeonListOperand& src,
@@ -853,6 +856,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmov(NeonDataType dt, DwVfpRegister dst, int index, Register src);
void vmov(NeonDataType dt, Register dst, DwVfpRegister src, int index);
+ void vmov(DwVfpRegister dst, uint64_t imm);
void vmov(QwNeonRegister dst, uint64_t imm);
void vmov(QwNeonRegister dst, QwNeonRegister src);
void vdup(NeonSize size, QwNeonRegister dst, Register src);
diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
index 731d175393..96bf2ae50c 100644
--- a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
@@ -278,54 +278,6 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 3);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index 319ee39ef7..b72e385d58 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -163,12 +163,10 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Builtins::IsIsolateIndependentBuiltin(*code));
int builtin_index = Builtins::kNoBuiltinId;
- bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
+ bool target_is_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
- if (options().use_pc_relative_calls_and_jumps &&
- target_is_isolate_independent_builtin) {
+ if (options().use_pc_relative_calls_and_jumps && target_is_builtin) {
int32_t code_target_index = AddCodeTarget(code);
b(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
return;
@@ -178,13 +176,12 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
// size s.t. pc-relative calls may be used.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- int offset = code->builtin_index() * kSystemPointerSize +
- IsolateData::builtin_entry_table_offset();
+ int offset = IsolateData::builtin_entry_slot_offset(
+ static_cast<Builtins::Name>(code->builtin_index()));
ldr(scratch, MemOperand(kRootRegister, offset));
Jump(scratch, cond);
return;
- } else if (options().inline_offheap_trampolines &&
- target_is_isolate_independent_builtin) {
+ } else if (options().inline_offheap_trampolines && target_is_builtin) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
EmbeddedData d = EmbeddedData::FromBlob();
@@ -258,12 +255,10 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Builtins::IsIsolateIndependentBuiltin(*code));
int builtin_index = Builtins::kNoBuiltinId;
- bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
+ bool target_is_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
- if (target_is_isolate_independent_builtin &&
- options().use_pc_relative_calls_and_jumps) {
+ if (target_is_builtin && options().use_pc_relative_calls_and_jumps) {
int32_t code_target_index = AddCodeTarget(code);
bl(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
return;
@@ -271,13 +266,12 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
// This branch is taken only for specific cctests, where we force isolate
// creation at runtime. At this point, Code space isn't restricted to a
// size s.t. pc-relative calls may be used.
- int offset = code->builtin_index() * kSystemPointerSize +
- IsolateData::builtin_entry_table_offset();
+ int offset = IsolateData::builtin_entry_slot_offset(
+ static_cast<Builtins::Name>(code->builtin_index()));
ldr(ip, MemOperand(kRootRegister, offset));
Call(ip, cond);
return;
- } else if (target_is_isolate_independent_builtin &&
- options().inline_offheap_trampolines) {
+ } else if (target_is_builtin && options().inline_offheap_trampolines) {
// Inline the trampoline.
CallBuiltin(builtin_index);
return;
@@ -1553,22 +1547,102 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
mov(sp, dst_reg);
}
+void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ CHECK(is_int32(offset));
+ ldr(destination, MemOperand(kRootRegister, offset));
+}
+
+void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here, which will cause scratch to become negative.
+ sub(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
+ b(le, stack_overflow); // Signed comparison.
+}
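// The comparison above in standalone C++ terms: the headroom below sp must
// exceed what the arguments will occupy, and the signed "le" also catches an
// sp that is already below the limit (negative headroom).
#include <cstdint>
bool StackWouldOverflow(intptr_t sp, intptr_t real_limit, intptr_t num_args,
                        int pointer_size_log2) {
  intptr_t headroom = sp - real_limit;  // may be negative
  return headroom <= (num_args << pointer_size_log2);
}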
+
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
Label* done, InvokeFlag flag) {
Label regular_invoke;
-
- // Check whether the expected and actual arguments count match. If not,
- // setup registers according to contract with ArgumentsAdaptorTrampoline:
// r0: actual arguments count
// r1: function (passed through to callee)
// r2: expected arguments count
-
- // The code below is made a lot easier because the calling code already sets
- // up actual and expected registers according to the contract.
DCHECK_EQ(actual_parameter_count, r0);
DCHECK_EQ(expected_parameter_count, r2);
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // If the expected parameter count is equal to the adaptor sentinel, no need
+ // to push undefined value as arguments.
+ cmp(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
+ b(eq, &regular_invoke);
+
+ // If overapplication or if the actual argument count is equal to the
+ // formal parameter count, no need to push extra undefined values.
+ sub(expected_parameter_count, expected_parameter_count,
+ actual_parameter_count, SetCC);
+ b(le, &regular_invoke);
+
+ Label stack_overflow;
+ Register scratch = r4;
+ StackOverflowCheck(expected_parameter_count, scratch, &stack_overflow);
+
+ // Underapplication. Move the arguments already in the stack, including the
+ // receiver and the return address.
+ {
+ Label copy, check;
+ Register num = r5, src = r6, dest = r9; // r7 and r8 are context and root.
+ mov(src, sp);
+ // Update stack pointer.
+ lsl(scratch, expected_parameter_count, Operand(kSystemPointerSizeLog2));
+ AllocateStackSpace(scratch);
+ mov(dest, sp);
+ mov(num, actual_parameter_count);
+ b(&check);
+ bind(&copy);
+ ldr(scratch, MemOperand(src, kSystemPointerSize, PostIndex));
+ str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
+ sub(num, num, Operand(1), SetCC);
+ bind(&check);
+ b(ge, &copy);
+ }
+
+ // Fill remaining expected arguments with undefined values.
+ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ {
+ Label loop;
+ bind(&loop);
+ str(scratch, MemOperand(r9, kSystemPointerSize, PostIndex));
+ sub(expected_parameter_count, expected_parameter_count, Operand(1), SetCC);
+ b(gt, &loop);
+ }
+ b(&regular_invoke);
+
+ bind(&stack_overflow);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ CallRuntime(Runtime::kThrowStackOverflow);
+ bkpt(0);
+ }
+#else
+ // Check whether the expected and actual arguments count match. If not,
+  // set up registers according to contract with ArgumentsAdaptorTrampoline.
cmp(expected_parameter_count, actual_parameter_count);
b(eq, &regular_invoke);
@@ -1579,7 +1653,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
}
- bind(&regular_invoke);
+#endif
+ bind(&regular_invoke);
}
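Net effect of the V8_NO_ARGUMENTS_ADAPTOR path above: an under-applied call is turned into a regular one by growing the frame and appending one undefined per missing formal parameter, while over-application and the sentinel case fall through untouched. The machine code moves the live slots and stores undefined in place; a host-side model of the resulting layout (the vector-as-frame representation and names are illustrative):

  #include <vector>

  // frame holds the receiver followed by the actual arguments.
  std::vector<int> PadUnderApplication(std::vector<int> frame, int actual,
                                       int expected, int undefined) {
    int extra = expected - actual;  // <= 0 means over-application: no-op
    if (extra > 0) frame.insert(frame.end(), extra, undefined);
    return frame;
  }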
void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
@@ -2140,6 +2215,23 @@ void TurboAssembler::RestoreFPRegs(Register location, Register scratch) {
add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
}
+void TurboAssembler::SaveFPRegsToHeap(Register location, Register scratch) {
+ CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
+ CheckFor32DRegs(scratch);
+ vstm(ia_w, location, d0, d15);
+ vstm(ia_w, location, d16, d31, ne);
+ add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
+}
+
+void TurboAssembler::RestoreFPRegsFromHeap(Register location,
+ Register scratch) {
+ CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
+ CheckFor32DRegs(scratch);
+ vldm(ia_w, location, d0, d15);
+ vldm(ia_w, location, d16, d31, ne);
+ add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
+}
+
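With the ia_w (increment after, write back) form of vstm/vldm, the saved area starts at the lowest address and grows upward, which is exactly the heap semantics named in the header comment. Assuming kDoubleSize == 8, SaveFPRegsToHeap produces:

  location + 0   .. location + 127 : d0..d15
  location + 128 .. location + 255 : d16..d31 (only when VFP32DREGS is present)

In both cases location ends up advanced by 32 * kDoubleSize: the conditional second vstm (ne, per the flags set by CheckFor32DRegs) does it when 32 D-registers exist, and the predicated add compensates when only 16 do, so callers can always reserve the full 32-register area.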
template <typename T>
void TurboAssembler::FloatMaxHelper(T result, T left, T right,
Label* out_of_line) {
@@ -2280,16 +2372,18 @@ void TurboAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
}
static const int kRegisterPassedArguments = 4;
+// The hardfloat calling convention passes double arguments in registers d0-d7.
+static const int kDoubleRegisterPassedArguments = 8;
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
int stack_passed_words = 0;
if (use_eabi_hardfloat()) {
- // In the hard floating point calling convention, we can use all double
+ // In the hard floating point calling convention, we can use the first 8
// registers to pass doubles.
- if (num_double_arguments > DoubleRegister::SupportedRegisterCount()) {
+ if (num_double_arguments > kDoubleRegisterPassedArguments) {
stack_passed_words +=
- 2 * (num_double_arguments - DoubleRegister::SupportedRegisterCount());
+ 2 * (num_double_arguments - kDoubleRegisterPassedArguments);
}
} else {
// In the soft floating point calling convention, every double
@@ -2491,26 +2585,18 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- Label* exit, DeoptimizeKind kind) {
+void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
+ Label* exit, DeoptimizeKind kind,
+ Label*) {
+ BlockConstPoolScope block_const_pool(this);
+ ldr(ip, MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(target)));
+ Call(ip);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
USE(exit, kind);
- NoRootArrayScope no_root_array(this);
-
- // Save the deopt id in r10 (we don't need the roots array from now on).
- DCHECK_LE(deopt_id, 0xFFFF);
- if (CpuFeatures::IsSupported(ARMv7)) {
- // On ARMv7, we can use movw (with a maximum immediate of 0xFFFF)
- movw(r10, deopt_id);
- } else {
- // On ARMv6, we might need two instructions.
- mov(r10, Operand(deopt_id & 0xFF)); // Set the low byte.
- if (deopt_id >= 0xFF) {
- orr(r10, r10, Operand(deopt_id & 0xFF00)); // Set the high byte.
- }
- }
-
- Call(target, RelocInfo::RUNTIME_ENTRY);
- CheckConstPool(false, false);
}
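Deoptimization now reaches its builtin through the isolate's builtin entry table instead of a pc-relative RUNTIME_ENTRY call, which removes the need to smuggle the deopt id through r10. Judging from the arm Call() hunk near the top of this file, where the open-coded expression builtin_index * kSystemPointerSize + builtin_entry_table_offset() was replaced by the helper, the slot offset presumably reduces to:

  // Presumed expansion; not the authoritative IsolateData definition.
  int BuiltinEntrySlotOffset(int builtin_index, int builtin_entry_table_offset,
                             int system_pointer_size) {
    return builtin_entry_table_offset + builtin_index * system_pointer_size;
  }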
void TurboAssembler::Trap() { stop(); }
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index a7dc5498b8..a4d6632a07 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -17,6 +17,10 @@
namespace v8 {
namespace internal {
+// TODO(victorgomes): Move definition to macro-assembler.h, once all other
+// platforms are updated.
+enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
// ----------------------------------------------------------------------------
// Static helper functions
@@ -320,10 +324,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- // This should only be used when assembling a deoptimizer call because of
- // the CheckConstPool invocation, which is only needed for deoptimization.
- void CallForDeoptimization(Address target, int deopt_id, Label* exit,
- DeoptimizeKind kind);
+ void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@@ -395,6 +398,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// values to location, restoring [d0..(d15|d31)].
void RestoreFPRegs(Register location, Register scratch);
+ // As above, but with heap semantics instead of stack semantics, i.e.: the
+ // location starts at the lowest address and grows towards higher addresses,
+ // for both saves and restores.
+ void SaveFPRegsToHeap(Register location, Register scratch);
+ void RestoreFPRegsFromHeap(Register location, Register scratch);
+
// Calculate how much stack space (in bytes) is required to store caller
// registers excluding those specified in the arguments.
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
@@ -731,11 +740,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// TODO(victorgomes): Remove this function once we stick with the reversed
// arguments order.
MemOperand ReceiverOperand(Register argc) {
-#ifdef V8_REVERSE_JSARGS
return MemOperand(sp, 0);
-#else
- return MemOperand(sp, argc, LSL, kSystemPointerSizeLog2);
-#endif
}
// ---------------------------------------------------------------------------
@@ -781,6 +786,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register scratch2);
// ---------------------------------------------------------------------------
+ // Stack limit utilities
+ void LoadStackLimit(Register destination, StackLimitKind kind);
+ void StackOverflowCheck(Register num_args, Register scratch,
+ Label* stack_overflow);
+
+ // ---------------------------------------------------------------------------
// Smi utilities
void SmiTag(Register reg, SBit s = LeaveCC);
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index 2e21ab913d..4aaa413d2d 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -372,6 +372,15 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+  // (larger) kCodeAlignment anyway.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
// Emit constant pool if necessary.
ForceConstantPoolEmissionWithoutJump();
DCHECK(constpool_.IsEmpty());
@@ -403,7 +412,9 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
}
void Assembler::Align(int m) {
- DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
+ // If not, the loop below won't terminate.
+ DCHECK(IsAligned(pc_offset(), kInstrSize));
+ DCHECK(m >= kInstrSize && base::bits::IsPowerOfTwo(m));
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
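The strengthened DCHECKs encode the loop's termination argument: pc_offset() only ever moves in kInstrSize steps, so alignment to any power-of-two multiple of kInstrSize is reachable by emitting nops. For example, with kInstrSize == 4, pc_offset() == 20 and m == 16, three nops land at offsets 20, 24 and 28 and the loop exits at 32. The same loop on the host, for illustration:

  int AlignOffset(int pc_offset, int m) {  // m: power of two, multiple of 4
    while ((pc_offset & (m - 1)) != 0) pc_offset += 4;  // one 4-byte nop each
    return pc_offset;
  }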
@@ -3199,9 +3210,11 @@ void Assembler::movi(const VRegister& vd, const uint64_t imm, Shift shift,
Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI |
ImmNEONabcdefgh(imm8) | NEONCmode(0xE) | Rd(vd));
} else if (shift == LSL) {
+ DCHECK(is_uint8(imm));
NEONModifiedImmShiftLsl(vd, static_cast<int>(imm), shift_amount,
NEONModifiedImmediate_MOVI);
} else {
+ DCHECK(is_uint8(imm));
NEONModifiedImmShiftMsl(vd, static_cast<int>(imm), shift_amount,
NEONModifiedImmediate_MOVI);
}
diff --git a/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc b/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
index 0c9beba776..f7bccfdbe2 100644
--- a/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
@@ -282,54 +282,6 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 3);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index fef1758aaa..69242484bc 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -503,13 +503,15 @@ void TurboAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift,
}
void TurboAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {
- // TODO(all): Move 128-bit values in a more efficient way.
+ // TODO(v8:11033): Move 128-bit values in a more efficient way.
DCHECK(vd.Is128Bits());
- UseScratchRegisterScope temps(this);
Movi(vd.V2D(), lo);
- Register temp = temps.AcquireX();
- Mov(temp, hi);
- Ins(vd.V2D(), 1, temp);
+ if (lo != hi) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Mov(temp, hi);
+ Ins(vd.V2D(), 1, temp);
+ }
}
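The rewrite exploits the fact that Movi(vd.V2D(), lo) already replicates lo into both 64-bit lanes, so the scratch register and the Ins are needed only when the halves differ. A lane-level model of the new control flow (types and names are illustrative):

  #include <cstdint>

  struct V128 { uint64_t lane[2]; };

  V128 MoviPair(uint64_t hi, uint64_t lo) {
    V128 v{{lo, lo}};              // Movi(vd.V2D(), lo) fills both lanes
    if (lo != hi) v.lane[1] = hi;  // Ins only when the halves differ
    return v;
  }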
void TurboAssembler::Mvn(const Register& rd, const Operand& operand) {
@@ -869,7 +871,7 @@ bool TurboAssembler::NeedExtraInstructionsOrRegisterBranch(
unresolved_branches_.insert(std::pair<int, FarBranchInfo>(
max_reachable_pc, FarBranchInfo(pc_offset(), label)));
// Also maintain the next pool check.
- next_veneer_pool_check_ = Min(
+ next_veneer_pool_check_ = std::min(
next_veneer_pool_check_, max_reachable_pc - kVeneerDistanceCheckMargin);
}
return need_longer_range;
@@ -1778,8 +1780,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
@@ -1833,8 +1834,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
+ if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index)) {
// Inline the trampoline.
CallBuiltin(builtin_index);
return;
@@ -1880,6 +1880,13 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
}
}
+void TurboAssembler::LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
+ Register destination) {
+ Ldr(destination,
+ MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(builtin_index)));
+}
+
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
@@ -2005,15 +2012,11 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) {
return is_int26(offset);
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- Label* exit, DeoptimizeKind kind) {
+void TurboAssembler::CallForDeoptimization(
+ Builtins::Name target, int deopt_id, Label* exit, DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label) {
BlockPoolsScope scope(this);
- int64_t offset = static_cast<int64_t>(target) -
- static_cast<int64_t>(options().code_range_start);
- DCHECK_EQ(offset % kInstrSize, 0);
- offset = offset / static_cast<int>(kInstrSize);
- DCHECK(IsNearCallOffset(offset));
- near_call(static_cast<int>(offset), RelocInfo::RUNTIME_ENTRY);
+ bl(jump_deoptimization_entry_label);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
@@ -2076,23 +2079,148 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
Mov(sp, dst_reg);
}
-void MacroAssembler::InvokePrologue(Register expected_parameter_count,
- Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
- Label regular_invoke;
+void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
- // Check whether the expected and actual arguments count match. The registers
- // are set up according to contract with ArgumentsAdaptorTrampoline:
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ Ldr(destination, MemOperand(kRootRegister, offset));
+}
+
+void MacroAssembler::StackOverflowCheck(Register num_args,
+ Label* stack_overflow) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+
+ // Check the stack for overflow.
+ // We are not trying to catch interruptions (e.g. debug break and
+ // preemption) here, so the "real stack limit" is checked.
+
+ LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here which will cause scratch to become negative.
+ Sub(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ Cmp(scratch, Operand(num_args, LSL, kSystemPointerSizeLog2));
+ B(le, stack_overflow);
+}
+
+void MacroAssembler::InvokePrologue(Register formal_parameter_count,
+ Register actual_argument_count, Label* done,
+ InvokeFlag flag) {
// x0: actual arguments count.
// x1: function (passed through to callee).
// x2: expected arguments count.
- // The code below is made a lot easier because the calling code already sets
- // up actual and expected registers according to the contract.
- DCHECK_EQ(actual_parameter_count, x0);
- DCHECK_EQ(expected_parameter_count, x2);
+ // x3: new target
+ Label regular_invoke;
+ DCHECK_EQ(actual_argument_count, x0);
+ DCHECK_EQ(formal_parameter_count, x2);
+
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // If the formal parameter count is equal to the adaptor sentinel, no need
+  // to push undefined values as arguments.
+ Cmp(formal_parameter_count, Operand(kDontAdaptArgumentsSentinel));
+ B(eq, &regular_invoke);
+
+ // If overapplication or if the actual argument count is equal to the
+ // formal parameter count, no need to push extra undefined values.
+ Register extra_argument_count = x2;
+ Subs(extra_argument_count, formal_parameter_count, actual_argument_count);
+ B(le, &regular_invoke);
+
+ // The stack pointer in arm64 needs to be 16-byte aligned. We might need to
+  // (1) add an extra padding slot or (2) reuse the padding slot already
+ // in the stack. Let {slots_to_copy} be the number of slots (arguments) to
+ // move up in the stack and let {slots_to_claim} be the number of extra stack
+ // slots to claim.
+ Label even_extra_count, skip_move;
+ Register slots_to_copy = x4;
+ Register slots_to_claim = x5;
+
+ Add(slots_to_copy, actual_argument_count, 1); // Copy with receiver.
+ Mov(slots_to_claim, extra_argument_count);
+ Tbz(extra_argument_count, 0, &even_extra_count);
+
+ // Calculate {slots_to_claim} when {extra_argument_count} is odd.
+  // If {actual_argument_count} is odd, we need one extra padding slot
+  // {slots_to_claim = extra_argument_count + 1}.
+  // If {actual_argument_count} is even, we know that the
+  // original arguments will have a padding slot that we can reuse
+  // {slots_to_claim = extra_argument_count - 1}.
+ {
+ Register scratch = x11;
+ Add(slots_to_claim, extra_argument_count, 1);
+ And(scratch, actual_argument_count, 1);
+ Eor(scratch, scratch, 1);
+ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
+ }
+
+ Bind(&even_extra_count);
+ Cbz(slots_to_claim, &skip_move);
+
+ Label stack_overflow;
+ StackOverflowCheck(slots_to_claim, &stack_overflow);
+ Claim(slots_to_claim);
+ // Move the arguments already in the stack including the receiver.
+ {
+ Register src = x6;
+ Register dst = x7;
+ SlotAddress(src, slots_to_claim);
+ SlotAddress(dst, 0);
+ CopyDoubleWords(dst, src, slots_to_copy);
+ }
+
+ Bind(&skip_move);
+ Register actual_argument_with_receiver = x4;
+ Register pointer_next_value = x5;
+ Add(actual_argument_with_receiver, actual_argument_count,
+ 1); // {slots_to_copy} was scratched.
+
+  // Fill the extra arguments with undefined values.
+ {
+ Label loop;
+ Register undefined_value = x6;
+ Register count = x7;
+ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
+ SlotAddress(pointer_next_value, actual_argument_with_receiver);
+ Mov(count, extra_argument_count);
+ Bind(&loop);
+ Str(undefined_value,
+ MemOperand(pointer_next_value, kSystemPointerSize, PostIndex));
+ Subs(count, count, 1);
+ Cbnz(count, &loop);
+ }
+
+ // Set padding if needed.
+ {
+ Label skip;
+ Register total_args_slots = x4;
+ Add(total_args_slots, actual_argument_with_receiver, extra_argument_count);
+ Tbz(total_args_slots, 0, &skip);
+ Str(padreg, MemOperand(pointer_next_value));
+ Bind(&skip);
+ }
+ B(&regular_invoke);
+
+ bind(&stack_overflow);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ CallRuntime(Runtime::kThrowStackOverflow);
+ Unreachable();
+ }
+#else
+ // Check whether the expected and actual arguments count match. The registers
+  // are set up according to contract with ArgumentsAdaptorTrampoline.
// If actual == expected perform a regular invocation.
- Cmp(expected_parameter_count, actual_parameter_count);
+ Cmp(formal_parameter_count, actual_argument_count);
B(eq, &regular_invoke);
// The argument counts mismatch, generate a call to the argument adaptor.
@@ -2105,6 +2233,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
}
+#endif
Bind(&regular_invoke);
}
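The parity arithmetic a few hunks above checks out against concrete counts: sp stays 16-byte aligned, so the incoming frame holds actual + 1 slots rounded up to an even number and the outgoing frame holds formal + 1 slots rounded up likewise. When extra = formal - actual is odd, the slots to claim come to extra - 1 for an even actual count (the existing padding slot is reused) and extra + 1 for an odd one (a fresh padding slot is needed). A throwaway helper, not V8 API, capturing the two cases:

  // actual = 2, formal = 5: 3 slots + pad -> 6 slots;      claim 2 = extra - 1
  // actual = 3, formal = 6: 4 slots      -> 7 slots + pad; claim 4 = extra + 1
  int SlotsToClaimWhenExtraIsOdd(int actual, int extra) {
    return (actual & 1) ? extra + 1 : extra - 1;
  }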
@@ -2136,7 +2265,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register actual_parameter_count,
InvokeFlag flag) {
// You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
DCHECK_EQ(function, x1);
DCHECK_IMPLIES(new_target.is_valid(), new_target == x3);
@@ -2186,11 +2315,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Operand MacroAssembler::ReceiverOperand(Register arg_count) {
-#ifdef V8_REVERSE_JSARGS
return Operand(0);
-#else
- return Operand(arg_count, LSL, kXRegSizeLog2);
-#endif
}
void MacroAssembler::InvokeFunctionWithNewTarget(
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 655c44f819..b453a17394 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -145,6 +145,10 @@ enum PreShiftImmMode {
kAnyShift // Allow any pre-shift.
};
+// TODO(victorgomes): Move definition to macro-assembler.h, once all other
+// platforms are updated.
+enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
@@ -968,6 +972,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
+ Register destination);
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(int builtin_index);
@@ -980,8 +986,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- void CallForDeoptimization(Address target, int deopt_id, Label* exit,
- DeoptimizeKind kind);
+ void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label);
// Calls a C function.
// The called function is not allowed to trigger a
@@ -1781,8 +1788,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg);
}
- // TODO(victorgomes): inline this function once we remove V8_REVERSE_JSARGS
- // flag.
Operand ReceiverOperand(const Register arg_count);
// ---- SMI and Number Utilities ----
@@ -1979,6 +1984,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register scratch2);
// ---------------------------------------------------------------------------
+ // Stack limit utilities
+ void LoadStackLimit(Register destination, StackLimitKind kind);
+ void StackOverflowCheck(Register num_args, Label* stack_overflow);
+
+ // ---------------------------------------------------------------------------
// Garbage collector support (GC).
// Notify the garbage collector that we wrote a pointer into an object.
@@ -2100,7 +2110,7 @@ class UseScratchRegisterScope {
#endif
available_->Remove(list);
}
- void Include(const Register& reg1, const Register& reg2) {
+ void Include(const Register& reg1, const Register& reg2 = NoReg) {
CPURegList list(reg1, reg2);
Include(list);
}
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index 31620ae965..fbbb0a18da 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -524,8 +524,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index 3d0b7d28e4..f23dccb53e 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -76,6 +76,15 @@ AssemblerOptions AssemblerOptions::Default(Isolate* isolate) {
return options;
}
+AssemblerOptions AssemblerOptions::DefaultForOffHeapTrampoline(
+ Isolate* isolate) {
+ AssemblerOptions options = AssemblerOptions::Default(isolate);
+ // Off-heap trampolines may not contain any metadata since their metadata
+ // offsets refer to the off-heap metadata area.
+ options.emit_code_comments = false;
+ return options;
+}
+
namespace {
class DefaultAssemblerBuffer : public AssemblerBuffer {
@@ -255,7 +264,9 @@ Handle<HeapObject> AssemblerBase::GetEmbeddedObject(
int Assembler::WriteCodeComments() {
- if (!FLAG_code_comments || code_comments_writer_.entry_count() == 0) return 0;
+ CHECK_IMPLIES(code_comments_writer_.entry_count() > 0,
+ options().emit_code_comments);
+ if (code_comments_writer_.entry_count() == 0) return 0;
int offset = pc_offset();
code_comments_writer_.Emit(this);
int size = pc_offset() - offset;
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index 6419e55cec..626bd04bc8 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -180,8 +180,11 @@ struct V8_EXPORT_PRIVATE AssemblerOptions {
// info. This is useful in some platform (Win64) where the unwind info depends
// on a function prologue/epilogue.
bool collect_win64_unwind_info = false;
+ // Whether to emit code comments.
+ bool emit_code_comments = FLAG_code_comments;
static AssemblerOptions Default(Isolate* isolate);
+ static AssemblerOptions DefaultForOffHeapTrampoline(Isolate* isolate);
};
class AssemblerBuffer {
@@ -226,6 +229,8 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
}
// Features are usually enabled by CpuFeatureScope, which also asserts that
// the features are supported before they are enabled.
+ // IMPORTANT: IsEnabled() should only be used by DCHECKs. For real feature
+ // detection, use IsSupported().
bool IsEnabled(CpuFeature f) {
return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
}
@@ -235,7 +240,9 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
bool is_constant_pool_available() const {
if (FLAG_enable_embedded_constant_pool) {
- return constant_pool_available_;
+      // We need to disable the constant pool here for embedded builtins
+      // because the metadata section is not adjacent to the instructions.
+ return constant_pool_available_ && !options().isolate_independent_code;
} else {
// Embedded constant pool not supported on this architecture.
UNREACHABLE();
@@ -280,7 +287,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
// Record an inline code comment that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg) {
- if (FLAG_code_comments) {
+ if (options().emit_code_comments) {
code_comments_writer_.Add(pc_offset(), std::string(msg));
}
}
diff --git a/deps/v8/src/codegen/bailout-reason.h b/deps/v8/src/codegen/bailout-reason.h
index e55e691a08..267beb55e3 100644
--- a/deps/v8/src/codegen/bailout-reason.h
+++ b/deps/v8/src/codegen/bailout-reason.h
@@ -30,7 +30,6 @@ namespace internal {
V(kInvalidJumpTableIndex, "Invalid jump table index") \
V(kInvalidParametersAndRegistersInGenerator, \
"invalid parameters and registers in generator") \
- V(kInvalidNumberOfJsArgs, "Invalid number of JS arguments") \
V(kMissingBytecodeArray, "Missing bytecode array from function") \
V(kObjectNotTagged, "The object is not tagged") \
V(kObjectTagged, "The object is tagged") \
@@ -58,6 +57,7 @@ namespace internal {
V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
+ V(kUnexpectedAdditionalPopValue, "Unexpected additional pop value") \
V(kUnexpectedElementsKindInArrayConstructor, \
"Unexpected ElementsKind in array constructor") \
V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
diff --git a/deps/v8/src/codegen/code-desc.h b/deps/v8/src/codegen/code-desc.h
index 16941b074a..e051bb459c 100644
--- a/deps/v8/src/codegen/code-desc.h
+++ b/deps/v8/src/codegen/code-desc.h
@@ -16,11 +16,11 @@ namespace internal {
// the buffer and grows backward. Inlined metadata sections may exist
// at the end of the instructions.
//
-// │<--------------- buffer_size ----------------------------------->│
-// │<---------------- instr_size ------------->│ │<-reloc_size->│
-// ├───────────────────────────────────────────┼──────┼──────────────┤
-// │ instructions │ data │ free │ reloc info │
-// ├───────────────────────────────────────────┴──────┴──────────────┘
+// |<--------------- buffer_size ----------------------------------->|
+// |<---------------- instr_size ------------->| |<-reloc_size->|
+// |--------------+----------------------------+------+--------------|
+// | instructions | data | free | reloc info |
+// +--------------+----------------------------+------+--------------+
// TODO(jgruber): Add a single chokepoint for specifying the instruction area
// layout (i.e. the order of inlined metadata fields).
@@ -62,6 +62,24 @@ class CodeDesc {
int code_comments_offset = 0;
int code_comments_size = 0;
+ // TODO(jgruber,v8:11036): Remove these functions once CodeDesc fields have
+ // been made consistent with Code layout.
+ int body_size() const { return instr_size + unwinding_info_size; }
+ int instruction_size() const { return safepoint_table_offset; }
+ int metadata_size() const { return body_size() - instruction_size(); }
+ int safepoint_table_offset_relative() const {
+ return safepoint_table_offset - instruction_size();
+ }
+ int handler_table_offset_relative() const {
+ return handler_table_offset - instruction_size();
+ }
+ int constant_pool_offset_relative() const {
+ return constant_pool_offset - instruction_size();
+ }
+ int code_comments_offset_relative() const {
+ return code_comments_offset - instruction_size();
+ }
+
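The new accessors split the body at safepoint_table_offset: everything before it counts as instructions, everything after it, including the separately stored unwinding info, counts as metadata. Plugging in assumed numbers makes the relationships concrete:

  // Assumed: instr_size = 100, unwinding_info_size = 8,
  //          safepoint_table_offset = 80. Then:
  //   instruction_size()                == 80
  //   body_size()                       == 108
  //   metadata_size()                   == 28
  //   safepoint_table_offset_relative() == 0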
// Relocation info is located at the end of the buffer and not part of the
// instructions area.
@@ -69,10 +87,14 @@ class CodeDesc {
int reloc_size = 0;
// Unwinding information.
- // TODO(jgruber): Pack this into the inlined metadata section.
byte* unwinding_info = nullptr;
int unwinding_info_size = 0;
+ int unwinding_info_offset_relative() const {
+ // TODO(jgruber,v8:11036): Remove this function once unwinding_info setup
+ // is more consistent with other metadata tables.
+ return code_comments_offset_relative() + code_comments_size;
+ }
Assembler* origin = nullptr;
};
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 184a31c8a3..ca340c69c8 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -85,7 +85,7 @@ void CodeStubAssembler::Assert(const NodeGenerator<BoolT>& condition_body,
#endif
}
-void CodeStubAssembler::Assert(SloppyTNode<Word32T> condition_node,
+void CodeStubAssembler::Assert(TNode<Word32T> condition_node,
const char* message, const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes) {
#if defined(DEBUG)
@@ -129,7 +129,7 @@ void CodeStubAssembler::Check(const NodeGenerator<BoolT>& condition_body,
Check(branch, message, file, line, extra_nodes);
}
-void CodeStubAssembler::Check(SloppyTNode<Word32T> condition_node,
+void CodeStubAssembler::Check(TNode<Word32T> condition_node,
const char* message, const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes) {
BranchGenerator branch = [=](Label* ok, Label* not_ok) {
@@ -197,24 +197,26 @@ void CodeStubAssembler::FailAssert(
Unreachable();
}
-TNode<Int32T> CodeStubAssembler::SelectInt32Constant(
- SloppyTNode<BoolT> condition, int true_value, int false_value) {
+TNode<Int32T> CodeStubAssembler::SelectInt32Constant(TNode<BoolT> condition,
+ int true_value,
+ int false_value) {
return SelectConstant<Int32T>(condition, Int32Constant(true_value),
Int32Constant(false_value));
}
-TNode<IntPtrT> CodeStubAssembler::SelectIntPtrConstant(
- SloppyTNode<BoolT> condition, int true_value, int false_value) {
+TNode<IntPtrT> CodeStubAssembler::SelectIntPtrConstant(TNode<BoolT> condition,
+ int true_value,
+ int false_value) {
return SelectConstant<IntPtrT>(condition, IntPtrConstant(true_value),
IntPtrConstant(false_value));
}
TNode<Oddball> CodeStubAssembler::SelectBooleanConstant(
- SloppyTNode<BoolT> condition) {
+ TNode<BoolT> condition) {
return SelectConstant<Oddball>(condition, TrueConstant(), FalseConstant());
}
-TNode<Smi> CodeStubAssembler::SelectSmiConstant(SloppyTNode<BoolT> condition,
+TNode<Smi> CodeStubAssembler::SelectSmiConstant(TNode<BoolT> condition,
Smi true_value,
Smi false_value) {
return SelectConstant<Smi>(condition, SmiConstant(true_value),
@@ -770,8 +772,7 @@ TNode<Smi> CodeStubAssembler::TrySmiAbs(TNode<Smi> a, Label* if_overflow) {
}
}
-TNode<Number> CodeStubAssembler::NumberMax(SloppyTNode<Number> a,
- SloppyTNode<Number> b) {
+TNode<Number> CodeStubAssembler::NumberMax(TNode<Number> a, TNode<Number> b) {
// TODO(danno): This could be optimized by specifically handling smi cases.
TVARIABLE(Number, result);
Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
@@ -789,8 +790,7 @@ TNode<Number> CodeStubAssembler::NumberMax(SloppyTNode<Number> a,
return result.value();
}
-TNode<Number> CodeStubAssembler::NumberMin(SloppyTNode<Number> a,
- SloppyTNode<Number> b) {
+TNode<Number> CodeStubAssembler::NumberMin(TNode<Number> a, TNode<Number> b) {
// TODO(danno): This could be optimized by specifically handling smi cases.
TVARIABLE(Number, result);
Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
@@ -1006,7 +1006,7 @@ TNode<Int32T> CodeStubAssembler::TruncateIntPtrToInt32(
return ReinterpretCast<Int32T>(value);
}
-TNode<BoolT> CodeStubAssembler::TaggedIsSmi(SloppyTNode<MaybeObject> a) {
+TNode<BoolT> CodeStubAssembler::TaggedIsSmi(TNode<MaybeObject> a) {
STATIC_ASSERT(kSmiTagMask < kMaxUInt32);
return Word32Equal(
Word32And(TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)),
@@ -1014,7 +1014,7 @@ TNode<BoolT> CodeStubAssembler::TaggedIsSmi(SloppyTNode<MaybeObject> a) {
Int32Constant(0));
}
-TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(SloppyTNode<MaybeObject> a) {
+TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(TNode<MaybeObject> a) {
return Word32BinaryNot(TaggedIsSmi(a));
}
@@ -1370,13 +1370,122 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(SloppyTNode<Object> value,
}
}
+TNode<ExternalPointerT> CodeStubAssembler::ChangeUint32ToExternalPointer(
+ TNode<Uint32T> value) {
+ STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
+ return ReinterpretCast<ExternalPointerT>(ChangeUint32ToWord(value));
+}
+
+TNode<Uint32T> CodeStubAssembler::ChangeExternalPointerToUint32(
+ TNode<ExternalPointerT> value) {
+ STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
+ return Unsigned(TruncateWordToInt32(ReinterpretCast<UintPtrT>(value)));
+}
+
+void CodeStubAssembler::InitializeExternalPointerField(TNode<HeapObject> object,
+ TNode<IntPtrT> offset) {
+#ifdef V8_HEAP_SANDBOX
+ TNode<ExternalReference> external_pointer_table_address = ExternalConstant(
+ ExternalReference::external_pointer_table_address(isolate()));
+ TNode<Uint32T> table_length = UncheckedCast<Uint32T>(
+ Load(MachineType::Uint32(), external_pointer_table_address,
+ UintPtrConstant(Internals::kExternalPointerTableLengthOffset)));
+ TNode<Uint32T> table_capacity = UncheckedCast<Uint32T>(
+ Load(MachineType::Uint32(), external_pointer_table_address,
+ UintPtrConstant(Internals::kExternalPointerTableCapacityOffset)));
+
+ Label grow_table(this, Label::kDeferred), finish(this);
+
+ TNode<BoolT> compare = Uint32LessThan(table_length, table_capacity);
+ Branch(compare, &finish, &grow_table);
+
+ BIND(&grow_table);
+ {
+ TNode<ExternalReference> table_grow_function = ExternalConstant(
+ ExternalReference::external_pointer_table_grow_table_function());
+ CallCFunction(
+ table_grow_function, MachineType::Pointer(),
+ std::make_pair(MachineType::Pointer(), external_pointer_table_address));
+ Goto(&finish);
+ }
+ BIND(&finish);
+
+ TNode<Uint32T> new_table_length = Uint32Add(table_length, Uint32Constant(1));
+ StoreNoWriteBarrier(
+ MachineRepresentation::kWord32, external_pointer_table_address,
+ UintPtrConstant(Internals::kExternalPointerTableLengthOffset),
+ new_table_length);
+
+ TNode<Uint32T> index = table_length;
+ TNode<ExternalPointerT> encoded = ChangeUint32ToExternalPointer(index);
+ StoreObjectFieldNoWriteBarrier<ExternalPointerT>(object, offset, encoded);
+#endif
+}
+
+TNode<RawPtrT> CodeStubAssembler::LoadExternalPointerFromObject(
+ TNode<HeapObject> object, TNode<IntPtrT> offset,
+ ExternalPointerTag external_pointer_tag) {
+#ifdef V8_HEAP_SANDBOX
+ TNode<ExternalReference> external_pointer_table_address = ExternalConstant(
+ ExternalReference::external_pointer_table_address(isolate()));
+ TNode<RawPtrT> table = UncheckedCast<RawPtrT>(
+ Load(MachineType::Pointer(), external_pointer_table_address,
+ UintPtrConstant(Internals::kExternalPointerTableBufferOffset)));
+
+ TNode<ExternalPointerT> encoded =
+ LoadObjectField<ExternalPointerT>(object, offset);
+ TNode<Word32T> index = ChangeExternalPointerToUint32(encoded);
+ // TODO(v8:10391, saelo): bounds check if table is not caged
+ TNode<IntPtrT> table_offset = ElementOffsetFromIndex(
+ ChangeUint32ToWord(index), SYSTEM_POINTER_ELEMENTS, 0);
+
+ TNode<UintPtrT> entry = Load<UintPtrT>(table, table_offset);
+ if (external_pointer_tag != 0) {
+ TNode<UintPtrT> tag = UintPtrConstant(external_pointer_tag);
+ entry = UncheckedCast<UintPtrT>(WordXor(entry, tag));
+ }
+ return UncheckedCast<RawPtrT>(UncheckedCast<WordT>(entry));
+#else
+ return LoadObjectField<RawPtrT>(object, offset);
+#endif // V8_HEAP_SANDBOX
+}
+
+void CodeStubAssembler::StoreExternalPointerToObject(
+ TNode<HeapObject> object, TNode<IntPtrT> offset, TNode<RawPtrT> pointer,
+ ExternalPointerTag external_pointer_tag) {
+#ifdef V8_HEAP_SANDBOX
+ TNode<ExternalReference> external_pointer_table_address = ExternalConstant(
+ ExternalReference::external_pointer_table_address(isolate()));
+ TNode<RawPtrT> table = UncheckedCast<RawPtrT>(
+ Load(MachineType::Pointer(), external_pointer_table_address,
+ UintPtrConstant(Internals::kExternalPointerTableBufferOffset)));
+
+ TNode<ExternalPointerT> encoded =
+ LoadObjectField<ExternalPointerT>(object, offset);
+ TNode<Word32T> index = ChangeExternalPointerToUint32(encoded);
+ // TODO(v8:10391, saelo): bounds check if table is not caged
+ TNode<IntPtrT> table_offset = ElementOffsetFromIndex(
+ ChangeUint32ToWord(index), SYSTEM_POINTER_ELEMENTS, 0);
+
+ TNode<UintPtrT> value = UncheckedCast<UintPtrT>(pointer);
+ if (external_pointer_tag != 0) {
+ TNode<UintPtrT> tag = UintPtrConstant(external_pointer_tag);
+ value = UncheckedCast<UintPtrT>(WordXor(pointer, tag));
+ }
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), table, table_offset,
+ value);
+#else
+ StoreObjectFieldNoWriteBarrier<RawPtrT>(object, offset, pointer);
+#endif // V8_HEAP_SANDBOX
+}
+
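Under V8_HEAP_SANDBOX the object field no longer carries a raw pointer: it carries an index into a per-isolate side table, and table entries are XOR-ed with a type tag, so loading an entry under the wrong tag yields a scrambled pointer rather than a usable one. A self-contained model of the scheme (illustrative types and names, not the V8 implementation; growth and bounds checks elided as in the TODOs above):

  #include <cstdint>
  #include <vector>

  struct ExternalPointerTable {
    std::vector<uintptr_t> entries;

    uint32_t Allocate() {                 // cf. InitializeExternalPointerField
      entries.push_back(0);
      return static_cast<uint32_t>(entries.size()) - 1;
    }
    void Store(uint32_t index, uintptr_t pointer, uintptr_t tag) {
      entries[index] = pointer ^ tag;     // cf. StoreExternalPointerToObject
    }
    uintptr_t Load(uint32_t index, uintptr_t tag) const {
      return entries[index] ^ tag;        // cf. LoadExternalPointerFromObject
    }
  };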
TNode<Object> CodeStubAssembler::LoadFromParentFrame(int offset) {
TNode<RawPtrT> frame_pointer = LoadParentFramePointer();
return LoadFullTagged(frame_pointer, IntPtrConstant(offset));
}
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
- SloppyTNode<HeapObject> object, int offset) {
+ TNode<HeapObject> object, int offset) {
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
offset += 4;
@@ -1388,7 +1497,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
}
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(
- SloppyTNode<HeapObject> object, int offset) {
+ TNode<HeapObject> object, int offset) {
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
offset += 4;
@@ -1400,7 +1509,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(
}
TNode<Float64T> CodeStubAssembler::LoadHeapNumberValue(
- SloppyTNode<HeapObject> object) {
+ TNode<HeapObject> object) {
CSA_ASSERT(this, Word32Or(IsHeapNumber(object), IsOddball(object)));
STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
return LoadObjectField<Float64T>(object, HeapNumber::kValueOffset);
@@ -1413,27 +1522,26 @@ TNode<Map> CodeStubAssembler::GetInstanceTypeMap(InstanceType instance_type) {
return HeapConstant(map_handle);
}
-TNode<Map> CodeStubAssembler::LoadMap(SloppyTNode<HeapObject> object) {
+TNode<Map> CodeStubAssembler::LoadMap(TNode<HeapObject> object) {
return LoadObjectField<Map>(object, HeapObject::kMapOffset);
}
-TNode<Uint16T> CodeStubAssembler::LoadInstanceType(
- SloppyTNode<HeapObject> object) {
+TNode<Uint16T> CodeStubAssembler::LoadInstanceType(TNode<HeapObject> object) {
return LoadMapInstanceType(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::HasInstanceType(SloppyTNode<HeapObject> object,
+TNode<BoolT> CodeStubAssembler::HasInstanceType(TNode<HeapObject> object,
InstanceType instance_type) {
return InstanceTypeEqual(LoadInstanceType(object), instance_type);
}
TNode<BoolT> CodeStubAssembler::DoesntHaveInstanceType(
- SloppyTNode<HeapObject> object, InstanceType instance_type) {
+ TNode<HeapObject> object, InstanceType instance_type) {
return Word32NotEqual(LoadInstanceType(object), Int32Constant(instance_type));
}
TNode<BoolT> CodeStubAssembler::TaggedDoesntHaveInstanceType(
- SloppyTNode<HeapObject> any_tagged, InstanceType type) {
+ TNode<HeapObject> any_tagged, InstanceType type) {
/* return Phi <TaggedIsSmi(val), DoesntHaveInstanceType(val, type)> */
TNode<BoolT> tagged_is_smi = TaggedIsSmi(any_tagged);
return Select<BoolT>(
@@ -1441,8 +1549,7 @@ TNode<BoolT> CodeStubAssembler::TaggedDoesntHaveInstanceType(
[=]() { return DoesntHaveInstanceType(any_tagged, type); });
}
-TNode<BoolT> CodeStubAssembler::IsSpecialReceiverMap(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsSpecialReceiverMap(TNode<Map> map) {
TNode<BoolT> is_special =
IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
uint32_t mask = Map::Bits1::HasNamedInterceptorBit::kMask |
@@ -1470,7 +1577,7 @@ void CodeStubAssembler::GotoIfMapHasSlowProperties(TNode<Map> map,
}
TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
- SloppyTNode<JSReceiver> object) {
+ TNode<JSReceiver> object) {
CSA_SLOW_ASSERT(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object))));
TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
return Select<HeapObject>(
@@ -1479,7 +1586,7 @@ TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
}
TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
- SloppyTNode<JSReceiver> object) {
+ TNode<JSReceiver> object) {
CSA_SLOW_ASSERT(this, IsDictionaryMap(LoadMap(object)));
TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
return Select<HeapObject>(
@@ -1496,8 +1603,7 @@ TNode<Object> CodeStubAssembler::LoadJSArgumentsObjectLength(
return LoadObjectField(array, offset);
}
-TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(
- SloppyTNode<JSArray> array) {
+TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(TNode<JSArray> array) {
TNode<Number> length = LoadJSArrayLength(array);
CSA_ASSERT(this, Word32Or(IsFastElementsKind(LoadElementsKind(array)),
IsElementsKindInRange(
@@ -1510,13 +1616,13 @@ TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(
}
TNode<Smi> CodeStubAssembler::LoadFixedArrayBaseLength(
- SloppyTNode<FixedArrayBase> array) {
+ TNode<FixedArrayBase> array) {
CSA_SLOW_ASSERT(this, IsNotWeakFixedArraySubclass(array));
return LoadObjectField<Smi>(array, FixedArrayBase::kLengthOffset);
}
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagFixedArrayBaseLength(
- SloppyTNode<FixedArrayBase> array) {
+ TNode<FixedArrayBase> array) {
return LoadAndUntagObjectField(array, FixedArrayBase::kLengthOffset);
}
@@ -1532,7 +1638,7 @@ TNode<Smi> CodeStubAssembler::LoadWeakFixedArrayLength(
}
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagWeakFixedArrayLength(
- SloppyTNode<WeakFixedArray> array) {
+ TNode<WeakFixedArray> array) {
return LoadAndUntagObjectField(array, WeakFixedArray::kLengthOffset);
}
@@ -1548,59 +1654,48 @@ TNode<Int32T> CodeStubAssembler::LoadNumberOfOwnDescriptors(TNode<Map> map) {
DecodeWord32<Map::Bits3::NumberOfOwnDescriptorsBits>(bit_field3));
}
-TNode<Int32T> CodeStubAssembler::LoadMapBitField(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<Int32T> CodeStubAssembler::LoadMapBitField(TNode<Map> map) {
return UncheckedCast<Int32T>(
LoadObjectField<Uint8T>(map, Map::kBitFieldOffset));
}
-TNode<Int32T> CodeStubAssembler::LoadMapBitField2(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<Int32T> CodeStubAssembler::LoadMapBitField2(TNode<Map> map) {
return UncheckedCast<Int32T>(
LoadObjectField<Uint8T>(map, Map::kBitField2Offset));
}
-TNode<Uint32T> CodeStubAssembler::LoadMapBitField3(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<Uint32T> CodeStubAssembler::LoadMapBitField3(TNode<Map> map) {
return LoadObjectField<Uint32T>(map, Map::kBitField3Offset);
}
-TNode<Uint16T> CodeStubAssembler::LoadMapInstanceType(SloppyTNode<Map> map) {
+TNode<Uint16T> CodeStubAssembler::LoadMapInstanceType(TNode<Map> map) {
return LoadObjectField<Uint16T>(map, Map::kInstanceTypeOffset);
}
-TNode<Int32T> CodeStubAssembler::LoadMapElementsKind(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<Int32T> CodeStubAssembler::LoadMapElementsKind(TNode<Map> map) {
TNode<Int32T> bit_field2 = LoadMapBitField2(map);
return Signed(DecodeWord32<Map::Bits2::ElementsKindBits>(bit_field2));
}
-TNode<Int32T> CodeStubAssembler::LoadElementsKind(
- SloppyTNode<HeapObject> object) {
+TNode<Int32T> CodeStubAssembler::LoadElementsKind(TNode<HeapObject> object) {
return LoadMapElementsKind(LoadMap(object));
}
-TNode<DescriptorArray> CodeStubAssembler::LoadMapDescriptors(
- SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<DescriptorArray> CodeStubAssembler::LoadMapDescriptors(TNode<Map> map) {
return LoadObjectField<DescriptorArray>(map, Map::kInstanceDescriptorsOffset);
}
-TNode<HeapObject> CodeStubAssembler::LoadMapPrototype(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<HeapObject> CodeStubAssembler::LoadMapPrototype(TNode<Map> map) {
return LoadObjectField<HeapObject>(map, Map::kPrototypeOffset);
}
-TNode<IntPtrT> CodeStubAssembler::LoadMapInstanceSizeInWords(
- SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<IntPtrT> CodeStubAssembler::LoadMapInstanceSizeInWords(TNode<Map> map) {
return ChangeInt32ToIntPtr(
LoadObjectField<Uint8T>(map, Map::kInstanceSizeInWordsOffset));
}
TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords(
- SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+ TNode<Map> map) {
// See Map::GetInObjectPropertiesStartInWords() for details.
CSA_ASSERT(this, IsJSObjectMap(map));
return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>(
@@ -1608,16 +1703,14 @@ TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords(
}
TNode<IntPtrT> CodeStubAssembler::LoadMapConstructorFunctionIndex(
- SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+ TNode<Map> map) {
// See Map::GetConstructorFunctionIndex() for details.
CSA_ASSERT(this, IsPrimitiveInstanceType(LoadMapInstanceType(map)));
return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>(
map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset));
}
-TNode<Object> CodeStubAssembler::LoadMapConstructor(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<Object> CodeStubAssembler::LoadMapConstructor(TNode<Map> map) {
TVARIABLE(Object, result,
LoadObjectField(
map, Map::kConstructorOrBackPointerOrNativeContextOffset));
@@ -1639,13 +1732,12 @@ TNode<Object> CodeStubAssembler::LoadMapConstructor(SloppyTNode<Map> map) {
return result.value();
}
-TNode<WordT> CodeStubAssembler::LoadMapEnumLength(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<WordT> CodeStubAssembler::LoadMapEnumLength(TNode<Map> map) {
TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
return DecodeWordFromWord32<Map::Bits3::EnumLengthBits>(bit_field3);
}
-TNode<Object> CodeStubAssembler::LoadMapBackPointer(SloppyTNode<Map> map) {
+TNode<Object> CodeStubAssembler::LoadMapBackPointer(TNode<Map> map) {
TNode<HeapObject> object = CAST(LoadObjectField(
map, Map::kConstructorOrBackPointerOrNativeContextOffset));
return Select<Object>(
@@ -1743,14 +1835,12 @@ TNode<Smi> CodeStubAssembler::LoadStringLengthAsSmi(TNode<String> string) {
return SmiFromIntPtr(LoadStringLengthAsWord(string));
}
-TNode<IntPtrT> CodeStubAssembler::LoadStringLengthAsWord(
- SloppyTNode<String> string) {
+TNode<IntPtrT> CodeStubAssembler::LoadStringLengthAsWord(TNode<String> string) {
return Signed(ChangeUint32ToWord(LoadStringLengthAsWord32(string)));
}
TNode<Uint32T> CodeStubAssembler::LoadStringLengthAsWord32(
- SloppyTNode<String> string) {
- CSA_ASSERT(this, IsString(string));
+ TNode<String> string) {
return LoadObjectField<Uint32T>(string, String::kLengthOffset);
}
@@ -2005,10 +2095,9 @@ TNode<IntPtrT> CodeStubAssembler::LoadPropertyArrayLength(
TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayDataPtr(
TNode<JSTypedArray> typed_array) {
- // Data pointer = DecodeExternalPointer(external_pointer) +
- // static_cast<Tagged_t>(base_pointer).
+ // Data pointer = external_pointer + static_cast<Tagged_t>(base_pointer).
TNode<RawPtrT> external_pointer =
- DecodeExternalPointer(LoadJSTypedArrayExternalPointer(typed_array));
+ LoadJSTypedArrayExternalPointerPtr(typed_array);
TNode<IntPtrT> base_pointer;
if (COMPRESS_POINTERS_BOOL) {
@@ -2467,21 +2556,20 @@ TNode<BoolT> CodeStubAssembler::LoadScopeInfoHasExtensionField(
}
void CodeStubAssembler::StoreContextElementNoWriteBarrier(
- SloppyTNode<Context> context, int slot_index, SloppyTNode<Object> value) {
+ TNode<Context> context, int slot_index, SloppyTNode<Object> value) {
int offset = Context::SlotOffset(slot_index);
StoreNoWriteBarrier(MachineRepresentation::kTagged, context,
IntPtrConstant(offset), value);
}
TNode<NativeContext> CodeStubAssembler::LoadNativeContext(
- SloppyTNode<Context> context) {
+ TNode<Context> context) {
TNode<Map> map = LoadMap(context);
return CAST(LoadObjectField(
map, Map::kConstructorOrBackPointerOrNativeContextOffset));
}
-TNode<Context> CodeStubAssembler::LoadModuleContext(
- SloppyTNode<Context> context) {
+TNode<Context> CodeStubAssembler::LoadModuleContext(TNode<Context> context) {
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Map> module_map = CAST(
LoadContextElement(native_context, Context::MODULE_CONTEXT_MAP_INDEX));
@@ -2524,7 +2612,7 @@ TNode<Map> CodeStubAssembler::LoadSlowObjectWithNullPrototypeMap(
}
TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
- SloppyTNode<Int32T> kind, SloppyTNode<NativeContext> native_context) {
+ SloppyTNode<Int32T> kind, TNode<NativeContext> native_context) {
CSA_ASSERT(this, IsFastElementsKind(kind));
TNode<IntPtrT> offset =
IntPtrAdd(IntPtrConstant(Context::FIRST_JS_ARRAY_MAP_SLOT),
@@ -2533,7 +2621,7 @@ TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
}
TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
- ElementsKind kind, SloppyTNode<NativeContext> native_context) {
+ ElementsKind kind, TNode<NativeContext> native_context) {
return UncheckedCast<Map>(
LoadContextElement(native_context, Context::ArrayMapIndex(kind)));
}
@@ -2606,7 +2694,7 @@ TNode<HeapObject> CodeStubAssembler::LoadJSFunctionPrototype(
}
TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(
- SloppyTNode<SharedFunctionInfo> shared) {
+ TNode<SharedFunctionInfo> shared) {
TNode<HeapObject> function_data = LoadObjectField<HeapObject>(
shared, SharedFunctionInfo::kFunctionDataOffset);
@@ -3263,7 +3351,7 @@ TNode<NameDictionary> CodeStubAssembler::CopyNameDictionary(
template <typename CollectionType>
TNode<CollectionType> CodeStubAssembler::AllocateOrderedHashTable() {
- static const int kCapacity = CollectionType::kMinCapacity;
+ static const int kCapacity = CollectionType::kInitialCapacity;
static const int kBucketCount = kCapacity / CollectionType::kLoadFactor;
static const int kDataTableLength = kCapacity * CollectionType::kEntrySize;
static const int kFixedArrayLength =
@@ -3322,7 +3410,6 @@ TNode<JSObject> CodeStubAssembler::AllocateJSObjectFromMap(
TNode<Map> map, base::Optional<TNode<HeapObject>> properties,
base::Optional<TNode<FixedArray>> elements, AllocationFlags flags,
SlackTrackingMode slack_tracking_mode) {
- CSA_ASSERT(this, IsMap(map));
CSA_ASSERT(this, Word32BinaryNot(IsJSFunctionMap(map)));
CSA_ASSERT(this, Word32BinaryNot(InstanceTypeEqual(LoadMapInstanceType(map),
JS_GLOBAL_OBJECT_TYPE)));
@@ -3340,7 +3427,6 @@ void CodeStubAssembler::InitializeJSObjectFromMap(
base::Optional<TNode<HeapObject>> properties,
base::Optional<TNode<FixedArray>> elements,
SlackTrackingMode slack_tracking_mode) {
- CSA_SLOW_ASSERT(this, IsMap(map));
// This helper assumes that the object is in new-space, as guarded by the
// check in AllocatedJSObjectFromMap.
if (!properties) {
@@ -3370,7 +3456,7 @@ void CodeStubAssembler::InitializeJSObjectFromMap(
}
void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
- SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ TNode<HeapObject> object, TNode<Map> map,
SloppyTNode<IntPtrT> instance_size, int start_offset) {
STATIC_ASSERT(Map::kNoSlackTracking == 0);
CSA_ASSERT(this, IsClearWord32<Map::Bits3::ConstructionCounterBits>(
@@ -3380,7 +3466,7 @@ void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
}
void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
- SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ TNode<HeapObject> object, TNode<Map> map,
SloppyTNode<IntPtrT> instance_size) {
Comment("InitializeJSObjectBodyNoSlackTracking");
@@ -3824,11 +3910,10 @@ template V8_EXPORT_PRIVATE TNode<FixedArrayBase>
template <typename TIndex>
TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
- SloppyTNode<FixedArrayBase> source, TNode<TIndex> first,
- TNode<TIndex> count, TNode<TIndex> capacity, TNode<Map> source_map,
- ElementsKind from_kind, AllocationFlags allocation_flags,
- ExtractFixedArrayFlags extract_flags, HoleConversionMode convert_holes,
- TVariable<BoolT>* var_holes_converted,
+ TNode<FixedArrayBase> source, TNode<TIndex> first, TNode<TIndex> count,
+ TNode<TIndex> capacity, TNode<Map> source_map, ElementsKind from_kind,
+ AllocationFlags allocation_flags, ExtractFixedArrayFlags extract_flags,
+ HoleConversionMode convert_holes, TVariable<BoolT>* var_holes_converted,
base::Optional<TNode<Int32T>> source_elements_kind) {
static_assert(
std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
@@ -4046,8 +4131,8 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
Label if_hole(this);
- Node* value = LoadElementAndPrepareForStore(
- from_array, var_from_offset.value(), kind, kind, &if_hole);
+ TNode<Float64T> value = LoadDoubleWithHoleCheck(
+ from_array, var_from_offset.value(), &if_hole, MachineType::Float64());
StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array_adjusted,
to_offset, value);
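
The change above also tightens the loaded type: instead of the untyped LoadElementAndPrepareForStore, the copy now goes through LoadDoubleWithHoleCheck, which branches to if_hole when the raw 64-bit slot holds the hole sentinel rather than a real double. A minimal host-side sketch of that test, assuming the sentinel's upper 32 bits are compared against a dedicated constant (V8's kHoleNanUpper32; the value below is a stand-in):

    #include <cstdint>
    #include <cstring>

    constexpr uint32_t kHoleUpper32 = 0xFFF7FFFF;  // stand-in for kHoleNanUpper32

    // True when the bit pattern in `slot` is the array-hole marker rather
    // than an ordinary double (including ordinary NaNs).
    bool IsHole(double slot) {
      uint64_t bits;
      std::memcpy(&bits, &slot, sizeof bits);
      return static_cast<uint32_t>(bits >> 32) == kHoleUpper32;
    }
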
@@ -4910,7 +4995,7 @@ TNode<Float64T> CodeStubAssembler::TryTaggedToFloat64(
}
TNode<Float64T> CodeStubAssembler::TruncateTaggedToFloat64(
- SloppyTNode<Context> context, SloppyTNode<Object> value) {
+ TNode<Context> context, SloppyTNode<Object> value) {
// We might need to loop once due to ToNumber conversion.
TVARIABLE(Object, var_value, value);
TVARIABLE(Float64T, var_result);
@@ -4940,7 +5025,7 @@ TNode<Float64T> CodeStubAssembler::TruncateTaggedToFloat64(
}
TNode<Word32T> CodeStubAssembler::TruncateTaggedToWord32(
- SloppyTNode<Context> context, SloppyTNode<Object> value) {
+ TNode<Context> context, SloppyTNode<Object> value) {
TVARIABLE(Word32T, var_result);
Label done(this);
TaggedToWord32OrBigIntImpl<Object::Conversion::kToNumber>(context, value,
@@ -5586,13 +5671,11 @@ TNode<BoolT> CodeStubAssembler::InstanceTypeEqual(
return Word32Equal(instance_type, Int32Constant(type));
}
-TNode<BoolT> CodeStubAssembler::IsDictionaryMap(SloppyTNode<Map> map) {
- CSA_SLOW_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsDictionaryMap(TNode<Map> map) {
return IsSetWord32<Map::Bits3::IsDictionaryMapBit>(LoadMapBitField3(map));
}
-TNode<BoolT> CodeStubAssembler::IsExtensibleMap(SloppyTNode<Map> map) {
- CSA_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsExtensibleMap(TNode<Map> map) {
return IsSetWord32<Map::Bits3::IsExtensibleBit>(LoadMapBitField3(map));
}
@@ -5604,18 +5687,15 @@ TNode<BoolT> CodeStubAssembler::IsExtensibleNonPrototypeMap(TNode<Map> map) {
Int32Constant(kExpected));
}
-TNode<BoolT> CodeStubAssembler::IsCallableMap(SloppyTNode<Map> map) {
- CSA_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsCallableMap(TNode<Map> map) {
return IsSetWord32<Map::Bits1::IsCallableBit>(LoadMapBitField(map));
}
-TNode<BoolT> CodeStubAssembler::IsDeprecatedMap(SloppyTNode<Map> map) {
- CSA_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsDeprecatedMap(TNode<Map> map) {
return IsSetWord32<Map::Bits3::IsDeprecatedBit>(LoadMapBitField3(map));
}
-TNode<BoolT> CodeStubAssembler::IsUndetectableMap(SloppyTNode<Map> map) {
- CSA_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsUndetectableMap(TNode<Map> map) {
return IsSetWord32<Map::Bits1::IsUndetectableBit>(LoadMapBitField(map));
}
@@ -5676,7 +5756,7 @@ TNode<BoolT> CodeStubAssembler::IsPromiseSpeciesProtectorCellInvalid() {
}
TNode<BoolT> CodeStubAssembler::IsPrototypeInitialArrayPrototype(
- SloppyTNode<Context> context, SloppyTNode<Map> map) {
+ TNode<Context> context, TNode<Map> map) {
const TNode<NativeContext> native_context = LoadNativeContext(context);
const TNode<Object> initial_array_prototype = LoadContextElement(
native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
@@ -5685,7 +5765,7 @@ TNode<BoolT> CodeStubAssembler::IsPrototypeInitialArrayPrototype(
}
TNode<BoolT> CodeStubAssembler::IsPrototypeTypedArrayPrototype(
- SloppyTNode<Context> context, SloppyTNode<Map> map) {
+ TNode<Context> context, TNode<Map> map) {
const TNode<NativeContext> native_context = LoadNativeContext(context);
const TNode<Object> typed_array_prototype =
LoadContextElement(native_context, Context::TYPED_ARRAY_PROTOTYPE_INDEX);
@@ -5736,22 +5816,19 @@ TNode<BoolT> CodeStubAssembler::TaggedIsCallable(TNode<Object> object) {
});
}
-TNode<BoolT> CodeStubAssembler::IsCallable(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsCallable(TNode<HeapObject> object) {
return IsCallableMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsConstructorMap(SloppyTNode<Map> map) {
- CSA_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsConstructorMap(TNode<Map> map) {
return IsSetWord32<Map::Bits1::IsConstructorBit>(LoadMapBitField(map));
}
-TNode<BoolT> CodeStubAssembler::IsConstructor(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsConstructor(TNode<HeapObject> object) {
return IsConstructorMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsFunctionWithPrototypeSlotMap(
- SloppyTNode<Map> map) {
- CSA_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsFunctionWithPrototypeSlotMap(TNode<Map> map) {
return IsSetWord32<Map::Bits1::HasPrototypeSlotBit>(LoadMapBitField(map));
}
@@ -5838,16 +5915,15 @@ TNode<BoolT> CodeStubAssembler::IsJSReceiverInstanceType(
Int32Constant(FIRST_JS_RECEIVER_TYPE));
}
-TNode<BoolT> CodeStubAssembler::IsJSReceiverMap(SloppyTNode<Map> map) {
+TNode<BoolT> CodeStubAssembler::IsJSReceiverMap(TNode<Map> map) {
return IsJSReceiverInstanceType(LoadMapInstanceType(map));
}
-TNode<BoolT> CodeStubAssembler::IsJSReceiver(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSReceiver(TNode<HeapObject> object) {
return IsJSReceiverMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsNullOrJSReceiver(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsNullOrJSReceiver(TNode<HeapObject> object) {
return UncheckedCast<BoolT>(Word32Or(IsJSReceiver(object), IsNull(object)));
}
@@ -5860,12 +5936,11 @@ TNode<BoolT> CodeStubAssembler::IsJSGlobalProxyInstanceType(
return InstanceTypeEqual(instance_type, JS_GLOBAL_PROXY_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSGlobalProxyMap(SloppyTNode<Map> map) {
+TNode<BoolT> CodeStubAssembler::IsJSGlobalProxyMap(TNode<Map> map) {
return IsJSGlobalProxyInstanceType(LoadMapInstanceType(map));
}
-TNode<BoolT> CodeStubAssembler::IsJSGlobalProxy(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSGlobalProxy(TNode<HeapObject> object) {
return IsJSGlobalProxyMap(LoadMap(object));
}
@@ -5880,12 +5955,11 @@ TNode<BoolT> CodeStubAssembler::IsJSObjectInstanceType(
Int32Constant(FIRST_JS_OBJECT_TYPE));
}
-TNode<BoolT> CodeStubAssembler::IsJSObjectMap(SloppyTNode<Map> map) {
- CSA_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsJSObjectMap(TNode<Map> map) {
return IsJSObjectInstanceType(LoadMapInstanceType(map));
}
-TNode<BoolT> CodeStubAssembler::IsJSObject(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSObject(TNode<HeapObject> object) {
return IsJSObjectMap(LoadMap(object));
}
@@ -5899,30 +5973,28 @@ TNode<BoolT> CodeStubAssembler::IsJSFinalizationRegistry(
return IsJSFinalizationRegistryMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsJSPromiseMap(SloppyTNode<Map> map) {
- CSA_ASSERT(this, IsMap(map));
+TNode<BoolT> CodeStubAssembler::IsJSPromiseMap(TNode<Map> map) {
return InstanceTypeEqual(LoadMapInstanceType(map), JS_PROMISE_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSPromise(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSPromise(TNode<HeapObject> object) {
return IsJSPromiseMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsJSProxy(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSProxy(TNode<HeapObject> object) {
return HasInstanceType(object, JS_PROXY_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSStringIterator(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSStringIterator(TNode<HeapObject> object) {
return HasInstanceType(object, JS_STRING_ITERATOR_TYPE);
}
TNode<BoolT> CodeStubAssembler::IsJSRegExpStringIterator(
- SloppyTNode<HeapObject> object) {
+ TNode<HeapObject> object) {
return HasInstanceType(object, JS_REG_EXP_STRING_ITERATOR_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsMap(SloppyTNode<HeapObject> map) {
+TNode<BoolT> CodeStubAssembler::IsMap(TNode<HeapObject> map) {
return IsMetaMap(LoadMap(map));
}
@@ -5931,12 +6003,11 @@ TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapperInstanceType(
return InstanceTypeEqual(instance_type, JS_PRIMITIVE_WRAPPER_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapper(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapper(TNode<HeapObject> object) {
return IsJSPrimitiveWrapperMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapperMap(SloppyTNode<Map> map) {
+TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapperMap(TNode<Map> map) {
return IsJSPrimitiveWrapperInstanceType(LoadMapInstanceType(map));
}
@@ -5945,30 +6016,28 @@ TNode<BoolT> CodeStubAssembler::IsJSArrayInstanceType(
return InstanceTypeEqual(instance_type, JS_ARRAY_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSArray(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSArray(TNode<HeapObject> object) {
return IsJSArrayMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsJSArrayMap(SloppyTNode<Map> map) {
+TNode<BoolT> CodeStubAssembler::IsJSArrayMap(TNode<Map> map) {
return IsJSArrayInstanceType(LoadMapInstanceType(map));
}
-TNode<BoolT> CodeStubAssembler::IsJSArrayIterator(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSArrayIterator(TNode<HeapObject> object) {
return HasInstanceType(object, JS_ARRAY_ITERATOR_TYPE);
}
TNode<BoolT> CodeStubAssembler::IsJSAsyncGeneratorObject(
- SloppyTNode<HeapObject> object) {
+ TNode<HeapObject> object) {
return HasInstanceType(object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsFixedArray(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsFixedArray(TNode<HeapObject> object) {
return HasInstanceType(object, FIXED_ARRAY_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsFixedArraySubclass(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsFixedArraySubclass(TNode<HeapObject> object) {
TNode<Uint16T> instance_type = LoadInstanceType(object);
return UncheckedCast<BoolT>(
Word32And(Int32GreaterThanOrEqual(instance_type,
@@ -5978,7 +6047,7 @@ TNode<BoolT> CodeStubAssembler::IsFixedArraySubclass(
}
TNode<BoolT> CodeStubAssembler::IsNotWeakFixedArraySubclass(
- SloppyTNode<HeapObject> object) {
+ TNode<HeapObject> object) {
TNode<Uint16T> instance_type = LoadInstanceType(object);
return UncheckedCast<BoolT>(Word32Or(
Int32LessThan(instance_type, Int32Constant(FIRST_WEAK_FIXED_ARRAY_TYPE)),
@@ -5986,8 +6055,7 @@ TNode<BoolT> CodeStubAssembler::IsNotWeakFixedArraySubclass(
Int32Constant(LAST_WEAK_FIXED_ARRAY_TYPE))));
}
-TNode<BoolT> CodeStubAssembler::IsPropertyArray(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsPropertyArray(TNode<HeapObject> object) {
return HasInstanceType(object, PROPERTY_ARRAY_TYPE);
}
@@ -6008,7 +6076,7 @@ TNode<BoolT> CodeStubAssembler::IsPromiseReactionJobTask(
// TODO(jgruber): It might be worth creating an empty_double_array constant to
// simplify this case.
TNode<BoolT> CodeStubAssembler::IsFixedArrayWithKindOrEmpty(
- SloppyTNode<FixedArrayBase> object, ElementsKind kind) {
+ TNode<FixedArrayBase> object, ElementsKind kind) {
Label out(this);
TVARIABLE(BoolT, var_result, Int32TrueConstant());
@@ -6024,8 +6092,8 @@ TNode<BoolT> CodeStubAssembler::IsFixedArrayWithKindOrEmpty(
return var_result.value();
}
-TNode<BoolT> CodeStubAssembler::IsFixedArrayWithKind(
- SloppyTNode<HeapObject> object, ElementsKind kind) {
+TNode<BoolT> CodeStubAssembler::IsFixedArrayWithKind(TNode<HeapObject> object,
+ ElementsKind kind) {
if (IsDoubleElementsKind(kind)) {
return IsFixedDoubleArray(object);
} else {
@@ -6035,11 +6103,11 @@ TNode<BoolT> CodeStubAssembler::IsFixedArrayWithKind(
}
}
-TNode<BoolT> CodeStubAssembler::IsBoolean(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsBoolean(TNode<HeapObject> object) {
return IsBooleanMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsPropertyCell(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsPropertyCell(TNode<HeapObject> object) {
return IsPropertyCellMap(LoadMap(object));
}
@@ -6048,7 +6116,7 @@ TNode<BoolT> CodeStubAssembler::IsHeapNumberInstanceType(
return InstanceTypeEqual(instance_type, HEAP_NUMBER_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsOddball(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsOddball(TNode<HeapObject> object) {
return IsOddballInstanceType(LoadInstanceType(object));
}
@@ -6057,7 +6125,7 @@ TNode<BoolT> CodeStubAssembler::IsOddballInstanceType(
return InstanceTypeEqual(instance_type, ODDBALL_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsName(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsName(TNode<HeapObject> object) {
return IsNameInstanceType(LoadInstanceType(object));
}
@@ -6066,7 +6134,7 @@ TNode<BoolT> CodeStubAssembler::IsNameInstanceType(
return Int32LessThanOrEqual(instance_type, Int32Constant(LAST_NAME_TYPE));
}
-TNode<BoolT> CodeStubAssembler::IsString(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsString(TNode<HeapObject> object) {
return IsStringInstanceType(LoadInstanceType(object));
}
@@ -6134,7 +6202,7 @@ TNode<BoolT> CodeStubAssembler::IsBigIntInstanceType(
return InstanceTypeEqual(instance_type, BIGINT_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsBigInt(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsBigInt(TNode<HeapObject> object) {
return IsBigIntInstanceType(LoadInstanceType(object));
}
@@ -6149,7 +6217,7 @@ TNode<BoolT> CodeStubAssembler::IsPrivateName(SloppyTNode<Symbol> symbol) {
return IsSetWord32<Symbol::IsPrivateNameBit>(flags);
}
-TNode<BoolT> CodeStubAssembler::IsHashTable(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsHashTable(TNode<HeapObject> object) {
TNode<Uint16T> instance_type = LoadInstanceType(object);
return UncheckedCast<BoolT>(
Word32And(Int32GreaterThanOrEqual(instance_type,
@@ -6158,23 +6226,19 @@ TNode<BoolT> CodeStubAssembler::IsHashTable(SloppyTNode<HeapObject> object) {
Int32Constant(LAST_HASH_TABLE_TYPE))));
}
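
Predicates like IsHashTable compile down to unsigned range comparisons on the 16-bit instance type. A self-contained sketch of the test, with the bounds passed in rather than taken from V8's generated instance-type constants:

    #include <cstdint>

    // IsHashTable(object) is InRange(instance_type, FIRST_HASH_TABLE_TYPE,
    //                                LAST_HASH_TABLE_TYPE).
    bool InRange(uint16_t type, uint16_t first, uint16_t last) {
      return type >= first && type <= last;
    }
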
-TNode<BoolT> CodeStubAssembler::IsEphemeronHashTable(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsEphemeronHashTable(TNode<HeapObject> object) {
return HasInstanceType(object, EPHEMERON_HASH_TABLE_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsNameDictionary(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsNameDictionary(TNode<HeapObject> object) {
return HasInstanceType(object, NAME_DICTIONARY_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsGlobalDictionary(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsGlobalDictionary(TNode<HeapObject> object) {
return HasInstanceType(object, GLOBAL_DICTIONARY_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsNumberDictionary(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsNumberDictionary(TNode<HeapObject> object) {
return HasInstanceType(object, NUMBER_DICTIONARY_TYPE);
}
@@ -6187,16 +6251,15 @@ TNode<BoolT> CodeStubAssembler::IsJSFunctionInstanceType(
return InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSFunction(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSFunction(TNode<HeapObject> object) {
return IsJSFunctionMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsJSBoundFunction(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSBoundFunction(TNode<HeapObject> object) {
return HasInstanceType(object, JS_BOUND_FUNCTION_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSFunctionMap(SloppyTNode<Map> map) {
+TNode<BoolT> CodeStubAssembler::IsJSFunctionMap(TNode<Map> map) {
return IsJSFunctionInstanceType(LoadMapInstanceType(map));
}
@@ -6205,16 +6268,15 @@ TNode<BoolT> CodeStubAssembler::IsJSTypedArrayInstanceType(
return InstanceTypeEqual(instance_type, JS_TYPED_ARRAY_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSTypedArrayMap(SloppyTNode<Map> map) {
+TNode<BoolT> CodeStubAssembler::IsJSTypedArrayMap(TNode<Map> map) {
return IsJSTypedArrayInstanceType(LoadMapInstanceType(map));
}
-TNode<BoolT> CodeStubAssembler::IsJSTypedArray(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSTypedArray(TNode<HeapObject> object) {
return IsJSTypedArrayMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsJSArrayBuffer(
- SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSArrayBuffer(TNode<HeapObject> object) {
return HasInstanceType(object, JS_ARRAY_BUFFER_TYPE);
}
@@ -6222,7 +6284,7 @@ TNode<BoolT> CodeStubAssembler::IsJSDataView(TNode<HeapObject> object) {
return HasInstanceType(object, JS_DATA_VIEW_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSRegExp(SloppyTNode<HeapObject> object) {
+TNode<BoolT> CodeStubAssembler::IsJSRegExp(TNode<HeapObject> object) {
return HasInstanceType(object, JS_REG_EXP_TYPE);
}
@@ -6235,7 +6297,7 @@ TNode<BoolT> CodeStubAssembler::IsNumeric(SloppyTNode<Object> object) {
});
}
-TNode<BoolT> CodeStubAssembler::IsNumberNormalized(SloppyTNode<Number> number) {
+TNode<BoolT> CodeStubAssembler::IsNumberNormalized(TNode<Number> number) {
TVARIABLE(BoolT, var_result, Int32TrueConstant());
Label out(this);
@@ -6258,7 +6320,7 @@ TNode<BoolT> CodeStubAssembler::IsNumberNormalized(SloppyTNode<Number> number) {
return var_result.value();
}
-TNode<BoolT> CodeStubAssembler::IsNumberPositive(SloppyTNode<Number> number) {
+TNode<BoolT> CodeStubAssembler::IsNumberPositive(TNode<Number> number) {
return Select<BoolT>(
TaggedIsSmi(number), [=] { return TaggedIsPositiveSmi(number); },
[=] { return IsHeapNumberPositive(CAST(number)); });
@@ -6590,8 +6652,7 @@ TNode<RawPtrT> ToDirectStringAssembler::TryToSequential(
if_bailout);
TNode<String> string = var_string_.value();
- TNode<RawPtrT> result =
- DecodeExternalPointer(LoadExternalStringResourceData(CAST(string)));
+ TNode<RawPtrT> result = LoadExternalStringResourceDataPtr(CAST(string));
if (ptr_kind == PTR_TO_STRING) {
result = RawPtrSub(result, IntPtrConstant(SeqOneByteString::kHeaderSize -
kHeapObjectTag));
@@ -6756,174 +6817,154 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
return result.value();
}
-// TODO(solanes, v8:6949): Refactor this to check for JSReceivers first. If we
-// have a JSReceiver, extract the primitive and fallthrough. Otherwise, continue
-// asking for the other instance types. This will make it so that we can remove
-// the loop (which was looping at most once). Also, see if we can make use of
-// PlainPrimitiveNonNumberToNumber to de-duplicate code, maybe changing it to a
-// TryPlainPrimitiveNonNumberToNumber with a Label* as a parameter.
TNode<Numeric> CodeStubAssembler::NonNumberToNumberOrNumeric(
TNode<Context> context, TNode<HeapObject> input, Object::Conversion mode,
BigIntHandling bigint_handling) {
CSA_ASSERT(this, Word32BinaryNot(IsHeapNumber(input)));
- // We might need to loop once here due to ToPrimitive conversions.
TVARIABLE(HeapObject, var_input, input);
TVARIABLE(Numeric, var_result);
- Label loop(this, &var_input);
- Label end(this);
- Goto(&loop);
- BIND(&loop);
- {
- // Load the current {input} value (known to be a HeapObject).
- TNode<HeapObject> input = var_input.value();
-
- // Dispatch on the {input} instance type.
- TNode<Uint16T> input_instance_type = LoadInstanceType(input);
- Label if_inputisstring(this), if_inputisoddball(this),
- if_inputisbigint(this), if_inputisreceiver(this, Label::kDeferred),
- if_inputisother(this, Label::kDeferred);
- GotoIf(IsStringInstanceType(input_instance_type), &if_inputisstring);
- GotoIf(IsBigIntInstanceType(input_instance_type), &if_inputisbigint);
- GotoIf(InstanceTypeEqual(input_instance_type, ODDBALL_TYPE),
- &if_inputisoddball);
- Branch(IsJSReceiverInstanceType(input_instance_type), &if_inputisreceiver,
- &if_inputisother);
-
- BIND(&if_inputisstring);
+ TVARIABLE(Uint16T, instance_type, LoadInstanceType(var_input.value()));
+ Label end(this), if_inputisreceiver(this, Label::kDeferred),
+ if_inputisnotreceiver(this);
+
+ // We need to handle JSReceiver first since we might need to do two
+ // conversions due to ToPrimitive.
+ Branch(IsJSReceiverInstanceType(instance_type.value()), &if_inputisreceiver,
+ &if_inputisnotreceiver);
+
+ BIND(&if_inputisreceiver);
+ {
+ // The {var_input.value()} is a JSReceiver, we need to convert it to a
+ // Primitive first using the ToPrimitive type conversion, preferably
+ // yielding a Number.
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ isolate(), ToPrimitiveHint::kNumber);
+ TNode<Object> result = CallStub(callable, context, var_input.value());
+
+ // Check if the {result} is already a Number/Numeric.
+ Label if_done(this), if_notdone(this);
+ Branch(mode == Object::Conversion::kToNumber ? IsNumber(result)
+ : IsNumeric(result),
+ &if_done, &if_notdone);
+
+ BIND(&if_done);
{
- // The {input} is a String, use the fast stub to convert it to a Number.
- TNode<String> string_input = CAST(input);
- var_result = StringToNumber(string_input);
+ // The ToPrimitive conversion already gave us a Number/Numeric, so
+ // we're done.
+ var_result = CAST(result);
Goto(&end);
}
- BIND(&if_inputisbigint);
- if (mode == Object::Conversion::kToNumeric) {
- var_result = CAST(input);
- Goto(&end);
- } else {
- DCHECK_EQ(mode, Object::Conversion::kToNumber);
- if (bigint_handling == BigIntHandling::kThrow) {
- Goto(&if_inputisother);
- } else {
- DCHECK_EQ(bigint_handling, BigIntHandling::kConvertToNumber);
- var_result =
- CAST(CallRuntime(Runtime::kBigIntToNumber, context, input));
- Goto(&end);
- }
- }
-
- BIND(&if_inputisoddball);
+ BIND(&if_notdone);
{
- // The {input} is an Oddball, we just need to load the Number value of it.
- var_result = LoadObjectField<Number>(input, Oddball::kToNumberOffset);
- Goto(&end);
+ // We now have a Primitive {result}, but it's not yet a
+ // Number/Numeric.
+ var_input = CAST(result);
+ // We have a new input. Redo the check and reload instance_type.
+ CSA_ASSERT(this, Word32BinaryNot(IsHeapNumber(var_input.value())));
+ instance_type = LoadInstanceType(var_input.value());
+ Goto(&if_inputisnotreceiver);
}
+ }
- BIND(&if_inputisreceiver);
+ BIND(&if_inputisnotreceiver);
+ {
+ Label not_plain_primitive(this), if_inputisbigint(this),
+ if_inputisother(this, Label::kDeferred);
+
+ // String and Oddball cases.
+ TVARIABLE(Number, var_result_number);
+ TryPlainPrimitiveNonNumberToNumber(var_input.value(), &var_result_number,
+ &not_plain_primitive);
+ var_result = var_result_number.value();
+ Goto(&end);
+
+ BIND(&not_plain_primitive);
{
- // The {input} is a JSReceiver, we need to convert it to a Primitive
- // first using the ToPrimitive type conversion, preferably yielding a
- // Number.
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(
- isolate(), ToPrimitiveHint::kNumber);
- TNode<Object> result = CallStub(callable, context, input);
-
- // Check if the {result} is already a Number/Numeric.
- Label if_done(this), if_notdone(this);
- Branch(mode == Object::Conversion::kToNumber ? IsNumber(result)
- : IsNumeric(result),
- &if_done, &if_notdone);
-
- BIND(&if_done);
+ Branch(IsBigIntInstanceType(instance_type.value()), &if_inputisbigint,
+ &if_inputisother);
+
+ BIND(&if_inputisbigint);
{
- // The ToPrimitive conversion already gave us a Number/Numeric, so
- // we're done.
- var_result = CAST(result);
- Goto(&end);
+ if (mode == Object::Conversion::kToNumeric) {
+ var_result = CAST(var_input.value());
+ Goto(&end);
+ } else {
+ DCHECK_EQ(mode, Object::Conversion::kToNumber);
+ if (bigint_handling == BigIntHandling::kThrow) {
+ Goto(&if_inputisother);
+ } else {
+ DCHECK_EQ(bigint_handling, BigIntHandling::kConvertToNumber);
+ var_result = CAST(CallRuntime(Runtime::kBigIntToNumber, context,
+ var_input.value()));
+ Goto(&end);
+ }
+ }
}
- BIND(&if_notdone);
+ BIND(&if_inputisother);
{
- // We now have a Primitive {result}, but it's not yet a
- // Number/Numeric.
- var_input = CAST(result);
- Goto(&loop);
+ // The {var_input.value()} is something else (e.g. Symbol), let the
+ // runtime figure out the correct exception. Note: We cannot tail call
+ // to the runtime here, as js-to-wasm trampolines also use this code
+ // currently, and they declare all outgoing parameters as untagged,
+ // while we would push a tagged object here.
+ auto function_id = mode == Object::Conversion::kToNumber
+ ? Runtime::kToNumber
+ : Runtime::kToNumeric;
+ var_result = CAST(CallRuntime(function_id, context, var_input.value()));
+ Goto(&end);
}
}
-
- BIND(&if_inputisother);
- {
- // The {input} is something else (e.g. Symbol), let the runtime figure
- // out the correct exception.
- // Note: We cannot tail call to the runtime here, as js-to-wasm
- // trampolines also use this code currently, and they declare all
- // outgoing parameters as untagged, while we would push a tagged
- // object here.
- auto function_id = mode == Object::Conversion::kToNumber
- ? Runtime::kToNumber
- : Runtime::kToNumeric;
- var_result = CAST(CallRuntime(function_id, context, input));
- Goto(&end);
- }
}
BIND(&end);
if (mode == Object::Conversion::kToNumber) {
CSA_ASSERT(this, IsNumber(var_result.value()));
- } else {
- DCHECK_EQ(mode, Object::Conversion::kToNumeric);
}
return var_result.value();
}
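
Summarized, the rewritten conversion now makes a single pass over these cases (a sketch of the branches above):

    // 1. JSReceiver?  Call ToPrimitive (hint Number); if the result is
    //    already Number/Numeric we are done, otherwise fall through once
    //    with the primitive result -- no loop is needed any more.
    // 2. String or Oddball?  TryPlainPrimitiveNonNumberToNumber converts it.
    // 3. BigInt?  Returned as-is for kToNumeric; for kToNumber it is either
    //    converted via Runtime::kBigIntToNumber or routed to the throwing
    //    path, depending on bigint_handling.
    // 4. Anything else (e.g. Symbol) goes to Runtime::kToNumber /
    //    kToNumeric, which raises the proper exception.
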
TNode<Number> CodeStubAssembler::NonNumberToNumber(
- TNode<Context> context, SloppyTNode<HeapObject> input,
+ TNode<Context> context, TNode<HeapObject> input,
BigIntHandling bigint_handling) {
return CAST(NonNumberToNumberOrNumeric(
context, input, Object::Conversion::kToNumber, bigint_handling));
}
-TNode<Number> CodeStubAssembler::PlainPrimitiveNonNumberToNumber(
- TNode<HeapObject> input) {
+void CodeStubAssembler::TryPlainPrimitiveNonNumberToNumber(
+ TNode<HeapObject> input, TVariable<Number>* var_result, Label* if_bailout) {
CSA_ASSERT(this, Word32BinaryNot(IsHeapNumber(input)));
- TVARIABLE(Number, var_result);
Label done(this);
// Dispatch on the {input} instance type.
TNode<Uint16T> input_instance_type = LoadInstanceType(input);
- Label if_inputisstring(this), if_inputisoddball(this);
+ Label if_inputisstring(this);
GotoIf(IsStringInstanceType(input_instance_type), &if_inputisstring);
- CSA_ASSERT(this, InstanceTypeEqual(input_instance_type, ODDBALL_TYPE));
- Goto(&if_inputisoddball);
+ GotoIfNot(InstanceTypeEqual(input_instance_type, ODDBALL_TYPE), if_bailout);
+
+ // The {input} is an Oddball, we just need to load the Number value of it.
+ *var_result = LoadObjectField<Number>(input, Oddball::kToNumberOffset);
+ Goto(&done);
BIND(&if_inputisstring);
{
// The {input} is a String, use the fast stub to convert it to a Number.
- TNode<String> string_input = CAST(input);
- var_result = StringToNumber(string_input);
- Goto(&done);
- }
-
- BIND(&if_inputisoddball);
- {
- // The {input} is an Oddball, we just need to load the Number value of it.
- var_result = LoadObjectField<Number>(input, Oddball::kToNumberOffset);
+ *var_result = StringToNumber(CAST(input));
Goto(&done);
}
BIND(&done);
- return var_result.value();
}
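
A usage sketch of the new Try-variant, mirroring how PlainPrimitiveToNumber below wires the bailout label to Unreachable() once every other case has been ruled out (a fragment inside a CodeStubAssembler generator; `input` is assumed to be a non-HeapNumber HeapObject):

    TVARIABLE(Number, var_result);
    Label fallback(this, Label::kDeferred), done(this);
    TryPlainPrimitiveNonNumberToNumber(input, &var_result, &fallback);
    Goto(&done);

    BIND(&fallback);
    Unreachable();  // input was neither a String nor an Oddball

    BIND(&done);
    // var_result.value() now holds the converted Number.
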
-TNode<Numeric> CodeStubAssembler::NonNumberToNumeric(
- TNode<Context> context, SloppyTNode<HeapObject> input) {
+TNode<Numeric> CodeStubAssembler::NonNumberToNumeric(TNode<Context> context,
+ TNode<HeapObject> input) {
return NonNumberToNumberOrNumeric(context, input,
Object::Conversion::kToNumeric);
}
-TNode<Number> CodeStubAssembler::ToNumber_Inline(SloppyTNode<Context> context,
+TNode<Number> CodeStubAssembler::ToNumber_Inline(TNode<Context> context,
SloppyTNode<Object> input) {
TVARIABLE(Number, var_result);
Label end(this), not_smi(this, Label::kDeferred);
@@ -6982,7 +7023,7 @@ TNode<Number> CodeStubAssembler::ToNumber(TNode<Context> context,
TNode<Number> CodeStubAssembler::PlainPrimitiveToNumber(TNode<Object> input) {
TVARIABLE(Number, var_result);
- Label end(this);
+ Label end(this), fallback(this);
Label not_smi(this, Label::kDeferred);
GotoIfNot(TaggedIsSmi(input), &not_smi);
@@ -7002,8 +7043,10 @@ TNode<Number> CodeStubAssembler::PlainPrimitiveToNumber(TNode<Object> input) {
BIND(&not_heap_number);
{
- var_result = PlainPrimitiveNonNumberToNumber(input_ho);
+ TryPlainPrimitiveNonNumberToNumber(input_ho, &var_result, &fallback);
Goto(&end);
+ BIND(&fallback);
+ Unreachable();
}
}
@@ -7090,7 +7133,7 @@ void CodeStubAssembler::TaggedToNumeric(TNode<Context> context,
}
// ES#sec-touint32
-TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
+TNode<Number> CodeStubAssembler::ToUint32(TNode<Context> context,
SloppyTNode<Object> input) {
const TNode<Float64T> float_zero = Float64Constant(0.0);
const TNode<Float64T> float_two_32 =
@@ -7193,7 +7236,7 @@ TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
return CAST(var_result.value());
}
-TNode<String> CodeStubAssembler::ToString_Inline(SloppyTNode<Context> context,
+TNode<String> CodeStubAssembler::ToString_Inline(TNode<Context> context,
SloppyTNode<Object> input) {
TVARIABLE(Object, var_result, input);
Label stub_call(this, Label::kDeferred), out(this);
@@ -7209,7 +7252,7 @@ TNode<String> CodeStubAssembler::ToString_Inline(SloppyTNode<Context> context,
return CAST(var_result.value());
}
-TNode<JSReceiver> CodeStubAssembler::ToObject(SloppyTNode<Context> context,
+TNode<JSReceiver> CodeStubAssembler::ToObject(TNode<Context> context,
SloppyTNode<Object> input) {
return CAST(CallBuiltin(Builtins::kToObject, context, input));
}
@@ -7238,7 +7281,7 @@ TNode<JSReceiver> CodeStubAssembler::ToObject_Inline(TNode<Context> context,
return result.value();
}
-TNode<Number> CodeStubAssembler::ToLength_Inline(SloppyTNode<Context> context,
+TNode<Number> CodeStubAssembler::ToLength_Inline(TNode<Context> context,
SloppyTNode<Object> input) {
TNode<Smi> smi_zero = SmiConstant(0);
return Select<Number>(
@@ -7252,7 +7295,7 @@ TNode<Object> CodeStubAssembler::OrdinaryToPrimitive(
return CallStub(callable, context, input);
}
-TNode<Uint32T> CodeStubAssembler::DecodeWord32(SloppyTNode<Word32T> word32,
+TNode<Uint32T> CodeStubAssembler::DecodeWord32(TNode<Word32T> word32,
uint32_t shift, uint32_t mask) {
DCHECK_EQ((mask >> shift) << shift, mask);
return Unsigned(Word32And(Word32Shr(word32, static_cast<int>(shift)),
@@ -7445,7 +7488,7 @@ void CodeStubAssembler::TryToName(SloppyTNode<Object> key, Label* if_keyisindex,
}
void CodeStubAssembler::TryInternalizeString(
- SloppyTNode<String> string, Label* if_index, TVariable<IntPtrT>* var_index,
+ TNode<String> string, Label* if_index, TVariable<IntPtrT>* var_index,
Label* if_internalized, TVariable<Name>* var_internalized,
Label* if_not_internalized, Label* if_bailout) {
TNode<ExternalReference> function = ExternalConstant(
@@ -8329,10 +8372,12 @@ TNode<NativeContext> CodeStubAssembler::GetCreationContext(
return native_context;
}
-void CodeStubAssembler::DescriptorLookup(
- SloppyTNode<Name> unique_name, SloppyTNode<DescriptorArray> descriptors,
- SloppyTNode<Uint32T> bitfield3, Label* if_found,
- TVariable<IntPtrT>* var_name_index, Label* if_not_found) {
+void CodeStubAssembler::DescriptorLookup(TNode<Name> unique_name,
+ TNode<DescriptorArray> descriptors,
+ TNode<Uint32T> bitfield3,
+ Label* if_found,
+ TVariable<IntPtrT>* var_name_index,
+ Label* if_not_found) {
Comment("DescriptorArrayLookup");
TNode<Uint32T> nof =
DecodeWord32<Map::Bits3::NumberOfOwnDescriptorsBits>(bitfield3);
@@ -8340,9 +8385,11 @@ void CodeStubAssembler::DescriptorLookup(
var_name_index, if_not_found);
}
-void CodeStubAssembler::TransitionLookup(
- SloppyTNode<Name> unique_name, SloppyTNode<TransitionArray> transitions,
- Label* if_found, TVariable<IntPtrT>* var_name_index, Label* if_not_found) {
+void CodeStubAssembler::TransitionLookup(TNode<Name> unique_name,
+ TNode<TransitionArray> transitions,
+ Label* if_found,
+ TVariable<IntPtrT>* var_name_index,
+ Label* if_not_found) {
Comment("TransitionArrayLookup");
TNode<Uint32T> number_of_valid_transitions =
NumberOfEntries<TransitionArray>(transitions);
@@ -8409,11 +8456,11 @@ void CodeStubAssembler::TryLookupPropertyInSimpleObject(
}
void CodeStubAssembler::TryLookupProperty(
- SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
- SloppyTNode<Int32T> instance_type, SloppyTNode<Name> unique_name,
- Label* if_found_fast, Label* if_found_dict, Label* if_found_global,
- TVariable<HeapObject>* var_meta_storage, TVariable<IntPtrT>* var_name_index,
- Label* if_not_found, Label* if_bailout) {
+ TNode<HeapObject> object, TNode<Map> map, SloppyTNode<Int32T> instance_type,
+ TNode<Name> unique_name, Label* if_found_fast, Label* if_found_dict,
+ Label* if_found_global, TVariable<HeapObject>* var_meta_storage,
+ TVariable<IntPtrT>* var_name_index, Label* if_not_found,
+ Label* if_bailout) {
Label if_objectisspecial(this);
GotoIf(IsSpecialReceiverInstanceType(instance_type), &if_objectisspecial);
@@ -8745,22 +8792,21 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
}
void CodeStubAssembler::TryGetOwnProperty(
- TNode<Context> context, TNode<HeapObject> receiver,
- TNode<JSReceiver> object, TNode<Map> map, TNode<Int32T> instance_type,
- TNode<Name> unique_name, Label* if_found_value,
- TVariable<Object>* var_value, Label* if_not_found, Label* if_bailout) {
+ TNode<Context> context, TNode<Object> receiver, TNode<JSReceiver> object,
+ TNode<Map> map, TNode<Int32T> instance_type, TNode<Name> unique_name,
+ Label* if_found_value, TVariable<Object>* var_value, Label* if_not_found,
+ Label* if_bailout) {
TryGetOwnProperty(context, receiver, object, map, instance_type, unique_name,
if_found_value, var_value, nullptr, nullptr, if_not_found,
if_bailout, kCallJSGetter);
}
void CodeStubAssembler::TryGetOwnProperty(
- TNode<Context> context, TNode<HeapObject> receiver,
- TNode<JSReceiver> object, TNode<Map> map, TNode<Int32T> instance_type,
- TNode<Name> unique_name, Label* if_found_value,
- TVariable<Object>* var_value, TVariable<Uint32T>* var_details,
- TVariable<Object>* var_raw_value, Label* if_not_found, Label* if_bailout,
- GetOwnPropertyMode mode) {
+ TNode<Context> context, TNode<Object> receiver, TNode<JSReceiver> object,
+ TNode<Map> map, TNode<Int32T> instance_type, TNode<Name> unique_name,
+ Label* if_found_value, TVariable<Object>* var_value,
+ TVariable<Uint32T>* var_details, TVariable<Object>* var_raw_value,
+ Label* if_not_found, Label* if_bailout, GetOwnPropertyMode mode) {
DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
Comment("TryGetOwnProperty");
CSA_ASSERT(this, IsUniqueNameNoCachedIndex(unique_name));
@@ -9306,14 +9352,14 @@ TNode<BoolT> CodeStubAssembler::IsOffsetInBounds(SloppyTNode<IntPtrT> offset,
}
TNode<HeapObject> CodeStubAssembler::LoadFeedbackCellValue(
- SloppyTNode<JSFunction> closure) {
+ TNode<JSFunction> closure) {
TNode<FeedbackCell> feedback_cell =
LoadObjectField<FeedbackCell>(closure, JSFunction::kFeedbackCellOffset);
return LoadObjectField<HeapObject>(feedback_cell, FeedbackCell::kValueOffset);
}
TNode<HeapObject> CodeStubAssembler::LoadFeedbackVector(
- SloppyTNode<JSFunction> closure) {
+ TNode<JSFunction> closure) {
TVARIABLE(HeapObject, maybe_vector, LoadFeedbackCellValue(closure));
Label done(this);
@@ -9331,7 +9377,7 @@ TNode<HeapObject> CodeStubAssembler::LoadFeedbackVector(
}
TNode<ClosureFeedbackCellArray> CodeStubAssembler::LoadClosureFeedbackArray(
- SloppyTNode<JSFunction> closure) {
+ TNode<JSFunction> closure) {
TVARIABLE(HeapObject, feedback_cell_array, LoadFeedbackCellValue(closure));
Label end(this);
@@ -9417,7 +9463,7 @@ void CodeStubAssembler::CombineFeedback(TVariable<Smi>* existing_feedback,
*existing_feedback = SmiOr(existing_feedback->value(), feedback);
}
-void CodeStubAssembler::CheckForAssociatedProtector(SloppyTNode<Name> name,
+void CodeStubAssembler::CheckForAssociatedProtector(TNode<Name> name,
Label* if_protector) {
// This list must be kept in sync with LookupIterator::UpdateProtector!
// TODO(jkummerow): Would it be faster to have a bit in Symbol::flags()?
@@ -9522,14 +9568,19 @@ MachineRepresentation ElementsKindToMachineRepresentation(ElementsKind kind) {
} // namespace
-template <typename TIndex>
-void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
- TNode<TIndex> index, Node* value) {
+template <typename TArray, typename TIndex>
+void CodeStubAssembler::StoreElementBigIntOrTypedArray(TNode<TArray> elements,
+ ElementsKind kind,
+ TNode<TIndex> index,
+ Node* value) {
// TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants?
static_assert(std::is_same<TIndex, Smi>::value ||
std::is_same<TIndex, UintPtrT>::value ||
std::is_same<TIndex, IntPtrT>::value,
"Only Smi, UintPtrT or IntPtrT index is allowed");
+ static_assert(std::is_same<TArray, RawPtrT>::value ||
+ std::is_same<TArray, FixedArrayBase>::value,
+ "Only RawPtrT or FixedArrayBase elements are allowed");
if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
TNode<IntPtrT> offset = ElementOffsetFromIndex(index, kind, 0);
TVARIABLE(UintPtrT, var_low);
@@ -9555,7 +9606,8 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
var_high.value());
}
#endif
- } else if (IsTypedArrayElementsKind(kind)) {
+ } else {
+ DCHECK(IsTypedArrayElementsKind(kind));
if (kind == UINT8_CLAMPED_ELEMENTS) {
CSA_ASSERT(this, Word32Equal(UncheckedCast<Word32T>(value),
Word32And(Int32Constant(0xFF), value)));
@@ -9564,7 +9616,16 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
// TODO(cbruni): Add OOB check once typed.
MachineRepresentation rep = ElementsKindToMachineRepresentation(kind);
StoreNoWriteBarrier(rep, elements, offset, value);
- return;
+ }
+}
+
+template <typename TIndex>
+void CodeStubAssembler::StoreElement(TNode<FixedArrayBase> elements,
+ ElementsKind kind, TNode<TIndex> index,
+ Node* value) {
+ if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS ||
+ IsTypedArrayElementsKind(kind)) {
+ StoreElementBigIntOrTypedArray(elements, kind, index, value);
} else if (IsDoubleElementsKind(kind)) {
TNode<Float64T> value_float64 = UncheckedCast<Float64T>(value);
StoreFixedDoubleArrayElement(CAST(elements), index, value_float64);
@@ -9576,14 +9637,15 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
}
}
-template V8_EXPORT_PRIVATE void CodeStubAssembler::StoreElement<Smi>(
- Node*, ElementsKind, TNode<Smi>, Node*);
-
-template V8_EXPORT_PRIVATE void CodeStubAssembler::StoreElement<IntPtrT>(
- Node*, ElementsKind, TNode<IntPtrT>, Node*);
-
+template <typename TIndex>
+void CodeStubAssembler::StoreElement(TNode<RawPtrT> elements, ElementsKind kind,
+ TNode<TIndex> index, Node* value) {
+ DCHECK(kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS ||
+ IsTypedArrayElementsKind(kind));
+ StoreElementBigIntOrTypedArray(elements, kind, index, value);
+}
template V8_EXPORT_PRIVATE void CodeStubAssembler::StoreElement<UintPtrT>(
- Node*, ElementsKind, TNode<UintPtrT>, Node*);
+ TNode<RawPtrT>, ElementsKind, TNode<UintPtrT>, Node*);
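
The net effect of the split, summarized:

    // StoreElement(TNode<FixedArrayBase> elements, kind, index, value):
    //   - BigInt64/BigUint64 or a typed-array kind
    //       -> StoreElementBigIntOrTypedArray(elements, ...)
    //   - a double kind -> StoreFixedDoubleArrayElement(CAST(elements), ...)
    //   - otherwise     -> tagged store into the FixedArray
    //
    // StoreElement(TNode<RawPtrT> elements, kind, index, value):
    //   - only BigInt64/BigUint64 and typed-array kinds are legal
    //     (enforced by the DCHECK), forwarding to the shared helper.
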
TNode<Uint8T> CodeStubAssembler::Int32ToUint8Clamped(
TNode<Int32T> int32_value) {
@@ -10346,14 +10408,20 @@ TNode<TIndex> CodeStubAssembler::BuildFastLoop(const VariableList& vars,
}
// Instantiate BuildFastLoop for IntPtrT and UintPtrT.
-template TNode<IntPtrT> CodeStubAssembler::BuildFastLoop<IntPtrT>(
- const VariableList& vars, TNode<IntPtrT> start_index,
- TNode<IntPtrT> end_index, const FastLoopBody<IntPtrT>& body, int increment,
- IndexAdvanceMode advance_mode);
-template TNode<UintPtrT> CodeStubAssembler::BuildFastLoop<UintPtrT>(
- const VariableList& vars, TNode<UintPtrT> start_index,
- TNode<UintPtrT> end_index, const FastLoopBody<UintPtrT>& body,
- int increment, IndexAdvanceMode advance_mode);
+template V8_EXPORT_PRIVATE TNode<IntPtrT>
+CodeStubAssembler::BuildFastLoop<IntPtrT>(const VariableList& vars,
+ TNode<IntPtrT> start_index,
+ TNode<IntPtrT> end_index,
+ const FastLoopBody<IntPtrT>& body,
+ int increment,
+ IndexAdvanceMode advance_mode);
+template V8_EXPORT_PRIVATE TNode<UintPtrT>
+CodeStubAssembler::BuildFastLoop<UintPtrT>(const VariableList& vars,
+ TNode<UintPtrT> start_index,
+ TNode<UintPtrT> end_index,
+ const FastLoopBody<UintPtrT>& body,
+ int increment,
+ IndexAdvanceMode advance_mode);
template <typename TIndex>
void CodeStubAssembler::BuildFastArrayForEach(
@@ -10430,12 +10498,11 @@ void CodeStubAssembler::InitializeFieldsWithRoot(TNode<HeapObject> object,
-kTaggedSize, CodeStubAssembler::IndexAdvanceMode::kPre);
}
-void CodeStubAssembler::BranchIfNumberRelationalComparison(
- Operation op, SloppyTNode<Number> left, SloppyTNode<Number> right,
- Label* if_true, Label* if_false) {
- CSA_SLOW_ASSERT(this, IsNumber(left));
- CSA_SLOW_ASSERT(this, IsNumber(right));
-
+void CodeStubAssembler::BranchIfNumberRelationalComparison(Operation op,
+ TNode<Number> left,
+ TNode<Number> right,
+ Label* if_true,
+ Label* if_false) {
Label do_float_comparison(this);
TVARIABLE(Float64T, var_left_float);
TVARIABLE(Float64T, var_right_float);
@@ -10527,8 +10594,9 @@ void CodeStubAssembler::BranchIfNumberRelationalComparison(
}
}
-void CodeStubAssembler::GotoIfNumberGreaterThanOrEqual(
- SloppyTNode<Number> left, SloppyTNode<Number> right, Label* if_true) {
+void CodeStubAssembler::GotoIfNumberGreaterThanOrEqual(TNode<Number> left,
+ TNode<Number> right,
+ Label* if_true) {
Label if_false(this);
BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, left,
right, if_true, &if_false);
@@ -11083,7 +11151,7 @@ void CodeStubAssembler::GenerateEqual_Same(SloppyTNode<Object> value,
// ES6 section 7.2.12 Abstract Equality Comparison
TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
SloppyTNode<Object> right,
- SloppyTNode<Context> context,
+ TNode<Context> context,
TVariable<Smi>* var_type_feedback) {
// This is a slightly optimized version of Object::Equals. Whenever you
// change something functionality wise in here, remember to update the
@@ -12024,7 +12092,7 @@ void CodeStubAssembler::BranchIfSameNumberValue(TNode<Float64T> lhs_value,
}
}
-TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<Context> context,
+TNode<Oddball> CodeStubAssembler::HasProperty(TNode<Context> context,
SloppyTNode<Object> object,
SloppyTNode<Object> key,
HasPropertyLookupMode mode) {
@@ -12106,6 +12174,80 @@ TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<Context> context,
return result.value();
}
+void CodeStubAssembler::ForInPrepare(TNode<HeapObject> enumerator,
+ TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector,
+ TNode<FixedArray>* cache_array_out,
+ TNode<Smi>* cache_length_out) {
+ // Check if we're using an enum cache.
+ TVARIABLE(FixedArray, cache_array);
+ TVARIABLE(Smi, cache_length);
+ Label if_fast(this), if_slow(this, Label::kDeferred), out(this);
+ Branch(IsMap(enumerator), &if_fast, &if_slow);
+
+ BIND(&if_fast);
+ {
+ // Load the enumeration length and cache from the {enumerator}.
+ TNode<Map> map_enumerator = CAST(enumerator);
+ TNode<WordT> enum_length = LoadMapEnumLength(map_enumerator);
+ CSA_ASSERT(this, WordNotEqual(enum_length,
+ IntPtrConstant(kInvalidEnumCacheSentinel)));
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(map_enumerator);
+ TNode<EnumCache> enum_cache = LoadObjectField<EnumCache>(
+ descriptors, DescriptorArray::kEnumCacheOffset);
+ TNode<FixedArray> enum_keys =
+ LoadObjectField<FixedArray>(enum_cache, EnumCache::kKeysOffset);
+
+ // Check if we have enum indices available.
+ TNode<FixedArray> enum_indices =
+ LoadObjectField<FixedArray>(enum_cache, EnumCache::kIndicesOffset);
+ TNode<IntPtrT> enum_indices_length =
+ LoadAndUntagFixedArrayBaseLength(enum_indices);
+ TNode<Smi> feedback = SelectSmiConstant(
+ IntPtrLessThanOrEqual(enum_length, enum_indices_length),
+ static_cast<int>(ForInFeedback::kEnumCacheKeysAndIndices),
+ static_cast<int>(ForInFeedback::kEnumCacheKeys));
+ UpdateFeedback(feedback, maybe_feedback_vector, slot);
+
+ cache_array = enum_keys;
+ cache_length = SmiTag(Signed(enum_length));
+ Goto(&out);
+ }
+
+ BIND(&if_slow);
+ {
+ // The {enumerator} is a FixedArray with all the keys to iterate.
+ TNode<FixedArray> array_enumerator = CAST(enumerator);
+
+ // Record the fact that we hit the for-in slow-path.
+ UpdateFeedback(SmiConstant(ForInFeedback::kAny), maybe_feedback_vector,
+ slot);
+
+ cache_array = array_enumerator;
+ cache_length = LoadFixedArrayBaseLength(array_enumerator);
+ Goto(&out);
+ }
+
+ BIND(&out);
+ *cache_array_out = cache_array.value();
+ *cache_length_out = cache_length.value();
+}
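
On the fast path, the feedback recorded depends on whether enum indices cover the enumerated length. A self-contained sketch of that decision, with stand-in enumerators in place of V8's ForInFeedback constants:

    // Stand-ins for the two fast-path ForInFeedback values used above.
    enum class ForInFeedback { kEnumCacheKeysAndIndices, kEnumCacheKeys };

    ForInFeedback SelectFeedback(long enum_length, long indices_length) {
      // Indices are only usable when every enumerated key has one.
      return enum_length <= indices_length
                 ? ForInFeedback::kEnumCacheKeysAndIndices
                 : ForInFeedback::kEnumCacheKeys;
    }
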
+
+TNode<FixedArray> CodeStubAssembler::ForInPrepareForTorque(
+ TNode<HeapObject> enumerator, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector) {
+ TNode<FixedArray> cache_array;
+ TNode<Smi> cache_length;
+ ForInPrepare(enumerator, slot, maybe_feedback_vector, &cache_array,
+ &cache_length);
+
+ TNode<FixedArray> result = AllocateUninitializedFixedArray(2);
+ StoreFixedArrayElement(result, 0, cache_array);
+ StoreFixedArrayElement(result, 1, cache_length);
+
+ return result;
+}
+
TNode<String> CodeStubAssembler::Typeof(SloppyTNode<Object> value) {
TVARIABLE(String, result_var);
@@ -12194,33 +12336,15 @@ TNode<String> CodeStubAssembler::Typeof(SloppyTNode<Object> value) {
return result_var.value();
}
-TNode<Object> CodeStubAssembler::GetSuperConstructor(
- TNode<Context> context, TNode<JSFunction> active_function) {
- Label is_not_constructor(this, Label::kDeferred), out(this);
- TVARIABLE(Object, result);
-
+TNode<HeapObject> CodeStubAssembler::GetSuperConstructor(
+ TNode<JSFunction> active_function) {
TNode<Map> map = LoadMap(active_function);
- TNode<HeapObject> prototype = LoadMapPrototype(map);
- TNode<Map> prototype_map = LoadMap(prototype);
- GotoIfNot(IsConstructorMap(prototype_map), &is_not_constructor);
-
- result = prototype;
- Goto(&out);
-
- BIND(&is_not_constructor);
- {
- CallRuntime(Runtime::kThrowNotSuperConstructor, context, prototype,
- active_function);
- Unreachable();
- }
-
- BIND(&out);
- return result.value();
+ return LoadMapPrototype(map);
}
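
With the throwing path removed, the constructor check becomes the caller's job. A fragment sketching the pattern a call site would now follow, reusing the names from the deleted code above:

    TNode<HeapObject> proto = GetSuperConstructor(active_function);
    Label is_not_constructor(this, Label::kDeferred), ok(this);
    Branch(IsConstructorMap(LoadMap(proto)), &ok, &is_not_constructor);

    BIND(&is_not_constructor);
    CallRuntime(Runtime::kThrowNotSuperConstructor, context, proto,
                active_function);
    Unreachable();

    BIND(&ok);
    // proto is a verified constructor here.
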
TNode<JSReceiver> CodeStubAssembler::SpeciesConstructor(
- SloppyTNode<Context> context, SloppyTNode<Object> object,
- SloppyTNode<JSReceiver> default_constructor) {
+ TNode<Context> context, SloppyTNode<Object> object,
+ TNode<JSReceiver> default_constructor) {
Isolate* isolate = this->isolate();
TVARIABLE(JSReceiver, var_result, default_constructor);
@@ -12334,7 +12458,7 @@ TNode<Oddball> CodeStubAssembler::InstanceOf(TNode<Object> object,
return var_result.value();
}
-TNode<Number> CodeStubAssembler::NumberInc(SloppyTNode<Number> value) {
+TNode<Number> CodeStubAssembler::NumberInc(TNode<Number> value) {
TVARIABLE(Number, var_result);
TVARIABLE(Float64T, var_finc_value);
Label if_issmi(this), if_isnotsmi(this), do_finc(this), end(this);
@@ -12377,7 +12501,7 @@ TNode<Number> CodeStubAssembler::NumberInc(SloppyTNode<Number> value) {
return var_result.value();
}
-TNode<Number> CodeStubAssembler::NumberDec(SloppyTNode<Number> value) {
+TNode<Number> CodeStubAssembler::NumberDec(TNode<Number> value) {
TVARIABLE(Number, var_result);
TVARIABLE(Float64T, var_fdec_value);
Label if_issmi(this), if_isnotsmi(this), do_fdec(this), end(this);
@@ -12420,8 +12544,7 @@ TNode<Number> CodeStubAssembler::NumberDec(SloppyTNode<Number> value) {
return var_result.value();
}
-TNode<Number> CodeStubAssembler::NumberAdd(SloppyTNode<Number> a,
- SloppyTNode<Number> b) {
+TNode<Number> CodeStubAssembler::NumberAdd(TNode<Number> a, TNode<Number> b) {
TVARIABLE(Number, var_result);
Label float_add(this, Label::kDeferred), end(this);
GotoIf(TaggedIsNotSmi(a), &float_add);
@@ -12442,8 +12565,7 @@ TNode<Number> CodeStubAssembler::NumberAdd(SloppyTNode<Number> a,
return var_result.value();
}
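
NumberAdd and NumberSub keep a Smi fast path and defer to a deferred float label on non-Smi inputs or overflow. A host-side sketch of the checked addition such a fast path needs (GCC/Clang overflow builtin used for brevity):

    #include <cstdint>

    // Succeeds with the small-integer sum, or reports overflow so the
    // caller can fall back to the float path.
    bool TrySmiAdd(intptr_t a, intptr_t b, intptr_t* out) {
      return !__builtin_add_overflow(a, b, out);
    }
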
-TNode<Number> CodeStubAssembler::NumberSub(SloppyTNode<Number> a,
- SloppyTNode<Number> b) {
+TNode<Number> CodeStubAssembler::NumberSub(TNode<Number> a, TNode<Number> b) {
TVARIABLE(Number, var_result);
Label float_sub(this, Label::kDeferred), end(this);
GotoIf(TaggedIsNotSmi(a), &float_sub);
@@ -12509,7 +12631,7 @@ TNode<Number> CodeStubAssembler::BitwiseOp(TNode<Word32T> left32,
}
TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResult(
- SloppyTNode<Context> context, SloppyTNode<Object> value,
+ TNode<Context> context, SloppyTNode<Object> value,
SloppyTNode<Oddball> done) {
CSA_ASSERT(this, IsBoolean(done));
TNode<NativeContext> native_context = LoadNativeContext(context);
@@ -12569,7 +12691,7 @@ TNode<JSReceiver> CodeStubAssembler::ArraySpeciesCreate(TNode<Context> context,
}
void CodeStubAssembler::ThrowIfArrayBufferIsDetached(
- SloppyTNode<Context> context, TNode<JSArrayBuffer> array_buffer,
+ TNode<Context> context, TNode<JSArrayBuffer> array_buffer,
const char* method_name) {
Label if_detached(this, Label::kDeferred), if_not_detached(this);
Branch(IsDetachedBuffer(array_buffer), &if_detached, &if_not_detached);
@@ -12579,7 +12701,7 @@ void CodeStubAssembler::ThrowIfArrayBufferIsDetached(
}
void CodeStubAssembler::ThrowIfArrayBufferViewBufferIsDetached(
- SloppyTNode<Context> context, TNode<JSArrayBufferView> array_buffer_view,
+ TNode<Context> context, TNode<JSArrayBufferView> array_buffer_view,
const char* method_name) {
TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array_buffer_view);
ThrowIfArrayBufferIsDetached(context, buffer, method_name);
@@ -12587,7 +12709,9 @@ void CodeStubAssembler::ThrowIfArrayBufferViewBufferIsDetached(
TNode<RawPtrT> CodeStubAssembler::LoadJSArrayBufferBackingStorePtr(
TNode<JSArrayBuffer> array_buffer) {
- return DecodeExternalPointer(LoadJSArrayBufferBackingStore(array_buffer));
+ return LoadExternalPointerFromObject(array_buffer,
+ JSArrayBuffer::kBackingStoreOffset,
+ kArrayBufferBackingStoreTag);
}
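
The backing-store load now goes through a per-field type tag (kArrayBufferBackingStoreTag) instead of the generic decode step. For contrast, the retired scheme, deleted from the header further down, stored external pointers XOR'd with a process-wide salt under the heap sandbox, making encode and decode the same operation; a minimal sketch:

    #include <cstdint>

    // Old representation: stored = raw ^ salt, so decoding is another XOR.
    uintptr_t Encode(uintptr_t raw, uintptr_t salt) { return raw ^ salt; }
    uintptr_t Decode(uintptr_t stored, uintptr_t salt) { return stored ^ salt; }
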
TNode<JSArrayBuffer> CodeStubAssembler::LoadJSArrayBufferViewBuffer(
@@ -12641,50 +12765,28 @@ CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler,
argc_(argc),
base_(),
fp_(fp != nullptr ? fp : assembler_->LoadFramePointer()) {
-#ifdef V8_REVERSE_JSARGS
TNode<IntPtrT> offset = assembler_->IntPtrConstant(
(StandardFrameConstants::kFixedSlotCountAboveFp + 1) *
kSystemPointerSize);
-#else
- TNode<IntPtrT> offset = assembler_->ElementOffsetFromIndex(
- argc_, SYSTEM_POINTER_ELEMENTS,
- (StandardFrameConstants::kFixedSlotCountAboveFp - 1) *
- kSystemPointerSize);
-#endif
// base_ points to the first argument, not the receiver (whether the
// receiver is present or not).
base_ = assembler_->RawPtrAdd(fp_, offset);
}
TNode<Object> CodeStubArguments::GetReceiver() const {
-#ifdef V8_REVERSE_JSARGS
intptr_t offset = -kSystemPointerSize;
-#else
- intptr_t offset = kSystemPointerSize;
-#endif
return assembler_->LoadFullTagged(base_, assembler_->IntPtrConstant(offset));
}
void CodeStubArguments::SetReceiver(TNode<Object> object) const {
-#ifdef V8_REVERSE_JSARGS
intptr_t offset = -kSystemPointerSize;
-#else
- intptr_t offset = kSystemPointerSize;
-#endif
assembler_->StoreFullTaggedNoWriteBarrier(
base_, assembler_->IntPtrConstant(offset), object);
}
TNode<RawPtrT> CodeStubArguments::AtIndexPtr(TNode<IntPtrT> index) const {
-#ifdef V8_REVERSE_JSARGS
TNode<IntPtrT> offset =
assembler_->ElementOffsetFromIndex(index, SYSTEM_POINTER_ELEMENTS, 0);
-#else
- TNode<IntPtrT> negated_index =
- assembler_->IntPtrOrSmiSub(assembler_->IntPtrConstant(0), index);
- TNode<IntPtrT> offset = assembler_->ElementOffsetFromIndex(
- negated_index, SYSTEM_POINTER_ELEMENTS, 0);
-#endif
return assembler_->RawPtrAdd(base_, offset);
}
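
With V8_REVERSE_JSARGS now unconditional, arguments grow toward higher addresses from base_ and the receiver sits one slot below it, which is why both receiver accessors use a fixed -kSystemPointerSize offset and ForEach walks with a positive increment. The address arithmetic, sketched host-side:

    #include <cstdint>

    constexpr intptr_t kSystemPointerSize = sizeof(void*);

    // Layout (higher addresses upward):
    //   base + 1 * kSystemPointerSize : argument 1
    //   base + 0 * kSystemPointerSize : argument 0
    //   base - 1 * kSystemPointerSize : receiver
    uintptr_t AtIndexPtr(uintptr_t base, intptr_t index) {
      return base + static_cast<uintptr_t>(index * kSystemPointerSize);
    }
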
@@ -12730,11 +12832,7 @@ void CodeStubArguments::ForEach(
}
TNode<RawPtrT> start = AtIndexPtr(first);
TNode<RawPtrT> end = AtIndexPtr(last);
-#ifdef V8_REVERSE_JSARGS
const int increment = kSystemPointerSize;
-#else
- const int increment = -kSystemPointerSize;
-#endif
assembler_->BuildFastLoop<RawPtrT>(
vars, start, end,
[&](TNode<RawPtrT> current) {
@@ -12879,7 +12977,7 @@ TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
}
TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
- SloppyTNode<SharedFunctionInfo> shared_info, Label* if_compile_lazy) {
+ TNode<SharedFunctionInfo> shared_info, Label* if_compile_lazy) {
TNode<Object> sfi_data =
LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);
@@ -13112,7 +13210,7 @@ void CodeStubAssembler::Print(const char* s) {
}
void CodeStubAssembler::Print(const char* prefix,
- SloppyTNode<MaybeObject> tagged_value) {
+ TNode<MaybeObject> tagged_value) {
if (prefix != nullptr) {
std::string formatted(prefix);
formatted += ": ";
@@ -13166,13 +13264,8 @@ TNode<Object> CodeStubAssembler::CallRuntimeNewArray(
// Runtime_NewArray receives arguments in the JS order (to avoid unnecessary
// copy), except the last two (new_target and allocation_site), which are
// added on top of the stack later.
-#ifdef V8_REVERSE_JSARGS
return CallRuntime(Runtime::kNewArray, context, length, receiver, new_target,
allocation_site);
-#else
- return CallRuntime(Runtime::kNewArray, context, receiver, length, new_target,
- allocation_site);
-#endif
}
void CodeStubAssembler::TailCallRuntimeNewArray(TNode<Context> context,
@@ -13183,13 +13276,8 @@ void CodeStubAssembler::TailCallRuntimeNewArray(TNode<Context> context,
// Runtime_NewArray receives arguments in the JS order (to avoid unnecessary
// copy), except the last two (new_target and allocation_site), which are
// added on top of the stack later.
-#ifdef V8_REVERSE_JSARGS
return TailCallRuntime(Runtime::kNewArray, context, length, receiver,
new_target, allocation_site);
-#else
- return TailCallRuntime(Runtime::kNewArray, context, receiver, length,
- new_target, allocation_site);
-#endif
}
TNode<JSArray> CodeStubAssembler::ArrayCreate(TNode<Context> context,
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 8306b7e466..89e9556b9e 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -15,6 +15,7 @@
#include "src/compiler/code-assembler.h"
#include "src/objects/arguments.h"
#include "src/objects/bigint.h"
+#include "src/objects/feedback-vector.h"
#include "src/objects/js-function.h"
#include "src/objects/objects.h"
#include "src/objects/promise.h"
@@ -247,15 +248,16 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
#define CSA_ASSERT_BRANCH(csa, gen, ...) \
(csa)->Assert(gen, #gen, __FILE__, __LINE__, CSA_ASSERT_ARGS(__VA_ARGS__))
-#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
- (csa)->Assert( \
- [&]() -> TNode<BoolT> { \
- const TNode<Word32T> argc = UncheckedCast<Word32T>( \
- (csa)->Parameter(Descriptor::kJSActualArgumentsCount)); \
- return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
- }, \
- "argc " #op " " #expected, __FILE__, __LINE__, \
- {{SmiFromInt32((csa)->Parameter(Descriptor::kJSActualArgumentsCount)), \
+#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
+ (csa)->Assert( \
+ [&]() -> TNode<BoolT> { \
+ const TNode<Word32T> argc = (csa)->UncheckedParameter<Word32T>( \
+ Descriptor::kJSActualArgumentsCount); \
+ return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
+ }, \
+ "argc " #op " " #expected, __FILE__, __LINE__, \
+ {{SmiFromInt32((csa)->UncheckedParameter<Int32T>( \
+ Descriptor::kJSActualArgumentsCount)), \
"argc"}})
#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
@@ -554,9 +556,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Float64T> Float64RoundToEven(SloppyTNode<Float64T> x);
TNode<Float64T> Float64Trunc(SloppyTNode<Float64T> x);
// Select the maximum of the two provided Number values.
- TNode<Number> NumberMax(SloppyTNode<Number> left, SloppyTNode<Number> right);
+ TNode<Number> NumberMax(TNode<Number> left, TNode<Number> right);
// Select the minimum of the two provided Number values.
- TNode<Number> NumberMin(SloppyTNode<Number> left, SloppyTNode<Number> right);
+ TNode<Number> NumberMin(TNode<Number> left, TNode<Number> right);
// Returns true iff the given value fits into smi range and is >= 0.
TNode<BoolT> IsValidPositiveSmi(TNode<IntPtrT> value);
@@ -718,10 +720,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
#undef BINT_COMPARISON_OP
// Smi | HeapNumber operations.
- TNode<Number> NumberInc(SloppyTNode<Number> value);
- TNode<Number> NumberDec(SloppyTNode<Number> value);
- TNode<Number> NumberAdd(SloppyTNode<Number> a, SloppyTNode<Number> b);
- TNode<Number> NumberSub(SloppyTNode<Number> a, SloppyTNode<Number> b);
+ TNode<Number> NumberInc(TNode<Number> value);
+ TNode<Number> NumberDec(TNode<Number> value);
+ TNode<Number> NumberAdd(TNode<Number> a, TNode<Number> b);
+ TNode<Number> NumberSub(TNode<Number> a, TNode<Number> b);
void GotoIfNotNumber(TNode<Object> value, Label* is_not_number);
void GotoIfNumber(TNode<Object> value, Label* is_number);
TNode<Number> SmiToNumber(TNode<Smi> v) { return v; }
@@ -754,7 +756,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void Assert(const NodeGenerator<BoolT>& condition_body, const char* message,
const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes = {});
- void Assert(SloppyTNode<Word32T> condition_node, const char* message,
+ void Assert(TNode<Word32T> condition_node, const char* message,
const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes = {});
void Check(const BranchGenerator& branch, const char* message,
@@ -763,7 +765,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void Check(const NodeGenerator<BoolT>& condition_body, const char* message,
const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes = {});
- void Check(SloppyTNode<Word32T> condition_node, const char* message,
+ void Check(TNode<Word32T> condition_node, const char* message,
const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes = {});
void FailAssert(const char* message,
@@ -857,22 +859,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
condition, [=] { return true_value; }, [=] { return false_value; });
}
- TNode<Int32T> SelectInt32Constant(SloppyTNode<BoolT> condition,
- int true_value, int false_value);
- TNode<IntPtrT> SelectIntPtrConstant(SloppyTNode<BoolT> condition,
- int true_value, int false_value);
- TNode<Oddball> SelectBooleanConstant(SloppyTNode<BoolT> condition);
- TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, Smi true_value,
+ TNode<Int32T> SelectInt32Constant(TNode<BoolT> condition, int true_value,
+ int false_value);
+ TNode<IntPtrT> SelectIntPtrConstant(TNode<BoolT> condition, int true_value,
+ int false_value);
+ TNode<Oddball> SelectBooleanConstant(TNode<BoolT> condition);
+ TNode<Smi> SelectSmiConstant(TNode<BoolT> condition, Smi true_value,
Smi false_value);
- TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, int true_value,
+ TNode<Smi> SelectSmiConstant(TNode<BoolT> condition, int true_value,
Smi false_value) {
return SelectSmiConstant(condition, Smi::FromInt(true_value), false_value);
}
- TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, Smi true_value,
+ TNode<Smi> SelectSmiConstant(TNode<BoolT> condition, Smi true_value,
int false_value) {
return SelectSmiConstant(condition, true_value, Smi::FromInt(false_value));
}
- TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, int true_value,
+ TNode<Smi> SelectSmiConstant(TNode<BoolT> condition, int true_value,
int false_value) {
return SelectSmiConstant(condition, Smi::FromInt(true_value),
Smi::FromInt(false_value));
@@ -889,8 +891,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> TruncateIntPtrToInt32(SloppyTNode<IntPtrT> value);
// Check a value for smi-ness
- TNode<BoolT> TaggedIsSmi(SloppyTNode<MaybeObject> a);
- TNode<BoolT> TaggedIsNotSmi(SloppyTNode<MaybeObject> a);
+ TNode<BoolT> TaggedIsSmi(TNode<MaybeObject> a);
+ TNode<BoolT> TaggedIsNotSmi(TNode<MaybeObject> a);
// Check that the value is a non-negative smi.
TNode<BoolT> TaggedIsPositiveSmi(SloppyTNode<Object> a);
@@ -967,29 +969,87 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Works only with the V8_ENABLE_FORCE_SLOW_PATH compile-time flag. Nop otherwise.
void GotoIfForceSlowPath(Label* if_true);
- // Convert external pointer from on-V8-heap representation to an actual
- // external pointer value.
- TNode<RawPtrT> DecodeExternalPointer(
- TNode<ExternalPointerT> encoded_pointer) {
- STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
- TNode<RawPtrT> value = ReinterpretCast<RawPtrT>(encoded_pointer);
- if (V8_HEAP_SANDBOX_BOOL) {
- value = UncheckedCast<RawPtrT>(
- WordXor(value, UintPtrConstant(kExternalPointerSalt)));
- }
- return value;
+ //
+ // ExternalPointerT-related functionality.
+ //
+
+ TNode<ExternalPointerT> ChangeUint32ToExternalPointer(TNode<Uint32T> value);
+ TNode<Uint32T> ChangeExternalPointerToUint32(TNode<ExternalPointerT> value);
+
+ // Initialize an external pointer field in an object.
+ void InitializeExternalPointerField(TNode<HeapObject> object, int offset) {
+ InitializeExternalPointerField(object, IntPtrConstant(offset));
}
+ void InitializeExternalPointerField(TNode<HeapObject> object,
+ TNode<IntPtrT> offset);
- // Convert external pointer value to on-V8-heap representation.
- // This should eventually become a call to a non-allocating runtime function.
- TNode<ExternalPointerT> EncodeExternalPointer(TNode<RawPtrT> pointer) {
- STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
- TNode<RawPtrT> encoded_pointer = pointer;
- if (V8_HEAP_SANDBOX_BOOL) {
- encoded_pointer = UncheckedCast<RawPtrT>(
- WordXor(encoded_pointer, UintPtrConstant(kExternalPointerSalt)));
- }
- return ReinterpretCast<ExternalPointerT>(encoded_pointer);
+ // Initialize an external pointer field in an object with a given value.
+ void InitializeExternalPointerField(TNode<HeapObject> object, int offset,
+ TNode<RawPtrT> pointer,
+ ExternalPointerTag tag) {
+ InitializeExternalPointerField(object, IntPtrConstant(offset), pointer,
+ tag);
+ }
+
+ void InitializeExternalPointerField(TNode<HeapObject> object,
+ TNode<IntPtrT> offset,
+ TNode<RawPtrT> pointer,
+ ExternalPointerTag tag) {
+ InitializeExternalPointerField(object, offset);
+ StoreExternalPointerToObject(object, offset, pointer, tag);
+ }
+
+ // Load an external pointer value from an object.
+ TNode<RawPtrT> LoadExternalPointerFromObject(TNode<HeapObject> object,
+ int offset,
+ ExternalPointerTag tag) {
+ return LoadExternalPointerFromObject(object, IntPtrConstant(offset), tag);
+ }
+
+ TNode<RawPtrT> LoadExternalPointerFromObject(TNode<HeapObject> object,
+ TNode<IntPtrT> offset,
+ ExternalPointerTag tag);
+
+ // Store an external pointer value into an object.
+ void StoreExternalPointerToObject(TNode<HeapObject> object, int offset,
+ TNode<RawPtrT> pointer,
+ ExternalPointerTag tag) {
+ StoreExternalPointerToObject(object, IntPtrConstant(offset), pointer, tag);
+ }
+
+ void StoreExternalPointerToObject(TNode<HeapObject> object,
+ TNode<IntPtrT> offset,
+ TNode<RawPtrT> pointer,
+ ExternalPointerTag tag);
+
+ TNode<RawPtrT> LoadForeignForeignAddressPtr(TNode<Foreign> object) {
+ return LoadExternalPointerFromObject(object, Foreign::kForeignAddressOffset,
+ kForeignForeignAddressTag);
+ }
+
+ TNode<RawPtrT> LoadExternalStringResourcePtr(TNode<ExternalString> object) {
+ return LoadExternalPointerFromObject(
+ object, ExternalString::kResourceOffset, kExternalStringResourceTag);
+ }
+
+ TNode<RawPtrT> LoadExternalStringResourceDataPtr(
+ TNode<ExternalString> object) {
+ return LoadExternalPointerFromObject(object,
+ ExternalString::kResourceDataOffset,
+ kExternalStringResourceDataTag);
+ }
+
+ TNode<RawPtrT> LoadJSTypedArrayExternalPointerPtr(
+ TNode<JSTypedArray> holder) {
+ return LoadExternalPointerFromObject(holder,
+ JSTypedArray::kExternalPointerOffset,
+ kTypedArrayExternalPointerTag);
+ }
+
+ void StoreJSTypedArrayExternalPointerPtr(TNode<JSTypedArray> holder,
+ TNode<RawPtrT> value) {
+ StoreExternalPointerToObject(holder, JSTypedArray::kExternalPointerOffset,
+ value, kTypedArrayExternalPointerTag);
}
// Load a value from the current parent frame at the given offset in bytes.
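
(A hedged sketch of the accessor pattern the new tagged external-pointer API
establishes; MyObject, kNativePtrOffset and kMyObjectNativePtrTag are
illustrative names, not part of this patch.)

  TNode<RawPtrT> LoadMyObjectNativePtr(TNode<MyObject> holder) {
    return LoadExternalPointerFromObject(
        holder, MyObject::kNativePtrOffset, kMyObjectNativePtrTag);
  }
  void StoreMyObjectNativePtr(TNode<MyObject> holder, TNode<RawPtrT> value) {
    // The same offset/tag pair is used for loads and stores, keeping the
    // heap-sandbox encoding an implementation detail.
    StoreExternalPointerToObject(holder, MyObject::kNativePtrOffset, value,
                                 kMyObjectNativePtrTag);
  }
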
@@ -1049,13 +1109,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
IntPtrSub(offset, IntPtrConstant(kHeapObjectTag))));
}
// Load a SMI field and untag it.
- TNode<IntPtrT> LoadAndUntagObjectField(SloppyTNode<HeapObject> object,
- int offset);
+ TNode<IntPtrT> LoadAndUntagObjectField(TNode<HeapObject> object, int offset);
// Load a SMI field, untag it, and convert to Word32.
- TNode<Int32T> LoadAndUntagToWord32ObjectField(SloppyTNode<HeapObject> object,
+ TNode<Int32T> LoadAndUntagToWord32ObjectField(TNode<HeapObject> object,
int offset);
- TNode<MaybeObject> LoadMaybeWeakObjectField(SloppyTNode<HeapObject> object,
+ TNode<MaybeObject> LoadMaybeWeakObjectField(TNode<HeapObject> object,
int offset) {
return UncheckedCast<MaybeObject>(LoadObjectField(object, offset));
}
@@ -1124,74 +1183,71 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
// Load the floating point value of a HeapNumber.
- TNode<Float64T> LoadHeapNumberValue(SloppyTNode<HeapObject> object);
+ TNode<Float64T> LoadHeapNumberValue(TNode<HeapObject> object);
// Load the Map of a HeapObject.
- TNode<Map> LoadMap(SloppyTNode<HeapObject> object);
+ TNode<Map> LoadMap(TNode<HeapObject> object);
// Load the instance type of a HeapObject.
- TNode<Uint16T> LoadInstanceType(SloppyTNode<HeapObject> object);
+ TNode<Uint16T> LoadInstanceType(TNode<HeapObject> object);
// Compare the instance type of the object against the provided one.
- TNode<BoolT> HasInstanceType(SloppyTNode<HeapObject> object,
- InstanceType type);
- TNode<BoolT> DoesntHaveInstanceType(SloppyTNode<HeapObject> object,
+ TNode<BoolT> HasInstanceType(TNode<HeapObject> object, InstanceType type);
+ TNode<BoolT> DoesntHaveInstanceType(TNode<HeapObject> object,
InstanceType type);
- TNode<BoolT> TaggedDoesntHaveInstanceType(SloppyTNode<HeapObject> any_tagged,
+ TNode<BoolT> TaggedDoesntHaveInstanceType(TNode<HeapObject> any_tagged,
InstanceType type);
TNode<Word32T> IsStringWrapperElementsKind(TNode<Map> map);
void GotoIfMapHasSlowProperties(TNode<Map> map, Label* if_slow);
// Load the properties backing store of a JSReceiver.
- TNode<HeapObject> LoadSlowProperties(SloppyTNode<JSReceiver> object);
- TNode<HeapObject> LoadFastProperties(SloppyTNode<JSReceiver> object);
+ TNode<HeapObject> LoadSlowProperties(TNode<JSReceiver> object);
+ TNode<HeapObject> LoadFastProperties(TNode<JSReceiver> object);
// Load the elements backing store of a JSObject.
- TNode<FixedArrayBase> LoadElements(SloppyTNode<JSObject> object) {
+ TNode<FixedArrayBase> LoadElements(TNode<JSObject> object) {
return LoadJSObjectElements(object);
}
// Load the length of a JSArray instance.
TNode<Object> LoadJSArgumentsObjectLength(TNode<Context> context,
TNode<JSArgumentsObject> array);
// Load the length of a fast JSArray instance. Returns a positive Smi.
- TNode<Smi> LoadFastJSArrayLength(SloppyTNode<JSArray> array);
+ TNode<Smi> LoadFastJSArrayLength(TNode<JSArray> array);
// Load the length of a fixed array base instance.
- TNode<Smi> LoadFixedArrayBaseLength(SloppyTNode<FixedArrayBase> array);
+ TNode<Smi> LoadFixedArrayBaseLength(TNode<FixedArrayBase> array);
// Load and untag the length of a fixed array base instance.
- TNode<IntPtrT> LoadAndUntagFixedArrayBaseLength(
- SloppyTNode<FixedArrayBase> array);
+ TNode<IntPtrT> LoadAndUntagFixedArrayBaseLength(TNode<FixedArrayBase> array);
// Load the length of a WeakFixedArray.
TNode<Smi> LoadWeakFixedArrayLength(TNode<WeakFixedArray> array);
- TNode<IntPtrT> LoadAndUntagWeakFixedArrayLength(
- SloppyTNode<WeakFixedArray> array);
+ TNode<IntPtrT> LoadAndUntagWeakFixedArrayLength(TNode<WeakFixedArray> array);
// Load the number of descriptors in DescriptorArray.
TNode<Int32T> LoadNumberOfDescriptors(TNode<DescriptorArray> array);
// Load the number of own descriptors of a map.
TNode<Int32T> LoadNumberOfOwnDescriptors(TNode<Map> map);
// Load the bit field of a Map.
- TNode<Int32T> LoadMapBitField(SloppyTNode<Map> map);
+ TNode<Int32T> LoadMapBitField(TNode<Map> map);
// Load bit field 2 of a map.
- TNode<Int32T> LoadMapBitField2(SloppyTNode<Map> map);
+ TNode<Int32T> LoadMapBitField2(TNode<Map> map);
// Load bit field 3 of a map.
- TNode<Uint32T> LoadMapBitField3(SloppyTNode<Map> map);
+ TNode<Uint32T> LoadMapBitField3(TNode<Map> map);
// Load the instance type of a map.
- TNode<Uint16T> LoadMapInstanceType(SloppyTNode<Map> map);
+ TNode<Uint16T> LoadMapInstanceType(TNode<Map> map);
// Load the ElementsKind of a map.
- TNode<Int32T> LoadMapElementsKind(SloppyTNode<Map> map);
- TNode<Int32T> LoadElementsKind(SloppyTNode<HeapObject> object);
+ TNode<Int32T> LoadMapElementsKind(TNode<Map> map);
+ TNode<Int32T> LoadElementsKind(TNode<HeapObject> object);
// Load the instance descriptors of a map.
- TNode<DescriptorArray> LoadMapDescriptors(SloppyTNode<Map> map);
+ TNode<DescriptorArray> LoadMapDescriptors(TNode<Map> map);
// Load the prototype of a map.
- TNode<HeapObject> LoadMapPrototype(SloppyTNode<Map> map);
+ TNode<HeapObject> LoadMapPrototype(TNode<Map> map);
// Load the instance size of a Map.
- TNode<IntPtrT> LoadMapInstanceSizeInWords(SloppyTNode<Map> map);
+ TNode<IntPtrT> LoadMapInstanceSizeInWords(TNode<Map> map);
// Load the inobject properties start of a Map (valid only for JSObjects).
- TNode<IntPtrT> LoadMapInobjectPropertiesStartInWords(SloppyTNode<Map> map);
+ TNode<IntPtrT> LoadMapInobjectPropertiesStartInWords(TNode<Map> map);
// Load the constructor function index of a Map (only for primitive maps).
- TNode<IntPtrT> LoadMapConstructorFunctionIndex(SloppyTNode<Map> map);
+ TNode<IntPtrT> LoadMapConstructorFunctionIndex(TNode<Map> map);
// Load the constructor of a Map (equivalent to Map::GetConstructor()).
- TNode<Object> LoadMapConstructor(SloppyTNode<Map> map);
+ TNode<Object> LoadMapConstructor(TNode<Map> map);
// Load the EnumLength of a Map.
- TNode<WordT> LoadMapEnumLength(SloppyTNode<Map> map);
+ TNode<WordT> LoadMapEnumLength(TNode<Map> map);
// Load the back-pointer of a Map.
- TNode<Object> LoadMapBackPointer(SloppyTNode<Map> map);
+ TNode<Object> LoadMapBackPointer(TNode<Map> map);
// Checks that |map| has only simple properties, returns bitfield3.
TNode<Uint32T> EnsureOnlyHasSimpleProperties(TNode<Map> map,
TNode<Int32T> instance_type,
@@ -1206,7 +1262,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<IntPtrT> length);
// Check if the map is set for slow properties.
- TNode<BoolT> IsDictionaryMap(SloppyTNode<Map> map);
+ TNode<BoolT> IsDictionaryMap(TNode<Map> map);
// Load the Name::hash() value of a name as a uint32 value.
// If {if_hash_not_computed} label is specified then it also checks if
@@ -1218,9 +1274,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Load length field of a String object as Smi value.
TNode<Smi> LoadStringLengthAsSmi(TNode<String> string);
// Load length field of a String object as intptr_t value.
- TNode<IntPtrT> LoadStringLengthAsWord(SloppyTNode<String> string);
+ TNode<IntPtrT> LoadStringLengthAsWord(TNode<String> string);
// Load length field of a String object as uint32_t value.
- TNode<Uint32T> LoadStringLengthAsWord32(SloppyTNode<String> string);
+ TNode<Uint32T> LoadStringLengthAsWord32(TNode<String> string);
// Load value field of a JSPrimitiveWrapper object.
TNode<Object> LoadJSPrimitiveWrapperValue(TNode<JSPrimitiveWrapper> object);
@@ -1399,12 +1455,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> LoadScopeInfoHasExtensionField(TNode<ScopeInfo> scope_info);
// Context manipulation:
- void StoreContextElementNoWriteBarrier(SloppyTNode<Context> context,
- int slot_index,
+ void StoreContextElementNoWriteBarrier(TNode<Context> context, int slot_index,
SloppyTNode<Object> value);
- TNode<NativeContext> LoadNativeContext(SloppyTNode<Context> context);
+ TNode<NativeContext> LoadNativeContext(TNode<Context> context);
// Calling this is only valid if there's a module context in the chain.
- TNode<Context> LoadModuleContext(SloppyTNode<Context> context);
+ TNode<Context> LoadModuleContext(TNode<Context> context);
void GotoIfContextElementEqual(SloppyTNode<Object> value,
TNode<NativeContext> native_context,
@@ -1419,9 +1474,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<NativeContext> native_context);
TNode<Map> LoadJSArrayElementsMap(ElementsKind kind,
- SloppyTNode<NativeContext> native_context);
+ TNode<NativeContext> native_context);
TNode<Map> LoadJSArrayElementsMap(SloppyTNode<Int32T> kind,
- SloppyTNode<NativeContext> native_context);
+ TNode<NativeContext> native_context);
TNode<BoolT> IsJSFunctionWithPrototypeSlot(TNode<HeapObject> object);
TNode<BoolT> IsGeneratorFunction(TNode<JSFunction> function);
@@ -1435,7 +1490,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Label* if_bailout);
TNode<BytecodeArray> LoadSharedFunctionInfoBytecodeArray(
- SloppyTNode<SharedFunctionInfo> shared);
+ TNode<SharedFunctionInfo> shared);
void StoreObjectByteNoWriteBarrier(TNode<HeapObject> object, int offset,
TNode<Word32T> value);
@@ -1699,10 +1754,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
void InitializeJSObjectBodyWithSlackTracking(
- SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ TNode<HeapObject> object, TNode<Map> map,
SloppyTNode<IntPtrT> instance_size);
void InitializeJSObjectBodyNoSlackTracking(
- SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ TNode<HeapObject> object, TNode<Map> map,
SloppyTNode<IntPtrT> instance_size,
int start_offset = JSObject::kHeaderSize);
@@ -1823,7 +1878,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<PropertyArray> AllocatePropertyArray(TNode<IntPtrT> capacity);
// TODO(v8:9722): Return type should be JSIteratorResult
- TNode<JSObject> AllocateJSIteratorResult(SloppyTNode<Context> context,
+ TNode<JSObject> AllocateJSIteratorResult(TNode<Context> context,
SloppyTNode<Object> value,
SloppyTNode<Oddball> done);
@@ -2040,10 +2095,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// compatible only with HOLEY_ELEMENTS and PACKED_ELEMENTS.
template <typename TIndex>
TNode<FixedArray> ExtractToFixedArray(
- SloppyTNode<FixedArrayBase> source, TNode<TIndex> first,
- TNode<TIndex> count, TNode<TIndex> capacity, TNode<Map> source_map,
- ElementsKind from_kind, AllocationFlags allocation_flags,
- ExtractFixedArrayFlags extract_flags, HoleConversionMode convert_holes,
+ TNode<FixedArrayBase> source, TNode<TIndex> first, TNode<TIndex> count,
+ TNode<TIndex> capacity, TNode<Map> source_map, ElementsKind from_kind,
+ AllocationFlags allocation_flags, ExtractFixedArrayFlags extract_flags,
+ HoleConversionMode convert_holes,
TVariable<BoolT>* var_holes_converted = nullptr,
base::Optional<TNode<Int32T>> source_runtime_kind = base::nullopt);
@@ -2138,9 +2193,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Float64T> TryTaggedToFloat64(TNode<Object> value,
Label* if_valueisnotnumber);
- TNode<Float64T> TruncateTaggedToFloat64(SloppyTNode<Context> context,
+ TNode<Float64T> TruncateTaggedToFloat64(TNode<Context> context,
SloppyTNode<Object> value);
- TNode<Word32T> TruncateTaggedToWord32(SloppyTNode<Context> context,
+ TNode<Word32T> TruncateTaggedToWord32(TNode<Context> context,
SloppyTNode<Object> value);
void TaggedToWord32OrBigInt(TNode<Context> context, TNode<Object> value,
Label* if_number, TVariable<Word32T>* var_word32,
@@ -2254,87 +2309,87 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsNoElementsProtectorCellInvalid();
TNode<BoolT> IsArrayIteratorProtectorCellInvalid();
TNode<BoolT> IsBigIntInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsBigInt(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsBoolean(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsCallableMap(SloppyTNode<Map> map);
- TNode<BoolT> IsCallable(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsBigInt(TNode<HeapObject> object);
+ TNode<BoolT> IsBoolean(TNode<HeapObject> object);
+ TNode<BoolT> IsCallableMap(TNode<Map> map);
+ TNode<BoolT> IsCallable(TNode<HeapObject> object);
TNode<BoolT> TaggedIsCallable(TNode<Object> object);
TNode<BoolT> IsConsStringInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsConstructorMap(SloppyTNode<Map> map);
- TNode<BoolT> IsConstructor(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsDeprecatedMap(SloppyTNode<Map> map);
- TNode<BoolT> IsNameDictionary(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsGlobalDictionary(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsExtensibleMap(SloppyTNode<Map> map);
+ TNode<BoolT> IsConstructorMap(TNode<Map> map);
+ TNode<BoolT> IsConstructor(TNode<HeapObject> object);
+ TNode<BoolT> IsDeprecatedMap(TNode<Map> map);
+ TNode<BoolT> IsNameDictionary(TNode<HeapObject> object);
+ TNode<BoolT> IsGlobalDictionary(TNode<HeapObject> object);
+ TNode<BoolT> IsExtensibleMap(TNode<Map> map);
TNode<BoolT> IsExtensibleNonPrototypeMap(TNode<Map> map);
TNode<BoolT> IsExternalStringInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsFixedArray(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsFixedArraySubclass(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsFixedArrayWithKind(SloppyTNode<HeapObject> object,
+ TNode<BoolT> IsFixedArray(TNode<HeapObject> object);
+ TNode<BoolT> IsFixedArraySubclass(TNode<HeapObject> object);
+ TNode<BoolT> IsFixedArrayWithKind(TNode<HeapObject> object,
ElementsKind kind);
- TNode<BoolT> IsFixedArrayWithKindOrEmpty(SloppyTNode<FixedArrayBase> object,
+ TNode<BoolT> IsFixedArrayWithKindOrEmpty(TNode<FixedArrayBase> object,
ElementsKind kind);
- TNode<BoolT> IsFunctionWithPrototypeSlotMap(SloppyTNode<Map> map);
- TNode<BoolT> IsHashTable(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsEphemeronHashTable(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsFunctionWithPrototypeSlotMap(TNode<Map> map);
+ TNode<BoolT> IsHashTable(TNode<HeapObject> object);
+ TNode<BoolT> IsEphemeronHashTable(TNode<HeapObject> object);
TNode<BoolT> IsHeapNumberInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsOddball(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsOddball(TNode<HeapObject> object);
TNode<BoolT> IsOddballInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsIndirectStringInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSArrayBuffer(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSArrayBuffer(TNode<HeapObject> object);
TNode<BoolT> IsJSDataView(TNode<HeapObject> object);
TNode<BoolT> IsJSArrayInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSArrayMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSArray(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSArrayIterator(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSAsyncGeneratorObject(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSArrayMap(TNode<Map> map);
+ TNode<BoolT> IsJSArray(TNode<HeapObject> object);
+ TNode<BoolT> IsJSArrayIterator(TNode<HeapObject> object);
+ TNode<BoolT> IsJSAsyncGeneratorObject(TNode<HeapObject> object);
TNode<BoolT> IsJSFunctionInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSFunctionMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSFunction(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSBoundFunction(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSFunctionMap(TNode<Map> map);
+ TNode<BoolT> IsJSFunction(TNode<HeapObject> object);
+ TNode<BoolT> IsJSBoundFunction(TNode<HeapObject> object);
TNode<BoolT> IsJSGeneratorObject(TNode<HeapObject> object);
TNode<BoolT> IsJSGlobalProxyInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSGlobalProxyMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSGlobalProxy(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSGlobalProxyMap(TNode<Map> map);
+ TNode<BoolT> IsJSGlobalProxy(TNode<HeapObject> object);
TNode<BoolT> IsJSObjectInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSObjectMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSObject(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSObjectMap(TNode<Map> map);
+ TNode<BoolT> IsJSObject(TNode<HeapObject> object);
TNode<BoolT> IsJSFinalizationRegistryMap(TNode<Map> map);
TNode<BoolT> IsJSFinalizationRegistry(TNode<HeapObject> object);
- TNode<BoolT> IsJSPromiseMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSPromise(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSProxy(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSStringIterator(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSRegExpStringIterator(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSPromiseMap(TNode<Map> map);
+ TNode<BoolT> IsJSPromise(TNode<HeapObject> object);
+ TNode<BoolT> IsJSProxy(TNode<HeapObject> object);
+ TNode<BoolT> IsJSStringIterator(TNode<HeapObject> object);
+ TNode<BoolT> IsJSRegExpStringIterator(TNode<HeapObject> object);
TNode<BoolT> IsJSReceiverInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSReceiverMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSReceiver(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSRegExp(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSReceiverMap(TNode<Map> map);
+ TNode<BoolT> IsJSReceiver(TNode<HeapObject> object);
+ TNode<BoolT> IsJSRegExp(TNode<HeapObject> object);
TNode<BoolT> IsJSTypedArrayInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSTypedArrayMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSTypedArray(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSTypedArrayMap(TNode<Map> map);
+ TNode<BoolT> IsJSTypedArray(TNode<HeapObject> object);
TNode<BoolT> IsJSGeneratorMap(TNode<Map> map);
TNode<BoolT> IsJSPrimitiveWrapperInstanceType(
SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSPrimitiveWrapperMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSPrimitiveWrapper(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsMap(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsName(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSPrimitiveWrapperMap(TNode<Map> map);
+ TNode<BoolT> IsJSPrimitiveWrapper(TNode<HeapObject> object);
+ TNode<BoolT> IsMap(TNode<HeapObject> object);
+ TNode<BoolT> IsName(TNode<HeapObject> object);
TNode<BoolT> IsNameInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsNullOrJSReceiver(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsNullOrJSReceiver(TNode<HeapObject> object);
TNode<BoolT> IsNullOrUndefined(SloppyTNode<Object> object);
- TNode<BoolT> IsNumberDictionary(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsNumberDictionary(TNode<HeapObject> object);
TNode<BoolT> IsOneByteStringInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsSeqOneByteStringInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsPrimitiveInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsPrivateName(SloppyTNode<Symbol> symbol);
- TNode<BoolT> IsPropertyArray(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsPropertyCell(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsPropertyArray(TNode<HeapObject> object);
+ TNode<BoolT> IsPropertyCell(TNode<HeapObject> object);
TNode<BoolT> IsPromiseReactionJobTask(TNode<HeapObject> object);
- TNode<BoolT> IsPrototypeInitialArrayPrototype(SloppyTNode<Context> context,
- SloppyTNode<Map> map);
- TNode<BoolT> IsPrototypeTypedArrayPrototype(SloppyTNode<Context> context,
- SloppyTNode<Map> map);
+ TNode<BoolT> IsPrototypeInitialArrayPrototype(TNode<Context> context,
+ TNode<Map> map);
+ TNode<BoolT> IsPrototypeTypedArrayPrototype(TNode<Context> context,
+ TNode<Map> map);
TNode<BoolT> IsFastAliasedArgumentsMap(TNode<Context> context,
TNode<Map> map);
@@ -2350,9 +2405,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsSpecialReceiverInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsCustomElementsReceiverInstanceType(
TNode<Int32T> instance_type);
- TNode<BoolT> IsSpecialReceiverMap(SloppyTNode<Map> map);
+ TNode<BoolT> IsSpecialReceiverMap(TNode<Map> map);
TNode<BoolT> IsStringInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsString(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsString(TNode<HeapObject> object);
TNode<BoolT> IsSeqOneByteString(TNode<HeapObject> object);
TNode<BoolT> IsSymbolInstanceType(SloppyTNode<Int32T> instance_type);
@@ -2360,8 +2415,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsUniqueName(TNode<HeapObject> object);
TNode<BoolT> IsUniqueNameNoIndex(TNode<HeapObject> object);
TNode<BoolT> IsUniqueNameNoCachedIndex(TNode<HeapObject> object);
- TNode<BoolT> IsUndetectableMap(SloppyTNode<Map> map);
- TNode<BoolT> IsNotWeakFixedArraySubclass(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsUndetectableMap(TNode<Map> map);
+ TNode<BoolT> IsNotWeakFixedArraySubclass(TNode<HeapObject> object);
TNode<BoolT> IsZeroOrContext(SloppyTNode<Object> object);
TNode<BoolT> IsPromiseResolveProtectorCellInvalid();
@@ -2385,8 +2440,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// True iff |number| is either a Smi, or a HeapNumber whose value is not
// within Smi range.
- TNode<BoolT> IsNumberNormalized(SloppyTNode<Number> number);
- TNode<BoolT> IsNumberPositive(SloppyTNode<Number> number);
+ TNode<BoolT> IsNumberNormalized(TNode<Number> number);
+ TNode<BoolT> IsNumberPositive(TNode<Number> number);
TNode<BoolT> IsHeapNumberPositive(TNode<HeapNumber> number);
// True iff {number} is non-negative and less than or equal to 2**53-1.
@@ -2463,11 +2518,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Convert a Non-Number object to a Number.
TNode<Number> NonNumberToNumber(
- TNode<Context> context, SloppyTNode<HeapObject> input,
+ TNode<Context> context, TNode<HeapObject> input,
BigIntHandling bigint_handling = BigIntHandling::kThrow);
// Convert a Non-Number object to a Numeric.
TNode<Numeric> NonNumberToNumeric(TNode<Context> context,
- SloppyTNode<HeapObject> input);
+ TNode<HeapObject> input);
// Convert any object to a Number.
// Conforms to ES#sec-tonumber if {bigint_handling} == kThrow.
// With {bigint_handling} == kConvertToNumber, matches behavior of
@@ -2475,7 +2530,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Number> ToNumber(
TNode<Context> context, SloppyTNode<Object> input,
BigIntHandling bigint_handling = BigIntHandling::kThrow);
- TNode<Number> ToNumber_Inline(SloppyTNode<Context> context,
+ TNode<Number> ToNumber_Inline(TNode<Context> context,
SloppyTNode<Object> input);
// Convert any plain primitive to a Number. No need to handle BigInts since
// they are not plain primitives.
@@ -2488,15 +2543,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Converts |input| to one of 2^32 integer values in the range 0 through
// 2^32-1, inclusive.
// ES#sec-touint32
- TNode<Number> ToUint32(SloppyTNode<Context> context,
- SloppyTNode<Object> input);
+ TNode<Number> ToUint32(TNode<Context> context, SloppyTNode<Object> input);
// Convert any object to a String.
- TNode<String> ToString_Inline(SloppyTNode<Context> context,
+ TNode<String> ToString_Inline(TNode<Context> context,
SloppyTNode<Object> input);
- TNode<JSReceiver> ToObject(SloppyTNode<Context> context,
- SloppyTNode<Object> input);
+ TNode<JSReceiver> ToObject(TNode<Context> context, SloppyTNode<Object> input);
// Same as ToObject but avoids the Builtin call if |input| is already a
// JSReceiver.
@@ -2504,7 +2557,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> input);
// ES6 7.1.15 ToLength, but with inlined fast path.
- TNode<Number> ToLength_Inline(SloppyTNode<Context> context,
+ TNode<Number> ToLength_Inline(TNode<Context> context,
SloppyTNode<Object> input);
TNode<Object> OrdinaryToPrimitive(TNode<Context> context, TNode<Object> input,
@@ -2513,7 +2566,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word32|. Returns result as a uint32 node.
template <typename BitField>
- TNode<Uint32T> DecodeWord32(SloppyTNode<Word32T> word32) {
+ TNode<Uint32T> DecodeWord32(TNode<Word32T> word32) {
return DecodeWord32(word32, BitField::kShift, BitField::kMask);
}
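
(For illustration only: the decode performed here matches base::BitField
semantics; a plain-C++ equivalent of DecodeWord32(word32, shift, mask).)

  uint32_t Decode(uint32_t word32, uint32_t shift, uint32_t mask) {
    // Mask out the field's bits in place, then shift them down to bit 0.
    return (word32 & mask) >> shift;
  }
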
@@ -2527,7 +2580,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word32|. Returns result as a word-size node.
template <typename BitField>
- TNode<UintPtrT> DecodeWordFromWord32(SloppyTNode<Word32T> word32) {
+ TNode<UintPtrT> DecodeWordFromWord32(TNode<Word32T> word32) {
return DecodeWord<BitField>(ChangeUint32ToWord(word32));
}
@@ -2540,7 +2593,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
// Decodes an unsigned (!) value from |word32| to a uint32 node.
- TNode<Uint32T> DecodeWord32(SloppyTNode<Word32T> word32, uint32_t shift,
+ TNode<Uint32T> DecodeWord32(TNode<Word32T> word32, uint32_t shift,
uint32_t mask);
// Decodes an unsigned (!) value from |word| to a word-size node.
@@ -2594,24 +2647,24 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Returns true if any of the |T|'s bits in given |word32| are set.
template <typename T>
- TNode<BoolT> IsSetWord32(SloppyTNode<Word32T> word32) {
+ TNode<BoolT> IsSetWord32(TNode<Word32T> word32) {
return IsSetWord32(word32, T::kMask);
}
// Returns true if any of the mask's bits in given |word32| are set.
- TNode<BoolT> IsSetWord32(SloppyTNode<Word32T> word32, uint32_t mask) {
+ TNode<BoolT> IsSetWord32(TNode<Word32T> word32, uint32_t mask) {
return Word32NotEqual(Word32And(word32, Int32Constant(mask)),
Int32Constant(0));
}
// Returns true if none of the mask's bits in given |word32| are set.
- TNode<BoolT> IsNotSetWord32(SloppyTNode<Word32T> word32, uint32_t mask) {
+ TNode<BoolT> IsNotSetWord32(TNode<Word32T> word32, uint32_t mask) {
return Word32Equal(Word32And(word32, Int32Constant(mask)),
Int32Constant(0));
}
// Returns true if all of the mask's bits in a given |word32| are set.
- TNode<BoolT> IsAllSetWord32(SloppyTNode<Word32T> word32, uint32_t mask) {
+ TNode<BoolT> IsAllSetWord32(TNode<Word32T> word32, uint32_t mask) {
TNode<Int32T> const_mask = Int32Constant(mask);
return Word32Equal(Word32And(word32, const_mask), const_mask);
}
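
(A concrete plain-C++ illustration of the three mask predicates above, with
word32 = 0b1010 and mask = 0b1110; not part of the patch.)

  bool is_set     = (0b1010 & 0b1110) != 0;       // true: some mask bits set
  bool is_not_set = (0b1010 & 0b1110) == 0;       // false
  bool is_all_set = (0b1010 & 0b1110) == 0b1110;  // false: bit 0b0100 clear
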
@@ -2648,12 +2701,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Returns true if all of the |T|'s bits in given |word32| are clear.
template <typename T>
- TNode<BoolT> IsClearWord32(SloppyTNode<Word32T> word32) {
+ TNode<BoolT> IsClearWord32(TNode<Word32T> word32) {
return IsClearWord32(word32, T::kMask);
}
// Returns true if all of the mask's bits in given |word32| are clear.
- TNode<BoolT> IsClearWord32(SloppyTNode<Word32T> word32, uint32_t mask) {
+ TNode<BoolT> IsClearWord32(TNode<Word32T> word32, uint32_t mask) {
return Word32Equal(Word32And(word32, Int32Constant(mask)),
Int32Constant(0));
}
@@ -2706,7 +2759,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// - |if_not_internalized| if the string is not in the string table (but
// does not add it).
// - |if_bailout| for unsupported cases (e.g. uncacheable array index).
- void TryInternalizeString(SloppyTNode<String> string, Label* if_index,
+ void TryInternalizeString(TNode<String> string, Label* if_index,
TVariable<IntPtrT>* var_index,
Label* if_internalized,
TVariable<Name>* var_internalized,
@@ -2880,12 +2933,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// is an accessor then it also calls a getter. If the property is a double
// field it re-wraps value in an immutable heap number. {unique_name} must be
// a unique name (Symbol or InternalizedString) that is not an array index.
- void TryGetOwnProperty(TNode<Context> context, TNode<HeapObject> receiver,
+ void TryGetOwnProperty(TNode<Context> context, TNode<Object> receiver,
TNode<JSReceiver> object, TNode<Map> map,
TNode<Int32T> instance_type, TNode<Name> unique_name,
Label* if_found_value, TVariable<Object>* var_value,
Label* if_not_found, Label* if_bailout);
- void TryGetOwnProperty(TNode<Context> context, TNode<HeapObject> receiver,
+ void TryGetOwnProperty(TNode<Context> context, TNode<Object> receiver,
TNode<JSReceiver> object, TNode<Map> map,
TNode<Int32T> instance_type, TNode<Name> unique_name,
Label* if_found_value, TVariable<Object>* var_value,
@@ -2893,12 +2946,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TVariable<Object>* var_raw_value, Label* if_not_found,
Label* if_bailout, GetOwnPropertyMode mode);
- TNode<Object> GetProperty(SloppyTNode<Context> context,
+ TNode<Object> GetProperty(TNode<Context> context,
SloppyTNode<Object> receiver, Handle<Name> name) {
return GetProperty(context, receiver, HeapConstant(name));
}
- TNode<Object> GetProperty(SloppyTNode<Context> context,
+ TNode<Object> GetProperty(TNode<Context> context,
SloppyTNode<Object> receiver,
SloppyTNode<Object> name) {
return CallBuiltin(Builtins::kGetProperty, context, receiver, name);
@@ -2970,9 +3023,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
//
// Note: this code does not check if the global dictionary points to a
// deleted entry! This has to be done by the caller.
- void TryLookupProperty(SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+ void TryLookupProperty(TNode<HeapObject> object, TNode<Map> map,
SloppyTNode<Int32T> instance_type,
- SloppyTNode<Name> unique_name, Label* if_found_fast,
+ TNode<Name> unique_name, Label* if_found_fast,
Label* if_found_dict, Label* if_found_global,
TVariable<HeapObject>* var_meta_storage,
TVariable<IntPtrT>* var_name_index,
@@ -3049,19 +3102,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<FeedbackVector> LoadFeedbackVectorForStub();
// Load the value from closure's feedback cell.
- TNode<HeapObject> LoadFeedbackCellValue(SloppyTNode<JSFunction> closure);
+ TNode<HeapObject> LoadFeedbackCellValue(TNode<JSFunction> closure);
// Load the object from feedback vector cell for the given closure.
// The returned object could be undefined if the closure does not have
// a feedback vector associated with it.
- TNode<HeapObject> LoadFeedbackVector(SloppyTNode<JSFunction> closure);
+ TNode<HeapObject> LoadFeedbackVector(TNode<JSFunction> closure);
// Load the ClosureFeedbackCellArray that contains the feedback cells
// used when creating closures from this function. This array could be
// directly hanging off the FeedbackCell when there is no feedback vector
// or available from the feedback vector's header.
TNode<ClosureFeedbackCellArray> LoadClosureFeedbackArray(
- SloppyTNode<JSFunction> closure);
+ TNode<JSFunction> closure);
// Update the type feedback vector.
void UpdateFeedback(TNode<Smi> feedback,
@@ -3084,7 +3137,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Check if a property name might require protector invalidation when it is
// used for a property store or deletion.
- void CheckForAssociatedProtector(SloppyTNode<Name> name, Label* if_protector);
+ void CheckForAssociatedProtector(TNode<Name> name, Label* if_protector);
TNode<Map> LoadReceiverMap(SloppyTNode<Object> receiver);
@@ -3109,8 +3162,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// we pass {value} as BigInt object instead of int64_t. We should
// teach TurboFan to handle int64_t on 32-bit platforms eventually.
template <typename TIndex>
- void StoreElement(Node* elements, ElementsKind kind, TNode<TIndex> index,
- Node* value);
+ void StoreElement(TNode<RawPtrT> elements, ElementsKind kind,
+ TNode<TIndex> index, Node* value);
// Implements the BigInt part of
// https://tc39.github.io/proposal-bigint/#sec-numbertorawbytes,
@@ -3219,10 +3272,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Operation op, TNode<Object> left, TNode<Object> right,
TNode<Context> context, TVariable<Smi>* var_type_feedback = nullptr);
- void BranchIfNumberRelationalComparison(Operation op,
- SloppyTNode<Number> left,
- SloppyTNode<Number> right,
- Label* if_true, Label* if_false);
+ void BranchIfNumberRelationalComparison(Operation op, TNode<Number> left,
+ TNode<Number> right, Label* if_true,
+ Label* if_false);
void BranchIfNumberEqual(TNode<Number> left, TNode<Number> right,
Label* if_true, Label* if_false) {
@@ -3265,12 +3317,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Branch(IsAccessorPair(CAST(value)), if_accessor_pair, if_not_accessor_pair);
}
- void GotoIfNumberGreaterThanOrEqual(SloppyTNode<Number> left,
- SloppyTNode<Number> right,
+ void GotoIfNumberGreaterThanOrEqual(TNode<Number> left, TNode<Number> right,
Label* if_false);
TNode<Oddball> Equal(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
- SloppyTNode<Context> context,
+ TNode<Context> context,
TVariable<Smi>* var_type_feedback = nullptr);
TNode<Oddball> StrictEqual(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
@@ -3291,8 +3342,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
enum HasPropertyLookupMode { kHasProperty, kForInHasProperty };
- TNode<Oddball> HasProperty(SloppyTNode<Context> context,
- SloppyTNode<Object> object,
+ TNode<Oddball> HasProperty(TNode<Context> context, SloppyTNode<Object> object,
SloppyTNode<Object> key,
HasPropertyLookupMode mode);
@@ -3304,14 +3354,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
HasPropertyLookupMode::kHasProperty);
}
+ void ForInPrepare(TNode<HeapObject> enumerator, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector,
+ TNode<FixedArray>* cache_array_out,
+ TNode<Smi>* cache_length_out);
+ // Returns {cache_array} and {cache_length} in a fixed array of length 2.
+ // TODO(jgruber): Tuple2 would be a slightly better fit as the return type,
+ // but FixedArray has better support and there are no effective drawbacks to
+ // using it instead of Tuple2 in practice.
+ TNode<FixedArray> ForInPrepareForTorque(
+ TNode<HeapObject> enumerator, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector);
+
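
(A hedged sketch of unpacking the two-element result on the CSA side;
LoadFixedArrayElement and CAST are existing CSA facilities, and the variable
names are illustrative.)

  TNode<FixedArray> pair =
      ForInPrepareForTorque(enumerator, slot, maybe_feedback_vector);
  TNode<FixedArray> cache_array = CAST(LoadFixedArrayElement(pair, 0));
  TNode<Smi> cache_length = CAST(LoadFixedArrayElement(pair, 1));
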
TNode<String> Typeof(SloppyTNode<Object> value);
- TNode<Object> GetSuperConstructor(TNode<Context> context,
- TNode<JSFunction> active_function);
+ TNode<HeapObject> GetSuperConstructor(TNode<JSFunction> active_function);
- TNode<JSReceiver> SpeciesConstructor(
- SloppyTNode<Context> context, SloppyTNode<Object> object,
- SloppyTNode<JSReceiver> default_constructor);
+ TNode<JSReceiver> SpeciesConstructor(TNode<Context> context,
+ SloppyTNode<Object> object,
+ TNode<JSReceiver> default_constructor);
TNode<Oddball> InstanceOf(TNode<Object> object, TNode<Object> callable,
TNode<Context> context);
@@ -3322,7 +3383,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// JSArrayBuffer helpers
TNode<RawPtrT> LoadJSArrayBufferBackingStorePtr(
TNode<JSArrayBuffer> array_buffer);
- void ThrowIfArrayBufferIsDetached(SloppyTNode<Context> context,
+ void ThrowIfArrayBufferIsDetached(TNode<Context> context,
TNode<JSArrayBuffer> array_buffer,
const char* method_name);
@@ -3334,7 +3395,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<UintPtrT> LoadJSArrayBufferViewByteOffset(
TNode<JSArrayBufferView> array_buffer_view);
void ThrowIfArrayBufferViewBufferIsDetached(
- SloppyTNode<Context> context, TNode<JSArrayBufferView> array_buffer_view,
+ TNode<Context> context, TNode<JSArrayBufferView> array_buffer_view,
const char* method_name);
// JSTypedArray helpers
@@ -3358,9 +3419,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Figure out the SFI's code object using its data field.
// If |if_compile_lazy| is provided then the execution will go to the given
// label in case of a CompileLazy code object.
- TNode<Code> GetSharedFunctionInfoCode(
- SloppyTNode<SharedFunctionInfo> shared_info,
- Label* if_compile_lazy = nullptr);
+ TNode<Code> GetSharedFunctionInfoCode(TNode<SharedFunctionInfo> shared_info,
+ Label* if_compile_lazy = nullptr);
TNode<JSFunction> AllocateFunctionWithMapAndContext(
TNode<Map> map, TNode<SharedFunctionInfo> shared_info,
@@ -3387,8 +3447,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Support for printf-style debugging
void Print(const char* s);
- void Print(const char* prefix, SloppyTNode<MaybeObject> tagged_value);
- void Print(SloppyTNode<MaybeObject> tagged_value) {
+ void Print(const char* prefix, TNode<MaybeObject> tagged_value);
+ void Print(TNode<MaybeObject> tagged_value) {
return Print(nullptr, tagged_value);
}
@@ -3410,6 +3470,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
bool ConstexprInt31Equal(int31_t a, int31_t b) { return a == b; }
bool ConstexprInt31NotEqual(int31_t a, int31_t b) { return a != b; }
bool ConstexprInt31GreaterThanEqual(int31_t a, int31_t b) { return a >= b; }
+ bool ConstexprUint32Equal(uint32_t a, uint32_t b) { return a == b; }
+ bool ConstexprUint32NotEqual(uint32_t a, uint32_t b) { return a != b; }
bool ConstexprInt32Equal(int32_t a, int32_t b) { return a == b; }
bool ConstexprInt32NotEqual(int32_t a, int32_t b) { return a != b; }
bool ConstexprInt32GreaterThanEqual(int32_t a, int32_t b) { return a >= b; }
@@ -3449,18 +3511,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Number> length);
// Implements DescriptorArray::Search().
- void DescriptorLookup(SloppyTNode<Name> unique_name,
- SloppyTNode<DescriptorArray> descriptors,
- SloppyTNode<Uint32T> bitfield3, Label* if_found,
+ void DescriptorLookup(TNode<Name> unique_name,
+ TNode<DescriptorArray> descriptors,
+ TNode<Uint32T> bitfield3, Label* if_found,
TVariable<IntPtrT>* var_name_index,
Label* if_not_found);
// Implements TransitionArray::SearchName() - searches for first transition
// entry with given name (note that there could be multiple entries with
// the same name).
- void TransitionLookup(SloppyTNode<Name> unique_name,
- SloppyTNode<TransitionArray> transitions,
- Label* if_found, TVariable<IntPtrT>* var_name_index,
+ void TransitionLookup(TNode<Name> unique_name,
+ TNode<TransitionArray> transitions, Label* if_found,
+ TVariable<IntPtrT>* var_name_index,
Label* if_not_found);
// Implements generic search procedure like i::Search<Array>().
@@ -3569,6 +3631,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<JSFinalizationRegistry> finalization_registry,
TNode<WeakCell> weak_cell);
+ TNode<IntPtrT> FeedbackIteratorSizeFor(int number_of_entries) {
+ return IntPtrConstant(FeedbackIterator::SizeFor(number_of_entries));
+ }
+
+ TNode<IntPtrT> FeedbackIteratorMapIndexForEntry(int entry) {
+ return IntPtrConstant(FeedbackIterator::MapIndexForEntry(entry));
+ }
+
+ TNode<IntPtrT> FeedbackIteratorHandlerIndexForEntry(int entry) {
+ return IntPtrConstant(FeedbackIterator::HandlerIndexForEntry(entry));
+ }
+
private:
friend class CodeStubArguments;
@@ -3658,8 +3732,26 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> value, WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
int additional_offset = 0);
- // Converts {input} to a number. {input} must be a plain primitve.
- TNode<Number> PlainPrimitiveNonNumberToNumber(TNode<HeapObject> input);
+ // Store value to an elements array with given elements kind.
+ // TODO(turbofan): For BIGINT64_ELEMENTS and BIGUINT64_ELEMENTS
+ // we pass {value} as BigInt object instead of int64_t. We should
+ // teach TurboFan to handle int64_t on 32-bit platforms eventually.
+ // TODO(solanes): This method can go away, leaving a single version of
+ // StoreElement, once we have "if constexpr" available to use.
+ template <typename TArray, typename TIndex>
+ void StoreElementBigIntOrTypedArray(TNode<TArray> elements, ElementsKind kind,
+ TNode<TIndex> index, Node* value);
+
+ template <typename TIndex>
+ void StoreElement(TNode<FixedArrayBase> elements, ElementsKind kind,
+ TNode<TIndex> index, Node* value);
+
+ // Converts {input} to a number if {input} is a plain primitive (i.e. String or
+ // Oddball) and stores the result in {var_result}. Otherwise, it bails out to
+ // {if_bailout}.
+ void TryPlainPrimitiveNonNumberToNumber(TNode<HeapObject> input,
+ TVariable<Number>* var_result,
+ Label* if_bailout);
};
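
(A hedged usage sketch for the new bail-out variant above; TVARIABLE and
Label are standard CSA constructs, and |input| stands for an illustrative
TNode<HeapObject>.)

  TVARIABLE(Number, var_result);
  Label if_bailout(this);
  TryPlainPrimitiveNonNumberToNumber(input, &var_result, &if_bailout);
  // Fall-through: var_result.value() holds the converted Number; the
  // if_bailout label is taken for inputs that are not plain primitives.
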
class V8_EXPORT_PRIVATE CodeStubArguments {
diff --git a/deps/v8/src/codegen/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc
index 9c5cb42edd..f5e9bb8988 100644
--- a/deps/v8/src/codegen/compilation-cache.cc
+++ b/deps/v8/src/codegen/compilation-cache.cc
@@ -8,7 +8,7 @@
#include "src/heap/factory.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
-#include "src/objects/compilation-cache-inl.h"
+#include "src/objects/compilation-cache-table-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
#include "src/objects/visitors.h"
@@ -72,12 +72,16 @@ void CompilationSubCache::AgeCustom(CompilationSubCache* c) {
CompilationCacheTable::cast(c->tables_[0]).Age();
}
-void CompilationCacheScript::Age() { AgeCustom(this); }
+void CompilationCacheScript::Age() {
+ if (FLAG_isolate_script_cache_ageing) AgeCustom(this);
+}
void CompilationCacheEval::Age() { AgeCustom(this); }
void CompilationCacheRegExp::Age() { AgeByGeneration(this); }
void CompilationCacheCode::Age() {
- if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceAgeing();
- AgeByGeneration(this);
+ if (FLAG_turbo_nci_cache_ageing) {
+ if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceAgeing();
+ AgeByGeneration(this);
+ }
}
void CompilationSubCache::Iterate(RootVisitor* v) {
diff --git a/deps/v8/src/codegen/compilation-cache.h b/deps/v8/src/codegen/compilation-cache.h
index 8aac29fc29..56d90a37da 100644
--- a/deps/v8/src/codegen/compilation-cache.h
+++ b/deps/v8/src/codegen/compilation-cache.h
@@ -6,7 +6,7 @@
#define V8_CODEGEN_COMPILATION_CACHE_H_
#include "src/base/hashmap.h"
-#include "src/objects/compilation-cache.h"
+#include "src/objects/compilation-cache-table.h"
#include "src/utils/allocation.h"
namespace v8 {
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index 33b6bbb945..bb51b3be1e 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -36,6 +36,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/local-factory-inl.h"
#include "src/heap/local-heap-inl.h"
+#include "src/heap/local-heap.h"
#include "src/init/bootstrapper.h"
#include "src/interpreter/interpreter.h"
#include "src/logging/log-inl.h"
@@ -93,16 +94,15 @@ class CompilerTracer : public AllStatic {
static void PrintTracePrefix(const CodeTracer::Scope& scope,
const char* header,
OptimizedCompilationInfo* info) {
- PrintF(scope.file(), "[%s ", header);
- info->closure()->ShortPrint(scope.file());
- PrintF(scope.file(), " (target %s)", CodeKindToString(info->code_kind()));
+ PrintTracePrefix(scope, header, info->closure(), info->code_kind());
}
static void PrintTracePrefix(const CodeTracer::Scope& scope,
- const char* header,
- Handle<JSFunction> function) {
+ const char* header, Handle<JSFunction> function,
+ CodeKind code_kind) {
PrintF(scope.file(), "[%s ", header);
function->ShortPrint(scope.file());
+ PrintF(scope.file(), " (target %s)", CodeKindToString(code_kind));
}
static void PrintTraceSuffix(const CodeTracer::Scope& scope) {
@@ -151,10 +151,11 @@ class CompilerTracer : public AllStatic {
static void TraceOptimizedCodeCacheHit(Isolate* isolate,
Handle<JSFunction> function,
- BailoutId osr_offset) {
+ BailoutId osr_offset,
+ CodeKind code_kind) {
if (!FLAG_trace_opt) return;
CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintTracePrefix(scope, "found optimized code for", function);
+ PrintTracePrefix(scope, "found optimized code for", function, code_kind);
if (!osr_offset.IsNone()) {
PrintF(scope.file(), " at OSR AST id %d", osr_offset.ToInt());
}
@@ -162,13 +163,24 @@ class CompilerTracer : public AllStatic {
}
static void TraceOptimizeForAlwaysOpt(Isolate* isolate,
- Handle<JSFunction> function) {
+ Handle<JSFunction> function,
+ CodeKind code_kind) {
if (!FLAG_trace_opt) return;
CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintTracePrefix(scope, "optimizing", function);
+ PrintTracePrefix(scope, "optimizing", function, code_kind);
PrintF(scope.file(), " because --always-opt");
PrintTraceSuffix(scope);
}
+
+ static void TraceMarkForAlwaysOpt(Isolate* isolate,
+ Handle<JSFunction> function) {
+ if (!FLAG_trace_opt) return;
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[marking ");
+ function->ShortPrint(scope.file());
+ PrintF(scope.file(), " for optimized recompilation because --always-opt");
+ PrintF(scope.file(), "]\n");
+ }
};
} // namespace
@@ -340,12 +352,13 @@ CompilationJob::Status OptimizedCompilationJob::PrepareJob(Isolate* isolate) {
}
CompilationJob::Status OptimizedCompilationJob::ExecuteJob(
- RuntimeCallStats* stats) {
+ RuntimeCallStats* stats, LocalIsolate* local_isolate) {
DisallowHeapAccess no_heap_access;
// Delegate to the underlying implementation.
DCHECK_EQ(state(), State::kReadyToExecute);
ScopedTimer t(&time_taken_to_execute_);
- return UpdateState(ExecuteJobImpl(stats), State::kReadyToFinalize);
+ return UpdateState(ExecuteJobImpl(stats, local_isolate),
+ State::kReadyToFinalize);
}
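
(A hedged sketch of a main-thread call site under the new two-argument
signature, mirroring the pattern used later in this patch.)

  LocalIsolate local_isolate(isolate, ThreadKind::kMain);
  CompilationJob::Status status =
      job->ExecuteJob(isolate->counters()->runtime_call_stats(),
                      &local_isolate);
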
CompilationJob::Status OptimizedCompilationJob::FinalizeJob(Isolate* isolate) {
@@ -480,7 +493,7 @@ bool UseAsmWasm(FunctionLiteral* literal, bool asm_wasm_broken) {
void InstallInterpreterTrampolineCopy(Isolate* isolate,
Handle<SharedFunctionInfo> shared_info) {
DCHECK(FLAG_interpreted_frames_native_stack);
- if (!shared_info->function_data().IsBytecodeArray()) {
+ if (!shared_info->function_data(kAcquireLoad).IsBytecodeArray()) {
DCHECK(!shared_info->HasBytecodeArray());
return;
}
@@ -515,19 +528,6 @@ void InstallInterpreterTrampolineCopy(Isolate* isolate,
script_name, line_num, column_num));
}
-void InstallCoverageInfo(Isolate* isolate, Handle<SharedFunctionInfo> shared,
- Handle<CoverageInfo> coverage_info) {
- DCHECK(isolate->is_block_code_coverage());
- isolate->debug()->InstallCoverageInfo(shared, coverage_info);
-}
-
-void InstallCoverageInfo(LocalIsolate* isolate,
- Handle<SharedFunctionInfo> shared,
- Handle<CoverageInfo> coverage_info) {
- // We should only have coverage info when finalizing on the main thread.
- UNREACHABLE();
-}
-
template <typename LocalIsolate>
void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
Handle<SharedFunctionInfo> shared_info,
@@ -556,12 +556,6 @@ void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
shared_info->set_feedback_metadata(
ReadOnlyRoots(isolate).empty_feedback_metadata());
}
-
- if (compilation_info->has_coverage_info() &&
- !shared_info->HasCoverageInfo()) {
- InstallCoverageInfo(isolate, shared_info,
- compilation_info->coverage_info());
- }
}
void LogUnoptimizedCompilation(Isolate* isolate,
@@ -612,12 +606,10 @@ void UpdateSharedFunctionFlagsAfterCompilation(FunctionLiteral* literal,
shared_info.set_class_scope_has_private_brand(
literal->class_scope_has_private_brand());
- shared_info.set_is_safe_to_skip_arguments_adaptor(
- literal->SafeToSkipArgumentsAdaptor());
shared_info.set_has_static_private_methods_or_accessors(
literal->has_static_private_methods_or_accessors());
- shared_info.set_scope_info(*literal->scope()->scope_info());
+ shared_info.SetScopeInfo(*literal->scope()->scope_info());
}
// Finalize a single compilation job. This function can return
@@ -634,12 +626,20 @@ CompilationJob::Status FinalizeSingleUnoptimizedCompilationJob(
CompilationJob::Status status = job->FinalizeJob(shared_info, isolate);
if (status == CompilationJob::SUCCEEDED) {
InstallUnoptimizedCode(compilation_info, shared_info, isolate);
+
+ MaybeHandle<CoverageInfo> coverage_info;
+ if (compilation_info->has_coverage_info() &&
+ !shared_info->HasCoverageInfo()) {
+ coverage_info = compilation_info->coverage_info();
+ }
+
finalize_unoptimized_compilation_data_list->emplace_back(
- isolate, shared_info, job->time_taken_to_execute(),
+ isolate, shared_info, coverage_info, job->time_taken_to_execute(),
job->time_taken_to_finalize());
}
- DCHECK_IMPLIES(status == CompilationJob::RETRY_ON_MAIN_THREAD,
- (std::is_same<LocalIsolate, LocalIsolate>::value));
+ DCHECK_IMPLIES(
+ status == CompilationJob::RETRY_ON_MAIN_THREAD,
+ (std::is_same<LocalIsolate, v8::internal::LocalIsolate>::value));
return status;
}
@@ -833,7 +833,7 @@ bool FinalizeDeferredUnoptimizedCompilationJobs(
}
V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
- Handle<JSFunction> function, BailoutId osr_offset) {
+ Handle<JSFunction> function, BailoutId osr_offset, CodeKind code_kind) {
RuntimeCallTimerScope runtimeTimer(
function->GetIsolate(),
RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
@@ -852,13 +852,13 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
.GetOSROptimizedCodeCache()
.GetOptimizedCode(shared, osr_offset, isolate);
}
- if (!code.is_null()) {
+ DCHECK_IMPLIES(!code.is_null(), code.kind() <= code_kind);
+ if (!code.is_null() && code.kind() == code_kind) {
// Caching of optimized code enabled and optimized code found.
DCHECK(!code.marked_for_deoptimization());
DCHECK(function->shared().is_compiled());
DCHECK(CodeKindIsStoredInOptimizedCodeCache(code.kind()));
- DCHECK_IMPLIES(!osr_offset.IsNone(),
- code.kind() == CodeKind::OPTIMIZED_FUNCTION);
+ DCHECK_IMPLIES(!osr_offset.IsNone(), CodeKindCanOSR(code.kind()));
return Handle<Code>(code, isolate);
}
return MaybeHandle<Code>();
@@ -902,7 +902,7 @@ void InsertCodeIntoOptimizedCodeCache(
handle(function->feedback_vector(), function->GetIsolate());
FeedbackVector::SetOptimizedCode(vector, code);
} else {
- DCHECK_EQ(kind, CodeKind::OPTIMIZED_FUNCTION);
+ DCHECK(CodeKindCanOSR(kind));
OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
compilation_info->osr_offset());
}
@@ -952,10 +952,21 @@ bool GetOptimizedCodeNow(OptimizedCompilationJob* job, Isolate* isolate,
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OptimizeNonConcurrent");
- if (!PrepareJobWithHandleScope(job, isolate, compilation_info) ||
- job->ExecuteJob(isolate->counters()->runtime_call_stats()) !=
- CompilationJob::SUCCEEDED ||
- job->FinalizeJob(isolate) != CompilationJob::SUCCEEDED) {
+ if (!PrepareJobWithHandleScope(job, isolate, compilation_info)) {
+ CompilerTracer::TraceAbortedJob(isolate, compilation_info);
+ return false;
+ }
+
+ {
+ LocalIsolate local_isolate(isolate, ThreadKind::kMain);
+ if (job->ExecuteJob(isolate->counters()->runtime_call_stats(),
+ &local_isolate)) {
+ CompilerTracer::TraceAbortedJob(isolate, compilation_info);
+ return false;
+ }
+ }
+
+ if (job->FinalizeJob(isolate) != CompilationJob::SUCCEEDED) {
CompilerTracer::TraceAbortedJob(isolate, compilation_info);
return false;
}
@@ -996,8 +1007,9 @@ bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OptimizeConcurrentPrepare");
- if (!PrepareJobWithHandleScope(job.get(), isolate, compilation_info))
+ if (!PrepareJobWithHandleScope(job.get(), isolate, compilation_info)) {
return false;
+ }
// The background recompile will own this job.
isolate->optimizing_compile_dispatcher()->QueueForOptimization(job.get());
@@ -1012,7 +1024,12 @@ bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
if (CodeKindIsStoredInOptimizedCodeCache(code_kind)) {
function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
}
- DCHECK(function->ActiveTierIsIgnition() || function->ActiveTierIsNCI());
+
+ // Note: Usually the active tier is expected to be Ignition or NCI at this
+ // point (in other words, we don't expect to optimize if the function is
+ // already TF-optimized). There is a special case for OSR though, for which
+ // we *can* reach this point even if we've already generated non-OSR'd TF
+ // code.
DCHECK(function->shared().HasBytecodeArray());
return true;
}
@@ -1028,6 +1045,18 @@ Handle<Code> ContinuationForConcurrentOptimization(
// Tiering up to Turbofan and cached optimized code exists. Continue
// execution there until TF optimization has finished.
return cached_code;
+ } else if (FLAG_turboprop_as_midtier &&
+ function->HasAvailableOptimizedCode()) {
+ DCHECK(function->NextTier() == CodeKind::TURBOFAN);
+ // It is possible that we have marked a closure for TurboFan optimization
+ // but the marker is processed by another closure that doesn't have
+ // optimized code yet. So heal the closure here and return the optimized
+ // code.
+ if (!function->HasAttachedOptimizedCode()) {
+ DCHECK(function->feedback_vector().has_optimized_code());
+ function->set_code(function->feedback_vector().optimized_code());
+ }
+ return handle(function->code(), isolate);
}
return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
}
@@ -1074,9 +1103,10 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// Check the optimized code cache (stored on the SharedFunctionInfo).
if (CodeKindIsStoredInOptimizedCodeCache(code_kind)) {
Handle<Code> cached_code;
- if (GetCodeFromOptimizedCodeCache(function, osr_offset)
+ if (GetCodeFromOptimizedCodeCache(function, osr_offset, code_kind)
.ToHandle(&cached_code)) {
- CompilerTracer::TraceOptimizedCodeCacheHit(isolate, function, osr_offset);
+ CompilerTracer::TraceOptimizedCodeCacheHit(isolate, function, osr_offset,
+ code_kind);
return cached_code;
}
}
@@ -1089,7 +1119,8 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// contexts).
if (CodeKindIsNativeContextIndependentJSFunction(code_kind)) {
DCHECK(osr_offset.IsNone());
- DCHECK(FLAG_turbo_nci_as_midtier || shared->has_optimized_at_least_once());
+ DCHECK(FLAG_turbo_nci_as_midtier || !FLAG_turbo_nci_delayed_codegen ||
+ shared->has_optimized_at_least_once());
Handle<Code> cached_code;
if (GetCodeFromCompilationCache(isolate, shared).ToHandle(&cached_code)) {
@@ -1203,6 +1234,10 @@ void FinalizeUnoptimizedCompilation(
if (FLAG_interpreted_frames_native_stack) {
InstallInterpreterTrampolineCopy(isolate, shared_info);
}
+ Handle<CoverageInfo> coverage_info;
+ if (finalize_data.coverage_info().ToHandle(&coverage_info)) {
+ isolate->debug()->InstallCoverageInfo(shared_info, coverage_info);
+ }
LogUnoptimizedCompilation(isolate, shared_info, flags,
finalize_data.time_taken_to_execute(),
@@ -1395,11 +1430,14 @@ CompilationHandleScope::~CompilationHandleScope() {
FinalizeUnoptimizedCompilationData::FinalizeUnoptimizedCompilationData(
LocalIsolate* isolate, Handle<SharedFunctionInfo> function_handle,
+ MaybeHandle<CoverageInfo> coverage_info,
base::TimeDelta time_taken_to_execute,
base::TimeDelta time_taken_to_finalize)
: time_taken_to_execute_(time_taken_to_execute),
time_taken_to_finalize_(time_taken_to_finalize),
- function_handle_(isolate->heap()->NewPersistentHandle(function_handle)) {}
+ function_handle_(isolate->heap()->NewPersistentHandle(function_handle)),
+ coverage_info_(isolate->heap()->NewPersistentMaybeHandle(coverage_info)) {
+}
DeferredFinalizationJobData::DeferredFinalizationJobData(
LocalIsolate* isolate, Handle<SharedFunctionInfo> function_handle,
@@ -1414,7 +1452,7 @@ BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* streamed_data,
REPLMode::kNo)),
compile_state_(isolate),
info_(std::make_unique<ParseInfo>(isolate, flags_, &compile_state_)),
- isolate_for_local_isolate_(nullptr),
+ isolate_for_local_isolate_(isolate),
start_position_(0),
end_position_(0),
function_literal_id_(kFunctionLiteralIdTopLevel),
@@ -1434,13 +1472,6 @@ BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* streamed_data,
std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
streamed_data->source_stream.get(), streamed_data->encoding));
info_->set_character_stream(std::move(stream));
-
- // TODO(leszeks): Add block coverage support to off-thread finalization.
- finalize_on_background_thread_ =
- FLAG_finalize_streaming_on_background && !flags_.block_coverage_enabled();
- if (finalize_on_background_thread()) {
- isolate_for_local_isolate_ = isolate;
- }
}
BackgroundCompileTask::BackgroundCompileTask(
@@ -1460,8 +1491,7 @@ BackgroundCompileTask::BackgroundCompileTask(
stack_size_(max_stack_size),
worker_thread_runtime_call_stats_(worker_thread_runtime_stats),
timer_(timer),
- language_mode_(info_->language_mode()),
- finalize_on_background_thread_(false) {
+ language_mode_(info_->language_mode()) {
DCHECK_EQ(outer_parse_info->parameters_end_pos(), kNoSourcePosition);
DCHECK_NULL(outer_parse_info->extension());
@@ -1545,7 +1575,7 @@ void BackgroundCompileTask::Run() {
// Save the language mode.
language_mode_ = info_->language_mode();
- if (!finalize_on_background_thread_) {
+ if (!FLAG_finalize_streaming_on_background) {
if (info_->literal() != nullptr) {
CompileOnBackgroundThread(info_.get(), compile_state_.allocator(),
&compilation_jobs_);
@@ -1553,7 +1583,8 @@ void BackgroundCompileTask::Run() {
} else {
DCHECK(info_->flags().is_toplevel());
- LocalIsolate isolate(isolate_for_local_isolate_);
+ LocalIsolate isolate(isolate_for_local_isolate_, ThreadKind::kBackground);
+ UnparkedScope unparked_scope(isolate.heap());
LocalHandleScope handle_scope(&isolate);
info_->ast_value_factory()->Internalize(&isolate);
@@ -1701,8 +1732,8 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
shared_info->GetDebugInfo().HasInstrumentedBytecodeArray()) {
ByteArray source_position_table =
job->compilation_info()->bytecode_array()->SourcePositionTable();
- shared_info->GetDebugBytecodeArray().set_synchronized_source_position_table(
- source_position_table);
+ shared_info->GetDebugBytecodeArray().set_source_position_table(
+ source_position_table, kReleaseStore);
}
DCHECK(!isolate->has_pending_exception());
@@ -1815,7 +1846,8 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
// Optimize now if --always-opt is enabled.
if (FLAG_always_opt && !function->shared().HasAsmWasmData()) {
- CompilerTracer::TraceOptimizeForAlwaysOpt(isolate, function);
+ CompilerTracer::TraceOptimizeForAlwaysOpt(isolate, function,
+ CodeKindForTopTier());
Handle<Code> maybe_code;
if (GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent,
@@ -1839,7 +1871,7 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
bool Compiler::FinalizeBackgroundCompileTask(
BackgroundCompileTask* task, Handle<SharedFunctionInfo> shared_info,
Isolate* isolate, ClearExceptionFlag flag) {
- DCHECK(!task->finalize_on_background_thread());
+ DCHECK(!FLAG_finalize_streaming_on_background);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.FinalizeBackgroundCompileTask");
@@ -2067,21 +2099,27 @@ bool CodeGenerationFromStringsAllowed(Isolate* isolate, Handle<Context> context,
}
// Check whether embedder allows code generation in this context.
-// (via v8::Isolate::SetModifyCodeGenerationFromStringsCallback)
+// (via v8::Isolate::SetModifyCodeGenerationFromStringsCallback
+// or v8::Isolate::SetModifyCodeGenerationFromStringsCallback2)
bool ModifyCodeGenerationFromStrings(Isolate* isolate, Handle<Context> context,
- Handle<i::Object>* source) {
- DCHECK(isolate->modify_code_gen_callback());
+ Handle<i::Object>* source,
+ bool is_code_like) {
+ DCHECK(isolate->modify_code_gen_callback() ||
+ isolate->modify_code_gen_callback2());
DCHECK(source);
// A callback is set. Run it, and use the value it returns as the source, or
// block execution if the callback does not allow code generation.
VMState<EXTERNAL> state(isolate);
- ModifyCodeGenerationFromStringsCallback modify_callback =
- isolate->modify_code_gen_callback();
RuntimeCallTimerScope timer(
isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks);
ModifyCodeGenerationFromStringsResult result =
- modify_callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(*source));
+ isolate->modify_code_gen_callback()
+ ? isolate->modify_code_gen_callback()(v8::Utils::ToLocal(context),
+ v8::Utils::ToLocal(*source))
+ : isolate->modify_code_gen_callback2()(v8::Utils::ToLocal(context),
+ v8::Utils::ToLocal(*source),
+ is_code_like);
if (result.codegen_allowed && !result.modified_source.IsEmpty()) {
// Use the new source (which might be the same as the old source).
*source =
@@ -2107,7 +2145,7 @@ bool ModifyCodeGenerationFromStrings(Isolate* isolate, Handle<Context> context,
// static
std::pair<MaybeHandle<String>, bool> Compiler::ValidateDynamicCompilationSource(
Isolate* isolate, Handle<Context> context,
- Handle<i::Object> original_source) {
+ Handle<i::Object> original_source, bool is_code_like) {
// Check if the context unconditionally allows code gen from strings.
// allow_code_gen_from_strings can be many things, so we'll always check
// against the 'false' literal, so that e.g. undefined and 'true' are treated
@@ -2121,6 +2159,11 @@ std::pair<MaybeHandle<String>, bool> Compiler::ValidateDynamicCompilationSource(
// allow_code_gen_callback only allows proper strings.
// (I.e., let allow_code_gen_callback decide, if it has been set.)
if (isolate->allow_code_gen_callback()) {
+ // If we run into this condition, the embedder has marked some object
+ // templates as "code like", but has given us a callback that only accepts
+ // strings. That makes no sense.
+ DCHECK(!original_source->IsCodeLike(isolate));
+
if (!original_source->IsString()) {
return {MaybeHandle<String>(), true};
}
@@ -2134,9 +2177,11 @@ std::pair<MaybeHandle<String>, bool> Compiler::ValidateDynamicCompilationSource(
// Check if the context wants to block or modify this source object.
// Double-check that we really have a string now.
// (Let modify_code_gen_callback decide, if it's been set.)
- if (isolate->modify_code_gen_callback()) {
+ if (isolate->modify_code_gen_callback() ||
+ isolate->modify_code_gen_callback2()) {
Handle<i::Object> modified_source = original_source;
- if (!ModifyCodeGenerationFromStrings(isolate, context, &modified_source)) {
+ if (!ModifyCodeGenerationFromStrings(isolate, context, &modified_source,
+ is_code_like)) {
return {MaybeHandle<String>(), false};
}
if (!modified_source->IsString()) {
@@ -2145,6 +2190,15 @@ std::pair<MaybeHandle<String>, bool> Compiler::ValidateDynamicCompilationSource(
return {Handle<String>::cast(modified_source), false};
}
+ if (!context->allow_code_gen_from_strings().IsFalse(isolate) &&
+ original_source->IsCodeLike(isolate)) {
+ // Codegen is unconditionally allowed, and we've been given a CodeLike
+ // object. Stringify.
+ MaybeHandle<String> stringified_source =
+ Object::ToString(isolate, original_source);
+ return {stringified_source, stringified_source.is_null()};
+ }
+
// If unconditional codegen was disabled, and no callback defined, we block
// strings and allow all other objects.
return {MaybeHandle<String>(), !original_source->IsString()};
@@ -2181,12 +2235,13 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromValidatedString(
// static
MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
Handle<Context> context, Handle<Object> source,
- ParseRestriction restriction, int parameters_end_pos) {
+ ParseRestriction restriction, int parameters_end_pos, bool is_code_like) {
Isolate* const isolate = context->GetIsolate();
- Handle<Context> native_context(context->native_context(), isolate);
- return GetFunctionFromValidatedString(
- context, ValidateDynamicCompilationSource(isolate, context, source).first,
- restriction, parameters_end_pos);
+ MaybeHandle<String> validated_source =
+ ValidateDynamicCompilationSource(isolate, context, source, is_code_like)
+ .first;
+ return GetFunctionFromValidatedString(context, validated_source, restriction,
+ parameters_end_pos);
}
namespace {
@@ -2820,7 +2875,7 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
// the isolate cache.
Handle<Script> script;
- if (task->finalize_on_background_thread()) {
+ if (FLAG_finalize_streaming_on_background) {
RuntimeCallTimerScope runtimeTimerScope(
isolate, RuntimeCallCounterId::kCompilePublishBackgroundFinalization);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
@@ -3055,6 +3110,7 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
if (FLAG_always_opt && shared->allows_lazy_compilation() &&
!shared->optimization_disabled() &&
!function->HasAvailableOptimizedCode()) {
+ CompilerTracer::TraceMarkForAlwaysOpt(isolate, function);
JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
}
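
A note on the embedder-facing side of the is_code_like plumbing above: the
modify-codegen callback now receives a third argument telling it whether the
source object was created from a template marked as code-like. The following
is a minimal embedder-side sketch, not part of this patch; it assumes the
public ModifyCodeGenerationFromStringsCallback2 signature and result struct
from v8.h referenced in the comments above.

  #include <v8.h>

  v8::ModifyCodeGenerationFromStringsResult AllowStringsAndCodeLike(
      v8::Local<v8::Context> context, v8::Local<v8::Value> source,
      bool is_code_like) {
    v8::ModifyCodeGenerationFromStringsResult result;
    // Permit plain strings and embedder objects marked as code-like; block
    // everything else.
    result.codegen_allowed = source->IsString() || is_code_like;
    // Leaving modified_source empty means "compile the original source".
    return result;
  }
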
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index 1e3ed00f93..c599841a01 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -16,6 +16,7 @@
#include "src/handles/persistent-handles.h"
#include "src/logging/code-events.h"
#include "src/objects/contexts.h"
+#include "src/objects/debug-objects.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/pending-compilation-error-handler.h"
#include "src/utils/allocation.h"
@@ -141,13 +142,14 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// Create a (bound) function for a String source within a context for eval.
V8_WARN_UNUSED_RESULT static MaybeHandle<JSFunction> GetFunctionFromString(
Handle<Context> context, Handle<i::Object> source,
- ParseRestriction restriction, int parameters_end_pos);
+ ParseRestriction restriction, int parameters_end_pos, bool is_code_like);
// Decompose GetFunctionFromString into two functions, to allow callers to
// deal separately with the case of an object not handled by the embedder.
V8_WARN_UNUSED_RESULT static std::pair<MaybeHandle<String>, bool>
ValidateDynamicCompilationSource(Isolate* isolate, Handle<Context> context,
- Handle<i::Object> source_object);
+ Handle<i::Object> source_object,
+ bool is_code_like = false);
V8_WARN_UNUSED_RESULT static MaybeHandle<JSFunction>
GetFunctionFromValidatedString(Handle<Context> context,
MaybeHandle<String> source,
@@ -332,7 +334,8 @@ class OptimizedCompilationJob : public CompilationJob {
// Executes the compile job. Can be called on a background thread if
// can_execute_on_background_thread() returns true.
- V8_WARN_UNUSED_RESULT Status ExecuteJob(RuntimeCallStats* stats);
+ V8_WARN_UNUSED_RESULT Status
+ ExecuteJob(RuntimeCallStats* stats, LocalIsolate* local_isolate = nullptr);
// Finalizes the compile job. Must be called on the main thread.
V8_WARN_UNUSED_RESULT Status FinalizeJob(Isolate* isolate);
@@ -357,7 +360,8 @@ class OptimizedCompilationJob : public CompilationJob {
protected:
// Overridden by the actual implementation.
virtual Status PrepareJobImpl(Isolate* isolate) = 0;
- virtual Status ExecuteJobImpl(RuntimeCallStats* stats) = 0;
+ virtual Status ExecuteJobImpl(RuntimeCallStats* stats,
+ LocalIsolate* local_heap) = 0;
virtual Status FinalizeJobImpl(Isolate* isolate) = 0;
private:
@@ -372,14 +376,17 @@ class FinalizeUnoptimizedCompilationData {
public:
FinalizeUnoptimizedCompilationData(Isolate* isolate,
Handle<SharedFunctionInfo> function_handle,
+ MaybeHandle<CoverageInfo> coverage_info,
base::TimeDelta time_taken_to_execute,
base::TimeDelta time_taken_to_finalize)
: time_taken_to_execute_(time_taken_to_execute),
time_taken_to_finalize_(time_taken_to_finalize),
- function_handle_(function_handle) {}
+ function_handle_(function_handle),
+ coverage_info_(coverage_info) {}
FinalizeUnoptimizedCompilationData(LocalIsolate* isolate,
Handle<SharedFunctionInfo> function_handle,
+ MaybeHandle<CoverageInfo> coverage_info,
base::TimeDelta time_taken_to_execute,
base::TimeDelta time_taken_to_finalize);
@@ -387,6 +394,8 @@ class FinalizeUnoptimizedCompilationData {
return function_handle_;
}
+ MaybeHandle<CoverageInfo> coverage_info() const { return coverage_info_; }
+
base::TimeDelta time_taken_to_execute() const {
return time_taken_to_execute_;
}
@@ -398,6 +407,7 @@ class FinalizeUnoptimizedCompilationData {
base::TimeDelta time_taken_to_execute_;
base::TimeDelta time_taken_to_finalize_;
Handle<SharedFunctionInfo> function_handle_;
+ MaybeHandle<CoverageInfo> coverage_info_;
};
using FinalizeUnoptimizedCompilationDataList =
@@ -474,9 +484,6 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
UnoptimizedCompileFlags flags() const { return flags_; }
UnoptimizedCompileState* compile_state() { return &compile_state_; }
LanguageMode language_mode() { return language_mode_; }
- bool finalize_on_background_thread() {
- return finalize_on_background_thread_;
- }
FinalizeUnoptimizedCompilationDataList*
finalize_unoptimized_compilation_data() {
return &finalize_unoptimized_compilation_data_;
@@ -527,13 +534,6 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
TimedHistogram* timer_;
LanguageMode language_mode_;
- // True if the background compilation should be finalized on the background
- // thread. When this is true, the ParseInfo, Parser and compilation jobs are
- // freed on the background thread, the outer_function_sfi holds the top-level
- // function, and the off_thread_isolate has to be merged into the main-thread
- // Isolate.
- bool finalize_on_background_thread_;
-
DISALLOW_COPY_AND_ASSIGN(BackgroundCompileTask);
};
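
The ExecuteJob signature change above makes the three-phase protocol of
OptimizedCompilationJob explicit: prepare on the main thread, execute
(possibly on a background thread, now mediated by a LocalIsolate), and
finalize on the main thread again. Below is a sketch of a synchronous driver
under those assumptions; PrepareJob is taken to be the public counterpart of
PrepareJobImpl declared elsewhere in this class, and error handling is
elided.

  CompilationJob::Status RunJobSynchronously(OptimizedCompilationJob* job,
                                             Isolate* isolate,
                                             LocalIsolate* local_isolate,
                                             RuntimeCallStats* stats) {
    // Main thread: set up handles and compilation dependencies.
    if (job->PrepareJob(isolate) != CompilationJob::SUCCEEDED)
      return CompilationJob::FAILED;
    // May run on a background thread; the LocalIsolate mediates heap access.
    if (job->ExecuteJob(stats, local_isolate) != CompilationJob::SUCCEEDED)
      return CompilationJob::FAILED;
    // Main thread again: install the generated code.
    return job->FinalizeJob(isolate);
  }
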
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index ba71702e7c..499e5c5f37 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -121,6 +121,13 @@ ExternalReference ExternalReference::handle_scope_implementer_address(
return ExternalReference(isolate->handle_scope_implementer_address());
}
+#ifdef V8_HEAP_SANDBOX
+ExternalReference ExternalReference::external_pointer_table_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->external_pointer_table_address());
+}
+#endif
+
ExternalReference ExternalReference::interpreter_dispatch_table_address(
Isolate* isolate) {
return ExternalReference(isolate->interpreter()->dispatch_table_address());
@@ -468,6 +475,11 @@ ExternalReference ExternalReference::address_of_double_neg_constant() {
return ExternalReference(reinterpret_cast<Address>(&double_negate_constant));
}
+ExternalReference
+ExternalReference::address_of_enable_experimental_regexp_engine() {
+ return ExternalReference(&FLAG_enable_experimental_regexp_engine);
+}
+
ExternalReference ExternalReference::is_profiling_address(Isolate* isolate) {
return ExternalReference(isolate->is_profiling_address());
}
@@ -941,6 +953,11 @@ FUNCTION_REFERENCE(
js_finalization_registry_remove_cell_from_unregister_token_map,
JSFinalizationRegistry::RemoveCellFromUnregisterTokenMap)
+#ifdef V8_HEAP_SANDBOX
+FUNCTION_REFERENCE(external_pointer_table_grow_table_function,
+ ExternalPointerTable::GrowTable)
+#endif
+
bool operator==(ExternalReference lhs, ExternalReference rhs) {
return lhs.address() == rhs.address();
}
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index e35e12237b..72a3397007 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -84,12 +84,24 @@ class StatsCounter;
V(re_check_stack_guard_state, \
"RegExpMacroAssembler*::CheckStackGuardState()") \
V(re_grow_stack, "NativeRegExpMacroAssembler::GrowStack()") \
- V(re_word_character_map, "NativeRegExpMacroAssembler::word_character_map")
+ V(re_word_character_map, "NativeRegExpMacroAssembler::word_character_map") \
+ EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_HEAP_SANDBOX(V)
+
+#ifdef V8_HEAP_SANDBOX
+#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_HEAP_SANDBOX(V) \
+ V(external_pointer_table_address, \
+ "Isolate::external_pointer_table_address(" \
+ ")")
+#else
+#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_HEAP_SANDBOX(V)
+#endif // V8_HEAP_SANDBOX
#define EXTERNAL_REFERENCE_LIST(V) \
V(abort_with_reason, "abort_with_reason") \
V(address_of_double_abs_constant, "double_absolute_constant") \
V(address_of_double_neg_constant, "double_negate_constant") \
+ V(address_of_enable_experimental_regexp_engine, \
+ "address_of_enable_experimental_regexp_engine") \
V(address_of_float_abs_constant, "float_absolute_constant") \
V(address_of_float_neg_constant, "float_negate_constant") \
V(address_of_min_int, "LDoubleConstant::min_int") \
@@ -233,7 +245,8 @@ class StatsCounter;
V(re_match_for_call_from_js, "IrregexpInterpreter::MatchForCallFromJs") \
V(re_experimental_match_for_call_from_js, \
"ExperimentalRegExp::MatchForCallFromJs") \
- EXTERNAL_REFERENCE_LIST_INTL(V)
+ EXTERNAL_REFERENCE_LIST_INTL(V) \
+ EXTERNAL_REFERENCE_LIST_HEAP_SANDBOX(V)
#ifdef V8_INTL_SUPPORT
#define EXTERNAL_REFERENCE_LIST_INTL(V) \
@@ -243,6 +256,14 @@ class StatsCounter;
#define EXTERNAL_REFERENCE_LIST_INTL(V)
#endif // V8_INTL_SUPPORT
+#ifdef V8_HEAP_SANDBOX
+#define EXTERNAL_REFERENCE_LIST_HEAP_SANDBOX(V) \
+ V(external_pointer_table_grow_table_function, \
+ "ExternalPointerTable::GrowTable")
+#else
+#define EXTERNAL_REFERENCE_LIST_HEAP_SANDBOX(V)
+#endif // V8_HEAP_SANDBOX
+
// An ExternalReference represents a C++ address used in the generated
// code. All references to C++ functions and variables must be encapsulated
// in an ExternalReference instance. This is done in order to track the
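
The reference lists above use the X-macro pattern: each V(name, description)
entry is expanded by whatever macro the consumer passes in, and conditional
sub-lists such as EXTERNAL_REFERENCE_LIST_HEAP_SANDBOX expand to nothing when
the feature is disabled, so no #ifdefs leak into the expansion sites. A
self-contained illustration of the pattern (all names below are invented for
the example):

  #define DEMO_LIST(V) \
    V(alpha, "alpha()") \
    DEMO_LIST_OPTIONAL(V)

  #ifdef DEMO_FEATURE
  #define DEMO_LIST_OPTIONAL(V) V(beta, "beta()")
  #else
  #define DEMO_LIST_OPTIONAL(V)  // Expands to nothing when disabled.
  #endif

  // One consumer: generate an enum entry per list element.
  #define DEMO_ENUM_ENTRY(name, desc) k_##name,
  enum DemoReferenceId { DEMO_LIST(DEMO_ENUM_ENTRY) kDemoReferenceCount };
  #undef DEMO_ENUM_ENTRY
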
diff --git a/deps/v8/src/codegen/handler-table.cc b/deps/v8/src/codegen/handler-table.cc
index fb49f9fa70..8aec047d13 100644
--- a/deps/v8/src/codegen/handler-table.cc
+++ b/deps/v8/src/codegen/handler-table.cc
@@ -142,7 +142,7 @@ int HandlerTable::LengthForRange(int entries) {
// static
int HandlerTable::EmitReturnTableStart(Assembler* masm) {
- masm->DataAlign(sizeof(int32_t)); // Make sure entries are aligned.
+ masm->DataAlign(Code::kMetadataAlignment);
masm->RecordComment(";;; Exception handler table.");
int table_start = masm->pc_offset();
return table_start;
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index 321a59cede..f19c8dd1cd 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -302,6 +302,15 @@ Assembler::Assembler(const AssemblerOptions& options,
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+ // (larger) kCodeAlignment anyways.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
const int code_comments_size = WriteCodeComments();
// Finalize code (at this point overflow() may be true, but the gap ensures
@@ -510,13 +519,6 @@ void Assembler::pop(Operand dst) {
emit_operand(eax, dst);
}
-void Assembler::enter(const Immediate& size) {
- EnsureSpace ensure_space(this);
- EMIT(0xC8);
- emit_w(size);
- EMIT(0);
-}
-
void Assembler::leave() {
EnsureSpace ensure_space(this);
EMIT(0xC9);
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index ab26d36376..333daf6da3 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -474,7 +474,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void pop(Register dst);
void pop(Operand dst);
- void enter(const Immediate& size);
void leave();
// Moves
diff --git a/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc b/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
index 0177e36c4b..ee9c3aac1c 100644
--- a/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
@@ -294,54 +294,6 @@ void WasmFloat64ToNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 3);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 9558cf540d..b615c59185 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -33,16 +33,9 @@ namespace internal {
Operand StackArgumentsAccessor::GetArgumentOperand(int index) const {
DCHECK_GE(index, 0);
-#ifdef V8_REVERSE_JSARGS
// arg[0] = esp + kPCOnStackSize;
// arg[i] = arg[0] + i * kSystemPointerSize;
return Operand(esp, kPCOnStackSize + index * kSystemPointerSize);
-#else
- // arg[0] = (esp + kPCOnStackSize) + argc * kSystemPointerSize;
- // arg[i] = arg[0] - i * kSystemPointerSize;
- return Operand(esp, argc_, times_system_pointer_size,
- kPCOnStackSize - index * kSystemPointerSize);
-#endif
}
// -------------------------------------------------------------------------
@@ -1119,15 +1112,127 @@ void TurboAssembler::PrepareForTailCall(
mov(esp, new_sp_reg);
}
+void MacroAssembler::CompareStackLimit(Register with, StackLimitKind kind) {
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ // Address through the root register. No load is needed.
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ cmp(with, Operand(kRootRegister, offset));
+}
+
+void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
+ Label* stack_overflow,
+ bool include_receiver) {
+ DCHECK_NE(num_args, scratch);
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_jslimit(isolate());
+ // Compute the space that is left as a negative number in scratch. If
+ // we already overflowed, this will be a positive number.
+ mov(scratch, ExternalReferenceAsOperand(real_stack_limit, scratch));
+ sub(scratch, esp);
+ // TODO(victorgomes): Remove {include_receiver} and always require one extra
+ // word of stack space.
+ lea(scratch, Operand(scratch, num_args, times_system_pointer_size, 0));
+ if (include_receiver) {
+ add(scratch, Immediate(kSystemPointerSize));
+ }
+ // See if we overflowed, i.e. scratch is positive.
+ cmp(scratch, Immediate(0));
+ // TODO(victorgomes): Save some bytes in the builtins that use stack checks
+ // by jumping to a builtin that throws the exception.
+ j(greater, stack_overflow); // Signed comparison.
+}
+
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
Label* done, InvokeFlag flag) {
- DCHECK_EQ(actual_parameter_count, eax);
-
if (expected_parameter_count != actual_parameter_count) {
+ DCHECK_EQ(actual_parameter_count, eax);
DCHECK_EQ(expected_parameter_count, ecx);
-
Label regular_invoke;
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // If the expected parameter count is equal to the adaptor sentinel, there is
+ // no need to push undefined values as arguments.
+ cmp(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
+ j(equal, &regular_invoke, Label::kFar);
+
+ // In the case of overapplication, or if the actual argument count equals the
+ // formal parameter count, there is no need to push extra undefined values.
+ sub(expected_parameter_count, actual_parameter_count);
+ j(less_equal, &regular_invoke, Label::kFar);
+
+ // We need to preserve edx, edi, esi and ebx.
+ movd(xmm0, edx);
+ movd(xmm1, edi);
+ movd(xmm2, esi);
+ movd(xmm3, ebx);
+
+ Label stack_overflow;
+ StackOverflowCheck(expected_parameter_count, edx, &stack_overflow);
+
+ Register scratch = esi;
+
+ // Underapplication. Move the arguments already in the stack, including the
+ // receiver and the return address.
+ {
+ Label copy, check;
+ Register src = edx, dest = esp, num = edi, current = ebx;
+ mov(src, esp);
+ lea(scratch,
+ Operand(expected_parameter_count, times_system_pointer_size, 0));
+ AllocateStackSpace(scratch);
+ // Extra words are the receiver and the return address (if a jump).
+ int extra_words = flag == CALL_FUNCTION ? 1 : 2;
+ lea(num, Operand(eax, extra_words)); // Number of words to copy.
+ Set(current, 0);
+ // Fall-through to the loop body because there are non-zero words to copy.
+ bind(&copy);
+ mov(scratch, Operand(src, current, times_system_pointer_size, 0));
+ mov(Operand(dest, current, times_system_pointer_size, 0), scratch);
+ inc(current);
+ bind(&check);
+ cmp(current, num);
+ j(less, &copy);
+ lea(edx, Operand(esp, num, times_system_pointer_size, 0));
+ }
+
+ // Fill remaining expected arguments with undefined values.
+ movd(ebx, xmm3); // Restore root.
+ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ {
+ Label loop;
+ bind(&loop);
+ dec(expected_parameter_count);
+ mov(Operand(edx, expected_parameter_count, times_system_pointer_size, 0),
+ scratch);
+ j(greater, &loop, Label::kNear);
+ }
+
+ // Restore remaining registers.
+ movd(esi, xmm2);
+ movd(edi, xmm1);
+ movd(edx, xmm0);
+
+ jmp(&regular_invoke);
+
+ bind(&stack_overflow);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ CallRuntime(Runtime::kThrowStackOverflow);
+ int3(); // This should be unreachable.
+ }
+#else
cmp(expected_parameter_count, actual_parameter_count);
j(equal, &regular_invoke);
Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
@@ -1137,6 +1242,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
}
+#endif
bind(&regular_invoke);
}
}
@@ -1158,13 +1264,7 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Push(fun);
Push(fun);
// Arguments are located 2 words below the base pointer.
-#ifdef V8_REVERSE_JSARGS
Operand receiver_op = Operand(ebp, kSystemPointerSize * 2);
-#else
- Operand receiver_op =
- Operand(ebp, actual_parameter_count, times_system_pointer_size,
- kSystemPointerSize * 2);
-#endif
Push(receiver_op);
CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
@@ -1183,7 +1283,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register actual_parameter_count,
InvokeFlag flag) {
// You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
DCHECK_EQ(function, edi);
DCHECK_IMPLIES(new_target.is_valid(), new_target == edx);
DCHECK(expected_parameter_count == ecx || expected_parameter_count == eax);
@@ -1197,7 +1297,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
push(eax);
cmpb(ExternalReferenceAsOperand(debug_hook_active, eax), Immediate(0));
pop(eax);
- j(not_equal, &debug_hook, Label::kNear);
+ j(not_equal, &debug_hook);
}
bind(&continue_after_hook);
@@ -1225,7 +1325,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
bind(&debug_hook);
CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
actual_parameter_count);
- jmp(&continue_after_hook, Label::kNear);
+ jmp(&continue_after_hook);
bind(&done);
}
@@ -1501,15 +1601,20 @@ void TurboAssembler::Psignd(XMMRegister dst, Operand src) {
FATAL("no AVX or SSE3 support");
}
-void TurboAssembler::Pshufb(XMMRegister dst, Operand src) {
+void TurboAssembler::Pshufb(XMMRegister dst, XMMRegister src, Operand mask) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
- vpshufb(dst, dst, src);
+ vpshufb(dst, src, mask);
return;
}
if (CpuFeatures::IsSupported(SSSE3)) {
+ // Make sure dst and mask are different so that we don't overwrite the mask.
+ DCHECK(!mask.is_reg(dst));
CpuFeatureScope sse_scope(this, SSSE3);
- pshufb(dst, src);
+ if (dst != src) {
+ movapd(dst, src);
+ }
+ pshufb(dst, mask);
return;
}
FATAL("no AVX or SSE3 support");
@@ -1884,8 +1989,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
Builtins::IsIsolateIndependentBuiltin(*code_object));
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index)) {
// Inline the trampoline.
CallBuiltin(builtin_index);
return;
@@ -1988,8 +2092,7 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
Builtins::IsIsolateIndependentBuiltin(*code_object));
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
@@ -2089,13 +2192,15 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
}
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- Label* exit, DeoptimizeKind kind) {
+void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
+ Label* exit, DeoptimizeKind kind,
+ Label*) {
+ CallBuiltin(target);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
USE(exit, kind);
- NoRootArrayScope no_root_array(this);
- // Save the deopt id in ebx (we don't need the roots array from now on).
- mov(ebx, deopt_id);
- call(target, RelocInfo::RUNTIME_ENTRY);
}
void TurboAssembler::Trap() { int3(); }
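
The StackOverflowCheck added above arranges the operands so that one signed
comparison decides the outcome. A scalar restatement of that arithmetic, as a
sketch for exposition only (not V8 code):

  #include <cstdint>

  constexpr intptr_t kSystemPointerSize = sizeof(void*);

  bool WouldOverflow(uintptr_t sp, uintptr_t real_stack_limit, int num_args,
                     bool include_receiver) {
    // scratch = limit - sp: negative while there is space left on the stack.
    intptr_t scratch = static_cast<intptr_t>(real_stack_limit - sp);
    // Add the space the arguments would consume...
    scratch += num_args * kSystemPointerSize;
    // ...plus one extra word for the receiver, when requested.
    if (include_receiver) scratch += kSystemPointerSize;
    // A positive result means the arguments do not fit above the limit.
    return scratch > 0;
  }
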
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index 72d574f14c..33635b09c5 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -24,6 +24,10 @@ using MemOperand = Operand;
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+// TODO(victorgomes): Move definition to macro-assembler.h, once all other
+// platforms are updated.
+enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
// Convenient class to access arguments below the stack pointer.
class StackArgumentsAccessor {
public:
@@ -130,8 +134,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Trap() override;
void DebugBreak() override;
- void CallForDeoptimization(Address target, int deopt_id, Label* exit,
- DeoptimizeKind kind);
+ void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label);
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label,
@@ -479,8 +484,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef AVX_OP3_XO_SSE4
#undef AVX_OP3_WITH_TYPE_SCOPE
- void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, Operand(src)); }
- void Pshufb(XMMRegister dst, Operand src);
+ void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, dst, src); }
+ void Pshufb(XMMRegister dst, Operand src) { Pshufb(dst, dst, src); }
+ // Handles SSE and AVX. On SSE, moves src to dst if they are not equal.
+ void Pshufb(XMMRegister dst, XMMRegister src, XMMRegister mask) {
+ Pshufb(dst, src, Operand(mask));
+ }
+ void Pshufb(XMMRegister dst, XMMRegister src, Operand mask);
void Pblendw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
Pblendw(dst, Operand(src), imm8);
}
@@ -834,6 +844,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void IncrementCounter(StatsCounter* counter, int value, Register scratch);
void DecrementCounter(StatsCounter* counter, int value, Register scratch);
+ // ---------------------------------------------------------------------------
+ // Stack limit utilities
+ void CompareStackLimit(Register with, StackLimitKind kind);
+ void StackOverflowCheck(Register num_args, Register scratch,
+ Label* stack_overflow, bool include_receiver = false);
+
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
}
@@ -854,7 +870,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class StandardFrame;
+ friend class CommonFrame;
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
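
The three-operand Pshufb wrappers declared above emit the SSSE3/AVX byte
shuffle. As a reference for the lane semantics, here is a scalar model of
what the emitted PSHUFB computes; it is illustrative only and assumes dst
does not alias src or mask:

  #include <cstdint>

  void PshufbModel(uint8_t dst[16], const uint8_t src[16],
                   const uint8_t mask[16]) {
    for (int i = 0; i < 16; ++i) {
      // A set top bit zeroes the destination byte; otherwise the low four
      // bits of the mask select the source byte.
      dst[i] = (mask[i] & 0x80) ? 0 : src[mask[i] & 0x0F];
    }
  }
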
diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
index 8a6235fa08..79dad84077 100644
--- a/deps/v8/src/codegen/interface-descriptors.cc
+++ b/deps/v8/src/codegen/interface-descriptors.cc
@@ -446,5 +446,45 @@ void BigIntToI32PairDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, kParameterCount);
}
+void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 3);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index d307502276..f086f23960 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -111,8 +111,7 @@ enum class StackArgumentOrder {
kJS, // Arguments in the stack are pushed in the same order as the one used
// by JS-to-JS function calls. This should be used if calling a
// JSFunction or if the builtin is expected to be called directly from a
- // JSFunction. When V8_REVERSE_JSARGS is set, this order is reversed
- // compared to kDefault.
+ // JSFunction. This order is reversed compared to kDefault.
};
class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
@@ -506,9 +505,7 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
##__VA_ARGS__)
// When the extra arguments described here are located in the stack, they are
-// just above the return address in the frame. Therefore, they are either the
-// first arguments when V8_REVERSE_JSARGS is enabled, or otherwise the last
-// arguments.
+// just above the return address in the frame (first arguments).
#define DEFINE_JS_PARAMETERS(...) \
static constexpr int kDescriptorFlags = \
CallInterfaceDescriptorData::kAllowVarArgs; \
@@ -596,6 +593,12 @@ using DummyDescriptor = VoidDescriptor;
// Dummy descriptor that marks builtins with C calling convention.
using CCallDescriptor = VoidDescriptor;
+// Marks deoptimization entry builtins. Precise calling conventions currently
+// differ based on the platform.
+// TODO(jgruber): Once this is unified, we could create a better description
+// here.
+using DeoptimizationEntryDescriptor = VoidDescriptor;
+
class AllocateDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kRequestedSize)
@@ -1156,7 +1159,6 @@ class ArrayNoArgumentConstructorDescriptor
ArrayNArgumentsConstructorDescriptor)
};
-#ifdef V8_REVERSE_JSARGS
class ArraySingleArgumentConstructorDescriptor
: public ArrayNArgumentsConstructorDescriptor {
public:
@@ -1174,25 +1176,6 @@ class ArraySingleArgumentConstructorDescriptor
DECLARE_DESCRIPTOR(ArraySingleArgumentConstructorDescriptor,
ArrayNArgumentsConstructorDescriptor)
};
-#else
-class ArraySingleArgumentConstructorDescriptor
- : public ArrayNArgumentsConstructorDescriptor {
- public:
- // This descriptor declares same register arguments as the parent
- // ArrayNArgumentsConstructorDescriptor and it declares indices for
- // JS arguments passed on the expression stack.
- DEFINE_PARAMETERS(kFunction, kAllocationSite, kActualArgumentsCount,
- kReceiverParameter, kArraySizeSmiParameter)
- DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction
- MachineType::AnyTagged(), // kAllocationSite
- MachineType::Int32(), // kActualArgumentsCount
- // JS arguments on the stack
- MachineType::AnyTagged(), // kReceiverParameter
- MachineType::AnyTagged()) // kArraySizeSmiParameter
- DECLARE_DESCRIPTOR(ArraySingleArgumentConstructorDescriptor,
- ArrayNArgumentsConstructorDescriptor)
-};
-#endif
class CompareDescriptor : public CallInterfaceDescriptor {
public:
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index 19a514b2d9..df46577db7 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -307,6 +307,15 @@ Assembler::Assembler(const AssemblerOptions& options,
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+ // (larger) kCodeAlignment anyways.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
EmitForbiddenSlotInstruction();
int code_comments_size = WriteCodeComments();
@@ -3550,6 +3559,7 @@ void Assembler::GrowBuffer() {
buffer_ = std::move(new_buffer);
buffer_start_ = new_start;
pc_ += pc_delta;
+ last_call_pc_ += pc_delta;
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
diff --git a/deps/v8/src/codegen/mips/interface-descriptors-mips.cc b/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
index 132811a173..75835e607c 100644
--- a/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
@@ -304,54 +304,6 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 3);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index f9a0f7f076..37a6acadfe 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -2087,11 +2087,15 @@ void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
int32_t kFloat32MantissaBits = 23;
int32_t kFloat32ExponentBits = 8;
Label done;
+ if (!IsDoubleZeroRegSet()) {
+ Move(kDoubleRegZero, 0.0);
+ }
mfc1(scratch, src);
Ext(at, scratch, kFloat32MantissaBits, kFloat32ExponentBits);
Branch(USE_DELAY_SLOT, &done, hs, at,
Operand(kFloat32ExponentBias + kFloat32MantissaBits));
- mov_s(dst, src);
+ // Canonicalize the result.
+ sub_s(dst, src, kDoubleRegZero);
round(this, dst, src);
mfc1(at, dst);
Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
@@ -4110,9 +4114,19 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
}
void TurboAssembler::DropAndRet(int drop) {
- DCHECK(is_int16(drop * kPointerSize));
- Ret(USE_DELAY_SLOT);
- addiu(sp, sp, drop * kPointerSize);
+ int32_t drop_size = drop * kSystemPointerSize;
+ DCHECK(is_int31(drop_size));
+
+ if (is_int16(drop_size)) {
+ Ret(USE_DELAY_SLOT);
+ addiu(sp, sp, drop_size);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, drop_size);
+ Ret(USE_DELAY_SLOT);
+ addu(sp, sp, scratch);
+ }
}
void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
@@ -4373,23 +4387,107 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
mov(sp, dst_reg);
}
+void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ CHECK(is_int32(offset));
+ Lw(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
+}
+
+void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
+ Register scratch2,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+
+ LoadStackLimit(scratch1, StackLimitKind::kRealStackLimit);
+ // Make scratch1 the space we have left. The stack might already be
+ // overflowed here, which will cause scratch1 to become negative.
+ subu(scratch1, sp, scratch1);
+ // Check if the arguments will overflow the stack.
+ sll(scratch2, num_args, kPointerSizeLog2);
+ // Signed comparison.
+ Branch(stack_overflow, le, scratch1, Operand(scratch2));
+}
+
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
Label* done, InvokeFlag flag) {
Label regular_invoke;
- // Check whether the expected and actual arguments count match. The
- // registers are set up according to contract with
- // ArgumentsAdaptorTrampoline:
// a0: actual arguments count
// a1: function (passed through to callee)
// a2: expected arguments count
- // The code below is made a lot easier because the calling code already sets
- // up actual and expected registers according to the contract.
DCHECK_EQ(actual_parameter_count, a0);
DCHECK_EQ(expected_parameter_count, a2);
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // If the expected parameter count is equal to the adaptor sentinel, there is
+ // no need to push undefined values as arguments.
+ Branch(&regular_invoke, eq, expected_parameter_count,
+ Operand(kDontAdaptArgumentsSentinel));
+
+ // In the case of overapplication, or if the actual argument count equals the
+ // formal parameter count, there is no need to push extra undefined values.
+ Subu(expected_parameter_count, expected_parameter_count,
+ actual_parameter_count);
+ Branch(&regular_invoke, le, expected_parameter_count, Operand(zero_reg));
+
+ Label stack_overflow;
+ StackOverflowCheck(expected_parameter_count, t0, t1, &stack_overflow);
+ // Underapplication. Move the arguments already in the stack, including the
+ // receiver and the return address.
+ {
+ Label copy;
+ Register src = t3, dest = t4;
+ mov(src, sp);
+ sll(t0, expected_parameter_count, kSystemPointerSizeLog2);
+ Subu(sp, sp, Operand(t0));
+ // Update stack pointer.
+ mov(dest, sp);
+ mov(t0, a0);
+ bind(&copy);
+ Lw(t1, MemOperand(src, 0));
+ Sw(t1, MemOperand(dest, 0));
+ Subu(t0, t0, Operand(1));
+ Addu(src, src, Operand(kSystemPointerSize));
+ Addu(dest, dest, Operand(kSystemPointerSize));
+ Branch(&copy, ge, t0, Operand(zero_reg));
+ }
+
+ // Fill remaining expected arguments with undefined values.
+ LoadRoot(t0, RootIndex::kUndefinedValue);
+ {
+ Label loop;
+ bind(&loop);
+ Sw(t0, MemOperand(t4, 0));
+ Subu(expected_parameter_count, expected_parameter_count, Operand(1));
+ Addu(t4, t4, Operand(kSystemPointerSize));
+ Branch(&loop, gt, expected_parameter_count, Operand(zero_reg));
+ }
+ b(&regular_invoke);
+ nop();
+
+ bind(&stack_overflow);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ CallRuntime(Runtime::kThrowStackOverflow);
+ break_(0xCC);
+ }
+#else
+ // Check whether the expected and actual argument counts match. The registers
+ // are set up according to the contract with ArgumentsAdaptorTrampoline:
Branch(&regular_invoke, eq, expected_parameter_count,
Operand(actual_parameter_count));
@@ -4400,7 +4498,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
}
-
+#endif
bind(&regular_invoke);
}
@@ -5508,17 +5606,18 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
li(kSpeculationPoisonRegister, -1);
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- Label* exit, DeoptimizeKind kind) {
+void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
+ Label* exit, DeoptimizeKind kind,
+ Label*) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Lw(t9,
+ MemOperand(kRootRegister, IsolateData::builtin_entry_slot_offset(target)));
+ Call(t9);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
USE(exit, kind);
- NoRootArrayScope no_root_array(this);
-
- // Save the deopt id in kRootRegister (we don't need the roots array from now
- // on).
- DCHECK_LE(deopt_id, 0xFFFF);
- li(kRootRegister, deopt_id);
-
- Call(target, RelocInfo::RUNTIME_ENTRY);
}
} // namespace internal
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index cafcc42941..d91a4a7bb8 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -237,8 +237,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- void CallForDeoptimization(Address target, int deopt_id, Label* exit,
- DeoptimizeKind kind);
+ void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label);
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
@@ -252,8 +253,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
const Operand& op = Operand(no_reg));
- // Trivial case of DropAndRet that utilizes the delay slot and only emits
- // 2 instructions.
+ // Trivial case of DropAndRet that utilizes the delay slot.
void DropAndRet(int drop);
void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);
@@ -914,21 +914,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// TODO(victorgomes): Remove this function once we stick with the reversed
// arguments order.
void LoadReceiver(Register dest, Register argc) {
-#ifdef V8_REVERSE_JSARGS
Lw(dest, MemOperand(sp, 0));
-#else
- Lsa(dest, sp, argc, kPointerSizeLog2);
- Lw(dest, MemOperand(dest, 0));
-#endif
}
void StoreReceiver(Register rec, Register argc, Register scratch) {
-#ifdef V8_REVERSE_JSARGS
Sw(rec, MemOperand(sp, 0));
-#else
- Lsa(scratch, sp, argc, kPointerSizeLog2);
- Sw(rec, MemOperand(scratch, 0));
-#endif
}
// Swap two registers. If the scratch register is omitted then a slightly
@@ -1106,6 +1096,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register scratch2);
// -------------------------------------------------------------------------
+ // Stack limit utilities
+
+ enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+ void LoadStackLimit(Register destination, StackLimitKind kind);
+ void StackOverflowCheck(Register num_args, Register scratch1,
+ Register scratch2, Label* stack_overflow);
+
+ // ---------------------------------------------------------------------------
// Smi utilities.
void SmiTag(Register reg) { Addu(reg, reg, reg); }
@@ -1164,7 +1162,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class StandardFrame;
+ friend class CommonFrame;
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index b64005155d..3b16805f53 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -283,6 +283,15 @@ Assembler::Assembler(const AssemblerOptions& options,
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+ // (larger) kCodeAlignment anyways.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
EmitForbiddenSlotInstruction();
int code_comments_size = WriteCodeComments();
@@ -869,8 +878,10 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
Instr instr_branch_delay;
if (IsJump(instr_j)) {
- instr_branch_delay = instr_at(pos + 6 * kInstrSize);
+    // Case when the branch delay slot is protected.
+ instr_branch_delay = nopInstr;
} else {
+    // Case when the branch delay slot is used.
instr_branch_delay = instr_at(pos + 7 * kInstrSize);
}
instr_at_put(pos, instr_b);
@@ -3746,6 +3757,7 @@ void Assembler::GrowBuffer() {
buffer_ = std::move(new_buffer);
buffer_start_ = new_start;
pc_ += pc_delta;
+ last_call_pc_ += pc_delta;
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
diff --git a/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc b/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
index 4014607007..f77d8d4130 100644
--- a/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
@@ -304,54 +304,6 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 3);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 509153e6c2..249fc9126b 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -2509,11 +2509,15 @@ void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
ctc1(scratch, FCSR);
} else {
Label done;
+ if (!IsDoubleZeroRegSet()) {
+ Move(kDoubleRegZero, 0.0);
+ }
mfhc1(scratch, src);
Ext(at, scratch, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
Branch(USE_DELAY_SLOT, &done, hs, at,
Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits));
- mov_d(dst, src);
+ // Canonicalize the result.
+ sub_d(dst, src, kDoubleRegZero);
round(this, dst, src);
dmfc1(at, dst);
Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
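The switch from mov_d to sub_d above matters for NaN inputs: a register move copies bit patterns verbatim, while an arithmetic operation with +0.0 yields a canonical quiet NaN and is an identity for every other value (including -0.0 under round-to-nearest). A small host-side sketch of the same idea (illustrative only; hardware NaN payload behavior varies):

    #include <cmath>
    #include <cstdio>

    int main() {
      const double values[] = {1.5, -0.0, std::nan("")};
      for (double x : values) {
        double y = x - 0.0;  // identity for ordinary values, canonicalizes NaNs
        std::printf("%g -> %g\n", x, y);
      }
    }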
@@ -2569,11 +2573,15 @@ void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
int32_t kFloat32MantissaBits = 23;
int32_t kFloat32ExponentBits = 8;
Label done;
+ if (!IsDoubleZeroRegSet()) {
+ Move(kDoubleRegZero, 0.0);
+ }
mfc1(scratch, src);
Ext(at, scratch, kFloat32MantissaBits, kFloat32ExponentBits);
Branch(USE_DELAY_SLOT, &done, hs, at,
Operand(kFloat32ExponentBias + kFloat32MantissaBits));
- mov_s(dst, src);
+ // Canonicalize the result.
+ sub_s(dst, src, kDoubleRegZero);
round(this, dst, src);
mfc1(at, dst);
Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
@@ -4448,9 +4456,19 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
}
void TurboAssembler::DropAndRet(int drop) {
- DCHECK(is_int16(drop * kPointerSize));
- Ret(USE_DELAY_SLOT);
- daddiu(sp, sp, drop * kPointerSize);
+ int32_t drop_size = drop * kSystemPointerSize;
+ DCHECK(is_int31(drop_size));
+
+ if (is_int16(drop_size)) {
+ Ret(USE_DELAY_SLOT);
+ daddiu(sp, sp, drop_size);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, drop_size);
+ Ret(USE_DELAY_SLOT);
+ daddu(sp, sp, scratch);
+ }
}
void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
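The rewritten mips64 DropAndRet above splits on whether the byte count fits the signed 16-bit immediate of daddiu; larger frames go through a scratch register. A standalone sketch of that range check (assumes an 8-byte pointer size; not V8 code):

    #include <cstdint>
    #include <cstdio>

    constexpr int kSystemPointerSize = 8;  // assumption: 64-bit target

    bool is_int16(int32_t v) { return v >= -32768 && v <= 32767; }

    int main() {
      const int drops[] = {4, 4096, 100000};
      for (int drop : drops) {
        int32_t drop_size = drop * kSystemPointerSize;
        std::printf("drop=%-6d bytes=%-7d -> %s\n", drop, drop_size,
                    is_int16(drop_size) ? "daddiu immediate"
                                        : "li scratch + daddu");
      }
    }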
@@ -4714,23 +4732,108 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
mov(sp, dst_reg);
}
+void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ CHECK(is_int32(offset));
+ Ld(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
+}
+
+void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
+ Register scratch2,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+
+ LoadStackLimit(scratch1, StackLimitKind::kRealStackLimit);
+  // Make scratch1 the space we have left. The stack might already have
+  // overflowed here, which will cause scratch1 to become negative.
+ dsubu(scratch1, sp, scratch1);
+ // Check if the arguments will overflow the stack.
+ dsll(scratch2, num_args, kPointerSizeLog2);
+ // Signed comparison.
+ Branch(stack_overflow, le, scratch1, Operand(scratch2));
+}
+
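StackOverflowCheck computes the space left below sp relative to the real (non-interrupt) limit and compares it, signed, against the bytes the arguments will need; the signed compare is what makes an already-overflowed stack, with negative remaining space, take the overflow branch. A host-side model of the arithmetic, under the assumption of 8-byte slots (not V8 code):

    #include <cstdint>
    #include <cstdio>

    constexpr int kPointerSizeLog2 = 3;  // assumption: 8-byte stack slots

    bool WouldOverflow(uintptr_t sp, uintptr_t real_limit, int64_t num_args) {
      // Space left; negative if the stack has already overflowed.
      int64_t remaining = static_cast<int64_t>(sp - real_limit);
      int64_t needed = num_args << kPointerSizeLog2;
      return remaining <= needed;  // signed, like the Branch(le, ...) above
    }

    int main() {
      std::printf("%d\n", (int)WouldOverflow(0x9000, 0x8000, 100));  // 0: fits
      std::printf("%d\n", (int)WouldOverflow(0x9000, 0x8000, 600));  // 1: overflow
    }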
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
Label* done, InvokeFlag flag) {
Label regular_invoke;
- // Check whether the expected and actual arguments count match. The registers
- // are set up according to contract with ArgumentsAdaptorTrampoline:
// a0: actual arguments count
// a1: function (passed through to callee)
// a2: expected arguments count
- // The code below is made a lot easier because the calling code already sets
- // up actual and expected registers according to the contract.
-
DCHECK_EQ(actual_parameter_count, a0);
DCHECK_EQ(expected_parameter_count, a2);
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+  // If the expected parameter count is equal to the adaptor sentinel, no need
+  // to push undefined values as arguments.
+ Branch(&regular_invoke, eq, expected_parameter_count,
+ Operand(kDontAdaptArgumentsSentinel));
+
+  // In case of overapplication, or if the actual argument count equals the
+  // formal parameter count, there is no need to push extra undefined values.
+ Dsubu(expected_parameter_count, expected_parameter_count,
+ actual_parameter_count);
+ Branch(&regular_invoke, le, expected_parameter_count, Operand(zero_reg));
+
+ Label stack_overflow;
+ StackOverflowCheck(expected_parameter_count, t0, t1, &stack_overflow);
+  // Underapplication. Move the arguments already on the stack, including the
+ // receiver and the return address.
+ {
+ Label copy;
+ Register src = a6, dest = a7;
+ mov(src, sp);
+ dsll(t0, expected_parameter_count, kSystemPointerSizeLog2);
+ Dsubu(sp, sp, Operand(t0));
+    // The updated stack pointer is the destination of the copy.
+ mov(dest, sp);
+ mov(t0, actual_parameter_count);
+ bind(&copy);
+ Ld(t1, MemOperand(src, 0));
+ Sd(t1, MemOperand(dest, 0));
+ Dsubu(t0, t0, Operand(1));
+ Daddu(src, src, Operand(kSystemPointerSize));
+ Daddu(dest, dest, Operand(kSystemPointerSize));
+ Branch(&copy, ge, t0, Operand(zero_reg));
+ }
+
+ // Fill remaining expected arguments with undefined values.
+ LoadRoot(t0, RootIndex::kUndefinedValue);
+ {
+ Label loop;
+ bind(&loop);
+ Sd(t0, MemOperand(a7, 0));
+ Dsubu(expected_parameter_count, expected_parameter_count, Operand(1));
+ Daddu(a7, a7, Operand(kSystemPointerSize));
+ Branch(&loop, gt, expected_parameter_count, Operand(zero_reg));
+ }
+ b(&regular_invoke);
+ nop();
+
+ bind(&stack_overflow);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ CallRuntime(Runtime::kThrowStackOverflow);
+ break_(0xCC);
+ }
+#else
+  // Check whether the expected and actual argument counts match. The registers
+ // are set up according to contract with ArgumentsAdaptorTrampoline:
+
Branch(&regular_invoke, eq, expected_parameter_count,
Operand(actual_parameter_count));
@@ -4741,7 +4844,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
}
-
+#endif
bind(&regular_invoke);
}
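With V8_NO_ARGUMENTS_ADAPTOR, under-application is handled inline: the existing stack slots are copied down to a lowered sp and the remaining expected slots are filled with undefined, so the callee always sees its full formal parameter count. A simplified vector-based model of the copy-and-fill (kUndefined is a hypothetical stand-in; the real code also moves the receiver):

    #include <cstdio>
    #include <vector>

    constexpr int kUndefined = -1;  // hypothetical stand-in for undefined

    std::vector<int> PadArguments(const std::vector<int>& actual, int expected) {
      std::vector<int> frame(actual);                    // copy loop
      while (static_cast<int>(frame.size()) < expected)  // fill loop
        frame.push_back(kUndefined);
      return frame;
    }

    int main() {
      for (int v : PadArguments({10, 20}, 5)) std::printf("%d ", v);
      std::printf("\n");  // prints: 10 20 -1 -1 -1
    }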
@@ -5864,16 +5967,18 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
li(kSpeculationPoisonRegister, -1);
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- Label* exit, DeoptimizeKind kind) {
+void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
+ Label* exit, DeoptimizeKind kind,
+ Label*) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Ld(t9,
+ MemOperand(kRootRegister, IsolateData::builtin_entry_slot_offset(target)));
+ Call(t9);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
USE(exit, kind);
- NoRootArrayScope no_root_array(this);
-
- // Save the deopt id in kRootRegister (we don't need the roots array from now
- // on).
- DCHECK_LE(deopt_id, 0xFFFF);
- li(kRootRegister, deopt_id);
- Call(target, RelocInfo::RUNTIME_ENTRY);
}
} // namespace internal
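Deoptimization calls now index a per-isolate table of builtin entry points addressed off the root register, instead of materializing a runtime-entry address and stashing a deopt id in a register; the DCHECK pins the exit sequence to a fixed size so the deoptimizer can locate exits by index. A toy model of the table-indexed call (layout and names are illustrative, not V8's IsolateData):

    #include <cstdio>

    using BuiltinEntry = void (*)();

    void EagerDeoptStub() { std::puts("eager deopt builtin"); }
    void LazyDeoptStub()  { std::puts("lazy deopt builtin"); }

    struct IsolateDataModel {
      BuiltinEntry builtin_entries[2];  // hypothetical two-entry table
    };

    int main() {
      IsolateDataModel data{{EagerDeoptStub, LazyDeoptStub}};
      int slot = 1;                  // index selected by the DeoptimizeKind
      data.builtin_entries[slot]();  // models: Ld t9, [root + offset]; Call t9
    }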
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index 56380cc8b2..a0d5e59bf0 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -260,8 +260,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- void CallForDeoptimization(Address target, int deopt_id, Label* exit,
- DeoptimizeKind kind);
+ void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label);
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
@@ -275,8 +276,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
const Operand& op = Operand(no_reg));
- // Trivial case of DropAndRet that utilizes the delay slot and only emits
- // 2 instructions.
+ // Trivial case of DropAndRet that utilizes the delay slot.
void DropAndRet(int drop);
void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);
@@ -921,21 +921,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// TODO(victorgomes): Remove this function once we stick with the reversed
// arguments order.
void LoadReceiver(Register dest, Register argc) {
-#ifdef V8_REVERSE_JSARGS
Ld(dest, MemOperand(sp, 0));
-#else
- Dlsa(dest, sp, argc, kPointerSizeLog2);
- Ld(dest, MemOperand(dest, 0));
-#endif
}
void StoreReceiver(Register rec, Register argc, Register scratch) {
-#ifdef V8_REVERSE_JSARGS
Sd(rec, MemOperand(sp, 0));
-#else
- Dlsa(scratch, sp, argc, kPointerSizeLog2);
- Sd(rec, MemOperand(scratch, 0));
-#endif
}
bool IsNear(Label* L, Condition cond, int rs_reg);
@@ -1150,6 +1140,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register scratch2);
// -------------------------------------------------------------------------
+ // Stack limit utilities
+
+ enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+ void LoadStackLimit(Register destination, StackLimitKind kind);
+ void StackOverflowCheck(Register num_args, Register scratch1,
+ Register scratch2, Label* stack_overflow);
+
+ // ---------------------------------------------------------------------------
// Smi utilities.
void SmiTag(Register dst, Register src) {
@@ -1228,7 +1226,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class StandardFrame;
+ friend class CommonFrame;
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index b2c100aa05..bf45a5f38b 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -80,11 +80,12 @@ void OptimizedCompilationInfo::ConfigureFlags() {
if (FLAG_untrusted_code_mitigations) set_untrusted_code_mitigations();
switch (code_kind_) {
- case CodeKind::OPTIMIZED_FUNCTION:
+ case CodeKind::TURBOFAN:
if (FLAG_function_context_specialization) {
set_function_context_specializing();
}
V8_FALLTHROUGH;
+ case CodeKind::TURBOPROP:
case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
set_called_with_code_start_register();
set_switch_jump_table();
@@ -98,7 +99,7 @@ void OptimizedCompilationInfo::ConfigureFlags() {
if (FLAG_turbo_splitting) set_splitting();
break;
case CodeKind::BUILTIN:
- case CodeKind::STUB:
+ case CodeKind::FOR_TESTING:
if (FLAG_turbo_splitting) set_splitting();
#if ENABLE_GDB_JIT_INTERFACE && DEBUG
set_source_positions();
@@ -160,7 +161,7 @@ std::unique_ptr<char[]> OptimizedCompilationInfo::GetDebugName() const {
StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const {
switch (code_kind()) {
- case CodeKind::STUB:
+ case CodeKind::FOR_TESTING:
case CodeKind::BYTECODE_HANDLER:
case CodeKind::BUILTIN:
return StackFrame::STUB;
diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index 4de8ba1645..6e238d6239 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -149,7 +149,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
bool IsNativeContextIndependent() const {
return code_kind() == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
}
- bool IsStub() const { return code_kind() == CodeKind::STUB; }
+ bool IsTurboprop() const { return code_kind() == CodeKind::TURBOPROP; }
bool IsWasm() const { return code_kind() == CodeKind::WASM_FUNCTION; }
void SetOptimizingForOsr(BailoutId osr_offset, JavaScriptFrame* osr_frame) {
@@ -299,11 +299,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// 1) PersistentHandles created via PersistentHandlesScope inside of
// CompilationHandleScope
// 2) Owned by OptimizedCompilationInfo
- // 3) Owned by JSHeapBroker
- // 4) Owned by the broker's LocalHeap
- // 5) Back to the broker for a brief moment (after tearing down the
- // LocalHeap as part of exiting LocalHeapScope)
- // 6) Back to OptimizedCompilationInfo when exiting the LocalHeapScope.
+ // 3) Owned by the broker's LocalHeap when entering the LocalHeapScope.
+ // 4) Back to OptimizedCompilationInfo when exiting the LocalHeapScope.
//
// In normal execution it gets destroyed when PipelineData gets destroyed.
// There is a special case in GenerateCodeForTesting where the JSHeapBroker
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index 37a53b49f2..54136a9f2b 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -248,6 +248,15 @@ Assembler::Assembler(const AssemblerOptions& options,
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+  // (larger) kCodeAlignment anyway.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
// Emit constant pool if necessary.
int constant_pool_size = EmitConstantPool();
@@ -1777,6 +1786,12 @@ void Assembler::mtvsrd(const Simd128Register rt, const Register ra) {
emit(MTVSRD | rt.code() * B21 | ra.code() * B16 | TX);
}
+void Assembler::mtvsrdd(const Simd128Register rt, const Register ra,
+ const Register rb) {
+ int TX = 1;
+ emit(MTVSRDD | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | TX);
+}
+
void Assembler::lxvd(const Simd128Register rt, const MemOperand& src) {
int TX = 1;
emit(LXVD | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |
@@ -1789,6 +1804,11 @@ void Assembler::stxvd(const Simd128Register rt, const MemOperand& dst) {
SX);
}
+void Assembler::xxspltib(const Simd128Register rt, const Operand& imm) {
+ int TX = 1;
+ emit(XXSPLTIB | rt.code() * B21 | imm.immediate() * B11 | TX);
+}
+
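The new PPC emitters build the instruction word by shifting register codes into fixed bit fields (B21, B16, B11) and OR-ing in the opcode. A standalone sketch of that packing for mtvsrdd (constants mirror the scheme, not V8's headers):

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t B11 = 1u << 11, B16 = 1u << 16, B21 = 1u << 21;

    uint32_t EncodeMtvsrdd(int rt, int ra, int rb, int tx) {
      const uint32_t kOpcode = 0x7C000366;  // MTVSRDD, from the opcode table
      return kOpcode | rt * B21 | ra * B16 | rb * B11 | tx;
    }

    int main() {
      std::printf("%#010x\n", EncodeMtvsrdd(1, 2, 3, 1));
    }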
// Pseudo instructions.
void Assembler::nop(int type) {
Register reg = r0;
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index f26a3c89c9..11497c90ce 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -1019,8 +1019,10 @@ class Assembler : public AssemblerBase {
void mfvsrd(const Register ra, const Simd128Register r);
void mfvsrwz(const Register ra, const Simd128Register r);
void mtvsrd(const Simd128Register rt, const Register ra);
+ void mtvsrdd(const Simd128Register rt, const Register ra, const Register rb);
void lxvd(const Simd128Register rt, const MemOperand& src);
void stxvd(const Simd128Register rt, const MemOperand& src);
+ void xxspltib(const Simd128Register rt, const Operand& imm);
// Pseudo instructions
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index 306175e06d..f71d1beae3 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -414,6 +414,8 @@ using Instr = uint32_t;
V(xssqrtsp, XSSQRTSP, 0xF000002C) \
/* Move To VSR Doubleword */ \
V(mtvsrd, MTVSRD, 0x7C000166) \
+ /* Move To VSR Double Doubleword */ \
+ V(mtvsrdd, MTVSRDD, 0x7C000366) \
/* Move To VSR Word Algebraic */ \
V(mtvsrwa, MTVSRWA, 0x7C0001A6) \
/* Move To VSR Word and Zero */ \
@@ -1930,7 +1932,9 @@ using Instr = uint32_t;
/* Vector Multiply-Low-Add Unsigned Halfword Modulo */ \
V(vmladduhm, VMLADDUHM, 0x10000022) \
/* Vector Select */ \
- V(vsel, VSEL, 0x1000002A)
+ V(vsel, VSEL, 0x1000002A) \
+ /* Vector Multiply-Sum Signed Halfword Modulo */ \
+ V(vmsumshm, VMSUMSHM, 0x10000028)
#define PPC_VA_OPCODE_UNUSED_LIST(V) \
/* Vector Add Extended & write Carry Unsigned Quadword */ \
@@ -1945,8 +1949,6 @@ using Instr = uint32_t;
V(vmhraddshs, VMHRADDSHS, 0x10000021) \
/* Vector Multiply-Sum Mixed Byte Modulo */ \
V(vmsummbm, VMSUMMBM, 0x10000025) \
- /* Vector Multiply-Sum Signed Halfword Modulo */ \
- V(vmsumshm, VMSUMSHM, 0x10000028) \
/* Vector Multiply-Sum Signed Halfword Saturate */ \
V(vmsumshs, VMSUMSHS, 0x10000029) \
/* Vector Multiply-Sum Unsigned Byte Modulo */ \
@@ -1998,7 +2000,9 @@ using Instr = uint32_t;
/* Store VSR Vector Doubleword*2 Indexed */ \
V(stxvd, STXVD, 0x7C000798) \
/* Store VSR Vector Word*4 Indexed */ \
- V(stxvw, STXVW, 0x7C000718)
+ V(stxvw, STXVW, 0x7C000718) \
+ /* Vector Splat Immediate Byte */ \
+ V(xxspltib, XXSPLTIB, 0xF00002D1)
#define PPC_B_OPCODE_LIST(V) \
/* Branch Conditional */ \
@@ -2202,13 +2206,29 @@ using Instr = uint32_t;
/* Rotate Left Word then AND with Mask */ \
V(rlwnm, RLWNMX, 0x5C000000)
-#define PPC_VX_OPCODE_A_FORM_LIST(V) \
- /* Vector Splat Byte */ \
- V(vspltb, VSPLTB, 0x1000020C) \
- /* Vector Splat Word */ \
- V(vspltw, VSPLTW, 0x1000028C) \
- /* Vector Splat Halfword */ \
- V(vsplth, VSPLTH, 0x1000024C)
+#define PPC_VX_OPCODE_A_FORM_LIST(V) \
+ /* Vector Splat Byte */ \
+ V(vspltb, VSPLTB, 0x1000020C) \
+ /* Vector Splat Word */ \
+ V(vspltw, VSPLTW, 0x1000028C) \
+ /* Vector Splat Halfword */ \
+ V(vsplth, VSPLTH, 0x1000024C) \
+ /* Vector Extract Unsigned Byte */ \
+ V(vextractub, VEXTRACTUB, 0x1000020D) \
+ /* Vector Extract Unsigned Halfword */ \
+ V(vextractuh, VEXTRACTUH, 0x1000024D) \
+ /* Vector Extract Unsigned Word */ \
+ V(vextractuw, VEXTRACTUW, 0x1000028D) \
+ /* Vector Extract Doubleword */ \
+ V(vextractd, VEXTRACTD, 0x100002CD) \
+ /* Vector Insert Byte */ \
+ V(vinsertb, VINSERTB, 0x1000030D) \
+ /* Vector Insert Halfword */ \
+ V(vinserth, VINSERTH, 0x1000034D) \
+ /* Vector Insert Word */ \
+ V(vinsertw, VINSERTW, 0x1000038D) \
+ /* Vector Insert Doubleword */ \
+ V(vinsertd, VINSERTD, 0x100003CD)
#define PPC_VX_OPCODE_B_FORM_LIST(V) \
/* Vector Logical OR */ \
@@ -2348,7 +2368,9 @@ using Instr = uint32_t;
/* Vector Minimum Single-Precision */ \
V(vminfp, VMINFP, 0x1000044A) \
/* Vector Maximum Single-Precision */ \
- V(vmaxfp, VMAXFP, 0x1000040A)
+ V(vmaxfp, VMAXFP, 0x1000040A) \
+ /* Vector Bit Permute Quadword */ \
+ V(vbpermq, VBPERMQ, 0x1000054C)
#define PPC_VX_OPCODE_C_FORM_LIST(V) \
/* Vector Unpack Low Signed Halfword */ \
@@ -2387,8 +2409,6 @@ using Instr = uint32_t;
V(vavgsw, VAVGSW, 0x10000582) \
/* Vector Average Unsigned Word */ \
V(vavguw, VAVGUW, 0x10000482) \
- /* Vector Bit Permute Quadword */ \
- V(vbpermq, VBPERMQ, 0x1000054C) \
/* Vector Convert From Signed Fixed-Point Word To Single-Precision */ \
V(vcfsx, VCFSX, 0x1000034A) \
/* Vector Convert From Unsigned Fixed-Point Word To Single-Precision */ \
diff --git a/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc b/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
index 4d68e01285..3c2d92237d 100644
--- a/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
@@ -278,54 +278,6 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 3);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 4a526384e0..08955805e6 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -173,9 +173,8 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Builtins::IsIsolateIndependentBuiltin(*code));
int builtin_index = Builtins::kNoBuiltinId;
- bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
+ bool target_is_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
if (root_array_available_ && options().isolate_independent_code) {
Label skip;
@@ -187,8 +186,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Jump(scratch);
bind(&skip);
return;
- } else if (options().inline_offheap_trampolines &&
- target_is_isolate_independent_builtin) {
+ } else if (options().inline_offheap_trampolines && target_is_builtin) {
// Inline the trampoline.
Label skip;
RecordCommentForOffHeapTrampoline(builtin_index);
@@ -264,9 +262,8 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Builtins::IsIsolateIndependentBuiltin(*code));
int builtin_index = Builtins::kNoBuiltinId;
- bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
+ bool target_is_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
if (root_array_available_ && options().isolate_independent_code) {
Label skip;
@@ -277,8 +274,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(ip);
bind(&skip);
return;
- } else if (options().inline_offheap_trampolines &&
- target_is_isolate_independent_builtin) {
+ } else if (options().inline_offheap_trampolines && target_is_builtin) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
EmbeddedData d = EmbeddedData::FromBlob();
@@ -1057,10 +1053,16 @@ void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
void TurboAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address) {
+ // Builtins do not use the constant pool (see is_constant_pool_available).
+ STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
+
+ lwz(r0, MemOperand(code_target_address,
+ Code::kInstructionSizeOffset - Code::kHeaderSize));
lwz(kConstantPoolRegister,
MemOperand(code_target_address,
Code::kConstantPoolOffsetOffset - Code::kHeaderSize));
add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
+ add(kConstantPoolRegister, kConstantPoolRegister, r0);
}
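With on-heap code bodies contiguous, the constant pool sits in the metadata area after the instructions, so the extra load of the instruction size above folds that displacement into the pool pointer. A sketch of the address arithmetic, assuming the pool offset is recorded relative to the metadata start (field layout is illustrative, not V8's Code header):

    #include <cstdint>
    #include <cstdio>

    struct CodeHeaderModel {          // illustrative field layout
      int32_t instruction_size;
      int32_t constant_pool_offset;   // relative to the metadata area
    };

    uintptr_t ConstantPoolAddress(uintptr_t code_start, const CodeHeaderModel& c) {
      return code_start + c.instruction_size + c.constant_pool_offset;
    }

    int main() {
      CodeHeaderModel c{0x400, 0x20};
      std::printf("%#lx\n",
                  static_cast<unsigned long>(ConstantPoolAddress(0x10000, c)));
    }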
void TurboAssembler::LoadPC(Register dst) {
@@ -1076,6 +1078,10 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
}
void TurboAssembler::LoadConstantPoolPointerRegister() {
+  // Builtins do not use the constant pool (see is_constant_pool_available).
+ STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
+
LoadPC(kConstantPoolRegister);
int32_t delta = -pc_offset() + 4;
add_label_offset(kConstantPoolRegister, kConstantPoolRegister,
@@ -3251,16 +3257,17 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
SizeOfCodeGeneratedSince(&start_call));
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- Label* exit, DeoptimizeKind kind) {
+void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
+ Label* exit, DeoptimizeKind kind,
+ Label*) {
+ LoadP(ip, MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(target)));
+ Call(ip);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
USE(exit, kind);
- NoRootArrayScope no_root_array(this);
-
- // Save the deopt id in r29 (we don't need the roots array from now on).
- DCHECK_LE(deopt_id, 0xFFFF);
-
- mov(r29, Operand(deopt_id));
- Call(target, RelocInfo::RUNTIME_ENTRY);
}
void TurboAssembler::ZeroExtByte(Register dst, Register src) {
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index a74985cbe1..db0d6857ac 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -441,8 +441,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void JumpCodeObject(Register code_object) override;
void CallBuiltinByIndex(Register builtin_index) override;
- void CallForDeoptimization(Address target, int deopt_id, Label* exit,
- DeoptimizeKind kind);
+ void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@@ -728,21 +729,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// TODO(victorgomes): Remove this function once we stick with the reversed
// arguments order.
void LoadReceiver(Register dest, Register argc) {
-#ifdef V8_REVERSE_JSARGS
LoadP(dest, MemOperand(sp, 0));
-#else
- ShiftLeftImm(dest, argc, Operand(kSystemPointerSizeLog2));
- LoadPX(dest, MemOperand(sp, dest));
-#endif
}
void StoreReceiver(Register rec, Register argc, Register scratch) {
-#ifdef V8_REVERSE_JSARGS
StoreP(rec, MemOperand(sp, 0));
-#else
- ShiftLeftImm(scratch, argc, Operand(kSystemPointerSizeLog2));
- StorePX(rec, MemOperand(sp, scratch));
-#endif
}
// ---------------------------------------------------------------------------
diff --git a/deps/v8/src/codegen/ppc/register-ppc.h b/deps/v8/src/codegen/ppc/register-ppc.h
index eded9622c4..925dc355a7 100644
--- a/deps/v8/src/codegen/ppc/register-ppc.h
+++ b/deps/v8/src/codegen/ppc/register-ppc.h
@@ -228,7 +228,11 @@ class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
// d14: 0.0
// d15: scratch register.
static constexpr int kSizeInBytes = 8;
- inline static int NumRegisters();
+
+ // This function differs from kNumRegisters by returning the number of double
+ // registers supported by the current CPU, while kNumRegisters always returns
+ // 32.
+ inline static int SupportedRegisterCount();
private:
friend class RegisterBase;
diff --git a/deps/v8/src/codegen/register-configuration.cc b/deps/v8/src/codegen/register-configuration.cc
index 5752b46339..1c48303294 100644
--- a/deps/v8/src/codegen/register-configuration.cc
+++ b/deps/v8/src/codegen/register-configuration.cc
@@ -42,6 +42,8 @@ STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
Simd128Register::kNumRegisters);
+// Callers on architectures other than Arm expect this to be constant
+// between build and runtime. Avoid adding variability on other platforms.
static int get_num_allocatable_double_registers() {
return
#if V8_TARGET_ARCH_IA32
@@ -71,6 +73,8 @@ static int get_num_allocatable_double_registers() {
#undef REGISTER_COUNT
+// Callers on architectures other than Arm expect this to be constant
+// between build and runtime. Avoid adding variability on other platforms.
static const int* get_allocatable_double_codes() {
return
#if V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/codegen/register-configuration.h b/deps/v8/src/codegen/register-configuration.h
index 0521599734..cdf9ddae35 100644
--- a/deps/v8/src/codegen/register-configuration.h
+++ b/deps/v8/src/codegen/register-configuration.h
@@ -29,7 +29,7 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
static constexpr int kMaxGeneralRegisters = 32;
static constexpr int kMaxFPRegisters = 32;
static constexpr int kMaxRegisters =
- Max(kMaxFPRegisters, kMaxGeneralRegisters);
+ std::max(kMaxFPRegisters, kMaxGeneralRegisters);
// Default RegisterConfigurations for the target architecture.
static const RegisterConfiguration* Default();
@@ -57,6 +57,9 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
int num_allocatable_float_registers() const {
return num_allocatable_float_registers_;
}
+  // Caution: this value depends on the current CPU and may change between
+ // build and runtime. At the time of writing, the only architecture with a
+ // variable allocatable double register set is Arm.
int num_allocatable_double_registers() const {
return num_allocatable_double_registers_;
}
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index d984b1e917..7fdc2f374a 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -476,7 +476,7 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
os << " " << Builtins::name(code.builtin_index());
}
os << ") (" << reinterpret_cast<const void*>(target_address()) << ")";
- } else if (IsRuntimeEntry(rmode_) && isolate->deoptimizer_data() != nullptr) {
+ } else if (IsRuntimeEntry(rmode_)) {
// Deoptimization bailouts are stored as runtime entries.
DeoptimizeKind type;
if (Deoptimizer::IsDeoptimizationEntry(isolate, target_address(), &type)) {
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index d96bfd8b84..2e74f029d2 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -370,6 +370,15 @@ Assembler::Assembler(const AssemblerOptions& options,
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+  // (larger) kCodeAlignment anyway.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
EmitRelocations();
int code_comments_size = WriteCodeComments();
diff --git a/deps/v8/src/codegen/s390/interface-descriptors-s390.cc b/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
index 6c56c19b5a..a848cdf27a 100644
--- a/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
@@ -278,54 +278,6 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:8888): Implement on this platform.
- DefaultInitializePlatformSpecific(data, 3);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 5c9fe62dd1..4f63543ad7 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -174,24 +174,17 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Builtins::IsIsolateIndependentBuiltin(*code));
int builtin_index = Builtins::kNoBuiltinId;
- bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
-
- if (options().inline_offheap_trampolines &&
- target_is_isolate_independent_builtin) {
- Label skip;
- if (cond != al) {
- b(NegateCondition(cond), &skip, Label::kNear);
- }
+ bool target_is_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
+
+ if (options().inline_offheap_trampolines && target_is_builtin) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- b(ip);
- bind(&skip);
+ b(cond, ip);
return;
}
jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond);
@@ -242,12 +235,10 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code));
int builtin_index = Builtins::kNoBuiltinId;
- bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index);
+ bool target_is_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
- if (options().inline_offheap_trampolines &&
- target_is_isolate_independent_builtin) {
+ if (target_is_builtin && options().inline_offheap_trampolines) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
@@ -4540,15 +4531,17 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
bind(&return_label);
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- Label* exit, DeoptimizeKind kind) {
+void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
+ Label* exit, DeoptimizeKind kind,
+ Label*) {
+ LoadP(ip, MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(target)));
+ Call(ip);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
USE(exit, kind);
- NoRootArrayScope no_root_array(this);
-
- // Save the deopt id in r10 (we don't need the roots array from now on).
- DCHECK_LE(deopt_id, 0xFFFF);
- lghi(r10, Operand(deopt_id));
- Call(target, RelocInfo::RUNTIME_ENTRY);
}
void TurboAssembler::Trap() { stop(); }
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index f66be8c2ef..f81dfb503b 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -153,8 +153,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Ret() { b(r14); }
void Ret(Condition cond) { b(cond, r14); }
- void CallForDeoptimization(Address target, int deopt_id, Label* exit,
- DeoptimizeKind kind);
+ void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@@ -1072,21 +1073,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// TODO(victorgomes): Remove this function once we stick with the reversed
// arguments order.
void LoadReceiver(Register dest, Register argc) {
-#ifdef V8_REVERSE_JSARGS
LoadP(dest, MemOperand(sp, 0));
-#else
- ShiftLeftP(dest, argc, Operand(kSystemPointerSizeLog2));
- LoadP(dest, MemOperand(sp, dest));
-#endif
}
void StoreReceiver(Register rec, Register argc, Register scratch) {
-#ifdef V8_REVERSE_JSARGS
StoreP(rec, MemOperand(sp, 0));
-#else
- ShiftLeftP(scratch, argc, Operand(kSystemPointerSizeLog2));
- StoreP(rec, MemOperand(sp, scratch));
-#endif
}
void CallRuntime(const Runtime::Function* f, int num_arguments,
diff --git a/deps/v8/src/codegen/s390/register-s390.h b/deps/v8/src/codegen/s390/register-s390.h
index 009248a65c..0c6da03901 100644
--- a/deps/v8/src/codegen/s390/register-s390.h
+++ b/deps/v8/src/codegen/s390/register-s390.h
@@ -186,7 +186,11 @@ class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
// d14: 0.0
// d15: scratch register.
static constexpr int kSizeInBytes = 8;
- inline static int NumRegisters();
+
+ // This function differs from kNumRegisters by returning the number of double
+ // registers supported by the current CPU, while kNumRegisters always returns
+ // 32.
+ inline static int SupportedRegisterCount();
private:
friend class RegisterBase;
diff --git a/deps/v8/src/codegen/safepoint-table.cc b/deps/v8/src/codegen/safepoint-table.cc
index 396cc9007f..644931e0ea 100644
--- a/deps/v8/src/codegen/safepoint-table.cc
+++ b/deps/v8/src/codegen/safepoint-table.cc
@@ -120,7 +120,7 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
RemoveDuplicates();
// Make sure the safepoint table is properly aligned. Pad with nops.
- assembler->Align(kIntSize);
+ assembler->Align(Code::kMetadataAlignment);
assembler->RecordComment(";;; Safepoint table.");
offset_ = assembler->pc_offset();
diff --git a/deps/v8/src/codegen/tnode.h b/deps/v8/src/codegen/tnode.h
index ba1f609bcf..72f9c6e3aa 100644
--- a/deps/v8/src/codegen/tnode.h
+++ b/deps/v8/src/codegen/tnode.h
@@ -238,28 +238,11 @@ struct UnionT {
using AnyTaggedT = UnionT<Object, MaybeObject>;
using Number = UnionT<Smi, HeapNumber>;
using Numeric = UnionT<Number, BigInt>;
+using ContextOrEmptyContext = UnionT<Context, Smi>;
// A pointer to a builtin function, used by Torque's function pointers.
using BuiltinPtr = Smi;
-class int31_t {
- public:
- int31_t() : value_(0) {}
- int31_t(int value) : value_(value) { // NOLINT(runtime/explicit)
- DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
- }
- int31_t& operator=(int value) {
- DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
- value_ = value;
- return *this;
- }
- int32_t value() const { return value_; }
- operator int32_t() const { return value_; }
-
- private:
- int32_t value_;
-};
-
template <class T, class U>
struct is_subtype {
static const bool value =
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index c1e2ec9808..5327745a02 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -246,6 +246,9 @@ bool ConstPool::AddSharedEntry(uint64_t data, int offset) {
bool ConstPool::TryRecordEntry(intptr_t data, RelocInfo::Mode mode) {
if (!FLAG_partial_constant_pool) return false;
+ DCHECK_WITH_MSG(
+ FLAG_text_is_readable,
+ "The partial constant pool requires a readable .text section");
if (!RelocInfo::IsShareableRelocMode(mode)) return false;
// Currently, partial constant pool only handles the following kinds of
@@ -332,6 +335,15 @@ Assembler::Assembler(const AssemblerOptions& options,
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+  // (larger) kCodeAlignment anyway.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
PatchConstPool();
DCHECK(constpool_.IsEmpty());
@@ -1207,13 +1219,6 @@ void Assembler::decb(Operand dst) {
emit_operand(1, dst);
}
-void Assembler::enter(Immediate size) {
- EnsureSpace ensure_space(this);
- emit(0xC8);
- emitw(size.value_); // 16 bit operand, always.
- emit(0);
-}
-
void Assembler::hlt() {
EnsureSpace ensure_space(this);
emit(0xF4);
@@ -2757,8 +2762,16 @@ void Assembler::movdqu(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-void Assembler::pinsrw(XMMRegister dst, Register src, int8_t imm8) {
- DCHECK(is_uint8(imm8));
+void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x6F);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::pinsrw(XMMRegister dst, Register src, uint8_t imm8) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -2768,8 +2781,7 @@ void Assembler::pinsrw(XMMRegister dst, Register src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pinsrw(XMMRegister dst, Operand src, int8_t imm8) {
- DCHECK(is_uint8(imm8));
+void Assembler::pinsrw(XMMRegister dst, Operand src, uint8_t imm8) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -2791,7 +2803,7 @@ void Assembler::pextrq(Register dst, XMMRegister src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pinsrq(XMMRegister dst, Register src, int8_t imm8) {
+void Assembler::pinsrq(XMMRegister dst, Register src, uint8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -2803,9 +2815,8 @@ void Assembler::pinsrq(XMMRegister dst, Register src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pinsrq(XMMRegister dst, Operand src, int8_t imm8) {
+void Assembler::pinsrq(XMMRegister dst, Operand src, uint8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
- DCHECK(is_uint8(imm8));
EnsureSpace ensure_space(this);
emit(0x66);
emit_rex_64(dst, src);
@@ -2816,22 +2827,20 @@ void Assembler::pinsrq(XMMRegister dst, Operand src, int8_t imm8) {
emit(imm8);
}
-void Assembler::pinsrd(XMMRegister dst, Register src, int8_t imm8) {
+void Assembler::pinsrd(XMMRegister dst, Register src, uint8_t imm8) {
sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x22, imm8);
}
-void Assembler::pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
- DCHECK(is_uint8(imm8));
+void Assembler::pinsrd(XMMRegister dst, Operand src, uint8_t imm8) {
sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x22);
emit(imm8);
}
-void Assembler::pinsrb(XMMRegister dst, Register src, int8_t imm8) {
+void Assembler::pinsrb(XMMRegister dst, Register src, uint8_t imm8) {
sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x20, imm8);
}
-void Assembler::pinsrb(XMMRegister dst, Operand src, int8_t imm8) {
- DCHECK(is_uint8(imm8));
+void Assembler::pinsrb(XMMRegister dst, Operand src, uint8_t imm8) {
sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x20);
emit(imm8);
}
@@ -2990,6 +2999,42 @@ void Assembler::movss(Operand src, XMMRegister dst) {
emit_sse_operand(dst, src);
}
+void Assembler::movlps(XMMRegister dst, Operand src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x12);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::movlps(Operand src, XMMRegister dst) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x13);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::movhps(XMMRegister dst, Operand src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x16);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::movhps(Operand src, XMMRegister dst) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x17);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::cmpps(XMMRegister dst, XMMRegister src, int8_t cmp) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
@@ -3463,6 +3508,38 @@ void Assembler::vmovdqu(Operand dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
+void Assembler::vmovlps(XMMRegister dst, XMMRegister src1, Operand src2) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
+ emit(0x12);
+ emit_sse_operand(dst, src2);
+}
+
+void Assembler::vmovlps(Operand dst, XMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(src, xmm0, dst, kL128, kNone, k0F, kWIG);
+ emit(0x13);
+ emit_sse_operand(src, dst);
+}
+
+void Assembler::vmovhps(XMMRegister dst, XMMRegister src1, Operand src2) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
+ emit(0x16);
+ emit_sse_operand(dst, src2);
+}
+
+void Assembler::vmovhps(Operand dst, XMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(src, xmm0, dst, kL128, kNone, k0F, kWIG);
+ emit(0x17);
+ emit_sse_operand(src, dst);
+}
+
void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2, SIMDPrefix pp, LeadingOpcode m,
VexW w) {
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index ac0c66ae5d..e05eaa9592 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -561,7 +561,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void popq(Register dst);
void popq(Operand dst);
- void enter(Immediate size);
void leave();
// Moves
@@ -929,6 +928,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movss(XMMRegister dst, Operand src);
void movss(Operand dst, XMMRegister src);
+
+ void movlps(XMMRegister dst, Operand src);
+ void movlps(Operand dst, XMMRegister src);
+
+ void movhps(XMMRegister dst, Operand src);
+ void movhps(Operand dst, XMMRegister src);
+
void shufps(XMMRegister dst, XMMRegister src, byte imm8);
void cvttss2si(Register dst, Operand src);
@@ -1060,16 +1066,18 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSE4_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION)
SSE4_UNOP_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION)
+ DECLARE_SSE4_INSTRUCTION(pblendvb, 66, 0F, 38, 10)
+ DECLARE_SSE4_INSTRUCTION(blendvps, 66, 0F, 38, 14)
DECLARE_SSE4_INSTRUCTION(blendvpd, 66, 0F, 38, 15)
#undef DECLARE_SSE4_INSTRUCTION
#define DECLARE_SSE4_EXTRACT_INSTRUCTION(instruction, prefix, escape1, \
escape2, opcode) \
- void instruction(Register dst, XMMRegister src, int8_t imm8) { \
+ void instruction(Register dst, XMMRegister src, uint8_t imm8) { \
sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode, \
imm8); \
} \
- void instruction(Operand dst, XMMRegister src, int8_t imm8) { \
+ void instruction(Operand dst, XMMRegister src, uint8_t imm8) { \
sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode, \
imm8); \
}
@@ -1120,6 +1128,20 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSSE3_UNOP_INSTRUCTION_LIST(DECLARE_SSSE3_UNOP_AVX_INSTRUCTION)
#undef DECLARE_SSSE3_UNOP_AVX_INSTRUCTION
+ void vpblendvb(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister mask) {
+ vinstr(0x4C, dst, src1, src2, k66, k0F3A, kW0);
+ // The mask operand is encoded in bits[7:4] of the immediate byte.
+ emit(mask.code() << 4);
+ }
+
+ void vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister mask) {
+ vinstr(0x4A, dst, src1, src2, k66, k0F3A, kW0);
+ // The mask operand is encoded in bits[7:4] of the immediate byte.
+ emit(mask.code() << 4);
+ }
+
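vpblendvb and vblendvps (like the existing vblendvpd) take a fourth register operand that the VEX encoding squeezes into the top nibble of a trailing immediate byte, as the comments above note. A one-liner modeling that encoding (illustrative, not V8 code):

    #include <cstdint>
    #include <cstdio>

    uint8_t EncodeBlendMask(int mask_reg_code) {
      return static_cast<uint8_t>(mask_reg_code << 4);  // mask in bits [7:4]
    }

    int main() {
      std::printf("xmm7 -> imm8 0x%02x\n", EncodeBlendMask(7));  // 0x70
    }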
void vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister mask) {
vinstr(0x4B, dst, src1, src2, k66, k0F3A, kW0);
@@ -1138,6 +1160,20 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSE4_UNOP_INSTRUCTION_LIST(DECLARE_SSE4_PMOV_AVX_INSTRUCTION)
#undef DECLARE_SSE4_PMOV_AVX_INSTRUCTION
+#define DECLARE_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, opcode) \
+ void v##instruction(Register dst, XMMRegister src, uint8_t imm8) { \
+ XMMRegister idst = XMMRegister::from_code(dst.code()); \
+ vinstr(0x##opcode, src, xmm0, idst, k##prefix, k##escape1##escape2, kW0); \
+ emit(imm8); \
+ } \
+ void v##instruction(Operand dst, XMMRegister src, uint8_t imm8) { \
+ vinstr(0x##opcode, src, xmm0, dst, k##prefix, k##escape1##escape2, kW0); \
+ emit(imm8); \
+ }
+
+ SSE4_EXTRACT_INSTRUCTION_LIST(DECLARE_AVX_INSTRUCTION)
+#undef DECLARE_AVX_INSTRUCTION
+
void movd(XMMRegister dst, Register src);
void movd(XMMRegister dst, Operand src);
void movd(Register dst, XMMRegister src);
@@ -1160,6 +1196,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movdqu(Operand dst, XMMRegister src);
void movdqu(XMMRegister dst, Operand src);
+ void movdqu(XMMRegister dst, XMMRegister src);
void movapd(XMMRegister dst, XMMRegister src);
void movupd(XMMRegister dst, Operand src);
@@ -1204,14 +1241,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void insertps(XMMRegister dst, XMMRegister src, byte imm8);
void insertps(XMMRegister dst, Operand src, byte imm8);
void pextrq(Register dst, XMMRegister src, int8_t imm8);
- void pinsrb(XMMRegister dst, Register src, int8_t imm8);
- void pinsrb(XMMRegister dst, Operand src, int8_t imm8);
- void pinsrw(XMMRegister dst, Register src, int8_t imm8);
- void pinsrw(XMMRegister dst, Operand src, int8_t imm8);
- void pinsrd(XMMRegister dst, Register src, int8_t imm8);
- void pinsrd(XMMRegister dst, Operand src, int8_t imm8);
- void pinsrq(XMMRegister dst, Register src, int8_t imm8);
- void pinsrq(XMMRegister dst, Operand src, int8_t imm8);
+ void pinsrb(XMMRegister dst, Register src, uint8_t imm8);
+ void pinsrb(XMMRegister dst, Operand src, uint8_t imm8);
+ void pinsrw(XMMRegister dst, Register src, uint8_t imm8);
+ void pinsrw(XMMRegister dst, Operand src, uint8_t imm8);
+ void pinsrd(XMMRegister dst, Register src, uint8_t imm8);
+ void pinsrd(XMMRegister dst, Operand src, uint8_t imm8);
+ void pinsrq(XMMRegister dst, Register src, uint8_t imm8);
+ void pinsrq(XMMRegister dst, Operand src, uint8_t imm8);
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
@@ -1290,6 +1327,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmovdqu(XMMRegister dst, Operand src);
void vmovdqu(Operand dst, XMMRegister src);
+ void vmovlps(XMMRegister dst, XMMRegister src1, Operand src2);
+ void vmovlps(Operand dst, XMMRegister src);
+
+ void vmovhps(XMMRegister dst, XMMRegister src1, Operand src2);
+ void vmovhps(Operand dst, XMMRegister src);
+
#define AVX_SSE_UNOP(instr, escape, opcode) \
void v##instr(XMMRegister dst, XMMRegister src2) { \
vps(0x##opcode, dst, xmm0, src2); \
@@ -1532,38 +1575,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x21, dst, src1, src2, k66, k0F3A, kWIG);
emit(imm8);
}
- void vextractps(Register dst, XMMRegister src, int8_t imm8) {
- XMMRegister idst = XMMRegister::from_code(dst.code());
- vinstr(0x17, src, xmm0, idst, k66, k0F3A, kWIG);
- emit(imm8);
- }
- void vpextrb(Register dst, XMMRegister src, uint8_t imm8) {
- XMMRegister idst = XMMRegister::from_code(dst.code());
- vinstr(0x14, src, xmm0, idst, k66, k0F3A, kW0);
- emit(imm8);
- }
- void vpextrb(Operand dst, XMMRegister src, uint8_t imm8) {
- vinstr(0x14, src, xmm0, dst, k66, k0F3A, kW0);
- emit(imm8);
- }
- void vpextrw(Register dst, XMMRegister src, uint8_t imm8) {
- XMMRegister idst = XMMRegister::from_code(dst.code());
- vinstr(0xc5, idst, xmm0, src, k66, k0F, kW0);
- emit(imm8);
- }
- void vpextrw(Operand dst, XMMRegister src, uint8_t imm8) {
- vinstr(0x15, src, xmm0, dst, k66, k0F3A, kW0);
- emit(imm8);
- }
- void vpextrd(Register dst, XMMRegister src, uint8_t imm8) {
- XMMRegister idst = XMMRegister::from_code(dst.code());
- vinstr(0x16, src, xmm0, idst, k66, k0F3A, kW0);
- emit(imm8);
- }
- void vpextrd(Operand dst, XMMRegister src, uint8_t imm8) {
- vinstr(0x16, src, xmm0, dst, k66, k0F3A, kW0);
- emit(imm8);
- }
void vpextrq(Register dst, XMMRegister src, int8_t imm8) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x16, src, xmm0, idst, k66, k0F3A, kW1);
@@ -1596,12 +1607,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x22, dst, src1, src2, k66, k0F3A, kW0);
emit(imm8);
}
- void vpinsrq(XMMRegister dst, XMMRegister src1, Register src2, int8_t imm8) {
+ void vpinsrq(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8) {
XMMRegister isrc = XMMRegister::from_code(src2.code());
vinstr(0x22, dst, src1, isrc, k66, k0F3A, kW1);
emit(imm8);
}
- void vpinsrq(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8) {
+ void vpinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
vinstr(0x22, dst, src1, src2, k66, k0F3A, kW1);
emit(imm8);
}
diff --git a/deps/v8/src/codegen/x64/interface-descriptors-x64.cc b/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
index 5a9c386eb8..e4d6b92708 100644
--- a/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
@@ -129,16 +129,6 @@ void CallWithSpreadDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments (on the stack, not including receiver)
- // rdi : the target to call
- // rbx : the object to spread
- // rdx : the feedback slot
- Register registers[] = {rdi, rax, rbx, rdx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rdi : the target to call
@@ -147,16 +137,6 @@ void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rdi : the target to call
- // rbx : the arguments list
- // rdx : the feedback slot
- // rax : the feedback vector
- Register registers[] = {rdi, rbx, rdx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void ConstructVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments (on the stack, not including receiver)
@@ -188,16 +168,6 @@ void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments (on the stack, not including receiver)
- // rdi : the target to call
- // rdx : the new target
- // rbx : the feedback slot
- Register registers[] = {rdi, rdx, rax, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rdi : the target to call
@@ -207,16 +177,6 @@ void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rdi : the target to call
- // rdx : the new target
- // rbx : the arguments list
- // rax : the feedback slot
- Register registers[] = {rdi, rdx, rbx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments
@@ -320,41 +280,6 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdx, // kLeft
- rax, // kRight
- rdi, // kSlot
- rbx}; // kMaybeFeedbackVector
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdi, // kFunction
- rax, // kActualArgumentsCount
- rcx, // kSlot
- rbx}; // kMaybeFeedbackVector
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdx, // kLeft
- rax, // kRight
- rdi, // kSlot
- rbx}; // kMaybeFeedbackVector
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdx, // kValue
- rax, // kSlot
- rdi}; // kMaybeFeedbackVector
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
} // namespace internal
} // namespace v8
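Every *_WithFeedback descriptor body deleted above followed the same shape; presumably their register assignments now come from a shared, platform-independent default rather than per-architecture files. The removed pattern, generalized (ExampleDescriptor is hypothetical):

  void ExampleDescriptor::InitializePlatformSpecific(
      CallInterfaceDescriptorData* data) {
    // Registers listed in parameter order, e.g. rdi = target, rax = argc.
    Register registers[] = {rdi, rax, rbx, rdx};
    data->InitializePlatformSpecific(arraysize(registers), registers);
  }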
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index 7f7ff5038a..9f5917c23a 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -37,16 +37,9 @@ namespace internal {
Operand StackArgumentsAccessor::GetArgumentOperand(int index) const {
DCHECK_GE(index, 0);
-#ifdef V8_REVERSE_JSARGS
// arg[0] = rsp + kPCOnStackSize;
// arg[i] = arg[0] + i * kSystemPointerSize;
return Operand(rsp, kPCOnStackSize + index * kSystemPointerSize);
-#else
- // arg[0] = (rsp + kPCOnStackSize) + argc * kSystemPointerSize;
- // arg[i] = arg[0] - i * kSystemPointerSize;
- return Operand(rsp, argc_, times_system_pointer_size,
- kPCOnStackSize - index * kSystemPointerSize);
-#endif
}
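With the reversed-arguments layout now unconditional, argument i sits at a fixed offset above the saved return address, independent of argc. An illustration, assuming a StackArgumentsAccessor named args:

  Operand a0 = args.GetArgumentOperand(0);  // rsp + kPCOnStackSize
  Operand a2 = args.GetArgumentOperand(2);  // rsp + kPCOnStackSize
                                            //     + 2 * kSystemPointerSize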
void MacroAssembler::Load(Register destination, ExternalReference source) {
@@ -343,11 +336,22 @@ void TurboAssembler::SaveRegisters(RegList registers) {
}
void TurboAssembler::LoadExternalPointerField(Register destination,
- Operand field_operand) {
- movq(destination, field_operand);
- if (V8_HEAP_SANDBOX_BOOL) {
- xorq(destination, Immediate(kExternalPointerSalt));
+ Operand field_operand,
+ ExternalPointerTag tag) {
+#ifdef V8_HEAP_SANDBOX
+ LoadAddress(kScratchRegister,
+ ExternalReference::external_pointer_table_address(isolate()));
+ movq(kScratchRegister,
+ Operand(kScratchRegister, Internals::kExternalPointerTableBufferOffset));
+ movl(destination, field_operand);
+ movq(destination, Operand(kScratchRegister, destination, times_8, 0));
+ if (tag != 0) {
+ movq(kScratchRegister, Immediate64(tag));
+ xorq(destination, kScratchRegister);
}
+#else
+ movq(destination, field_operand);
+#endif // V8_HEAP_SANDBOX
}
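A rough C++ mirror of the sandboxed assembly path above (the helper name is illustrative; external_pointer_table().get() is the table accessor used elsewhere in this patch):

  Address DecodeSandboxedPointer(Isolate* isolate, uint32_t index,
                                 ExternalPointerTag tag) {
    Address entry = isolate->external_pointer_table().get(index);
    return tag != 0 ? (entry ^ tag) : entry;  // tag of 0 is a plain load
  }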
void TurboAssembler::RestoreRegisters(RegList registers) {
@@ -1062,6 +1066,14 @@ void TurboAssembler::Move(Register dst, ExternalReference ext) {
movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
}
+void MacroAssembler::Cmp(Register dst, int32_t src) {
+ if (src == 0) {
+ testl(dst, dst);
+ } else {
+ cmpl(dst, Immediate(src));
+ }
+}
+
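The zero special-case exists because testl is a shorter encoding that sets the same flags as a compare against zero. Usage sketch:

  masm.Cmp(rax, 0);    // emits: testl rax, rax
  masm.Cmp(rax, 100);  // emits: cmpl rax, 100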
void MacroAssembler::SmiTag(Register reg) {
STATIC_ASSERT(kSmiTag == 0);
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
@@ -1356,7 +1368,7 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
void TurboAssembler::Move(XMMRegister dst, uint64_t high, uint64_t low) {
Move(dst, low);
movq(kScratchRegister, high);
- Pinsrq(dst, kScratchRegister, int8_t{1});
+ Pinsrq(dst, kScratchRegister, uint8_t{1});
}
// ----------------------------------------------------------------------------
@@ -1526,8 +1538,7 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
Builtins::IsIsolateIndependentBuiltin(*code_object));
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index)) {
Label skip;
if (cc != always) {
if (cc == never) return;
@@ -1576,8 +1587,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
Builtins::IsIsolateIndependentBuiltin(*code_object));
if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
- if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
- Builtins::IsIsolateIndependent(builtin_index)) {
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index)) {
// Inline the trampoline.
CallBuiltin(builtin_index);
return;
@@ -1587,6 +1597,13 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
call(code_object, rmode);
}
+Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(
+ Builtins::Name builtin_index) {
+ DCHECK(root_array_available());
+ return Operand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(builtin_index));
+}
+
Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) {
if (SmiValuesAre32Bits()) {
// The builtin_index register contains the builtin index as a Smi.
@@ -1710,7 +1727,19 @@ void TurboAssembler::RetpolineJump(Register reg) {
ret(0);
}
-void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
+void TurboAssembler::Shufps(XMMRegister dst, XMMRegister src, byte imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vshufps(dst, src, src, imm8);
+ } else {
+ if (dst != src) {
+ movss(dst, src);
+ }
+ shufps(dst, src, static_cast<byte>(0));
+ }
+}
+
+void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
return;
@@ -1729,43 +1758,71 @@ void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
shrq(dst, Immediate(32));
}
-void TurboAssembler::Pextrw(Register dst, XMMRegister src, int8_t imm8) {
+namespace {
+
+template <typename Src>
+using AvxFn = void (Assembler::*)(XMMRegister, XMMRegister, Src, uint8_t);
+template <typename Src>
+using NoAvxFn = void (Assembler::*)(XMMRegister, Src, uint8_t);
+
+template <typename Src>
+void PinsrHelper(Assembler* assm, AvxFn<Src> avx, NoAvxFn<Src> noavx,
+ XMMRegister dst, XMMRegister src1, Src src2, uint8_t imm8,
+ base::Optional<CpuFeature> feature = base::nullopt) {
if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpextrw(dst, src, imm8);
- return;
- } else {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- CpuFeatureScope sse_scope(this, SSE4_1);
- pextrw(dst, src, imm8);
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx)(dst, src1, src2, imm8);
return;
}
-}
-void TurboAssembler::Pextrb(Register dst, XMMRegister src, int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpextrb(dst, src, imm8);
- return;
+ if (dst != src1) {
+ assm->movdqu(dst, src1);
+ }
+ if (feature.has_value()) {
+ DCHECK(CpuFeatures::IsSupported(*feature));
+ CpuFeatureScope scope(assm, *feature);
+ (assm->*noavx)(dst, src2, imm8);
} else {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- CpuFeatureScope sse_scope(this, SSE4_1);
- pextrb(dst, src, imm8);
- return;
+ (assm->*noavx)(dst, src2, imm8);
}
}
+} // namespace
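PinsrHelper folds the repeated AVX-versus-SSE dispatch into one template over pointers to member functions, so the wrappers that follow reduce to single calls:

  // AVX if available, otherwise plain pinsrw (baseline SSE2):
  PinsrHelper(this, &Assembler::vpinsrw, &Assembler::pinsrw, dst, src1, src2,
              imm8);
  // AVX if available, otherwise pinsrb under an SSE4.1 feature scope:
  PinsrHelper(this, &Assembler::vpinsrb, &Assembler::pinsrb, dst, src1, src2,
              imm8, base::Optional<CpuFeature>(SSE4_1));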
-void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrd(dst, dst, src, imm8);
- return;
- } else if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pinsrd(dst, src, imm8);
+void TurboAssembler::Pinsrb(XMMRegister dst, XMMRegister src1, Register src2,
+ uint8_t imm8) {
+ PinsrHelper(this, &Assembler::vpinsrb, &Assembler::pinsrb, dst, src1, src2,
+ imm8, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void TurboAssembler::Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2,
+ uint8_t imm8) {
+ PinsrHelper(this, &Assembler::vpinsrb, &Assembler::pinsrb, dst, src1, src2,
+ imm8, base::Optional<CpuFeature>(SSE4_1));
+}
+
+void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Register src2,
+ uint8_t imm8) {
+ PinsrHelper(this, &Assembler::vpinsrw, &Assembler::pinsrw, dst, src1, src2,
+ imm8);
+}
+
+void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2,
+ uint8_t imm8) {
+ PinsrHelper(this, &Assembler::vpinsrw, &Assembler::pinsrw, dst, src1, src2,
+ imm8);
+}
+
+void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Register src2,
+ uint8_t imm8) {
+ // Need a fall back when SSE4_1 is unavailable. Pinsrb and Pinsrq are used
+ // only by Wasm SIMD, which requires SSE4_1 already.
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ PinsrHelper(this, &Assembler::vpinsrd, &Assembler::pinsrd, dst, src1, src2,
+ imm8, base::Optional<CpuFeature>(SSE4_1));
return;
}
- Movd(kScratchDoubleReg, src);
+
+ Movd(kScratchDoubleReg, src2);
if (imm8 == 1) {
punpckldq(dst, kScratchDoubleReg);
} else {
@@ -1774,17 +1831,17 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
}
}
-void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrd(dst, dst, src, imm8);
- return;
- } else if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pinsrd(dst, src, imm8);
+void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
+ uint8_t imm8) {
+ // Need a fall back when SSE4_1 is unavailable. Pinsrb and Pinsrq are used
+ // only by Wasm SIMD, which requires SSE4_1 already.
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ PinsrHelper(this, &Assembler::vpinsrd, &Assembler::pinsrd, dst, src1, src2,
+ imm8, base::Optional<CpuFeature>(SSE4_1));
return;
}
- Movd(kScratchDoubleReg, src);
+
+ Movd(kScratchDoubleReg, src2);
if (imm8 == 1) {
punpckldq(dst, kScratchDoubleReg);
} else {
@@ -1793,54 +1850,24 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
}
}
-void TurboAssembler::Pinsrw(XMMRegister dst, Register src, int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrw(dst, dst, src, imm8);
- return;
- } else {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- CpuFeatureScope sse_scope(this, SSE4_1);
- pinsrw(dst, src, imm8);
- return;
- }
+void TurboAssembler::Pinsrd(XMMRegister dst, Register src2, uint8_t imm8) {
+ Pinsrd(dst, dst, src2, imm8);
}
-void TurboAssembler::Pinsrw(XMMRegister dst, Operand src, int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrw(dst, dst, src, imm8);
- return;
- } else {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pinsrw(dst, src, imm8);
- return;
- }
+void TurboAssembler::Pinsrd(XMMRegister dst, Operand src2, uint8_t imm8) {
+ Pinsrd(dst, dst, src2, imm8);
}
-void TurboAssembler::Pinsrb(XMMRegister dst, Register src, int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrb(dst, dst, src, imm8);
- return;
- } else {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- CpuFeatureScope sse_scope(this, SSE4_1);
- pinsrb(dst, src, imm8);
- return;
- }
+void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Register src2,
+ uint8_t imm8) {
+ PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
+ imm8, base::Optional<CpuFeature>(SSE4_1));
}
-void TurboAssembler::Pinsrb(XMMRegister dst, Operand src, int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrb(dst, dst, src, imm8);
- return;
- } else {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pinsrb(dst, src, imm8);
- return;
- }
+void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2,
+ uint8_t imm8) {
+ PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
+ imm8, base::Optional<CpuFeature>(SSE4_1));
}
void TurboAssembler::Psllq(XMMRegister dst, byte imm8) {
@@ -1873,6 +1900,58 @@ void TurboAssembler::Pslld(XMMRegister dst, byte imm8) {
}
}
+void TurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister mask) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpblendvb(dst, src1, src2, mask);
+ } else {
+ DCHECK_EQ(dst, src1);
+ DCHECK_EQ(xmm0, mask);
+ pblendvb(dst, src2);
+ }
+}
+
+void TurboAssembler::Blendvps(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister mask) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vblendvps(dst, src1, src2, mask);
+ } else {
+ DCHECK_EQ(dst, src1);
+ DCHECK_EQ(xmm0, mask);
+ blendvps(dst, src2);
+ }
+}
+
+void TurboAssembler::Blendvpd(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister mask) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vblendvpd(dst, src1, src2, mask);
+ } else {
+ DCHECK_EQ(dst, src1);
+ DCHECK_EQ(xmm0, mask);
+ blendvpd(dst, src2);
+ }
+}
+
+void TurboAssembler::Pshufb(XMMRegister dst, XMMRegister src,
+ XMMRegister mask) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpshufb(dst, src, mask);
+ } else {
+ // Make sure these are different so that we won't overwrite mask.
+ DCHECK_NE(dst, mask);
+ if (dst != src) {
+ movapd(dst, src);
+ }
+ CpuFeatureScope sse_scope(this, SSSE3);
+ pshufb(dst, mask);
+ }
+}
+
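The DCHECKs in the SSE branches above record an ISA quirk: non-AVX pblendvb/blendvps/blendvpd take their mask implicitly in xmm0. A portable call therefore materializes the mask there first (sketch from within a TurboAssembler member; mask_reg is hypothetical):

  movaps(xmm0, mask_reg);          // SSE blends read the mask from xmm0
  Pblendvb(dst, dst, src2, xmm0);  // AVX path would accept any mask register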
void TurboAssembler::Psrld(XMMRegister dst, byte imm8) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -2315,8 +2394,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register actual_parameter_count,
InvokeFlag flag) {
// You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
- DCHECK(function == rdi);
+ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_EQ(function, rdi);
DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx);
// On function call, call into the debugger if necessary.
@@ -2327,7 +2406,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Operand debug_hook_active_operand =
ExternalReferenceAsOperand(debug_hook_active);
cmpb(debug_hook_active_operand, Immediate(0));
- j(not_equal, &debug_hook, Label::kNear);
+ j(not_equal, &debug_hook);
}
bind(&continue_after_hook);
@@ -2355,24 +2434,67 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
bind(&debug_hook);
CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
actual_parameter_count);
- jmp(&continue_after_hook, Label::kNear);
+ jmp(&continue_after_hook);
bind(&done);
}
+Operand MacroAssembler::StackLimitAsOperand(StackLimitKind kind) {
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ CHECK(is_int32(offset));
+ return Operand(kRootRegister, static_cast<int32_t>(offset));
+}
+
+void MacroAssembler::StackOverflowCheck(
+ Register num_args, Register scratch, Label* stack_overflow,
+ Label::Distance stack_overflow_distance) {
+ DCHECK_NE(num_args, scratch);
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ movq(kScratchRegister, StackLimitAsOperand(StackLimitKind::kRealStackLimit));
+ movq(scratch, rsp);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here which will cause scratch to become negative.
+ subq(scratch, kScratchRegister);
+ // TODO(victorgomes): Use ia32 approach with leaq, since it requires less
+ // instructions.
+ sarq(scratch, Immediate(kSystemPointerSizeLog2));
+ // Check if the arguments will overflow the stack.
+ cmpq(scratch, num_args);
+ // Signed comparison.
+ // TODO(victorgomes): Save some bytes in the builtins that use stack checks
+ // by jumping to a builtin that throws the exception.
+ j(less_equal, stack_overflow, stack_overflow_distance);
+}
+
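What StackOverflowCheck computes, paraphrased in C++ (illustrative; signedness is the point, since an already-overflowed stack yields a negative slot count and still branches):

  bool WillOverflow(intptr_t sp, intptr_t real_limit, intptr_t num_args) {
    // Mirrors subq + sarq above: slots remaining between sp and the limit.
    intptr_t slots_left = (sp - real_limit) >> kSystemPointerSizeLog2;
    return slots_left <= num_args;  // signed comparison, as in the jump
  }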
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
Label* done, InvokeFlag flag) {
if (expected_parameter_count != actual_parameter_count) {
Label regular_invoke;
#ifdef V8_NO_ARGUMENTS_ADAPTOR
- // Skip if adaptor sentinel.
+ // If the expected parameter count is equal to the adaptor sentinel, no need
+ // to push undefined value as arguments.
cmpl(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
- j(equal, &regular_invoke, Label::kNear);
+ j(equal, &regular_invoke, Label::kFar);
- // Skip if overapplication or if expected number of arguments.
+ // If overapplication or if the actual argument count is equal to the
+ // formal parameter count, no need to push extra undefined values.
subq(expected_parameter_count, actual_parameter_count);
- j(less_equal, &regular_invoke, Label::kNear);
+ j(less_equal, &regular_invoke, Label::kFar);
+
+ Label stack_overflow;
+ StackOverflowCheck(expected_parameter_count, rcx, &stack_overflow);
// Underapplication. Move the arguments already in the stack, including the
// receiver and the return address.
@@ -2409,6 +2531,15 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
kScratchRegister);
j(greater, &loop, Label::kNear);
}
+ jmp(&regular_invoke);
+
+ bind(&stack_overflow);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ CallRuntime(Runtime::kThrowStackOverflow);
+ int3(); // This should be unreachable.
+ }
#else
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
@@ -2449,13 +2580,7 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Push(fun);
Push(fun);
// Arguments are located 2 words below the base pointer.
-#ifdef V8_REVERSE_JSARGS
Operand receiver_op = Operand(rbp, kSystemPointerSize * 2);
-#else
- Operand receiver_op =
- Operand(rbp, actual_parameter_count, times_system_pointer_size,
- kSystemPointerSize * 2);
-#endif
Push(receiver_op);
CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
@@ -2831,13 +2956,18 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
Set(kSpeculationPoisonRegister, -1);
}
-void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
- Label* exit, DeoptimizeKind kind) {
+void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
+ Label* exit, DeoptimizeKind kind,
+ Label*) {
+ // Note: Assembler::call is used here on purpose to guarantee fixed-size
+ // exits even on Atom CPUs; see TurboAssembler::Call for Atom-specific
+ // performance tuning which emits a different instruction sequence.
+ call(EntryFromBuiltinIndexAsOperand(target));
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
USE(exit, kind);
- NoRootArrayScope no_root_array(this);
- // Save the deopt id in r13 (we don't need the roots array from now on).
- movq(r13, Immediate(deopt_id));
- call(target, RelocInfo::RUNTIME_ENTRY);
}
void TurboAssembler::Trap() { int3(); }
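Deopt exits are now a single indirect call through the builtin entry table addressed off kRootRegister, so every exit has a constant length, which the DCHECK above pins against Deoptimizer::kLazyDeoptExitSize and kNonLazyDeoptExitSize. Schematically:

  // What CallForDeoptimization emits (see EntryFromBuiltinIndexAsOperand):
  Operand entry = Operand(kRootRegister,
                          IsolateData::builtin_entry_slot_offset(target));
  call(entry);  // fixed-size, so the deoptimizer can locate exits by index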
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index 995f2565cc..9fc4d94768 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -33,6 +33,10 @@ struct SmiIndex {
ScaleFactor scale;
};
+// TODO(victorgomes): Move definition to macro-assembler.h, once all other
+// platforms are updated.
+enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
// Convenient class to access arguments below the stack pointer.
class StackArgumentsAccessor {
public:
@@ -145,6 +149,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Movss, movss)
AVX_OP(Movsd, movsd)
AVX_OP(Movdqu, movdqu)
+ AVX_OP(Movlps, movlps)
+ AVX_OP(Movhps, movhps)
AVX_OP(Pcmpeqb, pcmpeqb)
AVX_OP(Pcmpeqw, pcmpeqw)
AVX_OP(Pcmpeqd, pcmpeqd)
@@ -222,7 +228,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Divpd, divpd)
AVX_OP(Maxps, maxps)
AVX_OP(Maxpd, maxpd)
- AVX_OP(Shufps, shufps)
AVX_OP(Cvtdq2ps, cvtdq2ps)
AVX_OP(Rcpps, rcpps)
AVX_OP(Rsqrtps, rsqrtps)
@@ -281,6 +286,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP_SSE4_1(Pmovzxbw, pmovzxbw)
AVX_OP_SSE4_1(Pmovzxwd, pmovzxwd)
AVX_OP_SSE4_1(Pmovzxdq, pmovzxdq)
+ AVX_OP_SSE4_1(Pextrb, pextrb)
+ AVX_OP_SSE4_1(Pextrw, pextrw)
AVX_OP_SSE4_1(Pextrq, pextrq)
AVX_OP_SSE4_1(Roundps, roundps)
AVX_OP_SSE4_1(Roundpd, roundpd)
@@ -488,6 +495,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(ExternalReference ext);
void Call(Label* target) { call(target); }
+ Operand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
Operand EntryFromBuiltinIndexAsOperand(Register builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(int builtin_index);
@@ -507,22 +515,29 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void RetpolineJump(Register reg);
- void CallForDeoptimization(Address target, int deopt_id, Label* exit,
- DeoptimizeKind kind);
+ void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
+ DeoptimizeKind kind,
+ Label* jump_deoptimization_entry_label);
void Trap() override;
void DebugBreak() override;
+ // Shufps that will mov src into dst if AVX is not supported.
+ void Shufps(XMMRegister dst, XMMRegister src, byte imm8);
+
// Non-SSE2 instructions.
- void Pextrd(Register dst, XMMRegister src, int8_t imm8);
- void Pextrw(Register dst, XMMRegister src, int8_t imm8);
- void Pextrb(Register dst, XMMRegister src, int8_t imm8);
- void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
- void Pinsrd(XMMRegister dst, Operand src, int8_t imm8);
- void Pinsrw(XMMRegister dst, Register src, int8_t imm8);
- void Pinsrw(XMMRegister dst, Operand src, int8_t imm8);
- void Pinsrb(XMMRegister dst, Register src, int8_t imm8);
- void Pinsrb(XMMRegister dst, Operand src, int8_t imm8);
+ void Pextrd(Register dst, XMMRegister src, uint8_t imm8);
+
+ void Pinsrb(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
+ void Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
+ void Pinsrw(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
+ void Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
+ void Pinsrd(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
+ void Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
+ void Pinsrd(XMMRegister dst, Register src2, uint8_t imm8);
+ void Pinsrd(XMMRegister dst, Operand src2, uint8_t imm8);
+ void Pinsrq(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
+ void Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
void Psllq(XMMRegister dst, int imm8) { Psllq(dst, static_cast<byte>(imm8)); }
void Psllq(XMMRegister dst, byte imm8);
@@ -531,6 +546,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Pslld(XMMRegister dst, byte imm8);
void Psrld(XMMRegister dst, byte imm8);
+ void Pblendvb(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister mask);
+ void Blendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister mask);
+ void Blendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister mask);
+
+ // Supports both SSE and AVX. Move src1 to dst if they are not equal on SSE.
+ void Pshufb(XMMRegister dst, XMMRegister src1, XMMRegister src2);
+
void CompareRoot(Register with, RootIndex index);
void CompareRoot(Operand with, RootIndex index);
@@ -686,7 +711,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Loads a field containing off-heap pointer and does necessary decoding
// if V8 heap sandbox is enabled.
- void LoadExternalPointerField(Register destination, Operand field_operand);
+ void LoadExternalPointerField(Register destination, Operand field_operand,
+ ExternalPointerTag tag);
protected:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -879,6 +905,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void Cmp(Operand dst, Handle<Object> source);
void Cmp(Register dst, Smi src);
void Cmp(Operand dst, Smi src);
+ void Cmp(Register dst, int32_t src);
// Checks if value is in range [lower_limit, higher_limit] using a single
// comparison.
@@ -1008,6 +1035,13 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void DecrementCounter(StatsCounter* counter, int value);
// ---------------------------------------------------------------------------
+ // Stack limit utilities
+ Operand StackLimitAsOperand(StackLimitKind kind);
+ void StackOverflowCheck(
+ Register num_args, Register scratch, Label* stack_overflow,
+ Label::Distance stack_overflow_distance = Label::kFar);
+
+ // ---------------------------------------------------------------------------
// In-place weak references.
void LoadWeakValue(Register in_out, Label* target_if_cleared);
@@ -1044,7 +1078,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class StandardFrame;
+ friend class CommonFrame;
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/deps/v8/src/common/DIR_METADATA b/deps/v8/src/common/DIR_METADATA
new file mode 100644
index 0000000000..2f8dbbcf45
--- /dev/null
+++ b/deps/v8/src/common/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/common/OWNERS b/deps/v8/src/common/OWNERS
index 4750620072..48d72aea5e 100644
--- a/deps/v8/src/common/OWNERS
+++ b/deps/v8/src/common/OWNERS
@@ -1,3 +1 @@
file:../../COMMON_OWNERS
-
-# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/common/assert-scope.cc b/deps/v8/src/common/assert-scope.cc
index 531ac4e024..520826349d 100644
--- a/deps/v8/src/common/assert-scope.cc
+++ b/deps/v8/src/common/assert-scope.cc
@@ -130,6 +130,8 @@ template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>;
template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>;
template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, false>;
template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>;
+template class PerThreadAssertScope<CODE_ALLOCATION_ASSERT, false>;
+template class PerThreadAssertScope<CODE_ALLOCATION_ASSERT, true>;
template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>;
template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>;
diff --git a/deps/v8/src/common/assert-scope.h b/deps/v8/src/common/assert-scope.h
index b958ca4bed..8937197d26 100644
--- a/deps/v8/src/common/assert-scope.h
+++ b/deps/v8/src/common/assert-scope.h
@@ -33,6 +33,7 @@ enum PerThreadAssertType {
HANDLE_ALLOCATION_ASSERT,
HANDLE_DEREFERENCE_ASSERT,
CODE_DEPENDENCY_CHANGE_ASSERT,
+ CODE_ALLOCATION_ASSERT,
LAST_PER_THREAD_ASSERT_TYPE
};
@@ -128,9 +129,17 @@ using AllowHandleAllocation =
PerThreadAssertScopeDebugOnly<HANDLE_ALLOCATION_ASSERT, true>;
// Scope to document where we do not expect garbage collections. It differs from
-// DisallowHeapAllocation by also forbiding safepoints.
+// DisallowHeapAllocation by also forbidding safepoints.
using DisallowGarbageCollection =
PerThreadAssertScopeDebugOnly<GARBAGE_COLLECTION_ASSERT, false>;
+// The DISALLOW_GARBAGE_COLLECTION macro can be used to define a
+// DisallowGarbageCollection field in classes that isn't present in release
+// builds.
+#ifdef DEBUG
+#define DISALLOW_GARBAGE_COLLECTION(name) DisallowGarbageCollection name;
+#else
+#define DISALLOW_GARBAGE_COLLECTION(name)
+#endif
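Usage sketch for the new macro (the class name is hypothetical); the field, and the checking it enables, compiles away in release builds:

  class RawPointerHolder {
    // Expands to `DisallowGarbageCollection no_gc_;` only when DEBUG is set.
    DISALLOW_GARBAGE_COLLECTION(no_gc_)
  };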
// Scope to introduce an exception to DisallowGarbageCollection.
using AllowGarbageCollection =
@@ -140,6 +149,9 @@ using AllowGarbageCollection =
// and will eventually be removed, use DisallowGarbageCollection instead.
using DisallowHeapAllocation =
PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, false>;
+// The DISALLOW_HEAP_ALLOCATION macro can be used to define a
+// DisallowHeapAllocation field in classes that isn't present in release
+// builds.
#ifdef DEBUG
#define DISALLOW_HEAP_ALLOCATION(name) DisallowHeapAllocation name;
#else
@@ -166,6 +178,14 @@ using DisallowCodeDependencyChange =
using AllowCodeDependencyChange =
PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, true>;
+// Scope to document where we do not expect code to be allocated.
+using DisallowCodeAllocation =
+ PerThreadAssertScopeDebugOnly<CODE_ALLOCATION_ASSERT, false>;
+
+// Scope to introduce an exception to DisallowCodeAllocation.
+using AllowCodeAllocation =
+ PerThreadAssertScopeDebugOnly<CODE_ALLOCATION_ASSERT, true>;
+
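The new pair composes like the other assert scopes; a sketch:

  {
    DisallowCodeAllocation no_code;  // debug-only check: no Code objects here
    // ...
    {
      AllowCodeAllocation allow;     // scoped exception for a known-safe site
      // ...
    }
  }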
class DisallowHeapAccess {
DisallowCodeDependencyChange no_dependency_change_;
DisallowHandleAllocation no_handle_allocation_;
@@ -273,6 +293,8 @@ extern template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>;
extern template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT,
false>;
extern template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>;
+extern template class PerThreadAssertScope<CODE_ALLOCATION_ASSERT, false>;
+extern template class PerThreadAssertScope<CODE_ALLOCATION_ASSERT, true>;
extern template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>;
extern template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>;
diff --git a/deps/v8/src/common/external-pointer-inl.h b/deps/v8/src/common/external-pointer-inl.h
index 32a78002e1..070d787b63 100644
--- a/deps/v8/src/common/external-pointer-inl.h
+++ b/deps/v8/src/common/external-pointer-inl.h
@@ -12,18 +12,93 @@
namespace v8 {
namespace internal {
-V8_INLINE ExternalPointer_t EncodeExternalPointer(Isolate* isolate,
- Address external_pointer) {
+V8_INLINE Address DecodeExternalPointer(IsolateRoot isolate_root,
+ ExternalPointer_t encoded_pointer,
+ ExternalPointerTag tag) {
STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
- if (!V8_HEAP_SANDBOX_BOOL) return external_pointer;
- return external_pointer ^ kExternalPointerSalt;
+#ifdef V8_HEAP_SANDBOX
+ uint32_t index = static_cast<uint32_t>(encoded_pointer);
+ const Isolate* isolate = Isolate::FromRootAddress(isolate_root.address());
+ return isolate->external_pointer_table().get(index) ^ tag;
+#else
+ return encoded_pointer;
+#endif
}
-V8_INLINE Address DecodeExternalPointer(const Isolate* isolate,
- ExternalPointer_t encoded_pointer) {
- STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
- if (!V8_HEAP_SANDBOX_BOOL) return encoded_pointer;
- return encoded_pointer ^ kExternalPointerSalt;
+V8_INLINE void InitExternalPointerField(Address field_address,
+ Isolate* isolate) {
+#ifdef V8_HEAP_SANDBOX
+ static_assert(kExternalPointerSize == kSystemPointerSize,
+ "Review the code below, once kExternalPointerSize is 4-byte "
+ "the address of the field will always be aligned");
+ ExternalPointer_t index = isolate->external_pointer_table().allocate();
+ base::WriteUnalignedValue<ExternalPointer_t>(field_address, index);
+#else
+ // Nothing to do.
+#endif // V8_HEAP_SANDBOX
+}
+
+V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
+ Address value, ExternalPointerTag tag) {
+#ifdef V8_HEAP_SANDBOX
+ ExternalPointer_t index = isolate->external_pointer_table().allocate();
+ isolate->external_pointer_table().set(static_cast<uint32_t>(index),
+ value ^ tag);
+ static_assert(kExternalPointerSize == kSystemPointerSize,
+ "Review the code below, once kExternalPointerSize is 4-byte "
+ "the address of the field will always be aligned");
+ base::WriteUnalignedValue<ExternalPointer_t>(field_address, index);
+#else
+ // Pointer compression causes types larger than kTaggedSize to be unaligned.
+ constexpr bool v8_pointer_compression_unaligned =
+ kExternalPointerSize > kTaggedSize;
+ ExternalPointer_t encoded_value = static_cast<ExternalPointer_t>(value);
+ if (v8_pointer_compression_unaligned) {
+ base::WriteUnalignedValue<ExternalPointer_t>(field_address, encoded_value);
+ } else {
+ base::Memory<ExternalPointer_t>(field_address) = encoded_value;
+ }
+#endif // V8_HEAP_SANDBOX
+}
+
+V8_INLINE Address ReadExternalPointerField(Address field_address,
+ IsolateRoot isolate_root,
+ ExternalPointerTag tag) {
+ // Pointer compression causes types larger than kTaggedSize to be unaligned.
+ constexpr bool v8_pointer_compression_unaligned =
+ kExternalPointerSize > kTaggedSize;
+ ExternalPointer_t encoded_value;
+ if (v8_pointer_compression_unaligned) {
+ encoded_value = base::ReadUnalignedValue<ExternalPointer_t>(field_address);
+ } else {
+ encoded_value = base::Memory<ExternalPointer_t>(field_address);
+ }
+ return DecodeExternalPointer(isolate_root, encoded_value, tag);
+}
+
+V8_INLINE void WriteExternalPointerField(Address field_address,
+ Isolate* isolate, Address value,
+ ExternalPointerTag tag) {
+#ifdef V8_HEAP_SANDBOX
+ static_assert(kExternalPointerSize == kSystemPointerSize,
+ "Review the code below, once kExternalPointerSize is 4-byte "
+ "the address of the field will always be aligned");
+
+ ExternalPointer_t index =
+ base::ReadUnalignedValue<ExternalPointer_t>(field_address);
+ isolate->external_pointer_table().set(static_cast<uint32_t>(index),
+ value ^ tag);
+#else
+ // Pointer compression causes types larger than kTaggedSize to be unaligned.
+ constexpr bool v8_pointer_compression_unaligned =
+ kExternalPointerSize > kTaggedSize;
+ ExternalPointer_t encoded_value = static_cast<ExternalPointer_t>(value);
+ if (v8_pointer_compression_unaligned) {
+ base::WriteUnalignedValue<ExternalPointer_t>(field_address, encoded_value);
+ } else {
+ base::Memory<ExternalPointer_t>(field_address) = encoded_value;
+ }
+#endif // V8_HEAP_SANDBOX
}
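End to end, the helpers above give an external-pointer field this lifecycle (kFooTag and the addresses are hypothetical):

  InitExternalPointerField(field_address, isolate);  // reserve a table slot
  WriteExternalPointerField(field_address, isolate, external_ptr,
                            kFooTag);                // entry = ptr ^ tag
  Address p = ReadExternalPointerField(field_address, isolate,
                                       kFooTag);     // table[index] ^ tag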
} // namespace internal
diff --git a/deps/v8/src/common/external-pointer.h b/deps/v8/src/common/external-pointer.h
index 9b5b061997..5a380df762 100644
--- a/deps/v8/src/common/external-pointer.h
+++ b/deps/v8/src/common/external-pointer.h
@@ -10,22 +10,37 @@
namespace v8 {
namespace internal {
-// See v8:10391 for details about V8 heap sandbox.
-constexpr uint32_t kExternalPointerSalt =
- 0x7fffffff & ~static_cast<uint32_t>(kHeapObjectTagMask);
-
-static_assert(static_cast<int32_t>(kExternalPointerSalt) >= 0,
- "Salt value must be positive for better assembly code");
-
-// Convert external pointer value into encoded form suitable for being stored
-// on V8 heap.
-V8_INLINE ExternalPointer_t EncodeExternalPointer(Isolate* isolate,
- Address external_pointer);
-
// Convert external pointer from on-V8-heap representation to an actual external
// pointer value.
-V8_INLINE Address DecodeExternalPointer(const Isolate* isolate,
- ExternalPointer_t encoded_pointer);
+V8_INLINE Address DecodeExternalPointer(IsolateRoot isolate,
+ ExternalPointer_t encoded_pointer,
+ ExternalPointerTag tag);
+
+constexpr ExternalPointer_t kNullExternalPointer = 0;
+
+// Creates uninitialized entry in external pointer table and writes the entry id
+// to the field.
+// When sandbox is not enabled, it's a no-op.
+V8_INLINE void InitExternalPointerField(Address field_address,
+ Isolate* isolate);
+
+// Creates and initializes entry in external pointer table and writes the entry
+// id to the field.
+// Basically, it's InitExternalPointerField() followed by
+// WriteExternalPointerField().
+V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
+ Address value, ExternalPointerTag tag);
+
+// Reads external pointer for the field, and decodes it if the sandbox is
+// enabled.
+V8_INLINE Address ReadExternalPointerField(Address field_address,
+ IsolateRoot isolate,
+ ExternalPointerTag tag);
+
+// Encodes value if the sandbox is enabled and writes it into the field.
+V8_INLINE void WriteExternalPointerField(Address field_address,
+ Isolate* isolate, Address value,
+ ExternalPointerTag tag);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index 0e9d815207..988ab10c15 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -103,6 +103,13 @@ STATIC_ASSERT(V8_DEFAULT_STACK_SIZE_KB* KB +
#define V8_DOUBLE_FIELDS_UNBOXING false
#endif
+// Determine whether dict mode prototypes feature is enabled.
+#ifdef V8_DICT_MODE_PROTOTYPES
+#define V8_DICT_MODE_PROTOTYPES_BOOL true
+#else
+#define V8_DICT_MODE_PROTOTYPES_BOOL false
+#endif
+
// Determine whether tagged pointers are 8 bytes (used in Torque layouts for
// choosing where to insert padding).
#if V8_TARGET_ARCH_64_BIT && !defined(V8_COMPRESS_POINTERS)
@@ -189,9 +196,8 @@ constexpr int kDoubleSizeLog2 = 3;
// Total wasm code space per engine (i.e. per process) is limited to make
// certain attacks that rely on heap spraying harder.
-// This limit was increased to 2GB in August 2020 and we have security clearance
-// to increase to 4GB if needed.
-constexpr size_t kMaxWasmCodeMB = 2048;
+// Just below 4GB, such that {kMaxWasmCodeMemory} fits in a 32-bit size_t.
+constexpr size_t kMaxWasmCodeMB = 4095;
constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB;
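The constant is chosen so the byte count stays under 2^32; a quick check:

  // 4095 * 2^20 = 4,293,918,720 < 2^32 = 4,294,967,296
  static_assert(kMaxWasmCodeMemory < (uint64_t{1} << 32),
                "kMaxWasmCodeMemory fits in a 32-bit size_t");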
#if V8_HOST_ARCH_64_BIT
@@ -295,7 +301,6 @@ STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
// This type defines raw storage type for external (or off-V8 heap) pointers
// stored on V8 heap.
-using ExternalPointer_t = Address;
constexpr int kExternalPointerSize = sizeof(ExternalPointer_t);
constexpr int kEmbedderDataSlotSize = kSystemPointerSize;
@@ -465,8 +470,11 @@ enum class DeoptimizeKind : uint8_t {
kSoft,
kBailout,
kLazy,
- kLastDeoptimizeKind = kLazy
};
+constexpr DeoptimizeKind kFirstDeoptimizeKind = DeoptimizeKind::kEager;
+constexpr DeoptimizeKind kLastDeoptimizeKind = DeoptimizeKind::kLazy;
+STATIC_ASSERT(static_cast<int>(kFirstDeoptimizeKind) == 0);
+constexpr int kDeoptimizeKindCount = static_cast<int>(kLastDeoptimizeKind) + 1;
inline size_t hash_value(DeoptimizeKind kind) {
return static_cast<size_t>(kind);
}
@@ -481,23 +489,8 @@ inline std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
case DeoptimizeKind::kBailout:
return os << "Bailout";
}
- UNREACHABLE();
}
-enum class IsolateAllocationMode {
- // Allocate Isolate in C++ heap using default new/delete operators.
- kInCppHeap,
-
- // Allocate Isolate in a committed region inside V8 heap reservation.
- kInV8Heap,
-
-#ifdef V8_COMPRESS_POINTERS
- kDefault = kInV8Heap,
-#else
- kDefault = kInCppHeap,
-#endif
-};
-
// Indicates whether the lookup is related to sloppy-mode block-scoped
// function hoisting, and is a synthetic assignment for that.
enum class LookupHoistingMode { kNormal, kLegacySloppy };
@@ -795,12 +788,7 @@ inline std::ostream& operator<<(std::ostream& os, AllocationType kind) {
}
// TODO(ishell): review and rename kWordAligned to kTaggedAligned.
-enum AllocationAlignment {
- kWordAligned,
- kDoubleAligned,
- kDoubleUnaligned,
- kCodeAligned
-};
+enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
enum class AccessMode { ATOMIC, NON_ATOMIC };
@@ -899,15 +887,7 @@ enum ShouldThrow {
kDontThrow = Internals::kDontThrow
};
-// The Store Buffer (GC).
-enum StoreBufferEvent {
- kStoreBufferFullEvent,
- kStoreBufferStartScanningPagesEvent,
- kStoreBufferScanningPageEvent
-};
-
-using StoreBufferCallback = void (*)(Heap* heap, MemoryChunk* page,
- StoreBufferEvent event);
+enum class ThreadKind { kMain, kBackground };
// Union used for customized checking of the IEEE double types
// inlined within v8 runtime, rather than going to the underlying
@@ -1426,22 +1406,21 @@ enum class Operation {
// at different points by performing an 'OR' operation. Type feedback moves
// to a more generic type when we combine feedback.
// kNone -> kEnumCacheKeysAndIndices -> kEnumCacheKeys -> kAny
-class ForInFeedback {
- public:
- enum {
- kNone = 0x0,
- kEnumCacheKeysAndIndices = 0x1,
- kEnumCacheKeys = 0x3,
- kAny = 0x7
- };
+enum class ForInFeedback : uint8_t {
+ kNone = 0x0,
+ kEnumCacheKeysAndIndices = 0x1,
+ kEnumCacheKeys = 0x3,
+ kAny = 0x7
};
-STATIC_ASSERT((ForInFeedback::kNone |
- ForInFeedback::kEnumCacheKeysAndIndices) ==
- ForInFeedback::kEnumCacheKeysAndIndices);
-STATIC_ASSERT((ForInFeedback::kEnumCacheKeysAndIndices |
- ForInFeedback::kEnumCacheKeys) == ForInFeedback::kEnumCacheKeys);
-STATIC_ASSERT((ForInFeedback::kEnumCacheKeys | ForInFeedback::kAny) ==
- ForInFeedback::kAny);
+STATIC_ASSERT((static_cast<int>(ForInFeedback::kNone) |
+ static_cast<int>(ForInFeedback::kEnumCacheKeysAndIndices)) ==
+ static_cast<int>(ForInFeedback::kEnumCacheKeysAndIndices));
+STATIC_ASSERT((static_cast<int>(ForInFeedback::kEnumCacheKeysAndIndices) |
+ static_cast<int>(ForInFeedback::kEnumCacheKeys)) ==
+ static_cast<int>(ForInFeedback::kEnumCacheKeys));
+STATIC_ASSERT((static_cast<int>(ForInFeedback::kEnumCacheKeys) |
+ static_cast<int>(ForInFeedback::kAny)) ==
+ static_cast<int>(ForInFeedback::kAny));
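The asserts encode that feedback only widens under bitwise OR, so combining is just (illustrative helper):

  ForInFeedback CombineForIn(ForInFeedback a, ForInFeedback b) {
    return static_cast<ForInFeedback>(static_cast<int>(a) |
                                      static_cast<int>(b));
  }
  // e.g. CombineForIn(ForInFeedback::kEnumCacheKeysAndIndices,
  //                   ForInFeedback::kEnumCacheKeys)
  //      == ForInFeedback::kEnumCacheKeys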
enum class UnicodeEncoding : uint8_t {
// Different unicode encodings in a |word32|:
@@ -1530,13 +1509,31 @@ inline std::ostream& operator<<(std::ostream& os,
using FileAndLine = std::pair<const char*, int>;
-enum class OptimizationMarker {
- kLogFirstExecution,
- kNone,
- kCompileOptimized,
- kCompileOptimizedConcurrent,
- kInOptimizationQueue
+enum OptimizationMarker : int32_t {
+ // These values are set so that it is easy to check if there is a marker where
+ // some processing needs to be done.
+ kNone = 0b000,
+ kInOptimizationQueue = 0b001,
+ kCompileOptimized = 0b010,
+ kCompileOptimizedConcurrent = 0b011,
+ kLogFirstExecution = 0b100,
+ kLastOptimizationMarker = kLogFirstExecution
};
+// For kNone or kInOptimizationQueue we don't need any special processing.
+// To check both cases using a single mask, we expect the kNone to be 0 and
+// kInOptimizationQueue to be 1 so that we can mask off the lsb for checking.
+STATIC_ASSERT(kNone == 0b000 && kInOptimizationQueue == 0b001);
+STATIC_ASSERT(kLastOptimizationMarker <= 0b111);
+static constexpr uint32_t kNoneOrInOptimizationQueueMask = 0b110;
+
+inline bool IsInOptimizationQueueMarker(OptimizationMarker marker) {
+ return marker == OptimizationMarker::kInOptimizationQueue;
+}
+
+inline bool IsCompileOptimizedMarker(OptimizationMarker marker) {
+ return marker == OptimizationMarker::kCompileOptimized ||
+ marker == OptimizationMarker::kCompileOptimizedConcurrent;
+}
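The bit layout makes the "nothing to do" test a single mask; an illustrative helper using kNoneOrInOptimizationQueueMask:

  inline bool MarkerNeedsProcessing(OptimizationMarker marker) {
    // Zero for kNone (0b000) and kInOptimizationQueue (0b001) only.
    return (marker & kNoneOrInOptimizationQueueMask) != 0;
  }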
inline std::ostream& operator<<(std::ostream& os,
const OptimizationMarker& marker) {
@@ -1552,8 +1549,27 @@ inline std::ostream& operator<<(std::ostream& os,
case OptimizationMarker::kInOptimizationQueue:
return os << "OptimizationMarker::kInOptimizationQueue";
}
- UNREACHABLE();
- return os;
+}
+
+enum class OptimizationTier {
+ kNone = 0b00,
+ kMidTier = 0b01,
+ kTopTier = 0b10,
+ kLastOptimizationTier = kTopTier
+};
+static constexpr uint32_t kNoneOrMidTierMask = 0b10;
+static constexpr uint32_t kNoneMask = 0b11;
+
+inline std::ostream& operator<<(std::ostream& os,
+ const OptimizationTier& tier) {
+ switch (tier) {
+ case OptimizationTier::kNone:
+ return os << "OptimizationTier::kNone";
+ case OptimizationTier::kMidTier:
+ return os << "OptimizationTier::kMidTier";
+ case OptimizationTier::kTopTier:
+ return os << "OptimizationTier::kTopTier";
+ }
}
enum class SpeculationMode { kAllowSpeculation, kDisallowSpeculation };
@@ -1618,7 +1634,6 @@ enum class LoadSensitivity {
V(TrapDivUnrepresentable) \
V(TrapRemByZero) \
V(TrapFloatUnrepresentable) \
- V(TrapFuncInvalid) \
V(TrapFuncSigMismatch) \
V(TrapDataSegmentDropped) \
V(TrapElemSegmentDropped) \
@@ -1627,7 +1642,6 @@ enum class LoadSensitivity {
V(TrapRethrowNull) \
V(TrapNullDereference) \
V(TrapIllegalCast) \
- V(TrapWasmJSFunction) \
V(TrapArrayOutOfBounds)
enum KeyedAccessLoadMode {
@@ -1695,7 +1709,67 @@ enum class TraceRetainingPathMode { kEnabled, kDisabled };
// can be used in Torque.
enum class VariableAllocationInfo { NONE, STACK, CONTEXT, UNUSED };
+enum class DynamicMapChecksStatus : uint8_t {
+ kSuccess = 0,
+ kBailout = 1,
+ kDeopt = 2
+};
+
+#ifdef V8_COMPRESS_POINTERS
+class IsolateRoot {
+ public:
+ explicit constexpr IsolateRoot(Address address) : address_(address) {}
+ // NOLINTNEXTLINE
+ inline IsolateRoot(const Isolate* isolate);
+ // NOLINTNEXTLINE
+ inline IsolateRoot(const LocalIsolate* isolate);
+
+ inline Address address() const;
+
+ private:
+ Address address_;
+};
+#else
+class IsolateRoot {
+ public:
+ IsolateRoot() = default;
+ // NOLINTNEXTLINE
+ IsolateRoot(const Isolate* isolate) {}
+ // NOLINTNEXTLINE
+ IsolateRoot(const LocalIsolate* isolate) {}
+};
+#endif
+
+class int31_t {
+ public:
+ constexpr int31_t() : value_(0) {}
+ constexpr int31_t(int value) : value_(value) { // NOLINT(runtime/explicit)
+ DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
+ }
+ int31_t& operator=(int value) {
+ DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0);
+ value_ = value;
+ return *this;
+ }
+ int32_t value() const { return value_; }
+ operator int32_t() const { return value_; }
+
+ private:
+ int32_t value_;
+};
+
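The DCHECK in int31_t admits exactly the values whose top two bits agree, i.e. anything representable in 31 bits:

  int31_t ok_max(0x3FFFFFFF);  // bits 31 and 30 both clear
  int31_t ok_neg(-1);          // bits 31 and 30 both set
  // int31_t bad(0x40000000);  // would DCHECK: bit 30 set, bit 31 clear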
} // namespace internal
+
+// Tag dispatching support for acquire loads and release stores.
+struct AcquireLoadTag {};
+struct RelaxedLoadTag {};
+struct ReleaseStoreTag {};
+struct RelaxedStoreTag {};
+static constexpr AcquireLoadTag kAcquireLoad;
+static constexpr RelaxedLoadTag kRelaxedLoad;
+static constexpr ReleaseStoreTag kReleaseStore;
+static constexpr RelaxedStoreTag kRelaxedStore;
+
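These empty tag types select overloads at zero runtime cost; a self-contained sketch of the intended pattern (Cell is hypothetical):

  #include <atomic>

  class Cell {
   public:
    int value(v8::AcquireLoadTag) const {
      return v_.load(std::memory_order_acquire);
    }
    void set_value(int x, v8::ReleaseStoreTag) {
      v_.store(x, std::memory_order_release);
    }
   private:
    std::atomic<int> v_{0};
  };
  // Usage: cell.value(v8::kAcquireLoad); cell.set_value(1, v8::kReleaseStore);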
} // namespace v8
namespace i = v8::internal;
diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h
index dc4d7581f1..c8ff902642 100644
--- a/deps/v8/src/common/message-template.h
+++ b/deps/v8/src/common/message-template.h
@@ -92,6 +92,7 @@ namespace internal {
T(IllegalInvocation, "Illegal invocation") \
T(ImmutablePrototypeSet, \
"Immutable prototype object '%' cannot have their prototype set") \
+ T(ImportAssertionDuplicateKey, "Import assertion has duplicate key '%'") \
T(ImportCallNotNewExpression, "Cannot use new with import") \
T(ImportOutsideModule, "Cannot use import statement outside a module") \
T(ImportMetaOutsideModule, "Cannot use 'import.meta' outside a module") \
@@ -364,6 +365,8 @@ namespace internal {
T(ToRadixFormatRange, "toString() radix argument must be between 2 and 36") \
T(TypedArraySetOffsetOutOfBounds, "offset is out of bounds") \
T(TypedArraySetSourceTooLarge, "Source is too large") \
+ T(TypedArrayTooLargeToSort, \
+ "Custom comparefn not supported for huge TypedArrays") \
T(ValueOutOfRange, "Value % out of range for % options property %") \
/* SyntaxError */ \
T(AmbiguousExport, \
@@ -392,6 +395,8 @@ namespace internal {
"Async functions can only be declared at the top level or inside a " \
"block.") \
T(IllegalBreak, "Illegal break statement") \
+ T(ModuleExportNameWithoutFromClause, \
+ "String literal module export names must be followed by a 'from' clause") \
T(NoIterationStatement, \
"Illegal continue statement: no surrounding iteration statement") \
T(IllegalContinue, \
@@ -416,6 +421,8 @@ namespace internal {
"Invalid left-hand side expression in postfix operation") \
T(InvalidLhsInPrefixOp, \
"Invalid left-hand side expression in prefix operation") \
+ T(InvalidModuleExportName, \
+ "Invalid module export name: contains unpaired surrogate") \
T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'") \
T(InvalidOrUnexpectedToken, "Invalid or unexpected token") \
T(InvalidPrivateBrand, "Object must be an instance of class %") \
@@ -551,19 +558,17 @@ namespace internal {
T(WasmTrapDivUnrepresentable, "divide result unrepresentable") \
T(WasmTrapRemByZero, "remainder by zero") \
T(WasmTrapFloatUnrepresentable, "float unrepresentable in integer range") \
- T(WasmTrapFuncInvalid, "invalid index into function table") \
+ T(WasmTrapTableOutOfBounds, "table index is out of bounds") \
T(WasmTrapFuncSigMismatch, "function signature mismatch") \
T(WasmTrapMultiReturnLengthMismatch, "multi-return length mismatch") \
- T(WasmTrapTypeError, "wasm function signature contains illegal type") \
+ T(WasmTrapJSTypeError, "type incompatibility when transforming from/to JS") \
T(WasmTrapDataSegmentDropped, "data segment has been dropped") \
T(WasmTrapElemSegmentDropped, "element segment has been dropped") \
- T(WasmTrapTableOutOfBounds, "table access out of bounds") \
T(WasmTrapBrOnExnNull, "br_on_exn on null value") \
T(WasmTrapRethrowNull, "rethrowing null value") \
T(WasmTrapNullDereference, "dereferencing a null pointer") \
T(WasmTrapIllegalCast, "illegal cast") \
T(WasmTrapArrayOutOfBounds, "array element access out of bounds") \
- T(WasmTrapWasmJSFunction, "cannot call WebAssembly.Function with call_ref") \
T(WasmExceptionError, "wasm exception") \
/* Asm.js validation related */ \
T(AsmJsInvalid, "Invalid asm.js: %") \
diff --git a/deps/v8/src/common/ptr-compr-inl.h b/deps/v8/src/common/ptr-compr-inl.h
index ad0b17ff5e..f74c4d82c9 100644
--- a/deps/v8/src/common/ptr-compr-inl.h
+++ b/deps/v8/src/common/ptr-compr-inl.h
@@ -8,32 +8,37 @@
#include "include/v8-internal.h"
#include "src/common/ptr-compr.h"
#include "src/execution/isolate.h"
+#include "src/execution/local-isolate-inl.h"
namespace v8 {
namespace internal {
-#if V8_TARGET_ARCH_64_BIT
+#ifdef V8_COMPRESS_POINTERS
+
+IsolateRoot::IsolateRoot(const Isolate* isolate)
+ : address_(isolate->isolate_root()) {}
+IsolateRoot::IsolateRoot(const LocalIsolate* isolate)
+ : address_(isolate->isolate_root()) {}
+
+Address IsolateRoot::address() const {
+ Address ret = address_;
+ ret = reinterpret_cast<Address>(V8_ASSUME_ALIGNED(
+ reinterpret_cast<void*>(ret), kPtrComprIsolateRootAlignment));
+ return ret;
+}
+
// Compresses full-pointer representation of a tagged value to on-heap
// representation.
V8_INLINE Tagged_t CompressTagged(Address tagged) {
return static_cast<Tagged_t>(static_cast<uint32_t>(tagged));
}
-V8_INLINE Address GetIsolateRoot(Address on_heap_addr) {
- // We subtract 1 here in order to let the compiler generate addition of 32-bit
- // signed constant instead of 64-bit constant (the problem is that 2Gb looks
- // like a negative 32-bit value). It's correct because we will never use
- // leftmost address of V8 heap as |on_heap_addr|.
+V8_INLINE constexpr Address GetIsolateRootAddress(Address on_heap_addr) {
return RoundDown<kPtrComprIsolateRootAlignment>(on_heap_addr);
}
-V8_INLINE Address GetIsolateRoot(const Isolate* isolate) {
- Address isolate_root = isolate->isolate_root();
-#ifdef V8_COMPRESS_POINTERS
- isolate_root = reinterpret_cast<Address>(V8_ASSUME_ALIGNED(
- reinterpret_cast<void*>(isolate_root), kPtrComprIsolateRootAlignment));
-#endif
- return isolate_root;
+V8_INLINE Address GetIsolateRootAddress(IsolateRoot isolate) {
+ return isolate.address();
}
// Decompresses smi value.
@@ -47,7 +52,7 @@ V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) {
template <typename TOnHeapAddress>
V8_INLINE Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
Tagged_t raw_value) {
- return GetIsolateRoot(on_heap_addr) + static_cast<Address>(raw_value);
+ return GetIsolateRootAddress(on_heap_addr) + static_cast<Address>(raw_value);
}
// Decompresses any tagged value, preserving both weak- and smi- tags.
@@ -57,22 +62,18 @@ V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
return DecompressTaggedPointer(on_heap_addr, raw_value);
}
-#ifdef V8_COMPRESS_POINTERS
-
STATIC_ASSERT(kPtrComprHeapReservationSize ==
Internals::kPtrComprHeapReservationSize);
STATIC_ASSERT(kPtrComprIsolateRootAlignment ==
Internals::kPtrComprIsolateRootAlignment);
-#endif // V8_COMPRESS_POINTERS
-
#else
V8_INLINE Tagged_t CompressTagged(Address tagged) { UNREACHABLE(); }
-V8_INLINE Address GetIsolateRoot(Address on_heap_addr) { UNREACHABLE(); }
+V8_INLINE Address GetIsolateRootAddress(Address on_heap_addr) { UNREACHABLE(); }
-V8_INLINE Address GetIsolateRoot(const Isolate* isolate) { UNREACHABLE(); }
+V8_INLINE Address GetIsolateRootAddress(IsolateRoot isolate) { UNREACHABLE(); }
V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) { UNREACHABLE(); }
@@ -88,7 +89,7 @@ V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
UNREACHABLE();
}
-#endif // V8_TARGET_ARCH_64_BIT
+#endif // V8_COMPRESS_POINTERS
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/common/ptr-compr.h b/deps/v8/src/common/ptr-compr.h
index 105d5f1a4f..0c82c2328c 100644
--- a/deps/v8/src/common/ptr-compr.h
+++ b/deps/v8/src/common/ptr-compr.h
@@ -7,7 +7,7 @@
#include "src/common/globals.h"
-#if V8_TARGET_ARCH_64_BIT
+#ifdef V8_COMPRESS_POINTERS
namespace v8 {
namespace internal {
@@ -19,6 +19,6 @@ constexpr size_t kPtrComprIsolateRootAlignment = size_t{4} * GB;
} // namespace internal
} // namespace v8
-#endif // V8_TARGET_ARCH_64_BIT
+#endif // V8_COMPRESS_POINTERS
#endif // V8_COMMON_PTR_COMPR_H_
diff --git a/deps/v8/src/compiler-dispatcher/DIR_METADATA b/deps/v8/src/compiler-dispatcher/DIR_METADATA
new file mode 100644
index 0000000000..fc018666b1
--- /dev/null
+++ b/deps/v8/src/compiler-dispatcher/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Compiler"
+} \ No newline at end of file
diff --git a/deps/v8/src/compiler-dispatcher/OWNERS b/deps/v8/src/compiler-dispatcher/OWNERS
index b71c01a305..7bc22f1662 100644
--- a/deps/v8/src/compiler-dispatcher/OWNERS
+++ b/deps/v8/src/compiler-dispatcher/OWNERS
@@ -2,5 +2,3 @@ ahaas@chromium.org
jkummerow@chromium.org
leszeks@chromium.org
rmcilroy@chromium.org
-
-# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index c20a38a7e4..a22c79e0ad 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -114,7 +114,7 @@ void CompilerDispatcher::RegisterSharedFunctionInfo(
auto job_it = jobs_.find(job_id);
DCHECK_NE(job_it, jobs_.end());
Job* job = job_it->second.get();
- shared_to_unoptimized_job_id_.Set(function_handle, job_id);
+ shared_to_unoptimized_job_id_.Insert(function_handle, job_id);
{
base::MutexGuard lock(&mutex_);
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 528a9babe3..931a9e197b 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -8,6 +8,8 @@
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/execution/isolate.h"
+#include "src/execution/local-isolate.h"
+#include "src/heap/local-heap.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
@@ -56,6 +58,7 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
private:
// v8::Task overrides.
void RunInternal() override {
+ LocalIsolate local_isolate(isolate_, ThreadKind::kBackground);
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
@@ -76,8 +79,8 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
dispatcher_->recompilation_delay_));
}
- dispatcher_->CompileNext(dispatcher_->NextInput(true),
- runtime_call_stats_scope.Get());
+ dispatcher_->CompileNext(dispatcher_->NextInput(&local_isolate, true),
+ runtime_call_stats_scope.Get(), &local_isolate);
}
{
base::MutexGuard lock_guard(&dispatcher_->ref_count_mutex_);
@@ -106,7 +109,7 @@ OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
}
OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
- bool check_if_flushing) {
+ LocalIsolate* local_isolate, bool check_if_flushing) {
base::MutexGuard access_input_queue_(&input_queue_mutex_);
if (input_queue_length_ == 0) return nullptr;
OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
@@ -115,6 +118,7 @@ OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
input_queue_length_--;
if (check_if_flushing) {
if (mode_ == FLUSH) {
+ UnparkedScope scope(local_isolate->heap());
AllowHandleDereference allow_handle_dereference;
DisposeCompilationJob(job, true);
return nullptr;
@@ -124,11 +128,12 @@ OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
}
void OptimizingCompileDispatcher::CompileNext(OptimizedCompilationJob* job,
- RuntimeCallStats* stats) {
+ RuntimeCallStats* stats,
+ LocalIsolate* local_isolate) {
if (!job) return;
// The function may have already been optimized by OSR. Simply continue.
- CompilationJob::Status status = job->ExecuteJob(stats);
+ CompilationJob::Status status = job->ExecuteJob(stats, local_isolate);
USE(status); // Prevent an unused-variable error.
{
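
The compile task now owns a LocalIsolate for its whole run and threads it into both the queue pop and the job execution; the UnparkedScope on the flush path is needed before heap-referencing jobs can be disposed off the main thread. A stand-in sketch of that shape (Dispatcher, Job, and LocalIsolate here are simplified placeholders, not the V8 classes):

    #include <mutex>
    #include <queue>

    struct LocalIsolate { /* per-background-thread heap access */ };

    struct Job {
      void Execute(LocalIsolate*) { /* compile off the main thread */ }
    };

    class Dispatcher {
     public:
      Job* NextInput(LocalIsolate* local_isolate) {
        std::lock_guard<std::mutex> lock(input_mutex_);
        if (input_queue_.empty()) return nullptr;
        Job* job = input_queue_.front();
        input_queue_.pop();
        // A flush path that frees heap-referencing jobs would first need an
        // unparked heap here, mirroring the UnparkedScope in the diff.
        return job;
      }

      void CompileNext(Job* job, LocalIsolate* local_isolate) {
        if (!job) return;
        job->Execute(local_isolate);
      }

     private:
      std::mutex input_mutex_;
      std::queue<Job*> input_queue_;
    };
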
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index 51803822d1..36f285d163 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -18,6 +18,7 @@
namespace v8 {
namespace internal {
+class LocalHeap;
class OptimizedCompilationJob;
class RuntimeCallStats;
class SharedFunctionInfo;
@@ -58,8 +59,10 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
enum ModeFlag { COMPILE, FLUSH };
void FlushOutputQueue(bool restore_function_code);
- void CompileNext(OptimizedCompilationJob* job, RuntimeCallStats* stats);
- OptimizedCompilationJob* NextInput(bool check_if_flushing = false);
+ void CompileNext(OptimizedCompilationJob* job, RuntimeCallStats* stats,
+ LocalIsolate* local_isolate);
+ OptimizedCompilationJob* NextInput(LocalIsolate* local_isolate,
+ bool check_if_flushing = false);
inline int InputQueueIndex(int i) {
int result = (i + input_queue_shift_) % input_queue_capacity_;
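
The input queue behind these signatures is a fixed-capacity ring buffer; InputQueueIndex maps a logical position to a physical slot by adding the current shift and wrapping at the capacity, as this minimal extraction shows:

    // Logical index i, current head shift, and fixed capacity.
    int InputQueueIndex(int i, int shift, int capacity) {
      return (i + shift) % capacity;
    }
    // With capacity 8 and shift 6, logical indices 0..3 land in
    // physical slots 6, 7, 0, 1.
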
diff --git a/deps/v8/src/compiler/DIR_METADATA b/deps/v8/src/compiler/DIR_METADATA
new file mode 100644
index 0000000000..fc018666b1
--- /dev/null
+++ b/deps/v8/src/compiler/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Compiler"
+} \ No newline at end of file
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 9fd19af803..afc8551ae0 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -7,6 +7,7 @@ mvstanton@chromium.org
mslekova@chromium.org
jgruber@chromium.org
nicohartmann@chromium.org
+solanes@chromium.org
per-file wasm-*=ahaas@chromium.org
per-file wasm-*=bbudge@chromium.org
@@ -20,5 +21,3 @@ per-file int64-lowering.*=ahaas@chromium.org
per-file simd-scalar-lowering.*=bbudge@chromium.org
per-file simd-scalar-lowering.*=gdeepti@chromium.org
per-file simd-scalar-lowering.*=zhin@chromium.org
-
-# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index f9d15264e6..ccb8772c4e 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -17,7 +17,6 @@
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table.h"
#include "src/objects/source-text-module.h"
-#include "torque-generated/exported-class-definitions.h"
namespace v8 {
namespace internal {
@@ -425,20 +424,34 @@ FieldAccess AccessBuilder::ForJSTypedArrayExternalPointer() {
: Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier,
- LoadSensitivity::kCritical};
+ LoadSensitivity::kCritical,
+ ConstFieldInfo::None(),
+ false,
+#ifdef V8_HEAP_SANDBOX
+ kTypedArrayExternalPointerTag
+#endif
+ };
return access;
}
// static
FieldAccess AccessBuilder::ForJSDataViewDataPointer() {
- FieldAccess access = {kTaggedBase,
- JSDataView::kDataPointerOffset,
- MaybeHandle<Name>(),
- MaybeHandle<Map>(),
- V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
- : Type::ExternalPointer(),
- MachineType::Pointer(),
- kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase,
+ JSDataView::kDataPointerOffset,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
+ : Type::ExternalPointer(),
+ MachineType::Pointer(),
+ kNoWriteBarrier,
+ LoadSensitivity::kUnsafe,
+ ConstFieldInfo::None(),
+ false,
+#ifdef V8_HEAP_SANDBOX
+ kDataViewDataPointerTag,
+#endif
+ };
return access;
}
@@ -734,14 +747,22 @@ FieldAccess AccessBuilder::ForSlicedStringParent() {
// static
FieldAccess AccessBuilder::ForExternalStringResourceData() {
- FieldAccess access = {kTaggedBase,
- ExternalString::kResourceDataOffset,
- Handle<Name>(),
- MaybeHandle<Map>(),
- V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
- : Type::ExternalPointer(),
- MachineType::Pointer(),
- kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase,
+ ExternalString::kResourceDataOffset,
+ Handle<Name>(),
+ MaybeHandle<Map>(),
+ V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
+ : Type::ExternalPointer(),
+ MachineType::Pointer(),
+ kNoWriteBarrier,
+ LoadSensitivity::kUnsafe,
+ ConstFieldInfo::None(),
+ false,
+#ifdef V8_HEAP_SANDBOX
+ kExternalStringResourceTag,
+#endif
+ };
return access;
}
@@ -1239,21 +1260,32 @@ FieldAccess AccessBuilder::ForFeedbackCellInterruptBudget() {
}
// static
-FieldAccess AccessBuilder::ForFeedbackVectorClosureFeedbackCellArray() {
+FieldAccess AccessBuilder::ForFeedbackVectorInvocationCount() {
+ FieldAccess access = {kTaggedBase,
+ FeedbackVector::kInvocationCountOffset,
+ Handle<Name>(),
+ MaybeHandle<Map>(),
+ TypeCache::Get()->kInt32,
+ MachineType::Int32(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForFeedbackVectorFlags() {
FieldAccess access = {
- kTaggedBase, FeedbackVector::kClosureFeedbackCellArrayOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::TaggedPointer(),
- kFullWriteBarrier};
+ kTaggedBase, FeedbackVector::kFlagsOffset, Handle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get()->kUint32, MachineType::Uint32(),
+ kNoWriteBarrier};
return access;
}
// static
-FieldAccess AccessBuilder::ForFeedbackVectorOptimizedCodeWeakOrSmi() {
+FieldAccess AccessBuilder::ForFeedbackVectorClosureFeedbackCellArray() {
FieldAccess access = {
- kTaggedBase, FeedbackVector::kOptimizedCodeWeakOrSmiOffset,
+ kTaggedBase, FeedbackVector::kClosureFeedbackCellArrayOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
+ Type::Any(), MachineType::TaggedPointer(),
kFullWriteBarrier};
return access;
}
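
The initializers above grow long for a mechanical reason: FieldAccess is initialized positionally, so once a trailing member (the heap-sandbox external pointer tag) needs a non-default value, every member before it must be spelled out as well. A compact illustration of the pattern, using a stand-in struct rather than V8's FieldAccess:

    struct FieldAccessLike {
      int base;
      int offset;
      int write_barrier;
      int load_sensitivity = 0;
      bool is_store_in_literal = false;
      int external_pointer_tag = 0;
    };

    FieldAccessLike access = {
        /*base=*/0,
        /*offset=*/8,
        /*write_barrier=*/0,
        /*load_sensitivity=*/0,        // defaults would have sufficed...
        /*is_store_in_literal=*/false,
        /*external_pointer_tag=*/42,   // ...but this one must be set
    };
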
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index af5882988d..ce1e51ff23 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -344,8 +344,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForFeedbackCellInterruptBudget();
// Provides access to a FeedbackVector's fields.
+ static FieldAccess ForFeedbackVectorInvocationCount();
+ static FieldAccess ForFeedbackVectorFlags();
static FieldAccess ForFeedbackVectorClosureFeedbackCellArray();
- static FieldAccess ForFeedbackVectorOptimizedCodeWeakOrSmi();
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 046927e943..ddf742e708 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -70,12 +70,13 @@ std::ostream& operator<<(std::ostream& os, AccessMode access_mode) {
UNREACHABLE();
}
-ElementAccessInfo::ElementAccessInfo(ZoneVector<Handle<Map>>&& receiver_maps,
- ElementsKind elements_kind, Zone* zone)
+ElementAccessInfo::ElementAccessInfo(
+ ZoneVector<Handle<Map>>&& lookup_start_object_maps,
+ ElementsKind elements_kind, Zone* zone)
: elements_kind_(elements_kind),
- receiver_maps_(receiver_maps),
+ lookup_start_object_maps_(lookup_start_object_maps),
transition_sources_(zone) {
- CHECK(!receiver_maps.empty());
+ CHECK(!lookup_start_object_maps.empty());
}
// static
@@ -158,27 +159,26 @@ MinimorphicLoadPropertyAccessInfo MinimorphicLoadPropertyAccessInfo::Invalid() {
PropertyAccessInfo::PropertyAccessInfo(Zone* zone)
: kind_(kInvalid),
- receiver_maps_(zone),
+ lookup_start_object_maps_(zone),
unrecorded_dependencies_(zone),
field_representation_(Representation::None()),
field_type_(Type::None()) {}
-PropertyAccessInfo::PropertyAccessInfo(Zone* zone, Kind kind,
- MaybeHandle<JSObject> holder,
- ZoneVector<Handle<Map>>&& receiver_maps)
+PropertyAccessInfo::PropertyAccessInfo(
+ Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
+ ZoneVector<Handle<Map>>&& lookup_start_object_maps)
: kind_(kind),
- receiver_maps_(receiver_maps),
+ lookup_start_object_maps_(lookup_start_object_maps),
unrecorded_dependencies_(zone),
holder_(holder),
field_representation_(Representation::None()),
field_type_(Type::None()) {}
-PropertyAccessInfo::PropertyAccessInfo(Zone* zone, Kind kind,
- MaybeHandle<JSObject> holder,
- Handle<Object> constant,
- ZoneVector<Handle<Map>>&& receiver_maps)
+PropertyAccessInfo::PropertyAccessInfo(
+ Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
+ Handle<Object> constant, ZoneVector<Handle<Map>>&& lookup_start_object_maps)
: kind_(kind),
- receiver_maps_(receiver_maps),
+ lookup_start_object_maps_(lookup_start_object_maps),
unrecorded_dependencies_(zone),
constant_(constant),
holder_(holder),
@@ -189,10 +189,10 @@ PropertyAccessInfo::PropertyAccessInfo(
Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
FieldIndex field_index, Representation field_representation,
Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
- ZoneVector<Handle<Map>>&& receiver_maps,
+ ZoneVector<Handle<Map>>&& lookup_start_object_maps,
ZoneVector<CompilationDependency const*>&& unrecorded_dependencies)
: kind_(kind),
- receiver_maps_(receiver_maps),
+ lookup_start_object_maps_(lookup_start_object_maps),
unrecorded_dependencies_(std::move(unrecorded_dependencies)),
transition_map_(transition_map),
holder_(holder),
@@ -265,9 +265,10 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
}
this->field_type_ =
Type::Union(this->field_type_, that->field_type_, zone);
- this->receiver_maps_.insert(this->receiver_maps_.end(),
- that->receiver_maps_.begin(),
- that->receiver_maps_.end());
+ this->lookup_start_object_maps_.insert(
+ this->lookup_start_object_maps_.end(),
+ that->lookup_start_object_maps_.begin(),
+ that->lookup_start_object_maps_.end());
this->unrecorded_dependencies_.insert(
this->unrecorded_dependencies_.end(),
that->unrecorded_dependencies_.begin(),
@@ -282,9 +283,10 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
if (this->constant_.address() == that->constant_.address()) {
DCHECK(this->unrecorded_dependencies_.empty());
DCHECK(that->unrecorded_dependencies_.empty());
- this->receiver_maps_.insert(this->receiver_maps_.end(),
- that->receiver_maps_.begin(),
- that->receiver_maps_.end());
+ this->lookup_start_object_maps_.insert(
+ this->lookup_start_object_maps_.end(),
+ that->lookup_start_object_maps_.begin(),
+ that->lookup_start_object_maps_.end());
return true;
}
return false;
@@ -294,9 +296,10 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
case kStringLength: {
DCHECK(this->unrecorded_dependencies_.empty());
DCHECK(that->unrecorded_dependencies_.empty());
- this->receiver_maps_.insert(this->receiver_maps_.end(),
- that->receiver_maps_.begin(),
- that->receiver_maps_.end());
+ this->lookup_start_object_maps_.insert(
+ this->lookup_start_object_maps_.end(),
+ that->lookup_start_object_maps_.begin(),
+ that->lookup_start_object_maps_.end());
return true;
}
case kModuleExport:
@@ -364,7 +367,8 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
Handle<Map> receiver_map, Handle<Map> map, MaybeHandle<JSObject> holder,
InternalIndex descriptor, AccessMode access_mode) const {
DCHECK(descriptor.is_found());
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate());
PropertyDetails const details = descriptors->GetDetails(descriptor);
int index = descriptors->GetFieldIndex(descriptor);
Representation details_representation = details.representation();
@@ -429,7 +433,7 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
PropertyConstness constness;
if (details.IsReadOnly() && !details.IsConfigurable()) {
constness = PropertyConstness::kConst;
- } else if (FLAG_turboprop && !map->is_prototype_map()) {
+ } else if (broker()->is_turboprop() && !map->is_prototype_map()) {
// The constness feedback is too unstable for the aggressive compilation
// of turboprop.
constness = PropertyConstness::kMutable;
@@ -459,7 +463,8 @@ PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
MaybeHandle<JSObject> holder, InternalIndex descriptor,
AccessMode access_mode) const {
DCHECK(descriptor.is_found());
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate());
SLOW_DCHECK(descriptor == descriptors->Search(*name, *map));
if (map->instance_type() == JS_MODULE_NAMESPACE_TYPE) {
DCHECK(map->is_prototype_map());
@@ -557,8 +562,8 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
MaybeHandle<JSObject> holder;
while (true) {
// Lookup the named property on the {map}.
- Handle<DescriptorArray> descriptors(
- map->synchronized_instance_descriptors(), isolate());
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kAcquireLoad),
+ isolate());
InternalIndex const number =
descriptors->Search(*name, *map, broker()->is_concurrent_inlining());
if (number.is_found()) {
@@ -830,7 +835,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
Handle<Map> transition_map(transition, isolate());
InternalIndex const number = transition_map->LastAdded();
Handle<DescriptorArray> descriptors(
- transition_map->synchronized_instance_descriptors(), isolate());
+ transition_map->instance_descriptors(kAcquireLoad), isolate());
PropertyDetails const details = descriptors->GetDetails(number);
// Don't bother optimizing stores to read-only properties.
if (details.IsReadOnly()) {
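
The instance_descriptors() calls above now take explicit kRelaxedLoad / kAcquireLoad tags, making the memory order of each descriptor-array read visible at the call site for concurrent inlining. A sketch of the tag-dispatch pattern behind such accessors, with stand-in types rather than V8's Map and DescriptorArray:

    #include <atomic>

    struct RelaxedLoadTag {};
    struct AcquireLoadTag {};
    inline constexpr RelaxedLoadTag kRelaxedLoad{};
    inline constexpr AcquireLoadTag kAcquireLoad{};

    struct MapLike {
      std::atomic<void*> descriptors_{nullptr};

      void* instance_descriptors(RelaxedLoadTag) const {
        // Reads that cannot race with publication of a new array.
        return descriptors_.load(std::memory_order_relaxed);
      }
      void* instance_descriptors(AcquireLoadTag) const {
        // Pairs with a release store by whichever thread installed the array.
        return descriptors_.load(std::memory_order_acquire);
      }
    };
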
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 65ea6a5376..aa402fe695 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -37,25 +37,25 @@ std::ostream& operator<<(std::ostream&, AccessMode);
// This class encapsulates all information required to access a certain element.
class ElementAccessInfo final {
public:
- ElementAccessInfo(ZoneVector<Handle<Map>>&& receiver_maps,
+ ElementAccessInfo(ZoneVector<Handle<Map>>&& lookup_start_object_maps,
ElementsKind elements_kind, Zone* zone);
ElementsKind elements_kind() const { return elements_kind_; }
- ZoneVector<Handle<Map>> const& receiver_maps() const {
- return receiver_maps_;
+ ZoneVector<Handle<Map>> const& lookup_start_object_maps() const {
+ return lookup_start_object_maps_;
}
ZoneVector<Handle<Map>> const& transition_sources() const {
return transition_sources_;
}
void AddTransitionSource(Handle<Map> map) {
- CHECK_EQ(receiver_maps_.size(), 1);
+ CHECK_EQ(lookup_start_object_maps_.size(), 1);
transition_sources_.push_back(map);
}
private:
ElementsKind elements_kind_;
- ZoneVector<Handle<Map>> receiver_maps_;
+ ZoneVector<Handle<Map>> lookup_start_object_maps_;
ZoneVector<Handle<Map>> transition_sources_;
};
@@ -128,26 +128,26 @@ class PropertyAccessInfo final {
Type field_type() const { return field_type_; }
Representation field_representation() const { return field_representation_; }
MaybeHandle<Map> field_map() const { return field_map_; }
- ZoneVector<Handle<Map>> const& receiver_maps() const {
- return receiver_maps_;
+ ZoneVector<Handle<Map>> const& lookup_start_object_maps() const {
+ return lookup_start_object_maps_;
}
private:
explicit PropertyAccessInfo(Zone* zone);
PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
- ZoneVector<Handle<Map>>&& receiver_maps);
+ ZoneVector<Handle<Map>>&& lookup_start_object_maps);
PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
Handle<Object> constant,
- ZoneVector<Handle<Map>>&& receiver_maps);
+ ZoneVector<Handle<Map>>&& lookup_start_object_maps);
PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map, FieldIndex field_index,
Representation field_representation, Type field_type,
Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
- ZoneVector<Handle<Map>>&& receiver_maps,
+ ZoneVector<Handle<Map>>&& lookup_start_object_maps,
ZoneVector<CompilationDependency const*>&& dependencies);
Kind kind_;
- ZoneVector<Handle<Map>> receiver_maps_;
+ ZoneVector<Handle<Map>> lookup_start_object_maps_;
ZoneVector<CompilationDependency const*> unrecorded_dependencies_;
Handle<Object> constant_;
MaybeHandle<Map> transition_map_;
@@ -258,7 +258,9 @@ class AccessInfoFactory final {
TypeCache const* const type_cache_;
Zone* const zone_;
- DISALLOW_COPY_AND_ASSIGN(AccessInfoFactory);
+ // TODO(nicohartmann@): Move to public
+ AccessInfoFactory(const AccessInfoFactory&) = delete;
+ AccessInfoFactory& operator=(const AccessInfoFactory&) = delete;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/add-type-assertions-reducer.h b/deps/v8/src/compiler/add-type-assertions-reducer.h
index 36add040e1..bd8000a06f 100644
--- a/deps/v8/src/compiler/add-type-assertions-reducer.h
+++ b/deps/v8/src/compiler/add-type-assertions-reducer.h
@@ -22,6 +22,9 @@ class V8_EXPORT_PRIVATE AddTypeAssertionsReducer final
AddTypeAssertionsReducer(Editor* editor, JSGraph* jsgraph, Zone* zone);
~AddTypeAssertionsReducer() final;
+ AddTypeAssertionsReducer(const AddTypeAssertionsReducer&) = delete;
+ AddTypeAssertionsReducer& operator=(const AddTypeAssertionsReducer&) = delete;
+
const char* reducer_name() const override {
return "AddTypeAssertionsReducer";
}
@@ -34,8 +37,6 @@ class V8_EXPORT_PRIVATE AddTypeAssertionsReducer final
Graph* graph() { return jsgraph_->graph(); }
SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
-
- DISALLOW_COPY_AND_ASSIGN(AddTypeAssertionsReducer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/allocation-builder-inl.h b/deps/v8/src/compiler/allocation-builder-inl.h
index 8a9d74e071..ff1404baa7 100644
--- a/deps/v8/src/compiler/allocation-builder-inl.h
+++ b/deps/v8/src/compiler/allocation-builder-inl.h
@@ -7,9 +7,8 @@
#include "src/compiler/access-builder.h"
#include "src/compiler/allocation-builder.h"
+#include "src/objects/arguments-inl.h"
#include "src/objects/map-inl.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/allocation-builder.h b/deps/v8/src/compiler/allocation-builder.h
index 709146950c..c9a2570493 100644
--- a/deps/v8/src/compiler/allocation-builder.h
+++ b/deps/v8/src/compiler/allocation-builder.h
@@ -27,7 +27,7 @@ class AllocationBuilder final {
// Primitive allocation of static size.
void Allocate(int size, AllocationType allocation = AllocationType::kYoung,
Type type = Type::Any()) {
- DCHECK_LE(size, kMaxRegularHeapObjectSize);
+ DCHECK_LE(size, Heap::MaxRegularHeapObjectSize(allocation));
effect_ = graph()->NewNode(
common()->BeginRegion(RegionObservability::kNotObservable), effect_);
allocation_ =
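
The tightened DCHECK reflects that the "regular object" size limit is a function of the allocation type rather than a single constant; a young-generation allocation can have a smaller cap than an old-generation one. A hedged sketch of the idea only, with placeholder numbers that are not V8's actual limits:

    enum class AllocationType { kYoung, kOld };

    // Placeholder values for illustration; the real limits come from
    // Heap::MaxRegularHeapObjectSize(allocation).
    constexpr int MaxRegularHeapObjectSize(AllocationType type) {
      return type == AllocationType::kYoung ? 128 * 1024 : 512 * 1024;
    }
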
diff --git a/deps/v8/src/compiler/backend/DIR_METADATA b/deps/v8/src/compiler/backend/DIR_METADATA
new file mode 100644
index 0000000000..fc018666b1
--- /dev/null
+++ b/deps/v8/src/compiler/backend/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Compiler"
+} \ No newline at end of file
diff --git a/deps/v8/src/compiler/backend/OWNERS b/deps/v8/src/compiler/backend/OWNERS
index d2b3198471..d55672b606 100644
--- a/deps/v8/src/compiler/backend/OWNERS
+++ b/deps/v8/src/compiler/backend/OWNERS
@@ -6,5 +6,3 @@ zhin@chromium.org
per-file register-allocator*=thibaudm@chromium.org
per-file spill-placer*=thibaudm@chromium.org
-
-# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 2c7e856239..9267cb1f0c 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -755,7 +755,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ CallCodeObject(reg);
}
@@ -797,7 +797,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ JumpCodeObject(reg);
}
@@ -825,7 +825,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ Jump(reg);
unwinding_info_writer_.MarkBlockWillExit();
@@ -962,9 +962,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- CodeGenResult result = AssembleDeoptimizerCall(exit);
- if (result != kSuccess) return result;
- unwinding_info_writer_.MarkBlockWillExit();
+ __ b(exit->label());
break;
}
case kArchRet:
@@ -2539,7 +2537,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmI16x8AddSaturateS: {
+ case kArmI16x8AddSatS: {
__ vqadd(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
@@ -2552,7 +2550,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmI16x8SubSaturateS: {
+ case kArmI16x8SubSatS: {
__ vqsub(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
@@ -2611,12 +2609,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmI16x8UConvertI32x4:
ASSEMBLE_NEON_NARROWING_OP(NeonU16, NeonS16);
break;
- case kArmI16x8AddSaturateU: {
+ case kArmI16x8AddSatU: {
__ vqadd(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmI16x8SubSaturateU: {
+ case kArmI16x8SubSatU: {
__ vqsub(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
@@ -2707,7 +2705,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmI8x16AddSaturateS: {
+ case kArmI8x16AddSatS: {
__ vqadd(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
@@ -2717,7 +2715,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmI8x16SubSaturateS: {
+ case kArmI8x16SubSatS: {
__ vqsub(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
@@ -2765,12 +2763,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmI8x16UConvertI16x8:
ASSEMBLE_NEON_NARROWING_OP(NeonU8, NeonS8);
break;
- case kArmI8x16AddSaturateU: {
+ case kArmI8x16AddSatU: {
__ vqadd(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kArmI8x16SubSaturateU: {
+ case kArmI8x16SubSatU: {
__ vqsub(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
@@ -3121,8 +3119,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int scratch_s_base = scratch.code() * 4;
for (int j = 0; j < 4; j++) {
uint32_t four_lanes = i.InputUint32(2 + j);
- // Ensure byte indices are in [0, 31] so masks are never NaNs.
- four_lanes &= 0x1F1F1F1F;
+ DCHECK_EQ(0, four_lanes & (table_size == 2 ? 0xF0F0F0F0 : 0xE0E0E0E0));
__ vmov(SwVfpRegister::from_code(scratch_s_base + j),
Float32::FromBits(four_lanes));
}
@@ -3210,63 +3207,75 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
- case kArmS8x16LoadSplat: {
+ case kArmS128Load8Splat: {
__ vld1r(Neon8, NeonListOperand(i.OutputSimd128Register()),
i.NeonInputOperand(0));
break;
}
- case kArmS16x8LoadSplat: {
+ case kArmS128Load16Splat: {
__ vld1r(Neon16, NeonListOperand(i.OutputSimd128Register()),
i.NeonInputOperand(0));
break;
}
- case kArmS32x4LoadSplat: {
+ case kArmS128Load32Splat: {
__ vld1r(Neon32, NeonListOperand(i.OutputSimd128Register()),
i.NeonInputOperand(0));
break;
}
- case kArmS64x2LoadSplat: {
+ case kArmS128Load64Splat: {
Simd128Register dst = i.OutputSimd128Register();
__ vld1(Neon32, NeonListOperand(dst.low()), i.NeonInputOperand(0));
__ Move(dst.high(), dst.low());
break;
}
- case kArmI16x8Load8x8S: {
+ case kArmS128Load8x8S: {
Simd128Register dst = i.OutputSimd128Register();
__ vld1(Neon8, NeonListOperand(dst.low()), i.NeonInputOperand(0));
__ vmovl(NeonS8, dst, dst.low());
break;
}
- case kArmI16x8Load8x8U: {
+ case kArmS128Load8x8U: {
Simd128Register dst = i.OutputSimd128Register();
__ vld1(Neon8, NeonListOperand(dst.low()), i.NeonInputOperand(0));
__ vmovl(NeonU8, dst, dst.low());
break;
}
- case kArmI32x4Load16x4S: {
+ case kArmS128Load16x4S: {
Simd128Register dst = i.OutputSimd128Register();
__ vld1(Neon16, NeonListOperand(dst.low()), i.NeonInputOperand(0));
__ vmovl(NeonS16, dst, dst.low());
break;
}
- case kArmI32x4Load16x4U: {
+ case kArmS128Load16x4U: {
Simd128Register dst = i.OutputSimd128Register();
__ vld1(Neon16, NeonListOperand(dst.low()), i.NeonInputOperand(0));
__ vmovl(NeonU16, dst, dst.low());
break;
}
- case kArmI64x2Load32x2S: {
+ case kArmS128Load32x2S: {
Simd128Register dst = i.OutputSimd128Register();
__ vld1(Neon32, NeonListOperand(dst.low()), i.NeonInputOperand(0));
__ vmovl(NeonS32, dst, dst.low());
break;
}
- case kArmI64x2Load32x2U: {
+ case kArmS128Load32x2U: {
Simd128Register dst = i.OutputSimd128Register();
__ vld1(Neon32, NeonListOperand(dst.low()), i.NeonInputOperand(0));
__ vmovl(NeonU32, dst, dst.low());
break;
}
+ case kArmS128Load32Zero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vmov(dst, 0);
+ __ vld1s(Neon32, NeonListOperand(dst.low()), 0, i.NeonInputOperand(0));
+ break;
+ }
+ case kArmS128Load64Zero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vmov(dst.high(), 0);
+ __ vld1(Neon64, NeonListOperand(dst.low()), i.NeonInputOperand(0));
+ break;
+ }
case kWord32AtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsb);
break;
@@ -3759,9 +3768,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
@@ -3787,38 +3795,85 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
unwinding_info_writer_.MarkBlockWillExit();
+ // We might need r3 for scratch.
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & r3.bit());
ArmOperandConverter g(this, nullptr);
+ const int parameter_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
+
+ // {additional_pop_count} is only greater than zero if {parameter_count == 0}.
+ // Check RawMachineAssembler::PopAndReturn.
+ if (parameter_count != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (__ emit_debug_code()) {
+ __ cmp(g.ToRegister(additional_pop_count), Operand(0));
+ __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue);
+ }
+ }
+
+ Register argc_reg = r3;
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // Functions with JS linkage have at least one parameter (the receiver).
+ // If {parameter_count} == 0, it means it is a builtin with
+ // kDontAdaptArgumentsSentinel, which takes care of popping the JS
+ // arguments itself.
+ const bool drop_jsargs = frame_access_state()->has_frame() &&
+ call_descriptor->IsJSFunctionCall() &&
+ parameter_count != 0;
+#else
+ const bool drop_jsargs = false;
+#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now unless they have a variable
// number of stack slot pops.
- if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (additional_pop_count->IsImmediate() &&
+ g.ToConstant(additional_pop_count).ToInt32() == 0) {
if (return_label_.is_bound()) {
__ b(&return_label_);
return;
} else {
__ bind(&return_label_);
- AssembleDeconstructFrame();
}
- } else {
- AssembleDeconstructFrame();
}
+ if (drop_jsargs) {
+ // Get the actual argument count.
+ __ ldr(argc_reg, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ }
+ AssembleDeconstructFrame();
}
- if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
- pop_count += g.ToConstant(pop).ToInt32();
+ if (drop_jsargs) {
+ // We must pop all arguments from the stack (including the receiver). This
+ // number of arguments is given by max(1 + argc_reg, parameter_count).
+ __ add(argc_reg, argc_reg, Operand(1)); // Also pop the receiver.
+ if (parameter_count > 1) {
+ __ cmp(argc_reg, Operand(parameter_count));
+ __ mov(argc_reg, Operand(parameter_count), LeaveCC, lt);
+ }
+ __ Drop(argc_reg);
+ } else if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ __ Drop(parameter_count + additional_count);
+ } else if (parameter_count == 0) {
+ __ Drop(g.ToRegister(additional_pop_count));
} else {
- __ Drop(g.ToRegister(pop));
+ // {additional_pop_count} is guaranteed to be zero if {parameter_count !=
+ // 0}. Check RawMachineAssembler::PopAndReturn.
+ __ Drop(parameter_count);
}
- __ Drop(pop_count);
__ Ret();
}
void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
-void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {
+ __ CheckConstPool(true, false);
+}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
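
Without the arguments adaptor frame, the return sequence above must itself pop everything the caller pushed: the larger of the actual argument count (plus the receiver) and the formal parameter count, which already includes the receiver slot. The arithmetic in plain C++, with a worked pair of cases:

    #include <algorithm>

    // actual_argc: arguments the caller actually passed (receiver excluded).
    // parameter_count: formal stack parameter count (receiver included).
    int SlotsToPop(int actual_argc, int parameter_count) {
      return std::max(actual_argc + 1, parameter_count);
    }
    // parameter_count 4: actual argc 2 -> max(3, 4) = 4 slots popped;
    //                    actual argc 5 -> max(6, 4) = 6 slots popped.
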
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index b3ee561e27..f4629ffec7 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -222,10 +222,10 @@ namespace compiler {
V(ArmI16x8ShrS) \
V(ArmI16x8SConvertI32x4) \
V(ArmI16x8Add) \
- V(ArmI16x8AddSaturateS) \
+ V(ArmI16x8AddSatS) \
V(ArmI16x8AddHoriz) \
V(ArmI16x8Sub) \
- V(ArmI16x8SubSaturateS) \
+ V(ArmI16x8SubSatS) \
V(ArmI16x8Mul) \
V(ArmI16x8MinS) \
V(ArmI16x8MaxS) \
@@ -238,8 +238,8 @@ namespace compiler {
V(ArmI16x8UConvertI8x16High) \
V(ArmI16x8ShrU) \
V(ArmI16x8UConvertI32x4) \
- V(ArmI16x8AddSaturateU) \
- V(ArmI16x8SubSaturateU) \
+ V(ArmI16x8AddSatU) \
+ V(ArmI16x8SubSatU) \
V(ArmI16x8MinU) \
V(ArmI16x8MaxU) \
V(ArmI16x8GtU) \
@@ -255,9 +255,9 @@ namespace compiler {
V(ArmI8x16ShrS) \
V(ArmI8x16SConvertI16x8) \
V(ArmI8x16Add) \
- V(ArmI8x16AddSaturateS) \
+ V(ArmI8x16AddSatS) \
V(ArmI8x16Sub) \
- V(ArmI8x16SubSaturateS) \
+ V(ArmI8x16SubSatS) \
V(ArmI8x16Mul) \
V(ArmI8x16MinS) \
V(ArmI8x16MaxS) \
@@ -268,8 +268,8 @@ namespace compiler {
V(ArmI8x16ExtractLaneU) \
V(ArmI8x16ShrU) \
V(ArmI8x16UConvertI16x8) \
- V(ArmI8x16AddSaturateU) \
- V(ArmI8x16SubSaturateU) \
+ V(ArmI8x16AddSatU) \
+ V(ArmI8x16SubSatU) \
V(ArmI8x16MinU) \
V(ArmI8x16MaxU) \
V(ArmI8x16GtU) \
@@ -321,16 +321,18 @@ namespace compiler {
V(ArmV16x8AllTrue) \
V(ArmV8x16AnyTrue) \
V(ArmV8x16AllTrue) \
- V(ArmS8x16LoadSplat) \
- V(ArmS16x8LoadSplat) \
- V(ArmS32x4LoadSplat) \
- V(ArmS64x2LoadSplat) \
- V(ArmI16x8Load8x8S) \
- V(ArmI16x8Load8x8U) \
- V(ArmI32x4Load16x4S) \
- V(ArmI32x4Load16x4U) \
- V(ArmI64x2Load32x2S) \
- V(ArmI64x2Load32x2U) \
+ V(ArmS128Load8Splat) \
+ V(ArmS128Load16Splat) \
+ V(ArmS128Load32Splat) \
+ V(ArmS128Load64Splat) \
+ V(ArmS128Load8x8S) \
+ V(ArmS128Load8x8U) \
+ V(ArmS128Load16x4S) \
+ V(ArmS128Load16x4U) \
+ V(ArmS128Load32x2S) \
+ V(ArmS128Load32x2U) \
+ V(ArmS128Load32Zero) \
+ V(ArmS128Load64Zero) \
V(ArmWord32AtomicPairLoad) \
V(ArmWord32AtomicPairStore) \
V(ArmWord32AtomicPairAdd) \
diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index 6459d22a11..70fb1a7ccf 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -202,10 +202,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI16x8ShrS:
case kArmI16x8SConvertI32x4:
case kArmI16x8Add:
- case kArmI16x8AddSaturateS:
+ case kArmI16x8AddSatS:
case kArmI16x8AddHoriz:
case kArmI16x8Sub:
- case kArmI16x8SubSaturateS:
+ case kArmI16x8SubSatS:
case kArmI16x8Mul:
case kArmI16x8MinS:
case kArmI16x8MaxS:
@@ -218,8 +218,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI16x8UConvertI8x16High:
case kArmI16x8ShrU:
case kArmI16x8UConvertI32x4:
- case kArmI16x8AddSaturateU:
- case kArmI16x8SubSaturateU:
+ case kArmI16x8AddSatU:
+ case kArmI16x8SubSatU:
case kArmI16x8MinU:
case kArmI16x8MaxU:
case kArmI16x8GtU:
@@ -235,9 +235,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI8x16ShrS:
case kArmI8x16SConvertI16x8:
case kArmI8x16Add:
- case kArmI8x16AddSaturateS:
+ case kArmI8x16AddSatS:
case kArmI8x16Sub:
- case kArmI8x16SubSaturateS:
+ case kArmI8x16SubSatS:
case kArmI8x16Mul:
case kArmI8x16MinS:
case kArmI8x16MaxS:
@@ -247,8 +247,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI8x16GeS:
case kArmI8x16ExtractLaneU:
case kArmI8x16UConvertI16x8:
- case kArmI8x16AddSaturateU:
- case kArmI8x16SubSaturateU:
+ case kArmI8x16AddSatU:
+ case kArmI8x16SubSatU:
case kArmI8x16ShrU:
case kArmI8x16MinU:
case kArmI8x16MaxU:
@@ -314,16 +314,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmLdr:
case kArmPeek:
case kArmWord32AtomicPairLoad:
- case kArmS8x16LoadSplat:
- case kArmS16x8LoadSplat:
- case kArmS32x4LoadSplat:
- case kArmS64x2LoadSplat:
- case kArmI16x8Load8x8S:
- case kArmI16x8Load8x8U:
- case kArmI32x4Load16x4S:
- case kArmI32x4Load16x4U:
- case kArmI64x2Load32x2S:
- case kArmI64x2Load32x2U:
+ case kArmS128Load8Splat:
+ case kArmS128Load16Splat:
+ case kArmS128Load32Splat:
+ case kArmS128Load64Splat:
+ case kArmS128Load8x8S:
+ case kArmS128Load8x8U:
+ case kArmS128Load16x4S:
+ case kArmS128Load16x4U:
+ case kArmS128Load32x2S:
+ case kArmS128Load32x2U:
+ case kArmS128Load32Zero:
+ case kArmS128Load64Zero:
return kIsLoadOperation;
case kArmVstrF32:
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index e868a1a47a..248f76558e 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -29,8 +29,8 @@ class ArmOperandGenerator : public OperandGenerator {
bool CanBeImmediate(Node* node, InstructionCode opcode) {
Int32Matcher m(node);
- if (!m.HasValue()) return false;
- int32_t value = m.Value();
+ if (!m.HasResolvedValue()) return false;
+ int32_t value = m.ResolvedValue();
switch (ArchOpcodeField::decode(opcode)) {
case kArmAnd:
case kArmMov:
@@ -95,7 +95,7 @@ void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
Node* node, int width) {
ArmOperandGenerator g(selector);
Int32Matcher m(node->InputAt(1));
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
if (m.IsMultipleOf(width)) {
selector->EmitIdentity(node);
} else {
@@ -389,13 +389,14 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
size_t input_count = 2;
ExternalReferenceMatcher m(base);
- if (m.HasValue() && selector->CanAddressRelativeToRootsRegister(m.Value())) {
+ if (m.HasResolvedValue() &&
+ selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
Int32Matcher int_matcher(index);
- if (int_matcher.HasValue()) {
+ if (int_matcher.HasResolvedValue()) {
ptrdiff_t const delta =
- int_matcher.Value() +
+ int_matcher.ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
- selector->isolate(), m.Value());
+ selector->isolate(), m.ResolvedValue());
input_count = 1;
inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
opcode |= AddressingModeField::encode(kMode_Root);
@@ -502,35 +503,41 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
LoadTransformParameters params = LoadTransformParametersOf(node->op());
InstructionCode opcode = kArchNop;
switch (params.transformation) {
- case LoadTransformation::kS8x16LoadSplat:
- opcode = kArmS8x16LoadSplat;
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kArmS128Load8Splat;
break;
- case LoadTransformation::kS16x8LoadSplat:
- opcode = kArmS16x8LoadSplat;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kArmS128Load16Splat;
break;
- case LoadTransformation::kS32x4LoadSplat:
- opcode = kArmS32x4LoadSplat;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kArmS128Load32Splat;
break;
- case LoadTransformation::kS64x2LoadSplat:
- opcode = kArmS64x2LoadSplat;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kArmS128Load64Splat;
break;
- case LoadTransformation::kI16x8Load8x8S:
- opcode = kArmI16x8Load8x8S;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kArmS128Load8x8S;
break;
- case LoadTransformation::kI16x8Load8x8U:
- opcode = kArmI16x8Load8x8U;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kArmS128Load8x8U;
break;
- case LoadTransformation::kI32x4Load16x4S:
- opcode = kArmI32x4Load16x4S;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kArmS128Load16x4S;
break;
- case LoadTransformation::kI32x4Load16x4U:
- opcode = kArmI32x4Load16x4U;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kArmS128Load16x4U;
break;
- case LoadTransformation::kI64x2Load32x2S:
- opcode = kArmI64x2Load32x2S;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kArmS128Load32x2S;
break;
- case LoadTransformation::kI64x2Load32x2U:
- opcode = kArmI64x2Load32x2U;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kArmS128Load32x2U;
+ break;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kArmS128Load32Zero;
+ break;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kArmS128Load64Zero;
break;
default:
UNIMPLEMENTED();
@@ -666,17 +673,17 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
ExternalReferenceMatcher m(base);
- if (m.HasValue() && CanAddressRelativeToRootsRegister(m.Value())) {
+ if (m.HasResolvedValue() &&
+ CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
Int32Matcher int_matcher(index);
- if (int_matcher.HasValue()) {
+ if (int_matcher.HasResolvedValue()) {
ptrdiff_t const delta =
- int_matcher.Value() +
+ int_matcher.ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
- isolate(), m.Value());
+ isolate(), m.ResolvedValue());
int input_count = 2;
InstructionOperand inputs[2];
inputs[0] = g.UseRegister(value);
@@ -898,16 +905,16 @@ void InstructionSelector::VisitWord32And(Node* node) {
return;
}
}
- if (m.right().HasValue()) {
- uint32_t const value = m.right().Value();
+ if (m.right().HasResolvedValue()) {
+ uint32_t const value = m.right().ResolvedValue();
uint32_t width = base::bits::CountPopulation(value);
uint32_t leading_zeros = base::bits::CountLeadingZeros32(value);
// Try to merge SHR operations on the left hand input into this AND.
if (m.left().IsWord32Shr()) {
Int32BinopMatcher mshr(m.left().node());
- if (mshr.right().HasValue()) {
- uint32_t const shift = mshr.right().Value();
+ if (mshr.right().HasResolvedValue()) {
+ uint32_t const shift = mshr.right().ResolvedValue();
if (((shift == 8) || (shift == 16) || (shift == 24)) &&
(value == 0xFF)) {
@@ -915,14 +922,14 @@ void InstructionSelector::VisitWord32And(Node* node) {
// bytewise rotation.
Emit(kArmUxtb, g.DefineAsRegister(m.node()),
g.UseRegister(mshr.left().node()),
- g.TempImmediate(mshr.right().Value()));
+ g.TempImmediate(mshr.right().ResolvedValue()));
return;
} else if (((shift == 8) || (shift == 16)) && (value == 0xFFFF)) {
// Merge SHR into AND by emitting a UXTH instruction with a
// bytewise rotation.
Emit(kArmUxth, g.DefineAsRegister(m.node()),
g.UseRegister(mshr.left().node()),
- g.TempImmediate(mshr.right().Value()));
+ g.TempImmediate(mshr.right().ResolvedValue()));
return;
} else if (IsSupported(ARMv7) && (width != 0) &&
((leading_zeros + width) == 32)) {
@@ -1074,11 +1081,11 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (IsSupported(ARMv7) && m.left().IsWord32And() &&
m.right().IsInRange(0, 31)) {
- uint32_t lsb = m.right().Value();
+ uint32_t lsb = m.right().ResolvedValue();
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
- uint32_t value = static_cast<uint32_t>(mleft.right().Value() >> lsb)
- << lsb;
+ if (mleft.right().HasResolvedValue()) {
+ uint32_t value =
+ static_cast<uint32_t>(mleft.right().ResolvedValue() >> lsb) << lsb;
uint32_t width = base::bits::CountPopulation(value);
uint32_t msb = base::bits::CountLeadingZeros32(value);
if ((width != 0) && (msb + width + lsb == 32)) {
@@ -1095,9 +1102,9 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
Int32BinopMatcher m(node);
if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
Int32BinopMatcher mleft(m.left().node());
- if (m.right().HasValue() && mleft.right().HasValue()) {
- uint32_t sar = m.right().Value();
- uint32_t shl = mleft.right().Value();
+ if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
+ uint32_t sar = m.right().ResolvedValue();
+ uint32_t shl = mleft.right().ResolvedValue();
if ((sar == shl) && (sar == 16)) {
Emit(kArmSxth, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(0));
@@ -1199,7 +1206,7 @@ void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
// no register aliasing of input registers with output registers.
Int32Matcher m(node->InputAt(2));
InstructionOperand shift_operand;
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
shift_operand = g.UseImmediate(m.node());
} else {
shift_operand = g.UseUniqueRegister(m.node());
@@ -1420,8 +1427,8 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitInt32Mul(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
- if (m.right().HasValue() && m.right().Value() > 0) {
- int32_t value = m.right().Value();
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ int32_t value = m.right().ResolvedValue();
if (base::bits::IsPowerOfTwo(value - 1)) {
Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
@@ -2191,7 +2198,7 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode =
@@ -2217,7 +2224,7 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kWord32AtomicStoreWord8;
@@ -2247,7 +2254,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
@@ -2261,7 +2268,6 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
- return;
}
AddressingMode addressing_mode = kMode_Offset_RR;
@@ -2283,7 +2289,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* index = node->InputAt(1);
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
@@ -2297,7 +2303,6 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
- return;
}
AddressingMode addressing_mode = kMode_Offset_RR;
@@ -2322,7 +2327,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = int8_op;
@@ -2336,7 +2341,6 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
opcode = word32_op;
} else {
UNREACHABLE();
- return;
}
AddressingMode addressing_mode = kMode_Offset_RR;
@@ -2598,10 +2602,10 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I32x4GeU, kArmI32x4GeU) \
V(I16x8SConvertI32x4, kArmI16x8SConvertI32x4) \
V(I16x8Add, kArmI16x8Add) \
- V(I16x8AddSaturateS, kArmI16x8AddSaturateS) \
+ V(I16x8AddSatS, kArmI16x8AddSatS) \
V(I16x8AddHoriz, kArmI16x8AddHoriz) \
V(I16x8Sub, kArmI16x8Sub) \
- V(I16x8SubSaturateS, kArmI16x8SubSaturateS) \
+ V(I16x8SubSatS, kArmI16x8SubSatS) \
V(I16x8Mul, kArmI16x8Mul) \
V(I16x8MinS, kArmI16x8MinS) \
V(I16x8MaxS, kArmI16x8MaxS) \
@@ -2610,8 +2614,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I16x8GtS, kArmI16x8GtS) \
V(I16x8GeS, kArmI16x8GeS) \
V(I16x8UConvertI32x4, kArmI16x8UConvertI32x4) \
- V(I16x8AddSaturateU, kArmI16x8AddSaturateU) \
- V(I16x8SubSaturateU, kArmI16x8SubSaturateU) \
+ V(I16x8AddSatU, kArmI16x8AddSatU) \
+ V(I16x8SubSatU, kArmI16x8SubSatU) \
V(I16x8MinU, kArmI16x8MinU) \
V(I16x8MaxU, kArmI16x8MaxU) \
V(I16x8GtU, kArmI16x8GtU) \
@@ -2619,9 +2623,9 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I16x8RoundingAverageU, kArmI16x8RoundingAverageU) \
V(I8x16SConvertI16x8, kArmI8x16SConvertI16x8) \
V(I8x16Add, kArmI8x16Add) \
- V(I8x16AddSaturateS, kArmI8x16AddSaturateS) \
+ V(I8x16AddSatS, kArmI8x16AddSatS) \
V(I8x16Sub, kArmI8x16Sub) \
- V(I8x16SubSaturateS, kArmI8x16SubSaturateS) \
+ V(I8x16SubSatS, kArmI8x16SubSatS) \
V(I8x16Mul, kArmI8x16Mul) \
V(I8x16MinS, kArmI8x16MinS) \
V(I8x16MaxS, kArmI8x16MaxS) \
@@ -2630,8 +2634,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I8x16GtS, kArmI8x16GtS) \
V(I8x16GeS, kArmI8x16GeS) \
V(I8x16UConvertI16x8, kArmI8x16UConvertI16x8) \
- V(I8x16AddSaturateU, kArmI8x16AddSaturateU) \
- V(I8x16SubSaturateU, kArmI8x16SubSaturateU) \
+ V(I8x16AddSatU, kArmI8x16AddSatU) \
+ V(I8x16SubSatU, kArmI8x16SubSatU) \
V(I8x16MinU, kArmI8x16MinU) \
V(I8x16MaxU, kArmI8x16MaxU) \
V(I8x16GtU, kArmI8x16GtU) \
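
VisitInt32Mul above also shows the classic strength reduction: when the constant right operand k has k-1 a power of two, the selector emits a single ADD with a shifted operand instead of a multiply. A self-contained check of the identity it relies on:

    #include <cassert>
    #include <cstdint>

    // x * 9 becomes one ADD with LSL #3 on ARM: x + (x << 3).
    uint32_t MulBy9(uint32_t x) { return x + (x << 3); }

    int main() {
      const uint32_t xs[] = {0, 1, 7, 123456};
      for (uint32_t x : xs) assert(MulBy9(x) == x * 9u);
    }
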
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 6524502408..02809942a1 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -418,6 +418,18 @@ void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode,
}
}
+// Handles unary ops that work for float (scalar), double (scalar), or NEON.
+template <typename Fn>
+void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
+ Arm64OperandConverter i, VectorFormat scalar,
+ VectorFormat vector) {
+ VectorFormat f = instr->InputAt(0)->IsSimd128Register() ? vector : scalar;
+
+ VRegister output = VRegister::Create(i.OutputDoubleRegister().code(), f);
+ VRegister input = VRegister::Create(i.InputDoubleRegister(0).code(), f);
+ (tasm->*fn)(output, input);
+}
+
} // namespace
#define ASSEMBLE_SHIFT(asm_instr, width) \
@@ -679,7 +691,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ CallCodeObject(reg);
}
@@ -720,7 +732,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ JumpCodeObject(reg);
}
@@ -750,7 +762,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
UseScratchRegisterScope temps(tasm());
temps.Exclude(x17);
@@ -1030,31 +1042,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(tanh);
break;
case kArm64Float32RoundDown:
- __ Frintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatS,
+ kFormat4S);
break;
case kArm64Float64RoundDown:
- __ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatD,
+ kFormat2D);
break;
case kArm64Float32RoundUp:
- __ Frintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatS,
+ kFormat4S);
break;
case kArm64Float64RoundUp:
- __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatD,
+ kFormat2D);
break;
case kArm64Float64RoundTiesAway:
- __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frinta, instr, i, kFormatD,
+ kFormat2D);
break;
case kArm64Float32RoundTruncate:
- __ Frintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatS,
+ kFormat4S);
break;
case kArm64Float64RoundTruncate:
- __ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatD,
+ kFormat2D);
break;
case kArm64Float32RoundTiesEven:
- __ Frintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatS,
+ kFormat4S);
break;
case kArm64Float64RoundTiesEven:
- __ Frintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatD,
+ kFormat2D);
break;
case kArm64Add:
if (FlagsModeField::decode(opcode) != kFlags_none) {
@@ -1118,12 +1139,64 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Mul32:
__ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
break;
- case kArm64Smull:
- __ Smull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
+ case kArm64Saddlp: {
+ VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
+ __ Saddlp(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f));
break;
- case kArm64Umull:
- __ Umull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
+ }
+ case kArm64Uaddlp: {
+ VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
+ __ Uaddlp(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f));
+ break;
+ }
+ case kArm64Smull: {
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Smull(i.OutputRegister(), i.InputRegister32(0),
+ i.InputRegister32(1));
+ } else {
+ DCHECK(instr->InputAt(0)->IsSimd128Register());
+ VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidth(dst_f);
+ __ Smull(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f),
+ i.InputSimd128Register(1).Format(src_f));
+ }
+ break;
+ }
+ case kArm64Smull2: {
+ VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
+ __ Smull2(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f),
+ i.InputSimd128Register(1).Format(src_f));
break;
+ }
+ case kArm64Umull: {
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Umull(i.OutputRegister(), i.InputRegister32(0),
+ i.InputRegister32(1));
+ } else {
+ DCHECK(instr->InputAt(0)->IsSimd128Register());
+ VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidth(dst_f);
+ __ Umull(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f),
+ i.InputSimd128Register(1).Format(src_f));
+ }
+ break;
+ }
+ case kArm64Umull2: {
+ VectorFormat dst_f = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
+ __ Umull2(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f),
+ i.InputSimd128Register(1).Format(src_f));
+ break;
+ }
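
For reference, a scalar model of the widening multiplies these cases now emit for SIMD operands (assumed semantics, matching the ARM64 ISA; i16x8 -> i32x4 shown, other widths are analogous): Smull multiplies the low halves of the sources into double-width lanes, Smull2 the high halves.

#include <cstdint>

void SmullModel(int32_t out[4], const int16_t a[8], const int16_t b[8]) {
  for (int i = 0; i < 4; i++) out[i] = int32_t{a[i]} * b[i];  // low half
}

void Smull2Model(int32_t out[4], const int16_t a[8], const int16_t b[8]) {
  for (int i = 0; i < 4; i++) {
    out[i] = int32_t{a[i + 4]} * b[i + 4];  // high half
  }
}
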
case kArm64Madd:
__ Madd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
i.InputRegister(2));
@@ -1399,6 +1472,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Cmn32:
__ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
+ case kArm64Cnt: {
+ VectorFormat f = VectorFormatFillQ(MiscField::decode(opcode));
+ __ Cnt(i.OutputSimd128Register().Format(f),
+ i.InputSimd128Register(0).Format(f));
+ break;
+ }
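
Cnt is a per-byte population count; a scalar reference of the assumed semantics:

#include <cstdint>

// NEON Cnt: each output byte is the popcount of the corresponding input byte.
void CntModel(uint8_t out[16], const uint8_t in[16]) {
  for (int i = 0; i < 16; i++) {
    uint8_t v = in[i];
    uint8_t n = 0;
    while (v) {
      n += v & 1;
      v >>= 1;
    }
    out[i] = n;
  }
}
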
case kArm64Tst:
__ Tst(i.InputOrZeroRegister64(0), i.InputOperand2_64(1));
break;
@@ -1852,11 +1931,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Instr(i.OutputSimd128Register().V##FORMAT(), \
i.InputSimd128Register(0).V##FORMAT()); \
break;
-#define SIMD_WIDENING_UNOP_CASE(Op, Instr, WIDE, NARROW) \
- case Op: \
- __ Instr(i.OutputSimd128Register().V##WIDE(), \
- i.InputSimd128Register(0).V##NARROW()); \
- break;
#define SIMD_BINOP_CASE(Op, Instr, FORMAT) \
case Op: \
__ Instr(i.OutputSimd128Register().V##FORMAT(), \
@@ -1872,6 +1946,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; \
}
+ case kArm64Sxtl: {
+ VectorFormat wide = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat narrow = VectorFormatHalfWidth(wide);
+ __ Sxtl(i.OutputSimd128Register().Format(wide),
+ i.InputSimd128Register(0).Format(narrow));
+ break;
+ }
+ case kArm64Sxtl2: {
+ VectorFormat wide = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat narrow = VectorFormatHalfWidthDoubleLanes(wide);
+ __ Sxtl2(i.OutputSimd128Register().Format(wide),
+ i.InputSimd128Register(0).Format(narrow));
+ break;
+ }
+ case kArm64Uxtl: {
+ VectorFormat wide = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat narrow = VectorFormatHalfWidth(wide);
+ __ Uxtl(i.OutputSimd128Register().Format(wide),
+ i.InputSimd128Register(0).Format(narrow));
+ break;
+ }
+ case kArm64Uxtl2: {
+ VectorFormat wide = VectorFormatFillQ(MiscField::decode(opcode));
+ VectorFormat narrow = VectorFormatHalfWidthDoubleLanes(wide);
+ __ Uxtl2(i.OutputSimd128Register().Format(wide),
+ i.InputSimd128Register(0).Format(narrow));
+ break;
+ }
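
A scalar model of the four new extension cases (assumed semantics per the ARM64 ISA; i16x8 -> i32x4 shown): Sxtl/Uxtl widen the low half of the input and Sxtl2/Uxtl2 the high half, with sign and zero extension respectively.

#include <cstdint>

void SxtlModel(int32_t out[4], const int16_t in[8]) {
  for (int i = 0; i < 4; i++) out[i] = in[i];      // sign-extend low half
}

void Uxtl2Model(uint32_t out[4], const uint16_t in[8]) {
  for (int i = 0; i < 4; i++) out[i] = in[i + 4];  // zero-extend high half
}
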
case kArm64F64x2Splat: {
__ Dup(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).D(), 0);
break;
@@ -1940,22 +2042,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
break;
}
- case kArm64F64x2RoundUp:
- __ Frintp(i.OutputSimd128Register().V2D(),
- i.InputSimd128Register(0).V2D());
- break;
- case kArm64F64x2RoundDown:
- __ Frintm(i.OutputSimd128Register().V2D(),
- i.InputSimd128Register(0).V2D());
- break;
- case kArm64F64x2RoundTruncate:
- __ Frintz(i.OutputSimd128Register().V2D(),
- i.InputSimd128Register(0).V2D());
- break;
- case kArm64F64x2RoundTiesEven:
- __ Frintn(i.OutputSimd128Register().V2D(),
- i.InputSimd128Register(0).V2D());
- break;
case kArm64F32x4Splat: {
__ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0);
break;
@@ -2029,22 +2115,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
break;
}
- case kArm64F32x4RoundUp:
- __ Frintp(i.OutputSimd128Register().V4S(),
- i.InputSimd128Register(0).V4S());
- break;
- case kArm64F32x4RoundDown:
- __ Frintm(i.OutputSimd128Register().V4S(),
- i.InputSimd128Register(0).V4S());
- break;
- case kArm64F32x4RoundTruncate:
- __ Frintz(i.OutputSimd128Register().V4S(),
- i.InputSimd128Register(0).V4S());
- break;
- case kArm64F32x4RoundTiesEven:
- __ Frintn(i.OutputSimd128Register().V4S(),
- i.InputSimd128Register(0).V4S());
- break;
case kArm64I64x2Splat: {
__ Dup(i.OutputSimd128Register().V2D(), i.InputRegister64(0));
break;
@@ -2134,21 +2204,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
SIMD_BINOP_CASE(kArm64I64x2Eq, Cmeq, 2D);
- case kArm64I64x2Ne: {
- VRegister dst = i.OutputSimd128Register().V2D();
- __ Cmeq(dst, i.InputSimd128Register(0).V2D(),
- i.InputSimd128Register(1).V2D());
- __ Mvn(dst, dst);
- break;
- }
- SIMD_BINOP_CASE(kArm64I64x2GtS, Cmgt, 2D);
- SIMD_BINOP_CASE(kArm64I64x2GeS, Cmge, 2D);
case kArm64I64x2ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 6, V2D, Ushl, X);
break;
}
- SIMD_BINOP_CASE(kArm64I64x2GtU, Cmhi, 2D);
- SIMD_BINOP_CASE(kArm64I64x2GeU, Cmhs, 2D);
case kArm64I32x4Splat: {
__ Dup(i.OutputSimd128Register().V4S(), i.InputRegister32(0));
break;
@@ -2168,8 +2227,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
SIMD_UNOP_CASE(kArm64I32x4SConvertF32x4, Fcvtzs, 4S);
- SIMD_WIDENING_UNOP_CASE(kArm64I32x4SConvertI16x8Low, Sxtl, 4S, 4H);
- SIMD_WIDENING_UNOP_CASE(kArm64I32x4SConvertI16x8High, Sxtl2, 4S, 8H);
SIMD_UNOP_CASE(kArm64I32x4Neg, Neg, 4S);
case kArm64I32x4Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 5, V4S, Sshl, W);
@@ -2198,8 +2255,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_BINOP_CASE(kArm64I32x4GtS, Cmgt, 4S);
SIMD_BINOP_CASE(kArm64I32x4GeS, Cmge, 4S);
SIMD_UNOP_CASE(kArm64I32x4UConvertF32x4, Fcvtzu, 4S);
- SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8Low, Uxtl, 4S, 4H);
- SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8High, Uxtl2, 4S, 8H);
case kArm64I32x4ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 5, V4S, Ushl, W);
break;
@@ -2258,8 +2313,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
break;
}
- SIMD_WIDENING_UNOP_CASE(kArm64I16x8SConvertI8x16Low, Sxtl, 8H, 8B);
- SIMD_WIDENING_UNOP_CASE(kArm64I16x8SConvertI8x16High, Sxtl2, 8H, 16B);
SIMD_UNOP_CASE(kArm64I16x8Neg, Neg, 8H);
case kArm64I16x8Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 4, V8H, Sshl, W);
@@ -2284,10 +2337,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
SIMD_BINOP_CASE(kArm64I16x8Add, Add, 8H);
- SIMD_BINOP_CASE(kArm64I16x8AddSaturateS, Sqadd, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8AddSatS, Sqadd, 8H);
SIMD_BINOP_CASE(kArm64I16x8AddHoriz, Addp, 8H);
SIMD_BINOP_CASE(kArm64I16x8Sub, Sub, 8H);
- SIMD_BINOP_CASE(kArm64I16x8SubSaturateS, Sqsub, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8SubSatS, Sqsub, 8H);
SIMD_BINOP_CASE(kArm64I16x8Mul, Mul, 8H);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I16x8Mla, Mla, 8H);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I16x8Mls, Mls, 8H);
@@ -2303,15 +2356,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
SIMD_BINOP_CASE(kArm64I16x8GtS, Cmgt, 8H);
SIMD_BINOP_CASE(kArm64I16x8GeS, Cmge, 8H);
- case kArm64I16x8UConvertI8x16Low: {
- __ Uxtl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8B());
- break;
- }
- case kArm64I16x8UConvertI8x16High: {
- __ Uxtl2(i.OutputSimd128Register().V8H(),
- i.InputSimd128Register(0).V16B());
- break;
- }
case kArm64I16x8ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 4, V8H, Ushl, W);
break;
@@ -2330,13 +2374,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtun2(dst.V8H(), src1.V4S());
break;
}
- SIMD_BINOP_CASE(kArm64I16x8AddSaturateU, Uqadd, 8H);
- SIMD_BINOP_CASE(kArm64I16x8SubSaturateU, Uqsub, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8AddSatU, Uqadd, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8SubSatU, Uqsub, 8H);
SIMD_BINOP_CASE(kArm64I16x8MinU, Umin, 8H);
SIMD_BINOP_CASE(kArm64I16x8MaxU, Umax, 8H);
SIMD_BINOP_CASE(kArm64I16x8GtU, Cmhi, 8H);
SIMD_BINOP_CASE(kArm64I16x8GeU, Cmhs, 8H);
SIMD_BINOP_CASE(kArm64I16x8RoundingAverageU, Urhadd, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8Q15MulRSatS, Sqrdmulh, 8H);
SIMD_UNOP_CASE(kArm64I16x8Abs, Abs, 8H);
case kArm64I16x8BitMask: {
Register dst = i.OutputRegister32();
@@ -2400,9 +2445,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
SIMD_BINOP_CASE(kArm64I8x16Add, Add, 16B);
- SIMD_BINOP_CASE(kArm64I8x16AddSaturateS, Sqadd, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16AddSatS, Sqadd, 16B);
SIMD_BINOP_CASE(kArm64I8x16Sub, Sub, 16B);
- SIMD_BINOP_CASE(kArm64I8x16SubSaturateS, Sqsub, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16SubSatS, Sqsub, 16B);
SIMD_BINOP_CASE(kArm64I8x16Mul, Mul, 16B);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I8x16Mla, Mla, 16B);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I8x16Mls, Mls, 16B);
@@ -2436,8 +2481,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtun2(dst.V16B(), src1.V8H());
break;
}
- SIMD_BINOP_CASE(kArm64I8x16AddSaturateU, Uqadd, 16B);
- SIMD_BINOP_CASE(kArm64I8x16SubSaturateU, Uqsub, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16AddSatU, Uqadd, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16SubSatU, Uqsub, 16B);
SIMD_BINOP_CASE(kArm64I8x16MinU, Umin, 16B);
SIMD_BINOP_CASE(kArm64I8x16MaxU, Umax, 16B);
SIMD_BINOP_CASE(kArm64I8x16GtU, Cmhi, 16B);
@@ -2562,17 +2607,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src1 = i.InputSimd128Register(1).V16B();
// Unary shuffle table is in src0, binary shuffle table is in src0, src1,
// which must be consecutive.
- uint32_t mask = 0;
- if (src0 == src1) {
- mask = 0x0F0F0F0F;
- } else {
- mask = 0x1F1F1F1F;
+ if (src0 != src1) {
DCHECK(AreConsecutive(src0, src1));
}
- int64_t imm1 =
- make_uint64(i.InputInt32(3) & mask, i.InputInt32(2) & mask);
- int64_t imm2 =
- make_uint64(i.InputInt32(5) & mask, i.InputInt32(4) & mask);
+
+ int64_t imm1 = make_uint64(i.InputInt32(3), i.InputInt32(2));
+ int64_t imm2 = make_uint64(i.InputInt32(5), i.InputInt32(4));
+ DCHECK_EQ(0, (imm1 | imm2) & (src0 == src1 ? 0xF0F0F0F0F0F0F0F0
+ : 0xE0E0E0E0E0E0E0E0));
+
UseScratchRegisterScope scope(tasm());
VRegister temp = scope.AcquireV(kFormat16B);
__ Movi(temp, imm2, imm1);
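
The masking of shuffle indices has moved out of this backend; the DCHECK encodes the assumed invariant that the instruction selector already delivers in-range indices. A standalone restatement of the masks:

#include <cstdint>

// With a single 16-byte table an index needs 4 bits (0..15), so the top
// nibble of every index byte must be clear; with two consecutive tables an
// index needs 5 bits (0..31), leaving the top three bits clear.
bool IndicesInRange(uint64_t packed_indices, bool unary_table) {
  uint64_t mask = unary_table ? 0xF0F0F0F0F0F0F0F0u : 0xE0E0E0E0E0E0E0E0u;
  return (packed_indices & mask) == 0;
}
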
@@ -2590,57 +2633,46 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_UNOP_CASE(kArm64S8x8Reverse, Rev64, 16B);
SIMD_UNOP_CASE(kArm64S8x4Reverse, Rev32, 16B);
SIMD_UNOP_CASE(kArm64S8x2Reverse, Rev16, 16B);
- case kArm64V64x2AllTrue: {
- UseScratchRegisterScope scope(tasm());
- VRegister temp1 = scope.AcquireV(kFormat2D);
- VRegister temp2 = scope.AcquireV(kFormatS);
-
- __ Cmeq(temp1, i.InputSimd128Register(0).V2D(), 0);
- __ Umaxv(temp2, temp1.V4S());
- __ Umov(i.OutputRegister32(), temp2, 0);
- __ Add(i.OutputRegister32(), i.OutputRegister32(), 1);
- break;
- }
case kArm64LoadSplat: {
VectorFormat f = VectorFormatFillQ(MiscField::decode(opcode));
__ ld1r(i.OutputSimd128Register().Format(f), i.MemoryOperand(0));
break;
}
- case kArm64I16x8Load8x8S: {
+ case kArm64S128Load8x8S: {
__ Ldr(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V8H(), i.OutputSimd128Register().V8B());
break;
}
- case kArm64I16x8Load8x8U: {
+ case kArm64S128Load8x8U: {
__ Ldr(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V8H(), i.OutputSimd128Register().V8B());
break;
}
- case kArm64I32x4Load16x4S: {
+ case kArm64S128Load16x4S: {
__ Ldr(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V4S(), i.OutputSimd128Register().V4H());
break;
}
- case kArm64I32x4Load16x4U: {
+ case kArm64S128Load16x4U: {
__ Ldr(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V4S(), i.OutputSimd128Register().V4H());
break;
}
- case kArm64I64x2Load32x2S: {
+ case kArm64S128Load32x2S: {
__ Ldr(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V2D(), i.OutputSimd128Register().V2S());
break;
}
- case kArm64I64x2Load32x2U: {
+ case kArm64S128Load32x2U: {
__ Ldr(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V2D(), i.OutputSimd128Register().V2S());
break;
}
- case kArm64S128LoadMem32Zero: {
+ case kArm64S128Load32Zero: {
__ Ldr(i.OutputSimd128Register().S(), i.MemoryOperand(0));
break;
}
- case kArm64S128LoadMem64Zero: {
+ case kArm64S128Load64Zero: {
__ Ldr(i.OutputSimd128Register().D(), i.MemoryOperand(0));
break;
}
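
A scalar model of the load-extend pattern behind the renamed opcodes (assumed semantics; the Ldr plus Sxtl/Uxtl pairs above load a 64-bit half vector and widen it in place), shown for S128Load32x2S:

#include <cstdint>
#include <cstring>

// Load two 32-bit lanes from memory, then sign-extend each to 64 bits.
void S128Load32x2SModel(int64_t out[2], const void* mem) {
  int32_t lanes[2];
  std::memcpy(lanes, mem, sizeof(lanes));         // the 64-bit Ldr
  for (int i = 0; i < 2; i++) out[i] = lanes[i];  // the Sxtl
}
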
@@ -2664,7 +2696,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} // NOLINT(readability/fn_size)
#undef SIMD_UNOP_CASE
-#undef SIMD_WIDENING_UNOP_CASE
#undef SIMD_BINOP_CASE
#undef SIMD_DESTRUCTIVE_BINOP_CASE
#undef SIMD_REDUCE_OP_CASE
@@ -3062,11 +3093,10 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
const int returns = RoundUp(frame()->GetReturnSlotCount(), 2);
-
if (returns != 0) {
__ Drop(returns);
}
@@ -3083,48 +3113,113 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
unwinding_info_writer_.MarkBlockWillExit();
+ // We might need x3 for scratch.
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & x3.bit());
+ const int parameter_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
Arm64OperandConverter g(this, nullptr);
- int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
+
+  // {additional_pop_count} is only greater than zero if {parameter_count} == 0.
+ // Check RawMachineAssembler::PopAndReturn.
+ if (parameter_count != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (__ emit_debug_code()) {
+ __ cmp(g.ToRegister(additional_pop_count), Operand(0));
+ __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue);
+ }
+ }
+
+ Register argc_reg = x3;
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // Functions with JS linkage have at least one parameter (the receiver).
+  // If {parameter_count} == 0, it is a builtin with
+  // kDontAdaptArgumentsSentinel, which pops its JS arguments itself.
+ const bool drop_jsargs = frame_access_state()->has_frame() &&
+ call_descriptor->IsJSFunctionCall() &&
+ parameter_count != 0;
+#else
+ const bool drop_jsargs = false;
+#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a variable
// number of stack slot pops.
- if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (additional_pop_count->IsImmediate() &&
+ g.ToConstant(additional_pop_count).ToInt32() == 0) {
if (return_label_.is_bound()) {
__ B(&return_label_);
return;
} else {
__ Bind(&return_label_);
- AssembleDeconstructFrame();
}
- } else {
- AssembleDeconstructFrame();
}
+ if (drop_jsargs) {
+ // Get the actual argument count.
+ __ Ldr(argc_reg, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ }
+ AssembleDeconstructFrame();
}
- if (pop->IsImmediate()) {
- pop_count += g.ToConstant(pop).ToInt32();
- __ DropArguments(pop_count);
+ if (drop_jsargs) {
+ // We must pop all arguments from the stack (including the receiver). This
+ // number of arguments is given by max(1 + argc_reg, parameter_count).
+ Label argc_reg_has_final_count;
+ __ Add(argc_reg, argc_reg, 1); // Consider the receiver.
+ if (parameter_count > 1) {
+ __ Cmp(argc_reg, Operand(parameter_count));
+ __ B(&argc_reg_has_final_count, ge);
+ __ Mov(argc_reg, Operand(parameter_count));
+ __ Bind(&argc_reg_has_final_count);
+ }
+ __ DropArguments(argc_reg);
+ } else if (additional_pop_count->IsImmediate()) {
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ __ DropArguments(parameter_count + additional_count);
+ } else if (parameter_count == 0) {
+ __ DropArguments(g.ToRegister(additional_pop_count));
} else {
- Register pop_reg = g.ToRegister(pop);
- __ Add(pop_reg, pop_reg, pop_count);
- __ DropArguments(pop_reg);
+    // {additional_pop_count} is guaranteed to be zero if
+    // {parameter_count} != 0. Check RawMachineAssembler::PopAndReturn.
+ __ DropArguments(parameter_count);
}
-
__ AssertSpAligned();
__ Ret();
}
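
A worked model of the argument-dropping rule in the drop_jsargs path (a sketch; argc is assumed to be the actual argument count excluding the receiver, as loaded into argc_reg above): the epilogue pops max(argc + 1, parameter_count) stack slots.

#include <algorithm>
#include <cstdio>

int SlotsToDrop(int argc, int parameter_count) {
  // Actual arguments plus the receiver, but never fewer than the formals.
  return std::max(argc + 1, parameter_count);
}

int main() {
  std::printf("%d\n", SlotsToDrop(3, 2));  // over-application: drops 4
  std::printf("%d\n", SlotsToDrop(0, 2));  // under-application: drops 2
}
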
void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
-void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {
__ ForceConstantPoolEmissionWithoutJump();
// We are conservative here, assuming all deopts are lazy deopts.
DCHECK_GE(Deoptimizer::kLazyDeoptExitSize,
Deoptimizer::kNonLazyDeoptExitSize);
- __ CheckVeneerPool(false, false,
- deopt_count * Deoptimizer::kLazyDeoptExitSize);
+ __ CheckVeneerPool(
+ false, false,
+ static_cast<int>(exits->size()) * Deoptimizer::kLazyDeoptExitSize);
+
+ // Check which deopt kinds exist in this Code object, to avoid emitting jumps
+ // to unused entries.
+ bool saw_deopt_kind[kDeoptimizeKindCount] = {false};
+ for (auto exit : *exits) {
+ saw_deopt_kind[static_cast<int>(exit->kind())] = true;
+ }
+
+ // Emit the jumps to deoptimization entries.
+ UseScratchRegisterScope scope(tasm());
+ Register scratch = scope.AcquireX();
+ STATIC_ASSERT(static_cast<int>(kFirstDeoptimizeKind) == 0);
+ for (int i = 0; i < kDeoptimizeKindCount; i++) {
+ if (!saw_deopt_kind[i]) continue;
+ __ bind(&jump_deoptimization_entry_labels_[i]);
+ __ LoadEntryFromBuiltinIndex(Deoptimizer::GetDeoptimizationEntry(
+ isolate(), static_cast<DeoptimizeKind>(i)),
+ scratch);
+ __ Jump(scratch);
+ }
}
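
A minimal sketch of the dedup step introduced here (the kind names are placeholders, not the exact V8 enum): record which deopt kinds actually occur so a jump trampoline is emitted once per kind in use rather than once per exit.

#include <array>
#include <vector>

enum DeoptKind { kEagerKind, kSoftKind, kLazyKind, kKindCount };

std::array<bool, kKindCount> SeenKinds(const std::vector<DeoptKind>& exits) {
  std::array<bool, kKindCount> saw{};  // value-initialized to false
  for (DeoptKind k : exits) saw[k] = true;
  return saw;  // emit a trampoline only where saw[i] is true
}
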
void CodeGenerator::AssembleMove(InstructionOperand* source,
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 7f84a3504b..c80538f3a9 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -24,6 +24,7 @@ namespace compiler {
V(Arm64Cmp32) \
V(Arm64Cmn) \
V(Arm64Cmn32) \
+ V(Arm64Cnt) \
V(Arm64Tst) \
V(Arm64Tst32) \
V(Arm64Or) \
@@ -34,12 +35,16 @@ namespace compiler {
V(Arm64Eor32) \
V(Arm64Eon) \
V(Arm64Eon32) \
+ V(Arm64Saddlp) \
V(Arm64Sub) \
V(Arm64Sub32) \
V(Arm64Mul) \
V(Arm64Mul32) \
V(Arm64Smull) \
+ V(Arm64Smull2) \
+ V(Arm64Uaddlp) \
V(Arm64Umull) \
+ V(Arm64Umull2) \
V(Arm64Madd) \
V(Arm64Madd32) \
V(Arm64Msub) \
@@ -168,6 +173,10 @@ namespace compiler {
V(Arm64StrCompressTagged) \
V(Arm64DmbIsh) \
V(Arm64DsbIsb) \
+ V(Arm64Sxtl) \
+ V(Arm64Sxtl2) \
+ V(Arm64Uxtl) \
+ V(Arm64Uxtl2) \
V(Arm64F64x2Splat) \
V(Arm64F64x2ExtractLane) \
V(Arm64F64x2ReplaceLane) \
@@ -188,10 +197,6 @@ namespace compiler {
V(Arm64F64x2Qfms) \
V(Arm64F64x2Pmin) \
V(Arm64F64x2Pmax) \
- V(Arm64F64x2RoundUp) \
- V(Arm64F64x2RoundDown) \
- V(Arm64F64x2RoundTruncate) \
- V(Arm64F64x2RoundTiesEven) \
V(Arm64F32x4Splat) \
V(Arm64F32x4ExtractLane) \
V(Arm64F32x4ReplaceLane) \
@@ -217,10 +222,6 @@ namespace compiler {
V(Arm64F32x4Qfms) \
V(Arm64F32x4Pmin) \
V(Arm64F32x4Pmax) \
- V(Arm64F32x4RoundUp) \
- V(Arm64F32x4RoundDown) \
- V(Arm64F32x4RoundTruncate) \
- V(Arm64F32x4RoundTiesEven) \
V(Arm64I64x2Splat) \
V(Arm64I64x2ExtractLane) \
V(Arm64I64x2ReplaceLane) \
@@ -231,18 +232,11 @@ namespace compiler {
V(Arm64I64x2Sub) \
V(Arm64I64x2Mul) \
V(Arm64I64x2Eq) \
- V(Arm64I64x2Ne) \
- V(Arm64I64x2GtS) \
- V(Arm64I64x2GeS) \
V(Arm64I64x2ShrU) \
- V(Arm64I64x2GtU) \
- V(Arm64I64x2GeU) \
V(Arm64I32x4Splat) \
V(Arm64I32x4ExtractLane) \
V(Arm64I32x4ReplaceLane) \
V(Arm64I32x4SConvertF32x4) \
- V(Arm64I32x4SConvertI16x8Low) \
- V(Arm64I32x4SConvertI16x8High) \
V(Arm64I32x4Neg) \
V(Arm64I32x4Shl) \
V(Arm64I32x4ShrS) \
@@ -259,8 +253,6 @@ namespace compiler {
V(Arm64I32x4GtS) \
V(Arm64I32x4GeS) \
V(Arm64I32x4UConvertF32x4) \
- V(Arm64I32x4UConvertI16x8Low) \
- V(Arm64I32x4UConvertI16x8High) \
V(Arm64I32x4ShrU) \
V(Arm64I32x4MinU) \
V(Arm64I32x4MaxU) \
@@ -273,17 +265,15 @@ namespace compiler {
V(Arm64I16x8ExtractLaneU) \
V(Arm64I16x8ExtractLaneS) \
V(Arm64I16x8ReplaceLane) \
- V(Arm64I16x8SConvertI8x16Low) \
- V(Arm64I16x8SConvertI8x16High) \
V(Arm64I16x8Neg) \
V(Arm64I16x8Shl) \
V(Arm64I16x8ShrS) \
V(Arm64I16x8SConvertI32x4) \
V(Arm64I16x8Add) \
- V(Arm64I16x8AddSaturateS) \
+ V(Arm64I16x8AddSatS) \
V(Arm64I16x8AddHoriz) \
V(Arm64I16x8Sub) \
- V(Arm64I16x8SubSaturateS) \
+ V(Arm64I16x8SubSatS) \
V(Arm64I16x8Mul) \
V(Arm64I16x8Mla) \
V(Arm64I16x8Mls) \
@@ -293,17 +283,16 @@ namespace compiler {
V(Arm64I16x8Ne) \
V(Arm64I16x8GtS) \
V(Arm64I16x8GeS) \
- V(Arm64I16x8UConvertI8x16Low) \
- V(Arm64I16x8UConvertI8x16High) \
V(Arm64I16x8ShrU) \
V(Arm64I16x8UConvertI32x4) \
- V(Arm64I16x8AddSaturateU) \
- V(Arm64I16x8SubSaturateU) \
+ V(Arm64I16x8AddSatU) \
+ V(Arm64I16x8SubSatU) \
V(Arm64I16x8MinU) \
V(Arm64I16x8MaxU) \
V(Arm64I16x8GtU) \
V(Arm64I16x8GeU) \
V(Arm64I16x8RoundingAverageU) \
+ V(Arm64I16x8Q15MulRSatS) \
V(Arm64I16x8Abs) \
V(Arm64I16x8BitMask) \
V(Arm64I8x16Splat) \
@@ -315,9 +304,9 @@ namespace compiler {
V(Arm64I8x16ShrS) \
V(Arm64I8x16SConvertI16x8) \
V(Arm64I8x16Add) \
- V(Arm64I8x16AddSaturateS) \
+ V(Arm64I8x16AddSatS) \
V(Arm64I8x16Sub) \
- V(Arm64I8x16SubSaturateS) \
+ V(Arm64I8x16SubSatS) \
V(Arm64I8x16Mul) \
V(Arm64I8x16Mla) \
V(Arm64I8x16Mls) \
@@ -329,8 +318,8 @@ namespace compiler {
V(Arm64I8x16GeS) \
V(Arm64I8x16ShrU) \
V(Arm64I8x16UConvertI16x8) \
- V(Arm64I8x16AddSaturateU) \
- V(Arm64I8x16SubSaturateU) \
+ V(Arm64I8x16AddSatU) \
+ V(Arm64I8x16SubSatU) \
V(Arm64I8x16MinU) \
V(Arm64I8x16MaxU) \
V(Arm64I8x16GtU) \
@@ -376,17 +365,18 @@ namespace compiler {
V(Arm64S8x4Reverse) \
V(Arm64S8x2Reverse) \
V(Arm64V128AnyTrue) \
- V(Arm64V64x2AllTrue) \
V(Arm64V32x4AllTrue) \
V(Arm64V16x8AllTrue) \
V(Arm64V8x16AllTrue) \
V(Arm64LoadSplat) \
- V(Arm64I16x8Load8x8S) \
- V(Arm64I16x8Load8x8U) \
- V(Arm64I32x4Load16x4S) \
- V(Arm64I32x4Load16x4U) \
- V(Arm64I64x2Load32x2S) \
- V(Arm64I64x2Load32x2U) \
+ V(Arm64S128Load8x8S) \
+ V(Arm64S128Load8x8U) \
+ V(Arm64S128Load16x4S) \
+ V(Arm64S128Load16x4U) \
+ V(Arm64S128Load32x2S) \
+ V(Arm64S128Load32x2U) \
+ V(Arm64S128Load32Zero) \
+ V(Arm64S128Load64Zero) \
V(Arm64Word64AtomicLoadUint8) \
V(Arm64Word64AtomicLoadUint16) \
V(Arm64Word64AtomicLoadUint32) \
@@ -422,11 +412,7 @@ namespace compiler {
V(Arm64Word64AtomicCompareExchangeUint8) \
V(Arm64Word64AtomicCompareExchangeUint16) \
V(Arm64Word64AtomicCompareExchangeUint32) \
- V(Arm64Word64AtomicCompareExchangeUint64) \
- V(Arm64S128LoadMem32Zero) \
- V(Arm64S128LoadMem64Zero)
-// TODO(v8:10930) Adding new codes before these atomic instructions causes a
-// mksnapshot error.
+ V(Arm64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 6c572d2a1c..9d53074042 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -25,6 +25,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Cmp32:
case kArm64Cmn:
case kArm64Cmn32:
+ case kArm64Cnt:
case kArm64Tst:
case kArm64Tst32:
case kArm64Or:
@@ -35,12 +36,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Eor32:
case kArm64Eon:
case kArm64Eon32:
+ case kArm64Saddlp:
case kArm64Sub:
case kArm64Sub32:
case kArm64Mul:
case kArm64Mul32:
case kArm64Smull:
+ case kArm64Smull2:
+ case kArm64Uaddlp:
case kArm64Umull:
+ case kArm64Umull2:
case kArm64Madd:
case kArm64Madd32:
case kArm64Msub:
@@ -158,10 +163,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F64x2Qfms:
case kArm64F64x2Pmin:
case kArm64F64x2Pmax:
- case kArm64F64x2RoundUp:
- case kArm64F64x2RoundDown:
- case kArm64F64x2RoundTruncate:
- case kArm64F64x2RoundTiesEven:
case kArm64F32x4Splat:
case kArm64F32x4ExtractLane:
case kArm64F32x4ReplaceLane:
@@ -187,10 +188,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F32x4Qfms:
case kArm64F32x4Pmin:
case kArm64F32x4Pmax:
- case kArm64F32x4RoundUp:
- case kArm64F32x4RoundDown:
- case kArm64F32x4RoundTruncate:
- case kArm64F32x4RoundTiesEven:
case kArm64I64x2Splat:
case kArm64I64x2ExtractLane:
case kArm64I64x2ReplaceLane:
@@ -201,18 +198,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I64x2Sub:
case kArm64I64x2Mul:
case kArm64I64x2Eq:
- case kArm64I64x2Ne:
- case kArm64I64x2GtS:
- case kArm64I64x2GeS:
case kArm64I64x2ShrU:
- case kArm64I64x2GtU:
- case kArm64I64x2GeU:
case kArm64I32x4Splat:
case kArm64I32x4ExtractLane:
case kArm64I32x4ReplaceLane:
case kArm64I32x4SConvertF32x4:
- case kArm64I32x4SConvertI16x8Low:
- case kArm64I32x4SConvertI16x8High:
+ case kArm64Sxtl:
+ case kArm64Sxtl2:
+ case kArm64Uxtl:
+ case kArm64Uxtl2:
case kArm64I32x4Neg:
case kArm64I32x4Shl:
case kArm64I32x4ShrS:
@@ -229,8 +223,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I32x4GtS:
case kArm64I32x4GeS:
case kArm64I32x4UConvertF32x4:
- case kArm64I32x4UConvertI16x8Low:
- case kArm64I32x4UConvertI16x8High:
case kArm64I32x4ShrU:
case kArm64I32x4MinU:
case kArm64I32x4MaxU:
@@ -243,17 +235,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I16x8ExtractLaneU:
case kArm64I16x8ExtractLaneS:
case kArm64I16x8ReplaceLane:
- case kArm64I16x8SConvertI8x16Low:
- case kArm64I16x8SConvertI8x16High:
case kArm64I16x8Neg:
case kArm64I16x8Shl:
case kArm64I16x8ShrS:
case kArm64I16x8SConvertI32x4:
case kArm64I16x8Add:
- case kArm64I16x8AddSaturateS:
+ case kArm64I16x8AddSatS:
case kArm64I16x8AddHoriz:
case kArm64I16x8Sub:
- case kArm64I16x8SubSaturateS:
+ case kArm64I16x8SubSatS:
case kArm64I16x8Mul:
case kArm64I16x8Mla:
case kArm64I16x8Mls:
@@ -263,17 +253,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I16x8Ne:
case kArm64I16x8GtS:
case kArm64I16x8GeS:
- case kArm64I16x8UConvertI8x16Low:
- case kArm64I16x8UConvertI8x16High:
case kArm64I16x8ShrU:
case kArm64I16x8UConvertI32x4:
- case kArm64I16x8AddSaturateU:
- case kArm64I16x8SubSaturateU:
+ case kArm64I16x8AddSatU:
+ case kArm64I16x8SubSatU:
case kArm64I16x8MinU:
case kArm64I16x8MaxU:
case kArm64I16x8GtU:
case kArm64I16x8GeU:
case kArm64I16x8RoundingAverageU:
+ case kArm64I16x8Q15MulRSatS:
case kArm64I16x8Abs:
case kArm64I16x8BitMask:
case kArm64I8x16Splat:
@@ -285,9 +274,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I8x16ShrS:
case kArm64I8x16SConvertI16x8:
case kArm64I8x16Add:
- case kArm64I8x16AddSaturateS:
+ case kArm64I8x16AddSatS:
case kArm64I8x16Sub:
- case kArm64I8x16SubSaturateS:
+ case kArm64I8x16SubSatS:
case kArm64I8x16Mul:
case kArm64I8x16Mla:
case kArm64I8x16Mls:
@@ -298,8 +287,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I8x16GtS:
case kArm64I8x16GeS:
case kArm64I8x16UConvertI16x8:
- case kArm64I8x16AddSaturateU:
- case kArm64I8x16SubSaturateU:
+ case kArm64I8x16AddSatU:
+ case kArm64I8x16SubSatU:
case kArm64I8x16ShrU:
case kArm64I8x16MinU:
case kArm64I8x16MaxU:
@@ -346,7 +335,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S8x4Reverse:
case kArm64S8x2Reverse:
case kArm64V128AnyTrue:
- case kArm64V64x2AllTrue:
case kArm64V32x4AllTrue:
case kArm64V16x8AllTrue:
case kArm64V8x16AllTrue:
@@ -371,14 +359,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64LdrDecompressAnyTagged:
case kArm64Peek:
case kArm64LoadSplat:
- case kArm64I16x8Load8x8S:
- case kArm64I16x8Load8x8U:
- case kArm64I32x4Load16x4S:
- case kArm64I32x4Load16x4U:
- case kArm64I64x2Load32x2S:
- case kArm64I64x2Load32x2U:
- case kArm64S128LoadMem32Zero:
- case kArm64S128LoadMem64Zero:
+ case kArm64S128Load8x8S:
+ case kArm64S128Load8x8U:
+ case kArm64S128Load16x4S:
+ case kArm64S128Load16x4U:
+ case kArm64S128Load32x2S:
+ case kArm64S128Load32x2U:
+ case kArm64S128Load32Zero:
+ case kArm64S128Load64Zero:
return kIsLoadOperation;
case kArm64Claim:
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index fac7f9c1d1..584cfb6184 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -144,6 +144,13 @@ void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
g.UseRegister(node->InputAt(0)));
}
+void VisitRR(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
+ Arm64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
Arm64OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -151,6 +158,14 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
g.UseRegister(node->InputAt(1)));
}
+void VisitRRR(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
+ Arm64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
Node* node, int width) {
Arm64OperandGenerator g(selector);
@@ -311,7 +326,7 @@ bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
if (nm.IsWord32And()) {
Int32BinopMatcher mright(right_node);
if (mright.right().Is(0xFF) || mright.right().Is(0xFFFF)) {
- int32_t mask = mright.right().Value();
+ int32_t mask = mright.right().ResolvedValue();
*left_op = g->UseRegister(left_node);
*right_op = g->UseRegister(mright.left().node());
*opcode |= AddressingModeField::encode(
@@ -325,7 +340,7 @@ bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
Int32BinopMatcher mleft_of_right(mright.left().node());
if ((mright.right().Is(16) && mleft_of_right.right().Is(16)) ||
(mright.right().Is(24) && mleft_of_right.right().Is(24))) {
- int32_t shift = mright.right().Value();
+ int32_t shift = mright.right().ResolvedValue();
*left_op = g->UseRegister(left_node);
*right_op = g->UseRegister(mleft_of_right.left().node());
*opcode |= AddressingModeField::encode(
@@ -466,8 +481,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
// We only need at most the last 6 bits of the shift.
- inputs[input_count++] =
- g.UseImmediate(static_cast<int>(m_shift.right().Value() & 0x3F));
+ inputs[input_count++] = g.UseImmediate(
+ static_cast<int>(m_shift.right().ResolvedValue() & 0x3F));
} else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
!is_add_sub)) {
if (must_commute_cond) cont->Commute();
@@ -475,8 +490,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
// We only need at most the last 6 bits of the shift.
- inputs[input_count++] =
- g.UseImmediate(static_cast<int>(m_shift.right().Value() & 0x3F));
+ inputs[input_count++] = g.UseImmediate(
+ static_cast<int>(m_shift.right().ResolvedValue() & 0x3F));
} else {
inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(right_node);
@@ -508,12 +523,12 @@ void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
ArchOpcode negate_opcode) {
Arm64OperandGenerator g(selector);
Matcher m(node);
- if (m.right().HasValue() && (m.right().Value() < 0) &&
- (m.right().Value() > std::numeric_limits<int>::min()) &&
- g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
- selector->Emit(negate_opcode, g.DefineAsRegister(node),
- g.UseRegister(m.left().node()),
- g.TempImmediate(static_cast<int32_t>(-m.right().Value())));
+ if (m.right().HasResolvedValue() && (m.right().ResolvedValue() < 0) &&
+ (m.right().ResolvedValue() > std::numeric_limits<int>::min()) &&
+ g.CanBeImmediate(-m.right().ResolvedValue(), kArithmeticImm)) {
+ selector->Emit(
+ negate_opcode, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(static_cast<int32_t>(-m.right().ResolvedValue())));
} else {
VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
}
@@ -525,8 +540,8 @@ void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
template <typename Matcher>
int32_t LeftShiftForReducedMultiply(Matcher* m) {
DCHECK(m->IsInt32Mul() || m->IsInt64Mul());
- if (m->right().HasValue() && m->right().Value() >= 3) {
- uint64_t value_minus_one = m->right().Value() - 1;
+ if (m->right().HasResolvedValue() && m->right().ResolvedValue() >= 3) {
+ uint64_t value_minus_one = m->right().ResolvedValue() - 1;
if (base::bits::IsPowerOfTwo(value_minus_one)) {
return base::bits::WhichPowerOfTwo(value_minus_one);
}
@@ -565,12 +580,12 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
outputs[0] = g.DefineAsRegister(output == nullptr ? node : output);
ExternalReferenceMatcher m(base);
- if (m.HasValue() && g.IsIntegerConstant(index) &&
- selector->CanAddressRelativeToRootsRegister(m.Value())) {
+ if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
+ selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta =
g.GetIntegerConstantValue(index) +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
- selector->isolate(), m.Value());
+ selector->isolate(), m.ResolvedValue());
input_count = 1;
// Check that the delta is a 32-bit integer due to the limitations of
// immediate operands.
@@ -606,55 +621,55 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
InstructionCode opcode = kArchNop;
bool require_add = false;
switch (params.transformation) {
- case LoadTransformation::kS8x16LoadSplat:
+ case LoadTransformation::kS128Load8Splat:
opcode = kArm64LoadSplat;
opcode |= MiscField::encode(8);
require_add = true;
break;
- case LoadTransformation::kS16x8LoadSplat:
+ case LoadTransformation::kS128Load16Splat:
opcode = kArm64LoadSplat;
opcode |= MiscField::encode(16);
require_add = true;
break;
- case LoadTransformation::kS32x4LoadSplat:
+ case LoadTransformation::kS128Load32Splat:
opcode = kArm64LoadSplat;
opcode |= MiscField::encode(32);
require_add = true;
break;
- case LoadTransformation::kS64x2LoadSplat:
+ case LoadTransformation::kS128Load64Splat:
opcode = kArm64LoadSplat;
opcode |= MiscField::encode(64);
require_add = true;
break;
- case LoadTransformation::kI16x8Load8x8S:
- opcode = kArm64I16x8Load8x8S;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kArm64S128Load8x8S;
break;
- case LoadTransformation::kI16x8Load8x8U:
- opcode = kArm64I16x8Load8x8U;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kArm64S128Load8x8U;
break;
- case LoadTransformation::kI32x4Load16x4S:
- opcode = kArm64I32x4Load16x4S;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kArm64S128Load16x4S;
break;
- case LoadTransformation::kI32x4Load16x4U:
- opcode = kArm64I32x4Load16x4U;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kArm64S128Load16x4U;
break;
- case LoadTransformation::kI64x2Load32x2S:
- opcode = kArm64I64x2Load32x2S;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kArm64S128Load32x2S;
break;
- case LoadTransformation::kI64x2Load32x2U:
- opcode = kArm64I64x2Load32x2U;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kArm64S128Load32x2U;
break;
- case LoadTransformation::kS128LoadMem32Zero:
- opcode = kArm64S128LoadMem32Zero;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kArm64S128Load32Zero;
break;
- case LoadTransformation::kS128LoadMem64Zero:
- opcode = kArm64S128LoadMem64Zero;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kArm64S128Load64Zero;
break;
default:
UNIMPLEMENTED();
}
// ARM64 supports unaligned loads
- DCHECK_NE(params.kind, LoadKind::kUnaligned);
+ DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -857,12 +872,12 @@ void InstructionSelector::VisitStore(Node* node) {
}
ExternalReferenceMatcher m(base);
- if (m.HasValue() && g.IsIntegerConstant(index) &&
- CanAddressRelativeToRootsRegister(m.Value())) {
+ if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
+ CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
ptrdiff_t const delta =
g.GetIntegerConstantValue(index) +
- TurboAssemblerBase::RootRegisterOffsetForExternalReference(isolate(),
- m.Value());
+ TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ isolate(), m.ResolvedValue());
if (is_int32(delta)) {
input_count = 2;
InstructionOperand inputs[2];
@@ -981,8 +996,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
- m.right().HasValue()) {
- uint32_t mask = m.right().Value();
+ m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_width != 32) &&
@@ -993,9 +1008,9 @@ void InstructionSelector::VisitWord32And(Node* node) {
// Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasResolvedValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
- uint32_t lsb = mleft.right().Value() & 0x1F;
+ uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;
// Ubfx cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -1021,8 +1036,8 @@ void InstructionSelector::VisitWord64And(Node* node) {
Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
- m.right().HasValue()) {
- uint64_t mask = m.right().Value();
+ m.right().HasResolvedValue()) {
+ uint64_t mask = m.right().ResolvedValue();
uint64_t mask_width = base::bits::CountPopulation(mask);
uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_width != 0) && (mask_width != 64) &&
@@ -1033,9 +1048,10 @@ void InstructionSelector::VisitWord64And(Node* node) {
// Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasResolvedValue()) {
// Any shift value can match; int64 shifts use `value % 64`.
- uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3F);
+ uint32_t lsb =
+ static_cast<uint32_t>(mleft.right().ResolvedValue() & 0x3F);
// Ubfx cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -1091,12 +1107,12 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
m.right().IsInRange(1, 31)) {
Arm64OperandGenerator g(this);
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
- uint32_t mask = mleft.right().Value();
+ if (mleft.right().HasResolvedValue()) {
+ uint32_t mask = mleft.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
- uint32_t shift = m.right().Value();
+ uint32_t shift = m.right().ResolvedValue();
DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
DCHECK_NE(0u, shift);
@@ -1174,13 +1190,14 @@ bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
// Select Ubfx or Sbfx for (x << (K & 0x1F)) OP (K & 0x1F), where
// OP is >>> or >> and (K & 0x1F) != 0.
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && m.right().HasValue() &&
- (mleft.right().Value() & 0x1F) != 0 &&
- (mleft.right().Value() & 0x1F) == (m.right().Value() & 0x1F)) {
+ if (mleft.right().HasResolvedValue() && m.right().HasResolvedValue() &&
+ (mleft.right().ResolvedValue() & 0x1F) != 0 &&
+ (mleft.right().ResolvedValue() & 0x1F) ==
+ (m.right().ResolvedValue() & 0x1F)) {
DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;
- int right_val = m.right().Value() & 0x1F;
+ int right_val = m.right().ResolvedValue() & 0x1F;
DCHECK_NE(right_val, 0);
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -1196,14 +1213,15 @@ bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
- if (m.left().IsWord32And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x1F;
+ if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && mleft.right().Value() != 0) {
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
- uint32_t mask = static_cast<uint32_t>(mleft.right().Value() >> lsb)
- << lsb;
+ uint32_t mask =
+ static_cast<uint32_t>(mleft.right().ResolvedValue() >> lsb) << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_msb + mask_width + lsb) == 32) {
@@ -1220,13 +1238,13 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
return;
}
- if (m.left().IsUint32MulHigh() && m.right().HasValue() &&
+ if (m.left().IsUint32MulHigh() && m.right().HasResolvedValue() &&
CanCover(node, node->InputAt(0))) {
// Combine this shift with the multiply and shift that would be generated
// by Uint32MulHigh.
Arm64OperandGenerator g(this);
Node* left = m.left().node();
- int shift = m.right().Value() & 0x1F;
+ int shift = m.right().ResolvedValue() & 0x1F;
InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Umull, smull_operand, g.UseRegister(left->InputAt(0)),
g.UseRegister(left->InputAt(1)));
@@ -1240,14 +1258,15 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
void InstructionSelector::VisitWord64Shr(Node* node) {
Int64BinopMatcher m(node);
- if (m.left().IsWord64And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x3F;
+ if (m.left().IsWord64And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x3F;
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && mleft.right().Value() != 0) {
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
- uint64_t mask = static_cast<uint64_t>(mleft.right().Value() >> lsb)
- << lsb;
+ uint64_t mask =
+ static_cast<uint64_t>(mleft.right().ResolvedValue() >> lsb) << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_msb + mask_width + lsb) == 64) {
@@ -1270,13 +1289,13 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
}
Int32BinopMatcher m(node);
- if (m.left().IsInt32MulHigh() && m.right().HasValue() &&
+ if (m.left().IsInt32MulHigh() && m.right().HasResolvedValue() &&
CanCover(node, node->InputAt(0))) {
// Combine this shift with the multiply and shift that would be generated
// by Int32MulHigh.
Arm64OperandGenerator g(this);
Node* left = m.left().node();
- int shift = m.right().Value() & 0x1F;
+ int shift = m.right().ResolvedValue() & 0x1F;
InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Smull, smull_operand, g.UseRegister(left->InputAt(0)),
g.UseRegister(left->InputAt(1)));
@@ -1285,7 +1304,7 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
return;
}
- if (m.left().IsInt32Add() && m.right().HasValue() &&
+ if (m.left().IsInt32Add() && m.right().HasResolvedValue() &&
CanCover(node, node->InputAt(0))) {
Node* add_node = m.left().node();
Int32BinopMatcher madd_node(add_node);
@@ -1379,14 +1398,14 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
V(Float64ExtractLowWord32, kArm64Float64ExtractLowWord32) \
V(Float64ExtractHighWord32, kArm64Float64ExtractHighWord32) \
V(Float64SilenceNaN, kArm64Float64SilenceNaN) \
- V(F32x4Ceil, kArm64F32x4RoundUp) \
- V(F32x4Floor, kArm64F32x4RoundDown) \
- V(F32x4Trunc, kArm64F32x4RoundTruncate) \
- V(F32x4NearestInt, kArm64F32x4RoundTiesEven) \
- V(F64x2Ceil, kArm64F64x2RoundUp) \
- V(F64x2Floor, kArm64F64x2RoundDown) \
- V(F64x2Trunc, kArm64F64x2RoundTruncate) \
- V(F64x2NearestInt, kArm64F64x2RoundTiesEven)
+ V(F32x4Ceil, kArm64Float32RoundUp) \
+ V(F32x4Floor, kArm64Float32RoundDown) \
+ V(F32x4Trunc, kArm64Float32RoundTruncate) \
+ V(F32x4NearestInt, kArm64Float32RoundTiesEven) \
+ V(F64x2Ceil, kArm64Float64RoundUp) \
+ V(F64x2Floor, kArm64Float64RoundDown) \
+ V(F64x2Trunc, kArm64Float64RoundTruncate) \
+ V(F64x2NearestInt, kArm64Float64RoundTiesEven)
#define RRR_OP_LIST(V) \
V(Int32Div, kArm64Idiv32) \
@@ -1632,6 +1651,88 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
VisitRRR(this, kArm64Mul, node);
}
+namespace {
+void VisitExtMul(InstructionSelector* selector, ArchOpcode opcode, Node* node,
+ int dst_lane_size) {
+ InstructionCode code = opcode;
+ code |= MiscField::encode(dst_lane_size);
+ VisitRRR(selector, code, node);
+}
+} // namespace
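
A sketch of the opcode-plus-lane-size packing the selector and code generator now share (the bit position below is a placeholder, not V8's real field layout): the selector ORs the destination lane size into the InstructionCode, and the code generator recovers the vector format from it via MiscField::decode.

#include <cstdint>

constexpr uint32_t kLaneSizeShift = 22;  // placeholder bit position

constexpr uint32_t Encode(uint32_t arch_opcode, uint32_t dst_lane_size) {
  return arch_opcode | (dst_lane_size << kLaneSizeShift);
}

constexpr uint32_t DecodeLaneSize(uint32_t code) {
  return code >> kLaneSizeShift;
}

static_assert(DecodeLaneSize(Encode(7, 32)) == 32, "round trip");
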
+
+void InstructionSelector::VisitI16x8ExtMulLowI8x16S(Node* node) {
+ VisitExtMul(this, kArm64Smull, node, 16);
+}
+
+void InstructionSelector::VisitI16x8ExtMulHighI8x16S(Node* node) {
+ VisitExtMul(this, kArm64Smull2, node, 16);
+}
+
+void InstructionSelector::VisitI16x8ExtMulLowI8x16U(Node* node) {
+ VisitExtMul(this, kArm64Umull, node, 16);
+}
+
+void InstructionSelector::VisitI16x8ExtMulHighI8x16U(Node* node) {
+ VisitExtMul(this, kArm64Umull2, node, 16);
+}
+
+void InstructionSelector::VisitI32x4ExtMulLowI16x8S(Node* node) {
+ VisitExtMul(this, kArm64Smull, node, 32);
+}
+
+void InstructionSelector::VisitI32x4ExtMulHighI16x8S(Node* node) {
+ VisitExtMul(this, kArm64Smull2, node, 32);
+}
+
+void InstructionSelector::VisitI32x4ExtMulLowI16x8U(Node* node) {
+ VisitExtMul(this, kArm64Umull, node, 32);
+}
+
+void InstructionSelector::VisitI32x4ExtMulHighI16x8U(Node* node) {
+ VisitExtMul(this, kArm64Umull2, node, 32);
+}
+
+void InstructionSelector::VisitI64x2ExtMulLowI32x4S(Node* node) {
+ VisitExtMul(this, kArm64Smull, node, 64);
+}
+
+void InstructionSelector::VisitI64x2ExtMulHighI32x4S(Node* node) {
+ VisitExtMul(this, kArm64Smull2, node, 64);
+}
+
+void InstructionSelector::VisitI64x2ExtMulLowI32x4U(Node* node) {
+ VisitExtMul(this, kArm64Umull, node, 64);
+}
+
+void InstructionSelector::VisitI64x2ExtMulHighI32x4U(Node* node) {
+ VisitExtMul(this, kArm64Umull2, node, 64);
+}
+
+namespace {
+void VisitExtAddPairwise(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node, int dst_lane_size) {
+ InstructionCode code = opcode;
+ code |= MiscField::encode(dst_lane_size);
+ VisitRR(selector, code, node);
+}
+} // namespace
+
+void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8S(Node* node) {
+ VisitExtAddPairwise(this, kArm64Saddlp, node, 32);
+}
+
+void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8U(Node* node) {
+ VisitExtAddPairwise(this, kArm64Uaddlp, node, 32);
+}
+
+void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16S(Node* node) {
+ VisitExtAddPairwise(this, kArm64Saddlp, node, 16);
+}
+
+void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
+ VisitExtAddPairwise(this, kArm64Uaddlp, node, 16);
+}
+
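
A scalar reference for the pairwise widening add these visitors select (assumed semantics per Saddlp; i16x8 -> i32x4 shown, Uaddlp is the unsigned analogue): adjacent lanes are summed into one double-width lane, so the add cannot overflow.

#include <cstdint>

void SaddlpModel(int32_t out[4], const int16_t in[8]) {
  for (int i = 0; i < 4; i++) {
    out[i] = int32_t{in[2 * i]} + int32_t{in[2 * i + 1]};
  }
}
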
void InstructionSelector::VisitInt32MulHigh(Node* node) {
Arm64OperandGenerator g(this);
InstructionOperand const smull_operand = g.TempRegister();
@@ -1764,7 +1865,6 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
EmitLoad(this, value, opcode, immediate_mode, rep, node);
return;
@@ -1772,10 +1872,10 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
if (value->opcode() == IrOpcode::kWord32Sar && CanCover(node, value)) {
Int32BinopMatcher m(value);
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
Arm64OperandGenerator g(this);
// Mask the shift amount, to keep the same semantics as Word32Sar.
- int right = m.right().Value() & 0x1F;
+ int right = m.right().ResolvedValue() & 0x1F;
Emit(kArm64Sbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(right), g.TempImmediate(32 - right));
return;
@@ -2211,8 +2311,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
if (opcode == kArm64Cmp && !cont->IsPoisoned()) {
Int64Matcher m(right);
- if (m.HasValue()) {
- if (TryEmitCbzOrTbz<64>(selector, left, m.Value(), node,
+ if (m.HasResolvedValue()) {
+ if (TryEmitCbzOrTbz<64>(selector, left, m.ResolvedValue(), node,
cont->condition(), cont)) {
return;
}
@@ -2228,15 +2328,16 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
Int32BinopMatcher m(node);
FlagsCondition cond = cont->condition();
if (!cont->IsPoisoned()) {
- if (m.right().HasValue()) {
- if (TryEmitCbzOrTbz<32>(selector, m.left().node(), m.right().Value(),
- node, cond, cont)) {
+ if (m.right().HasResolvedValue()) {
+ if (TryEmitCbzOrTbz<32>(selector, m.left().node(),
+ m.right().ResolvedValue(), node, cond, cont)) {
return;
}
- } else if (m.left().HasValue()) {
+ } else if (m.left().HasResolvedValue()) {
FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
- if (TryEmitCbzOrTbz<32>(selector, m.right().node(), m.left().Value(),
- node, commuted_cond, cont)) {
+ if (TryEmitCbzOrTbz<32>(selector, m.right().node(),
+ m.left().ResolvedValue(), node, commuted_cond,
+ cont)) {
return;
}
}
@@ -2313,7 +2414,7 @@ struct TestAndBranchMatcher {
unsigned bit() const {
DCHECK(Matches());
- return base::bits::CountTrailingZeros(matcher_.right().Value());
+ return base::bits::CountTrailingZeros(matcher_.right().ResolvedValue());
}
Node* input() const {
@@ -2328,8 +2429,8 @@ struct TestAndBranchMatcher {
void Initialize() {
if (cont_->IsBranch() && !cont_->IsPoisoned() &&
- matcher_.right().HasValue() &&
- base::bits::IsPowerOfTwo(matcher_.right().Value())) {
+ matcher_.right().HasResolvedValue() &&
+ base::bits::IsPowerOfTwo(matcher_.right().ResolvedValue())) {
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont_->condition() == kEqual) ||
(cont_->condition() == kNotEqual));
@@ -2967,7 +3068,7 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode =
@@ -2988,7 +3089,7 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode = kArm64Word64AtomicLoadUint8;
@@ -3010,7 +3111,7 @@ void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kWord32AtomicStoreWord8;
@@ -3029,7 +3130,7 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kArm64Word64AtomicStoreWord8;
@@ -3050,7 +3151,7 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
@@ -3064,13 +3165,12 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = kArm64Word64AtomicExchangeUint8;
@@ -3082,13 +3182,12 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
opcode = kArm64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
@@ -3102,13 +3201,12 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = kArm64Word64AtomicCompareExchangeUint8;
@@ -3120,7 +3218,6 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
opcode = kArm64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
@@ -3128,7 +3225,7 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = int8_op;
@@ -3142,7 +3239,6 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
opcode = word32_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinop(this, node, opcode);
}
@@ -3164,7 +3260,7 @@ VISIT_ATOMIC_BINOP(Xor)
void InstructionSelector::VisitWord64AtomicBinaryOperation(
Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
ArchOpcode uint64_op) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = uint8_op;
@@ -3176,7 +3272,6 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
opcode = uint64_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinop(this, node, opcode);
}
@@ -3223,24 +3318,14 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \
V(I64x2Neg, kArm64I64x2Neg) \
V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4) \
- V(I32x4SConvertI16x8Low, kArm64I32x4SConvertI16x8Low) \
- V(I32x4SConvertI16x8High, kArm64I32x4SConvertI16x8High) \
V(I32x4Neg, kArm64I32x4Neg) \
V(I32x4UConvertF32x4, kArm64I32x4UConvertF32x4) \
- V(I32x4UConvertI16x8Low, kArm64I32x4UConvertI16x8Low) \
- V(I32x4UConvertI16x8High, kArm64I32x4UConvertI16x8High) \
V(I32x4Abs, kArm64I32x4Abs) \
- V(I16x8SConvertI8x16Low, kArm64I16x8SConvertI8x16Low) \
- V(I16x8SConvertI8x16High, kArm64I16x8SConvertI8x16High) \
V(I16x8Neg, kArm64I16x8Neg) \
- V(I16x8UConvertI8x16Low, kArm64I16x8UConvertI8x16Low) \
- V(I16x8UConvertI8x16High, kArm64I16x8UConvertI8x16High) \
V(I16x8Abs, kArm64I16x8Abs) \
V(I8x16Neg, kArm64I8x16Neg) \
V(I8x16Abs, kArm64I8x16Abs) \
V(S128Not, kArm64S128Not) \
- V(V64x2AnyTrue, kArm64V128AnyTrue) \
- V(V64x2AllTrue, kArm64V64x2AllTrue) \
V(V32x4AnyTrue, kArm64V128AnyTrue) \
V(V32x4AllTrue, kArm64V32x4AllTrue) \
V(V16x8AnyTrue, kArm64V128AnyTrue) \
@@ -3287,11 +3372,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I64x2Add, kArm64I64x2Add) \
V(I64x2Sub, kArm64I64x2Sub) \
V(I64x2Eq, kArm64I64x2Eq) \
- V(I64x2Ne, kArm64I64x2Ne) \
- V(I64x2GtS, kArm64I64x2GtS) \
- V(I64x2GeS, kArm64I64x2GeS) \
- V(I64x2GtU, kArm64I64x2GtU) \
- V(I64x2GeU, kArm64I64x2GeU) \
V(I32x4AddHoriz, kArm64I32x4AddHoriz) \
V(I32x4Mul, kArm64I32x4Mul) \
V(I32x4MinS, kArm64I32x4MinS) \
@@ -3306,9 +3386,9 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4GeU, kArm64I32x4GeU) \
V(I32x4DotI16x8S, kArm64I32x4DotI16x8S) \
V(I16x8SConvertI32x4, kArm64I16x8SConvertI32x4) \
- V(I16x8AddSaturateS, kArm64I16x8AddSaturateS) \
+ V(I16x8AddSatS, kArm64I16x8AddSatS) \
V(I16x8AddHoriz, kArm64I16x8AddHoriz) \
- V(I16x8SubSaturateS, kArm64I16x8SubSaturateS) \
+ V(I16x8SubSatS, kArm64I16x8SubSatS) \
V(I16x8Mul, kArm64I16x8Mul) \
V(I16x8MinS, kArm64I16x8MinS) \
V(I16x8MaxS, kArm64I16x8MaxS) \
@@ -3317,16 +3397,17 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8GtS, kArm64I16x8GtS) \
V(I16x8GeS, kArm64I16x8GeS) \
V(I16x8UConvertI32x4, kArm64I16x8UConvertI32x4) \
- V(I16x8AddSaturateU, kArm64I16x8AddSaturateU) \
- V(I16x8SubSaturateU, kArm64I16x8SubSaturateU) \
+ V(I16x8AddSatU, kArm64I16x8AddSatU) \
+ V(I16x8SubSatU, kArm64I16x8SubSatU) \
V(I16x8MinU, kArm64I16x8MinU) \
V(I16x8MaxU, kArm64I16x8MaxU) \
V(I16x8GtU, kArm64I16x8GtU) \
V(I16x8GeU, kArm64I16x8GeU) \
V(I16x8RoundingAverageU, kArm64I16x8RoundingAverageU) \
+ V(I16x8Q15MulRSatS, kArm64I16x8Q15MulRSatS) \
V(I8x16SConvertI16x8, kArm64I8x16SConvertI16x8) \
- V(I8x16AddSaturateS, kArm64I8x16AddSaturateS) \
- V(I8x16SubSaturateS, kArm64I8x16SubSaturateS) \
+ V(I8x16AddSatS, kArm64I8x16AddSatS) \
+ V(I8x16SubSatS, kArm64I8x16SubSatS) \
V(I8x16Mul, kArm64I8x16Mul) \
V(I8x16MinS, kArm64I8x16MinS) \
V(I8x16MaxS, kArm64I8x16MaxS) \
@@ -3335,8 +3416,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16GtS, kArm64I8x16GtS) \
V(I8x16GeS, kArm64I8x16GeS) \
V(I8x16UConvertI16x8, kArm64I8x16UConvertI16x8) \
- V(I8x16AddSaturateU, kArm64I8x16AddSaturateU) \
- V(I8x16SubSaturateU, kArm64I8x16SubSaturateU) \
+ V(I8x16AddSatU, kArm64I8x16AddSatU) \
+ V(I8x16SubSatU, kArm64I8x16SubSatU) \
V(I8x16MinU, kArm64I8x16MinU) \
V(I8x16MaxU, kArm64I8x16MaxU) \
V(I8x16GtU, kArm64I8x16GtU) \
@@ -3716,6 +3797,69 @@ void InstructionSelector::VisitF64x2Pmax(Node* node) {
VisitPminOrPmax(this, kArm64F64x2Pmax, node);
}
+namespace {
+void VisitSignExtendLong(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node, int lane_size) {
+ InstructionCode code = opcode;
+ code |= MiscField::encode(lane_size);
+ VisitRR(selector, code, node);
+}
+} // namespace
+
+void InstructionSelector::VisitI64x2SConvertI32x4Low(Node* node) {
+ VisitSignExtendLong(this, kArm64Sxtl, node, 64);
+}
+
+void InstructionSelector::VisitI64x2SConvertI32x4High(Node* node) {
+ VisitSignExtendLong(this, kArm64Sxtl2, node, 64);
+}
+
+void InstructionSelector::VisitI64x2UConvertI32x4Low(Node* node) {
+ VisitSignExtendLong(this, kArm64Uxtl, node, 64);
+}
+
+void InstructionSelector::VisitI64x2UConvertI32x4High(Node* node) {
+ VisitSignExtendLong(this, kArm64Uxtl2, node, 64);
+}
+
+void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
+ VisitSignExtendLong(this, kArm64Sxtl, node, 32);
+}
+
+void InstructionSelector::VisitI32x4SConvertI16x8High(Node* node) {
+ VisitSignExtendLong(this, kArm64Sxtl2, node, 32);
+}
+
+void InstructionSelector::VisitI32x4UConvertI16x8Low(Node* node) {
+ VisitSignExtendLong(this, kArm64Uxtl, node, 32);
+}
+
+void InstructionSelector::VisitI32x4UConvertI16x8High(Node* node) {
+ VisitSignExtendLong(this, kArm64Uxtl2, node, 32);
+}
+
+void InstructionSelector::VisitI16x8SConvertI8x16Low(Node* node) {
+ VisitSignExtendLong(this, kArm64Sxtl, node, 16);
+}
+
+void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
+ VisitSignExtendLong(this, kArm64Sxtl2, node, 16);
+}
+
+void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
+ VisitSignExtendLong(this, kArm64Uxtl, node, 16);
+}
+
+void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
+ VisitSignExtendLong(this, kArm64Uxtl2, node, 16);
+}
+
+void InstructionSelector::VisitI8x16Popcnt(Node* node) {
+ InstructionCode code = kArm64Cnt;
+ code |= MiscField::encode(8);
+ VisitRR(this, code, node);
+}
+
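The new helper packs the lane size into the instruction word itself: VisitSignExtendLong ORs MiscField::encode(lane_size) into the opcode, and VisitI8x16Popcnt does the same with 8. A rough sketch of that packing, assuming an illustrative field layout (V8's actual MiscField offsets differ):

#include <cstdint>
#include <iostream>

using InstructionCode = uint32_t;

// Illustrative layout: low 9 bits hold the arch opcode, higher bits a
// misc payload such as the lane size.
constexpr int kOpcodeBits = 9;
constexpr InstructionCode kOpcodeMask = (1u << kOpcodeBits) - 1;

constexpr InstructionCode EncodeMisc(InstructionCode opcode, uint32_t misc) {
  return opcode | (misc << kOpcodeBits);
}

int main() {
  constexpr InstructionCode kSxtl = 0x7B;        // placeholder opcode value
  InstructionCode code = EncodeMisc(kSxtl, 64);  // lane_size = 64
  std::cout << "opcode=" << (code & kOpcodeMask)
            << " lane_size=" << (code >> kOpcodeBits) << "\n";
}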
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h
index 88f82fe930..93113b97ca 100644
--- a/deps/v8/src/compiler/backend/code-generator-impl.h
+++ b/deps/v8/src/compiler/backend/code-generator-impl.h
@@ -257,17 +257,6 @@ class OutOfLineCode : public ZoneObject {
OutOfLineCode* const next_;
};
-inline bool HasCallDescriptorFlag(Instruction* instr,
- CallDescriptor::Flag flag) {
- STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode == 10);
-#ifdef DEBUG
- static constexpr int kInstructionCodeFlagsMask =
- ((1 << CallDescriptor::kFlagsBitsEncodedInInstructionCode) - 1);
- DCHECK_EQ(static_cast<int>(flag) & kInstructionCodeFlagsMask, flag);
-#endif
- return MiscField::decode(instr->opcode()) & flag;
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
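The deleted free function lives on as Instruction::HasCallDescriptorFlag (used in code-generator.cc below); either way the check is the same bit test against call-descriptor flags carried in the instruction's MiscField. A hedged sketch with illustrative bit assignments (the real flag values and the 10-bit budget come from CallDescriptor):

#include <cstdint>

constexpr int kFlagsBits = 10;  // mirrors kFlagsBitsEncodedInInstructionCode
constexpr uint32_t kFlagsMask = (1u << kFlagsBits) - 1;

constexpr uint32_t kNeedsFrameState = 1u << 0;  // illustrative values
constexpr uint32_t kHasExceptionHandler = 1u << 1;

constexpr bool HasCallDescriptorFlag(uint32_t misc_field, uint32_t flag) {
  return (misc_field & kFlagsMask & flag) != 0;
}

static_assert(HasCallDescriptorFlag(kNeedsFrameState, kNeedsFrameState), "");
static_assert(!HasCallDescriptorFlag(kNeedsFrameState, kHasExceptionHandler),
              "");

int main() { return 0; }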
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index 33a80f52d0..0cb0e6172f 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -162,8 +162,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
DeoptimizeKind deopt_kind = exit->kind();
DeoptimizeReason deoptimization_reason = exit->reason();
- Address deopt_entry =
+ Builtins::Name deopt_entry =
Deoptimizer::GetDeoptimizationEntry(tasm()->isolate(), deopt_kind);
+ Label* jump_deoptimization_entry_label =
+ &jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)];
if (info()->source_positions()) {
tasm()->RecordDeoptReason(deoptimization_reason, exit->pos(),
deoptimization_id);
@@ -177,7 +179,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
}
tasm()->CallForDeoptimization(deopt_entry, deoptimization_id, exit->label(),
- deopt_kind);
+ deopt_kind, jump_deoptimization_entry_label);
exit->set_emitted();
return kSuccess;
}
@@ -324,7 +326,7 @@ void CodeGenerator::AssembleCode() {
// For some targets, we must make sure that constant and veneer pools are
// emitted before emitting the deoptimization exits.
- PrepareForDeoptimizationExits(static_cast<int>(deoptimization_exits_.size()));
+ PrepareForDeoptimizationExits(&deoptimization_exits_);
if (Deoptimizer::kSupportsFixedDeoptExitSizes) {
deopt_exit_start_offset_ = tasm()->pc_offset();
@@ -338,7 +340,7 @@ void CodeGenerator::AssembleCode() {
// Deoptimizer::kSupportsFixedDeoptExitSizes is true, lazy deopts
// might need additional instructions.
auto cmp = [](const DeoptimizationExit* a, const DeoptimizationExit* b) {
- static_assert(DeoptimizeKind::kLazy == DeoptimizeKind::kLastDeoptimizeKind,
+ static_assert(DeoptimizeKind::kLazy == kLastDeoptimizeKind,
"lazy deopts are expected to be emitted last");
if (a->kind() != b->kind()) {
return a->kind() < b->kind();
@@ -391,6 +393,9 @@ void CodeGenerator::AssembleCode() {
// size as reported by perf.
unwinding_info_writer_.Finish(tasm()->pc_offset());
+ // Final alignment before starting on the metadata section.
+ tasm()->Align(Code::kMetadataAlignment);
+
safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());
// Emit the exception handler table.
@@ -517,8 +522,9 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
CHECK_IMPLIES(info()->IsNativeContextIndependent(),
code->IsNativeContextIndependent(isolate()));
+ // Counts both compiled code and metadata.
isolate()->counters()->total_compiled_code_size()->Increment(
- code->raw_instruction_size());
+ code->raw_body_size());
LOG_CODE_EVENT(isolate(),
CodeLinePosInfoRecordEvent(code->raw_instruction_start(),
@@ -974,12 +980,12 @@ Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
void CodeGenerator::RecordCallPosition(Instruction* instr) {
const bool needs_frame_state =
- HasCallDescriptorFlag(instr, CallDescriptor::kNeedsFrameState);
+ instr->HasCallDescriptorFlag(CallDescriptor::kNeedsFrameState);
RecordSafepoint(instr->reference_map(), needs_frame_state
? Safepoint::kLazyDeopt
: Safepoint::kNoLazyDeopt);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kHasExceptionHandler)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler)) {
InstructionOperandConverter i(this, instr);
RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler());
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index 26d03f129a..6181bc7d15 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -406,7 +406,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
InstructionOperand* op, MachineType type);
void MarkLazyDeoptSite();
- void PrepareForDeoptimizationExits(int deopt_count);
+ void PrepareForDeoptimizationExits(ZoneDeque<DeoptimizationExit*>* exits);
DeoptimizationExit* AddDeoptimizationExit(Instruction* instr,
size_t frame_state_offset);
@@ -446,6 +446,14 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
int handler_table_offset_ = 0;
int last_lazy_deopt_pc_ = 0;
+ // Deoptimization exits must be as small as possible, since their count grows
+ // with function size. {jump_deoptimization_entry_labels_} is an optimization
+ // to that effect, which extracts the (potentially large) instruction
+ // sequence for the final jump to the deoptimization entry into a single spot
+ // per Code object. All deopt exits can then near-call to this label. Note:
+ // not used on all architectures.
+ Label jump_deoptimization_entry_labels_[kDeoptimizeKindCount];
+
// The maximal combined height of all frames produced upon deoptimization, and
// the maximal number of pushed arguments for function calls. Applied as an
// offset to the first stack check of an optimized function.
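The space argument in the new comment is easy to quantify. With illustrative byte counts (not measured V8 numbers), sharing one far-jump sequence per Code object pays for itself after a handful of exits:

#include <iostream>

int main() {
  constexpr int kFarSeqBytes = 16;   // illustrative far-jump sequence size
  constexpr int kNearCallBytes = 5;  // illustrative near-call size
  constexpr int n = 100;             // deopt exits in one function
  int inline_everywhere = kFarSeqBytes * n;
  int shared_thunk = kNearCallBytes * n + kFarSeqBytes;
  std::cout << (inline_everywhere - shared_thunk) << " bytes saved\n";
}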
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 077324a31f..1820e39799 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -695,10 +695,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
} else {
__ call(reg);
@@ -723,7 +723,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
__ wasm_call(wasm_code, constant.rmode());
} else {
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(wasm_code, constant.rmode());
} else {
__ call(wasm_code, constant.rmode());
@@ -731,7 +731,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
} else {
Register reg = i.InputRegister(0);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
} else {
__ call(reg);
@@ -753,10 +753,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
@@ -773,7 +773,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ jmp(wasm_code, constant.rmode());
} else {
Register reg = i.InputRegister(0);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
@@ -787,9 +787,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
@@ -927,8 +927,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- CodeGenResult result = AssembleDeoptimizerCall(exit);
- if (result != kSuccess) return result;
+ __ jmp(exit->label());
break;
}
case kArchRet:
@@ -2208,18 +2207,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEF32x4Abs: {
XMMRegister dst = i.OutputSimd128Register();
- Operand src = i.InputOperand(0);
- if (src.is_reg(dst)) {
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrld(kScratchDoubleReg, 1);
- __ andps(dst, kScratchDoubleReg);
- } else {
- // TODO(zhin) Improve codegen for this case.
- __ pcmpeqd(dst, dst);
- __ movups(kScratchDoubleReg, src);
- __ psrld(dst, 1);
- __ andps(dst, kScratchDoubleReg);
- }
+ DCHECK_EQ(i.InputSimd128Register(0), dst);
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrld(kScratchDoubleReg, 1);
+ __ andps(dst, kScratchDoubleReg);
break;
}
case kAVXF32x4Abs: {
@@ -2232,18 +2223,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEF32x4Neg: {
XMMRegister dst = i.OutputSimd128Register();
- Operand src = i.InputOperand(0);
- if (src.is_reg(dst)) {
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ pslld(kScratchDoubleReg, 31);
- __ xorps(dst, kScratchDoubleReg);
- } else {
- // TODO(zhin) Improve codegen for this case.
- __ pcmpeqd(dst, dst);
- __ movups(kScratchDoubleReg, src);
- __ pslld(dst, 31);
- __ xorps(dst, kScratchDoubleReg);
- }
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ pslld(kScratchDoubleReg, 31);
+ __ xorps(dst, kScratchDoubleReg);
break;
}
case kAVXF32x4Neg: {
@@ -2255,9 +2238,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEF32x4Sqrt: {
- // TODO(zhin) Improve codegen for this case.
- __ movups(kScratchDoubleReg, i.InputOperand(0));
- __ sqrtps(i.OutputSimd128Register(), kScratchDoubleReg);
+ __ sqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kAVXF32x4Sqrt: {
@@ -2882,7 +2863,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSEI16x8SConvertI32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ packssdw(i.OutputSimd128Register(), i.InputOperand(1));
+ __ packssdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kAVXI16x8SConvertI32x4: {
@@ -2902,12 +2883,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
- case kSSEI16x8AddSaturateS: {
+ case kSSEI16x8AddSatS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddsw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
- case kAVXI16x8AddSaturateS: {
+ case kAVXI16x8AddSatS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpaddsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
@@ -2936,12 +2917,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
- case kSSEI16x8SubSaturateS: {
+ case kSSEI16x8SubSatS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubsw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
- case kAVXI16x8SubSaturateS: {
+ case kAVXI16x8SubSatS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsubsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
@@ -3051,33 +3032,33 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEI16x8UConvertI32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ packusdw(i.OutputSimd128Register(), i.InputOperand(1));
+ __ packusdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kAVXI16x8UConvertI32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
- __ vpackusdw(dst, dst, i.InputOperand(1));
+ __ vpackusdw(dst, dst, i.InputSimd128Register(1));
break;
}
- case kSSEI16x8AddSaturateU: {
+ case kSSEI16x8AddSatU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddusw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
- case kAVXI16x8AddSaturateU: {
+ case kAVXI16x8AddSatU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpaddusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8SubSaturateU: {
+ case kSSEI16x8SubSatU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubusw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
- case kAVXI16x8SubSaturateU: {
+ case kAVXI16x8SubSatU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsubusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
@@ -3290,12 +3271,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
- case kSSEI8x16AddSaturateS: {
+ case kSSEI8x16AddSatS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddsb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
- case kAVXI8x16AddSaturateS: {
+ case kAVXI8x16AddSatS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpaddsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
@@ -3312,12 +3293,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
- case kSSEI8x16SubSaturateS: {
+ case kSSEI8x16SubSatS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubsb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
- case kAVXI8x16SubSaturateS: {
+ case kAVXI8x16SubSatS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsubsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
@@ -3495,23 +3476,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpackuswb(dst, dst, i.InputOperand(1));
break;
}
- case kSSEI8x16AddSaturateU: {
+ case kSSEI8x16AddSatU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddusb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
- case kAVXI8x16AddSaturateU: {
+ case kAVXI8x16AddSatU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpaddusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI8x16SubSaturateU: {
+ case kSSEI8x16SubSatU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubusb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
- case kAVXI8x16SubSaturateU: {
+ case kAVXI8x16SubSatU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsubusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
@@ -3645,16 +3626,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kSSES128Not: {
XMMRegister dst = i.OutputSimd128Register();
- Operand src = i.InputOperand(0);
- if (src.is_reg(dst)) {
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ pxor(dst, kScratchDoubleReg);
- } else {
- // TODO(zhin) Improve codegen for this case.
- __ pcmpeqd(dst, dst);
- __ movups(kScratchDoubleReg, src);
- __ pxor(dst, kScratchDoubleReg);
- }
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(dst, kScratchDoubleReg);
break;
}
case kAVXS128Not: {
@@ -3781,48 +3755,48 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(esp, tmp);
break;
}
- case kIA32S8x16LoadSplat: {
+ case kIA32S128Load8Splat: {
__ Pinsrb(i.OutputSimd128Register(), i.MemoryOperand(), 0);
__ Pxor(kScratchDoubleReg, kScratchDoubleReg);
__ Pshufb(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
- case kIA32S16x8LoadSplat: {
+ case kIA32S128Load16Splat: {
__ Pinsrw(i.OutputSimd128Register(), i.MemoryOperand(), 0);
__ Pshuflw(i.OutputSimd128Register(), i.OutputSimd128Register(),
uint8_t{0});
__ Punpcklqdq(i.OutputSimd128Register(), i.OutputSimd128Register());
break;
}
- case kIA32S32x4LoadSplat: {
+ case kIA32S128Load32Splat: {
__ Vbroadcastss(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kIA32S64x2LoadSplat: {
+ case kIA32S128Load64Splat: {
__ Movddup(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kIA32I16x8Load8x8S: {
+ case kIA32S128Load8x8S: {
__ Pmovsxbw(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kIA32I16x8Load8x8U: {
+ case kIA32S128Load8x8U: {
__ Pmovzxbw(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kIA32I32x4Load16x4S: {
+ case kIA32S128Load16x4S: {
__ Pmovsxwd(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kIA32I32x4Load16x4U: {
+ case kIA32S128Load16x4U: {
__ Pmovzxwd(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kIA32I64x2Load32x2S: {
+ case kIA32S128Load32x2S: {
__ Pmovsxdq(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kIA32I64x2Load32x2U: {
+ case kIA32S128Load32x2U: {
__ Pmovzxdq(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
@@ -4795,7 +4769,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -4811,37 +4785,86 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
}
- // Might need ecx for scratch if pop_size is too big or if there is a variable
- // pop count.
+ // We might need ecx and edx for scratch.
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & edx.bit());
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & ecx.bit());
- size_t pop_size = call_descriptor->StackParameterCount() * kSystemPointerSize;
IA32OperandConverter g(this, nullptr);
+ int parameter_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
+
+ // {additional_pop_count} is only greater than zero if {parameter_count} == 0.
+ // Check RawMachineAssembler::PopAndReturn.
+ if (parameter_count != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (__ emit_debug_code()) {
+ __ cmp(g.ToRegister(additional_pop_count), Immediate(0));
+ __ Assert(equal, AbortReason::kUnexpectedAdditionalPopValue);
+ }
+ }
+
+ Register argc_reg = ecx;
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // Functions with JS linkage have at least one parameter (the receiver).
+ // If {parameter_count} == 0, the function is a builtin declared with
+ // kDontAdaptArgumentsSentinel, which pops its JS arguments itself.
+ const bool drop_jsargs = frame_access_state()->has_frame() &&
+ call_descriptor->IsJSFunctionCall() &&
+ parameter_count != 0;
+#else
+ const bool drop_jsargs = false;
+#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now if they always have the same
// number of return args.
- if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (additional_pop_count->IsImmediate() &&
+ g.ToConstant(additional_pop_count).ToInt32() == 0) {
if (return_label_.is_bound()) {
__ jmp(&return_label_);
return;
} else {
__ bind(&return_label_);
- AssembleDeconstructFrame();
}
- } else {
- AssembleDeconstructFrame();
}
+ if (drop_jsargs) {
+ // Get the actual argument count.
+ __ mov(argc_reg, Operand(ebp, StandardFrameConstants::kArgCOffset));
+ }
+ AssembleDeconstructFrame();
}
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & edx.bit());
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & ecx.bit());
- if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
- pop_size += g.ToConstant(pop).ToInt32() * kSystemPointerSize;
- __ Ret(static_cast<int>(pop_size), ecx);
+
+ if (drop_jsargs) {
+ // We must pop all arguments from the stack (including the receiver). This
+ // number of arguments is given by max(1 + argc_reg, parameter_count).
+ int parameter_count_without_receiver =
+ parameter_count - 1; // Exclude the receiver to simplify the
+ // computation. We'll account for it at the end.
+ Label mismatch_return;
+ Register scratch_reg = edx;
+ DCHECK_NE(argc_reg, scratch_reg);
+ __ cmp(argc_reg, Immediate(parameter_count_without_receiver));
+ __ j(greater, &mismatch_return, Label::kNear);
+ __ Ret(parameter_count * kSystemPointerSize, scratch_reg);
+ __ bind(&mismatch_return);
+ __ PopReturnAddressTo(scratch_reg);
+ __ lea(esp, Operand(esp, argc_reg, times_system_pointer_size,
+ kSystemPointerSize)); // Also pop the receiver.
+ // We use a return instead of a jump for better return address prediction.
+ __ PushReturnAddressFrom(scratch_reg);
+ __ Ret();
+ } else if (additional_pop_count->IsImmediate()) {
+ Register scratch_reg = ecx;
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ size_t pop_size = (parameter_count + additional_count) * kSystemPointerSize;
+ CHECK_LE(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
+ __ Ret(static_cast<int>(pop_size), scratch_reg);
} else {
- Register pop_reg = g.ToRegister(pop);
+ Register pop_reg = g.ToRegister(additional_pop_count);
Register scratch_reg = pop_reg == ecx ? edx : ecx;
+ int pop_size = static_cast<int>(parameter_count * kSystemPointerSize);
__ PopReturnAddressTo(scratch_reg);
__ lea(esp, Operand(esp, pop_reg, times_system_pointer_size,
static_cast<int>(pop_size)));
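The drop_jsargs path above implements max(1 + argc, parameter_count): compare argc against parameter_count - 1, pop the declared frame on the fast path, otherwise pop all actual arguments plus the receiver. The same arithmetic as a checkable sketch (argc excludes the receiver, parameter_count includes it):

#include <algorithm>
#include <cassert>

// Slots the callee pops on return: max(1 + argc, parameter_count).
int SlotsToDrop(int argc_without_receiver, int parameter_count) {
  return std::max(argc_without_receiver + 1, parameter_count);
}

int main() {
  // Declared 3 stack parameters (receiver included), called with 5 args:
  assert(SlotsToDrop(5, 3) == 6);  // mismatch_return path: pop argc + 1
  // Called with exactly the declared arguments:
  assert(SlotsToDrop(2, 3) == 3);  // fast path: Ret(parameter_count * size)
}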
@@ -4852,7 +4875,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
void CodeGenerator::FinishCode() {}
-void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index eca9dc9227..a56486479d 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -249,14 +249,14 @@ namespace compiler {
V(AVXI16x8SConvertI32x4) \
V(SSEI16x8Add) \
V(AVXI16x8Add) \
- V(SSEI16x8AddSaturateS) \
- V(AVXI16x8AddSaturateS) \
+ V(SSEI16x8AddSatS) \
+ V(AVXI16x8AddSatS) \
V(SSEI16x8AddHoriz) \
V(AVXI16x8AddHoriz) \
V(SSEI16x8Sub) \
V(AVXI16x8Sub) \
- V(SSEI16x8SubSaturateS) \
- V(AVXI16x8SubSaturateS) \
+ V(SSEI16x8SubSatS) \
+ V(AVXI16x8SubSatS) \
V(SSEI16x8Mul) \
V(AVXI16x8Mul) \
V(SSEI16x8MinS) \
@@ -276,10 +276,10 @@ namespace compiler {
V(IA32I16x8ShrU) \
V(SSEI16x8UConvertI32x4) \
V(AVXI16x8UConvertI32x4) \
- V(SSEI16x8AddSaturateU) \
- V(AVXI16x8AddSaturateU) \
- V(SSEI16x8SubSaturateU) \
- V(AVXI16x8SubSaturateU) \
+ V(SSEI16x8AddSatU) \
+ V(AVXI16x8AddSatU) \
+ V(SSEI16x8SubSatU) \
+ V(AVXI16x8SubSatU) \
V(SSEI16x8MinU) \
V(AVXI16x8MinU) \
V(SSEI16x8MaxU) \
@@ -303,12 +303,12 @@ namespace compiler {
V(IA32I8x16ShrS) \
V(SSEI8x16Add) \
V(AVXI8x16Add) \
- V(SSEI8x16AddSaturateS) \
- V(AVXI8x16AddSaturateS) \
+ V(SSEI8x16AddSatS) \
+ V(AVXI8x16AddSatS) \
V(SSEI8x16Sub) \
V(AVXI8x16Sub) \
- V(SSEI8x16SubSaturateS) \
- V(AVXI8x16SubSaturateS) \
+ V(SSEI8x16SubSatS) \
+ V(AVXI8x16SubSatS) \
V(SSEI8x16Mul) \
V(AVXI8x16Mul) \
V(SSEI8x16MinS) \
@@ -325,10 +325,10 @@ namespace compiler {
V(AVXI8x16GeS) \
V(SSEI8x16UConvertI16x8) \
V(AVXI8x16UConvertI16x8) \
- V(SSEI8x16AddSaturateU) \
- V(AVXI8x16AddSaturateU) \
- V(SSEI8x16SubSaturateU) \
- V(AVXI8x16SubSaturateU) \
+ V(SSEI8x16AddSatU) \
+ V(AVXI8x16AddSatU) \
+ V(SSEI8x16SubSatU) \
+ V(AVXI8x16SubSatU) \
V(IA32I8x16ShrU) \
V(SSEI8x16MinU) \
V(AVXI8x16MinU) \
@@ -357,16 +357,16 @@ namespace compiler {
V(IA32S128AndNot) \
V(IA32I8x16Swizzle) \
V(IA32I8x16Shuffle) \
- V(IA32S8x16LoadSplat) \
- V(IA32S16x8LoadSplat) \
- V(IA32S32x4LoadSplat) \
- V(IA32S64x2LoadSplat) \
- V(IA32I16x8Load8x8S) \
- V(IA32I16x8Load8x8U) \
- V(IA32I32x4Load16x4S) \
- V(IA32I32x4Load16x4U) \
- V(IA32I64x2Load32x2S) \
- V(IA32I64x2Load32x2U) \
+ V(IA32S128Load8Splat) \
+ V(IA32S128Load16Splat) \
+ V(IA32S128Load32Splat) \
+ V(IA32S128Load64Splat) \
+ V(IA32S128Load8x8S) \
+ V(IA32S128Load8x8U) \
+ V(IA32S128Load16x4S) \
+ V(IA32S128Load16x4U) \
+ V(IA32S128Load32x2S) \
+ V(IA32S128Load32x2U) \
V(IA32S32x4Swizzle) \
V(IA32S32x4Shuffle) \
V(IA32S16x8Blend) \
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 24abd58c7f..c8f3b19d0f 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -230,14 +230,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI16x8SConvertI32x4:
case kSSEI16x8Add:
case kAVXI16x8Add:
- case kSSEI16x8AddSaturateS:
- case kAVXI16x8AddSaturateS:
+ case kSSEI16x8AddSatS:
+ case kAVXI16x8AddSatS:
case kSSEI16x8AddHoriz:
case kAVXI16x8AddHoriz:
case kSSEI16x8Sub:
case kAVXI16x8Sub:
- case kSSEI16x8SubSaturateS:
- case kAVXI16x8SubSaturateS:
+ case kSSEI16x8SubSatS:
+ case kAVXI16x8SubSatS:
case kSSEI16x8Mul:
case kAVXI16x8Mul:
case kSSEI16x8MinS:
@@ -257,10 +257,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I16x8ShrU:
case kSSEI16x8UConvertI32x4:
case kAVXI16x8UConvertI32x4:
- case kSSEI16x8AddSaturateU:
- case kAVXI16x8AddSaturateU:
- case kSSEI16x8SubSaturateU:
- case kAVXI16x8SubSaturateU:
+ case kSSEI16x8AddSatU:
+ case kAVXI16x8AddSatU:
+ case kSSEI16x8SubSatU:
+ case kAVXI16x8SubSatU:
case kSSEI16x8MinU:
case kAVXI16x8MinU:
case kSSEI16x8MaxU:
@@ -284,12 +284,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I8x16ShrS:
case kSSEI8x16Add:
case kAVXI8x16Add:
- case kSSEI8x16AddSaturateS:
- case kAVXI8x16AddSaturateS:
+ case kSSEI8x16AddSatS:
+ case kAVXI8x16AddSatS:
case kSSEI8x16Sub:
case kAVXI8x16Sub:
- case kSSEI8x16SubSaturateS:
- case kAVXI8x16SubSaturateS:
+ case kSSEI8x16SubSatS:
+ case kAVXI8x16SubSatS:
case kSSEI8x16Mul:
case kAVXI8x16Mul:
case kSSEI8x16MinS:
@@ -306,10 +306,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI8x16GeS:
case kSSEI8x16UConvertI16x8:
case kAVXI8x16UConvertI16x8:
- case kSSEI8x16AddSaturateU:
- case kAVXI8x16AddSaturateU:
- case kSSEI8x16SubSaturateU:
- case kAVXI8x16SubSaturateU:
+ case kSSEI8x16AddSatU:
+ case kAVXI8x16AddSatU:
+ case kSSEI8x16SubSatU:
+ case kAVXI8x16SubSatU:
case kIA32I8x16ShrU:
case kSSEI8x16MinU:
case kAVXI8x16MinU:
@@ -399,16 +399,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Movsd:
case kIA32Movdqu:
// Moves are used for memory load/store operations.
- case kIA32S8x16LoadSplat:
- case kIA32S16x8LoadSplat:
- case kIA32S32x4LoadSplat:
- case kIA32S64x2LoadSplat:
- case kIA32I16x8Load8x8S:
- case kIA32I16x8Load8x8U:
- case kIA32I32x4Load16x4S:
- case kIA32I32x4Load16x4U:
- case kIA32I64x2Load32x2S:
- case kIA32I64x2Load32x2U:
+ case kIA32S128Load8Splat:
+ case kIA32S128Load16Splat:
+ case kIA32S128Load32Splat:
+ case kIA32S128Load64Splat:
+ case kIA32S128Load8x8S:
+ case kIA32S128Load8x8U:
+ case kIA32S128Load16x4S:
+ case kIA32S128Load16x4U:
+ case kIA32S128Load32x2S:
+ case kIA32S128Load32x2U:
return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
case kIA32Peek:
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index fec4053871..c16584a195 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -162,12 +162,13 @@ class IA32OperandGenerator final : public OperandGenerator {
RegisterMode register_mode = kRegister) {
{
LoadMatcher<ExternalReferenceMatcher> m(node);
- if (m.index().HasValue() && m.object().HasValue() &&
- selector()->CanAddressRelativeToRootsRegister(m.object().Value())) {
+ if (m.index().HasResolvedValue() && m.object().HasResolvedValue() &&
+ selector()->CanAddressRelativeToRootsRegister(
+ m.object().ResolvedValue())) {
ptrdiff_t const delta =
- m.index().Value() +
+ m.index().ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
- selector()->isolate(), m.object().Value());
+ selector()->isolate(), m.object().ResolvedValue());
if (is_int32(delta)) {
inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
return kMode_Root;
@@ -364,46 +365,52 @@ void InstructionSelector::VisitAbortCSAAssert(Node* node) {
void InstructionSelector::VisitLoadTransform(Node* node) {
LoadTransformParameters params = LoadTransformParametersOf(node->op());
- InstructionCode opcode = kArchNop;
+ InstructionCode opcode;
switch (params.transformation) {
- case LoadTransformation::kS8x16LoadSplat:
- opcode = kIA32S8x16LoadSplat;
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kIA32S128Load8Splat;
break;
- case LoadTransformation::kS16x8LoadSplat:
- opcode = kIA32S16x8LoadSplat;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kIA32S128Load16Splat;
break;
- case LoadTransformation::kS32x4LoadSplat:
- opcode = kIA32S32x4LoadSplat;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kIA32S128Load32Splat;
break;
- case LoadTransformation::kS64x2LoadSplat:
- opcode = kIA32S64x2LoadSplat;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kIA32S128Load64Splat;
break;
- case LoadTransformation::kI16x8Load8x8S:
- opcode = kIA32I16x8Load8x8S;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kIA32S128Load8x8S;
break;
- case LoadTransformation::kI16x8Load8x8U:
- opcode = kIA32I16x8Load8x8U;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kIA32S128Load8x8U;
break;
- case LoadTransformation::kI32x4Load16x4S:
- opcode = kIA32I32x4Load16x4S;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kIA32S128Load16x4S;
break;
- case LoadTransformation::kI32x4Load16x4U:
- opcode = kIA32I32x4Load16x4U;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kIA32S128Load16x4U;
break;
- case LoadTransformation::kI64x2Load32x2S:
- opcode = kIA32I64x2Load32x2S;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kIA32S128Load32x2S;
break;
- case LoadTransformation::kI64x2Load32x2U:
- opcode = kIA32I64x2Load32x2U;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kIA32S128Load32x2U;
+ break;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kIA32Movss;
+ break;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kIA32Movsd;
break;
default:
UNREACHABLE();
}
// IA32 supports unaligned loads.
- DCHECK_NE(params.kind, LoadKind::kUnaligned);
+ DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
// Trap handler is not supported on IA32.
- DCHECK_NE(params.kind, LoadKind::kProtected);
+ DCHECK_NE(params.kind, MemoryAccessKind::kProtected);
IA32OperandGenerator g(this);
InstructionOperand outputs[1];
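The two new cases lower kS128Load32Zero and kS128Load64Zero to plain scalar moves because SSE scalar loads from memory already zero-fill the remaining lanes. A small host-side check of that semantics (x86 only, standard intrinsics rather than V8's assembler):

#include <immintrin.h>

#include <cassert>

int main() {
  float mem = 3.5f;
  __m128 v = _mm_load_ss(&mem);  // lane 0 = 3.5f, lanes 1..3 zeroed
  alignas(16) float out[4];
  _mm_store_ps(out, v);
  assert(out[0] == 3.5f && out[1] == 0.f && out[2] == 0.f && out[3] == 0.f);
  return 0;
}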
@@ -419,7 +426,7 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kIA32Movss;
@@ -503,7 +510,7 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, arraysize(inputs), inputs, temp_count, temps);
} else {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kIA32Movss;
@@ -532,7 +539,6 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
InstructionOperand val;
@@ -1779,7 +1785,8 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
Float64Matcher mleft(left);
- if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
+ if (mleft.HasResolvedValue() &&
+ (bit_cast<uint64_t>(mleft.ResolvedValue()) >> 32) == 0u) {
Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
return;
}
@@ -1818,7 +1825,7 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
IA32OperandGenerator g(this);
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kWord32AtomicExchangeInt8;
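Note that VisitWord32AtomicStore selects the AtomicExchange opcodes: on IA32 a sequentially consistent store is realized as an exchange, since xchg with a memory operand implies a full fence while a plain mov does not. The C++-level equivalent (the xchg lowering is what mainstream x86 compilers typically emit, not something this diff itself guarantees):

#include <atomic>

std::atomic<int> g{0};

// On x86 this is commonly lowered to xchg, an implicitly locked
// read-modify-write that provides the required full-fence semantics.
void SeqCstStore(int v) { g.store(v, std::memory_order_seq_cst); }

int main() {
  SeqCstStore(42);
  return g.load(std::memory_order_relaxed) == 42 ? 0 : 1;
}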
@@ -1838,7 +1845,7 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
IA32OperandGenerator g(this);
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -1851,7 +1858,6 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode, type.representation());
}
@@ -1864,7 +1870,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* new_value = node->InputAt(3);
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -1877,7 +1883,6 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
- return;
}
AddressingMode addressing_mode;
InstructionOperand new_val_operand =
@@ -1896,7 +1901,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = int8_op;
} else if (type == MachineType::Uint8()) {
@@ -1909,7 +1914,6 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
opcode = word32_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinOp(this, node, opcode, type.representation());
}
@@ -2079,10 +2083,10 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I32x4GeU) \
V(I16x8SConvertI32x4) \
V(I16x8Add) \
- V(I16x8AddSaturateS) \
+ V(I16x8AddSatS) \
V(I16x8AddHoriz) \
V(I16x8Sub) \
- V(I16x8SubSaturateS) \
+ V(I16x8SubSatS) \
V(I16x8Mul) \
V(I16x8MinS) \
V(I16x8MaxS) \
@@ -2090,25 +2094,25 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I16x8Ne) \
V(I16x8GtS) \
V(I16x8GeS) \
- V(I16x8AddSaturateU) \
- V(I16x8SubSaturateU) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
V(I16x8MinU) \
V(I16x8MaxU) \
V(I16x8GtU) \
V(I16x8GeU) \
V(I8x16SConvertI16x8) \
V(I8x16Add) \
- V(I8x16AddSaturateS) \
+ V(I8x16AddSatS) \
V(I8x16Sub) \
- V(I8x16SubSaturateS) \
+ V(I8x16SubSatS) \
V(I8x16MinS) \
V(I8x16MaxS) \
V(I8x16Eq) \
V(I8x16Ne) \
V(I8x16GtS) \
V(I8x16GeS) \
- V(I8x16AddSaturateU) \
- V(I8x16SubSaturateU) \
+ V(I8x16AddSatU) \
+ V(I8x16SubSatU) \
V(I8x16MinU) \
V(I8x16MaxU) \
V(I8x16GtU) \
@@ -2234,9 +2238,15 @@ void InstructionSelector::VisitF64x2ExtractLane(Node* node) {
void InstructionSelector::VisitI64x2SplatI32Pair(Node* node) {
IA32OperandGenerator g(this);
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- InstructionOperand operand1 = g.Use(node->InputAt(1));
- Emit(kIA32I64x2SplatI32Pair, g.DefineAsRegister(node), operand0, operand1);
+ Int32Matcher match_left(node->InputAt(0));
+ Int32Matcher match_right(node->InputAt(1));
+ if (match_left.Is(0) && match_right.Is(0)) {
+ Emit(kIA32S128Zero, g.DefineAsRegister(node));
+ } else {
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.Use(node->InputAt(1));
+ Emit(kIA32I64x2SplatI32Pair, g.DefineAsRegister(node), operand0, operand1);
+ }
}
void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) {
@@ -2333,6 +2343,7 @@ void InstructionSelector::VisitS128Select(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand operand2 = g.UseRegister(node->InputAt(2));
if (IsSupported(AVX)) {
+ // AVX supports unaligned memory operands, so Use here is okay.
Emit(kAVXS128Select, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
g.Use(node->InputAt(1)), operand2);
} else {
@@ -2351,7 +2362,13 @@ void InstructionSelector::VisitS128AndNot(Node* node) {
#define VISIT_SIMD_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
- VisitRO(this, node, kIA32##Type##Splat); \
+ Int32Matcher int32_matcher(node->InputAt(0)); \
+ if (int32_matcher.Is(0)) { \
+ IA32OperandGenerator g(this); \
+ Emit(kIA32S128Zero, g.DefineAsRegister(node)); \
+ } else { \
+ VisitRO(this, node, kIA32##Type##Splat); \
+ } \
}
SIMD_INT_TYPES(VISIT_SIMD_SPLAT)
#undef VISIT_SIMD_SPLAT
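This macro change (like the VisitI64x2SplatI32Pair change above) is a constant-folding peephole: splatting an immediate 0 yields the all-zero vector, and kIA32S128Zero can materialize that with a register-clearing idiom (pxor xmm, xmm) with no dependence on the input. The identity being exploited, as a trivial check:

#include <array>
#include <cassert>
#include <cstdint>

std::array<int32_t, 4> SplatI32x4(int32_t v) { return {v, v, v, v}; }

int main() {
  // Splat of 0 equals the zero vector, so no general splat sequence is
  // needed.
  assert((SplatI32x4(0) == std::array<int32_t, 4>{0, 0, 0, 0}));
}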
@@ -2431,11 +2448,20 @@ SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
#undef VISIT_SIMD_UNOP
#undef SIMD_UNOP_LIST
+// TODO(v8:9198): SSE instructions that read 16 bytes from memory require the
+// operand to be 16-byte aligned. AVX instructions relax this requirement, but
+// might have reduced performance if the access crosses a cache line. But
+// since we have a limited number of xmm registers, this might be okay to
+// alleviate register pressure.
#define VISIT_SIMD_UNOP_PREFIX(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
IA32OperandGenerator g(this); \
- InstructionCode opcode = IsSupported(AVX) ? kAVX##Opcode : kSSE##Opcode; \
- Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
+ if (IsSupported(AVX)) { \
+ Emit(kAVX##Opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
+ } else { \
+ Emit(kSSE##Opcode, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0))); \
+ } \
}
SIMD_UNOP_PREFIX_LIST(VISIT_SIMD_UNOP_PREFIX)
#undef VISIT_SIMD_UNOP_PREFIX
@@ -2479,11 +2505,15 @@ SIMD_BINOP_UNIFIED_SSE_AVX_LIST(VISIT_SIMD_BINOP_UNIFIED_SSE_AVX)
#undef VISIT_SIMD_BINOP_UNIFIED_SSE_AVX
#undef SIMD_BINOP_UNIFIED_SSE_AVX_LIST
+// TODO(v8:9198): SSE requires operand1 to be a register because we cannot
+// guarantee 16-byte memory alignment yet. For AVX, memory operands are fine,
+// but can have performance issues if not aligned to 16/32 bytes (based on
+// load size); see SDM Vol 1, chapter 14.9.
void VisitPack(InstructionSelector* selector, Node* node, ArchOpcode avx_opcode,
ArchOpcode sse_opcode) {
IA32OperandGenerator g(selector);
InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- InstructionOperand operand1 = g.Use(node->InputAt(1));
+ InstructionOperand operand1 = g.UseRegister(node->InputAt(1));
if (selector->IsSupported(AVX)) {
selector->Emit(avx_opcode, g.DefineSameAsFirst(node), operand0, operand1);
} else {
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 8772a78df0..f9e68cea57 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -63,104 +63,108 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
// Target-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define COMMON_ARCH_OPCODE_LIST(V) \
- /* Tail call opcodes are grouped together to make IsTailCall fast */ \
- V(ArchTailCallCodeObjectFromJSFunction) \
- V(ArchTailCallCodeObject) \
- V(ArchTailCallAddress) \
- V(ArchTailCallWasm) \
- /* Update IsTailCall if further TailCall opcodes are added */ \
- \
- V(ArchCallCodeObject) \
- V(ArchCallJSFunction) \
- V(ArchPrepareCallCFunction) \
- V(ArchSaveCallerRegisters) \
- V(ArchRestoreCallerRegisters) \
- V(ArchCallCFunction) \
- V(ArchPrepareTailCall) \
- V(ArchCallWasmFunction) \
- V(ArchCallBuiltinPointer) \
- V(ArchJmp) \
- V(ArchBinarySearchSwitch) \
- V(ArchTableSwitch) \
- V(ArchNop) \
- V(ArchAbortCSAAssert) \
- V(ArchDebugBreak) \
- V(ArchComment) \
- V(ArchThrowTerminator) \
- V(ArchDeoptimize) \
- V(ArchRet) \
- V(ArchFramePointer) \
- V(ArchParentFramePointer) \
- V(ArchTruncateDoubleToI) \
- V(ArchStoreWithWriteBarrier) \
- V(ArchStackSlot) \
- V(ArchWordPoisonOnSpeculation) \
- V(ArchStackPointerGreaterThan) \
- V(ArchStackCheckOffset) \
- V(Word32AtomicLoadInt8) \
- V(Word32AtomicLoadUint8) \
- V(Word32AtomicLoadInt16) \
- V(Word32AtomicLoadUint16) \
- V(Word32AtomicLoadWord32) \
- V(Word32AtomicStoreWord8) \
- V(Word32AtomicStoreWord16) \
- V(Word32AtomicStoreWord32) \
- V(Word32AtomicExchangeInt8) \
- V(Word32AtomicExchangeUint8) \
- V(Word32AtomicExchangeInt16) \
- V(Word32AtomicExchangeUint16) \
- V(Word32AtomicExchangeWord32) \
- V(Word32AtomicCompareExchangeInt8) \
- V(Word32AtomicCompareExchangeUint8) \
- V(Word32AtomicCompareExchangeInt16) \
- V(Word32AtomicCompareExchangeUint16) \
- V(Word32AtomicCompareExchangeWord32) \
- V(Word32AtomicAddInt8) \
- V(Word32AtomicAddUint8) \
- V(Word32AtomicAddInt16) \
- V(Word32AtomicAddUint16) \
- V(Word32AtomicAddWord32) \
- V(Word32AtomicSubInt8) \
- V(Word32AtomicSubUint8) \
- V(Word32AtomicSubInt16) \
- V(Word32AtomicSubUint16) \
- V(Word32AtomicSubWord32) \
- V(Word32AtomicAndInt8) \
- V(Word32AtomicAndUint8) \
- V(Word32AtomicAndInt16) \
- V(Word32AtomicAndUint16) \
- V(Word32AtomicAndWord32) \
- V(Word32AtomicOrInt8) \
- V(Word32AtomicOrUint8) \
- V(Word32AtomicOrInt16) \
- V(Word32AtomicOrUint16) \
- V(Word32AtomicOrWord32) \
- V(Word32AtomicXorInt8) \
- V(Word32AtomicXorUint8) \
- V(Word32AtomicXorInt16) \
- V(Word32AtomicXorUint16) \
- V(Word32AtomicXorWord32) \
- V(Ieee754Float64Acos) \
- V(Ieee754Float64Acosh) \
- V(Ieee754Float64Asin) \
- V(Ieee754Float64Asinh) \
- V(Ieee754Float64Atan) \
- V(Ieee754Float64Atanh) \
- V(Ieee754Float64Atan2) \
- V(Ieee754Float64Cbrt) \
- V(Ieee754Float64Cos) \
- V(Ieee754Float64Cosh) \
- V(Ieee754Float64Exp) \
- V(Ieee754Float64Expm1) \
- V(Ieee754Float64Log) \
- V(Ieee754Float64Log1p) \
- V(Ieee754Float64Log10) \
- V(Ieee754Float64Log2) \
- V(Ieee754Float64Pow) \
- V(Ieee754Float64Sin) \
- V(Ieee754Float64Sinh) \
- V(Ieee754Float64Tan) \
+#define COMMON_ARCH_OPCODE_LIST(V) \
+ /* Tail call opcodes are grouped together to make IsTailCall fast */ \
+ /* and Arch call opcodes are grouped together to make */ \
+ /* IsCallWithDescriptorFlags fast */ \
+ V(ArchTailCallCodeObjectFromJSFunction) \
+ V(ArchTailCallCodeObject) \
+ V(ArchTailCallAddress) \
+ V(ArchTailCallWasm) \
+ /* Update IsTailCall if further TailCall opcodes are added */ \
+ \
+ V(ArchCallCodeObject) \
+ V(ArchCallJSFunction) \
+ V(ArchCallWasmFunction) \
+ V(ArchCallBuiltinPointer) \
+ /* Update IsCallWithDescriptorFlags if further Call opcodes are added */ \
+ \
+ V(ArchPrepareCallCFunction) \
+ V(ArchSaveCallerRegisters) \
+ V(ArchRestoreCallerRegisters) \
+ V(ArchCallCFunction) \
+ V(ArchPrepareTailCall) \
+ V(ArchJmp) \
+ V(ArchBinarySearchSwitch) \
+ V(ArchTableSwitch) \
+ V(ArchNop) \
+ V(ArchAbortCSAAssert) \
+ V(ArchDebugBreak) \
+ V(ArchComment) \
+ V(ArchThrowTerminator) \
+ V(ArchDeoptimize) \
+ V(ArchRet) \
+ V(ArchFramePointer) \
+ V(ArchParentFramePointer) \
+ V(ArchTruncateDoubleToI) \
+ V(ArchStoreWithWriteBarrier) \
+ V(ArchStackSlot) \
+ V(ArchWordPoisonOnSpeculation) \
+ V(ArchStackPointerGreaterThan) \
+ V(ArchStackCheckOffset) \
+ V(Word32AtomicLoadInt8) \
+ V(Word32AtomicLoadUint8) \
+ V(Word32AtomicLoadInt16) \
+ V(Word32AtomicLoadUint16) \
+ V(Word32AtomicLoadWord32) \
+ V(Word32AtomicStoreWord8) \
+ V(Word32AtomicStoreWord16) \
+ V(Word32AtomicStoreWord32) \
+ V(Word32AtomicExchangeInt8) \
+ V(Word32AtomicExchangeUint8) \
+ V(Word32AtomicExchangeInt16) \
+ V(Word32AtomicExchangeUint16) \
+ V(Word32AtomicExchangeWord32) \
+ V(Word32AtomicCompareExchangeInt8) \
+ V(Word32AtomicCompareExchangeUint8) \
+ V(Word32AtomicCompareExchangeInt16) \
+ V(Word32AtomicCompareExchangeUint16) \
+ V(Word32AtomicCompareExchangeWord32) \
+ V(Word32AtomicAddInt8) \
+ V(Word32AtomicAddUint8) \
+ V(Word32AtomicAddInt16) \
+ V(Word32AtomicAddUint16) \
+ V(Word32AtomicAddWord32) \
+ V(Word32AtomicSubInt8) \
+ V(Word32AtomicSubUint8) \
+ V(Word32AtomicSubInt16) \
+ V(Word32AtomicSubUint16) \
+ V(Word32AtomicSubWord32) \
+ V(Word32AtomicAndInt8) \
+ V(Word32AtomicAndUint8) \
+ V(Word32AtomicAndInt16) \
+ V(Word32AtomicAndUint16) \
+ V(Word32AtomicAndWord32) \
+ V(Word32AtomicOrInt8) \
+ V(Word32AtomicOrUint8) \
+ V(Word32AtomicOrInt16) \
+ V(Word32AtomicOrUint16) \
+ V(Word32AtomicOrWord32) \
+ V(Word32AtomicXorInt8) \
+ V(Word32AtomicXorUint8) \
+ V(Word32AtomicXorInt16) \
+ V(Word32AtomicXorUint16) \
+ V(Word32AtomicXorWord32) \
+ V(Ieee754Float64Acos) \
+ V(Ieee754Float64Acosh) \
+ V(Ieee754Float64Asin) \
+ V(Ieee754Float64Asinh) \
+ V(Ieee754Float64Atan) \
+ V(Ieee754Float64Atanh) \
+ V(Ieee754Float64Atan2) \
+ V(Ieee754Float64Cbrt) \
+ V(Ieee754Float64Cos) \
+ V(Ieee754Float64Cosh) \
+ V(Ieee754Float64Exp) \
+ V(Ieee754Float64Expm1) \
+ V(Ieee754Float64Log) \
+ V(Ieee754Float64Log1p) \
+ V(Ieee754Float64Log10) \
+ V(Ieee754Float64Log2) \
+ V(Ieee754Float64Pow) \
+ V(Ieee754Float64Sin) \
+ V(Ieee754Float64Sinh) \
+ V(Ieee754Float64Tan) \
V(Ieee754Float64Tanh)
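The reshuffle is purely about enum adjacency: with call opcodes contiguous, predicates like IsTailCall and IsCallWithDescriptorFlags compile down to a single range comparison instead of a switch over every call opcode. A sketch with illustrative enumerators (exact range bounds here are an assumption based on the list comment):

enum ArchOpcode {
  // Tail calls first, then descriptor-flag calls, matching the list order.
  kArchTailCallCodeObjectFromJSFunction,
  kArchTailCallCodeObject,
  kArchTailCallAddress,
  kArchTailCallWasm,
  kArchCallCodeObject,
  kArchCallJSFunction,
  kArchCallWasmFunction,
  kArchCallBuiltinPointer,
  kArchJmp,  // first non-call opcode
};

constexpr bool IsTailCall(ArchOpcode op) {
  return op >= kArchTailCallCodeObjectFromJSFunction &&
         op <= kArchTailCallWasm;
}

constexpr bool IsCallWithDescriptorFlags(ArchOpcode op) {
  return op >= kArchTailCallCodeObjectFromJSFunction &&
         op <= kArchCallBuiltinPointer;
}

static_assert(IsTailCall(kArchTailCallWasm), "range check");
static_assert(!IsTailCall(kArchCallCodeObject), "range check");
static_assert(IsCallWithDescriptorFlags(kArchCallJSFunction), "range check");

int main() { return 0; }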
#define ARCH_OPCODE_LIST(V) \
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 1c14832bbf..b62cc83532 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -11,6 +11,7 @@
#include "src/codegen/tick-counter.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
@@ -28,9 +29,9 @@ InstructionSelector::InstructionSelector(
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
- size_t* max_unoptimized_frame_height, size_t* max_pushed_argument_count,
- SourcePositionMode source_position_mode, Features features,
- EnableScheduling enable_scheduling,
+ JSHeapBroker* broker, size_t* max_unoptimized_frame_height,
+ size_t* max_pushed_argument_count, SourcePositionMode source_position_mode,
+ Features features, EnableScheduling enable_scheduling,
EnableRootsRelativeAddressing enable_roots_relative_addressing,
PoisoningMitigationLevel poisoning_level, EnableTraceTurboJson trace_turbo)
: zone_(zone),
@@ -61,6 +62,7 @@ InstructionSelector::InstructionSelector(
instr_origins_(sequence->zone()),
trace_turbo_(trace_turbo),
tick_counter_(tick_counter),
+ broker_(broker),
max_unoptimized_frame_height_(max_unoptimized_frame_height),
max_pushed_argument_count_(max_pushed_argument_count)
#if V8_TARGET_ARCH_64_BIT
@@ -604,9 +606,8 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
values->PushArgumentsLength();
return 0;
}
- case IrOpcode::kObjectState: {
+ case IrOpcode::kObjectState:
UNREACHABLE();
- }
case IrOpcode::kTypedObjectState:
case IrOpcode::kObjectId: {
size_t id = deduplicator->GetObjectId(input);
@@ -1129,6 +1130,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
node->opcode() == IrOpcode::kCall ||
node->opcode() == IrOpcode::kProtectedLoad ||
node->opcode() == IrOpcode::kProtectedStore ||
+ node->opcode() == IrOpcode::kLoadTransform ||
#define ADD_EFFECT_FOR_ATOMIC_OP(Opcode) \
node->opcode() == IrOpcode::k##Opcode ||
MACHINE_ATOMIC_OP_LIST(ADD_EFFECT_FOR_ATOMIC_OP)
@@ -1330,6 +1332,8 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kFinishRegion:
return MarkAsTagged(node), VisitFinishRegion(node);
case IrOpcode::kParameter: {
+ // Parameters should always be scheduled to the first block.
+ DCHECK_EQ(schedule()->block(node)->rpo_number(), 0);
MachineType type =
linkage()->GetParameterType(ParameterIndexOf(node->op()));
MarkAsRepresentation(type.representation(), node);
@@ -1411,6 +1415,10 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
return VisitLoadTransform(node);
}
+ case IrOpcode::kLoadLane: {
+ MarkAsRepresentation(MachineRepresentation::kSimd128, node);
+ return VisitLoadLane(node);
+ }
case IrOpcode::kPoisonedLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
@@ -1420,6 +1428,10 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitStore(node);
case IrOpcode::kProtectedStore:
return VisitProtectedStore(node);
+ case IrOpcode::kStoreLane: {
+ MarkAsRepresentation(MachineRepresentation::kSimd128, node);
+ return VisitStoreLane(node);
+ }
case IrOpcode::kWord32And:
return MarkAsWord32(node), VisitWord32And(node);
case IrOpcode::kWord32Or:
@@ -1981,6 +1993,16 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI64x2ReplaceLaneI32Pair(node);
case IrOpcode::kI64x2Neg:
return MarkAsSimd128(node), VisitI64x2Neg(node);
+ case IrOpcode::kI64x2SConvertI32x4Low:
+ return MarkAsSimd128(node), VisitI64x2SConvertI32x4Low(node);
+ case IrOpcode::kI64x2SConvertI32x4High:
+ return MarkAsSimd128(node), VisitI64x2SConvertI32x4High(node);
+ case IrOpcode::kI64x2UConvertI32x4Low:
+ return MarkAsSimd128(node), VisitI64x2UConvertI32x4Low(node);
+ case IrOpcode::kI64x2UConvertI32x4High:
+ return MarkAsSimd128(node), VisitI64x2UConvertI32x4High(node);
+ case IrOpcode::kI64x2BitMask:
+ return MarkAsWord32(node), VisitI64x2BitMask(node);
case IrOpcode::kI64x2Shl:
return MarkAsSimd128(node), VisitI64x2Shl(node);
case IrOpcode::kI64x2ShrS:
@@ -1991,28 +2013,20 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI64x2Sub(node);
case IrOpcode::kI64x2Mul:
return MarkAsSimd128(node), VisitI64x2Mul(node);
- case IrOpcode::kI64x2MinS:
- return MarkAsSimd128(node), VisitI64x2MinS(node);
- case IrOpcode::kI64x2MaxS:
- return MarkAsSimd128(node), VisitI64x2MaxS(node);
case IrOpcode::kI64x2Eq:
return MarkAsSimd128(node), VisitI64x2Eq(node);
- case IrOpcode::kI64x2Ne:
- return MarkAsSimd128(node), VisitI64x2Ne(node);
- case IrOpcode::kI64x2GtS:
- return MarkAsSimd128(node), VisitI64x2GtS(node);
- case IrOpcode::kI64x2GeS:
- return MarkAsSimd128(node), VisitI64x2GeS(node);
case IrOpcode::kI64x2ShrU:
return MarkAsSimd128(node), VisitI64x2ShrU(node);
- case IrOpcode::kI64x2MinU:
- return MarkAsSimd128(node), VisitI64x2MinU(node);
- case IrOpcode::kI64x2MaxU:
- return MarkAsSimd128(node), VisitI64x2MaxU(node);
- case IrOpcode::kI64x2GtU:
- return MarkAsSimd128(node), VisitI64x2GtU(node);
- case IrOpcode::kI64x2GeU:
- return MarkAsSimd128(node), VisitI64x2GeU(node);
+ case IrOpcode::kI64x2ExtMulLowI32x4S:
+ return MarkAsSimd128(node), VisitI64x2ExtMulLowI32x4S(node);
+ case IrOpcode::kI64x2ExtMulHighI32x4S:
+ return MarkAsSimd128(node), VisitI64x2ExtMulHighI32x4S(node);
+ case IrOpcode::kI64x2ExtMulLowI32x4U:
+ return MarkAsSimd128(node), VisitI64x2ExtMulLowI32x4U(node);
+ case IrOpcode::kI64x2ExtMulHighI32x4U:
+ return MarkAsSimd128(node), VisitI64x2ExtMulHighI32x4U(node);
+ case IrOpcode::kI64x2SignSelect:
+ return MarkAsSimd128(node), VisitI64x2SignSelect(node);
case IrOpcode::kI32x4Splat:
return MarkAsSimd128(node), VisitI32x4Splat(node);
case IrOpcode::kI32x4ExtractLane:
@@ -2073,6 +2087,20 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitI32x4BitMask(node);
case IrOpcode::kI32x4DotI16x8S:
return MarkAsSimd128(node), VisitI32x4DotI16x8S(node);
+ case IrOpcode::kI32x4ExtMulLowI16x8S:
+ return MarkAsSimd128(node), VisitI32x4ExtMulLowI16x8S(node);
+ case IrOpcode::kI32x4ExtMulHighI16x8S:
+ return MarkAsSimd128(node), VisitI32x4ExtMulHighI16x8S(node);
+ case IrOpcode::kI32x4ExtMulLowI16x8U:
+ return MarkAsSimd128(node), VisitI32x4ExtMulLowI16x8U(node);
+ case IrOpcode::kI32x4ExtMulHighI16x8U:
+ return MarkAsSimd128(node), VisitI32x4ExtMulHighI16x8U(node);
+ case IrOpcode::kI32x4SignSelect:
+ return MarkAsSimd128(node), VisitI32x4SignSelect(node);
+ case IrOpcode::kI32x4ExtAddPairwiseI16x8S:
+ return MarkAsSimd128(node), VisitI32x4ExtAddPairwiseI16x8S(node);
+ case IrOpcode::kI32x4ExtAddPairwiseI16x8U:
+ return MarkAsSimd128(node), VisitI32x4ExtAddPairwiseI16x8U(node);
case IrOpcode::kI16x8Splat:
return MarkAsSimd128(node), VisitI16x8Splat(node);
case IrOpcode::kI16x8ExtractLaneU:
@@ -2095,14 +2123,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI16x8SConvertI32x4(node);
case IrOpcode::kI16x8Add:
return MarkAsSimd128(node), VisitI16x8Add(node);
- case IrOpcode::kI16x8AddSaturateS:
- return MarkAsSimd128(node), VisitI16x8AddSaturateS(node);
+ case IrOpcode::kI16x8AddSatS:
+ return MarkAsSimd128(node), VisitI16x8AddSatS(node);
case IrOpcode::kI16x8AddHoriz:
return MarkAsSimd128(node), VisitI16x8AddHoriz(node);
case IrOpcode::kI16x8Sub:
return MarkAsSimd128(node), VisitI16x8Sub(node);
- case IrOpcode::kI16x8SubSaturateS:
- return MarkAsSimd128(node), VisitI16x8SubSaturateS(node);
+ case IrOpcode::kI16x8SubSatS:
+ return MarkAsSimd128(node), VisitI16x8SubSatS(node);
case IrOpcode::kI16x8Mul:
return MarkAsSimd128(node), VisitI16x8Mul(node);
case IrOpcode::kI16x8MinS:
@@ -2125,10 +2153,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI16x8ShrU(node);
case IrOpcode::kI16x8UConvertI32x4:
return MarkAsSimd128(node), VisitI16x8UConvertI32x4(node);
- case IrOpcode::kI16x8AddSaturateU:
- return MarkAsSimd128(node), VisitI16x8AddSaturateU(node);
- case IrOpcode::kI16x8SubSaturateU:
- return MarkAsSimd128(node), VisitI16x8SubSaturateU(node);
+ case IrOpcode::kI16x8AddSatU:
+ return MarkAsSimd128(node), VisitI16x8AddSatU(node);
+ case IrOpcode::kI16x8SubSatU:
+ return MarkAsSimd128(node), VisitI16x8SubSatU(node);
case IrOpcode::kI16x8MinU:
return MarkAsSimd128(node), VisitI16x8MinU(node);
case IrOpcode::kI16x8MaxU:
@@ -2139,10 +2167,26 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI16x8GeU(node);
case IrOpcode::kI16x8RoundingAverageU:
return MarkAsSimd128(node), VisitI16x8RoundingAverageU(node);
+ case IrOpcode::kI16x8Q15MulRSatS:
+ return MarkAsSimd128(node), VisitI16x8Q15MulRSatS(node);
case IrOpcode::kI16x8Abs:
return MarkAsSimd128(node), VisitI16x8Abs(node);
case IrOpcode::kI16x8BitMask:
return MarkAsWord32(node), VisitI16x8BitMask(node);
+ case IrOpcode::kI16x8ExtMulLowI8x16S:
+ return MarkAsSimd128(node), VisitI16x8ExtMulLowI8x16S(node);
+ case IrOpcode::kI16x8ExtMulHighI8x16S:
+ return MarkAsSimd128(node), VisitI16x8ExtMulHighI8x16S(node);
+ case IrOpcode::kI16x8ExtMulLowI8x16U:
+ return MarkAsSimd128(node), VisitI16x8ExtMulLowI8x16U(node);
+ case IrOpcode::kI16x8ExtMulHighI8x16U:
+ return MarkAsSimd128(node), VisitI16x8ExtMulHighI8x16U(node);
+ case IrOpcode::kI16x8SignSelect:
+ return MarkAsSimd128(node), VisitI16x8SignSelect(node);
+ case IrOpcode::kI16x8ExtAddPairwiseI8x16S:
+ return MarkAsSimd128(node), VisitI16x8ExtAddPairwiseI8x16S(node);
+ case IrOpcode::kI16x8ExtAddPairwiseI8x16U:
+ return MarkAsSimd128(node), VisitI16x8ExtAddPairwiseI8x16U(node);
case IrOpcode::kI8x16Splat:
return MarkAsSimd128(node), VisitI8x16Splat(node);
case IrOpcode::kI8x16ExtractLaneU:
@@ -2161,12 +2205,12 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI8x16SConvertI16x8(node);
case IrOpcode::kI8x16Add:
return MarkAsSimd128(node), VisitI8x16Add(node);
- case IrOpcode::kI8x16AddSaturateS:
- return MarkAsSimd128(node), VisitI8x16AddSaturateS(node);
+ case IrOpcode::kI8x16AddSatS:
+ return MarkAsSimd128(node), VisitI8x16AddSatS(node);
case IrOpcode::kI8x16Sub:
return MarkAsSimd128(node), VisitI8x16Sub(node);
- case IrOpcode::kI8x16SubSaturateS:
- return MarkAsSimd128(node), VisitI8x16SubSaturateS(node);
+ case IrOpcode::kI8x16SubSatS:
+ return MarkAsSimd128(node), VisitI8x16SubSatS(node);
case IrOpcode::kI8x16Mul:
return MarkAsSimd128(node), VisitI8x16Mul(node);
case IrOpcode::kI8x16MinS:
@@ -2185,10 +2229,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI8x16ShrU(node);
case IrOpcode::kI8x16UConvertI16x8:
return MarkAsSimd128(node), VisitI8x16UConvertI16x8(node);
- case IrOpcode::kI8x16AddSaturateU:
- return MarkAsSimd128(node), VisitI8x16AddSaturateU(node);
- case IrOpcode::kI8x16SubSaturateU:
- return MarkAsSimd128(node), VisitI8x16SubSaturateU(node);
+ case IrOpcode::kI8x16AddSatU:
+ return MarkAsSimd128(node), VisitI8x16AddSatU(node);
+ case IrOpcode::kI8x16SubSatU:
+ return MarkAsSimd128(node), VisitI8x16SubSatU(node);
case IrOpcode::kI8x16MinU:
return MarkAsSimd128(node), VisitI8x16MinU(node);
case IrOpcode::kI8x16MaxU:
@@ -2199,10 +2243,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI8x16GeU(node);
case IrOpcode::kI8x16RoundingAverageU:
return MarkAsSimd128(node), VisitI8x16RoundingAverageU(node);
+ case IrOpcode::kI8x16Popcnt:
+ return MarkAsSimd128(node), VisitI8x16Popcnt(node);
case IrOpcode::kI8x16Abs:
return MarkAsSimd128(node), VisitI8x16Abs(node);
case IrOpcode::kI8x16BitMask:
return MarkAsWord32(node), VisitI8x16BitMask(node);
+ case IrOpcode::kI8x16SignSelect:
+ return MarkAsSimd128(node), VisitI8x16SignSelect(node);
case IrOpcode::kS128Const:
return MarkAsSimd128(node), VisitS128Const(node);
case IrOpcode::kS128Zero:
@@ -2223,10 +2271,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI8x16Swizzle(node);
case IrOpcode::kI8x16Shuffle:
return MarkAsSimd128(node), VisitI8x16Shuffle(node);
- case IrOpcode::kV64x2AnyTrue:
- return MarkAsWord32(node), VisitV64x2AnyTrue(node);
- case IrOpcode::kV64x2AllTrue:
- return MarkAsWord32(node), VisitV64x2AllTrue(node);
case IrOpcode::kV32x4AnyTrue:
return MarkAsWord32(node), VisitV32x4AnyTrue(node);
case IrOpcode::kV32x4AllTrue:
@@ -2668,30 +2712,104 @@ void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2Ne(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2GtS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2GeS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2GtU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2GeU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitV64x2AnyTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitV64x2AllTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfms(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM64
-void InstructionSelector::VisitI64x2MinS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2MaxS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2MinU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2MaxU(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \
- !V8_TARGET_ARCH_ARM
-// TODO(v8:10583) Prototype i32x4.dot_i16x8_s
-void InstructionSelector::VisitI32x4DotI16x8S(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
- // && !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM64
+// TODO(v8:10971) Prototype i16x8.q15mulr_sat_s
+void InstructionSelector::VisitI16x8Q15MulRSatS(Node* node) { UNIMPLEMENTED(); }
+
+// TODO(v8:10972) Prototype i64x2 widen i32x4.
+void InstructionSelector::VisitI64x2SConvertI32x4Low(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI64x2SConvertI32x4High(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI64x2UConvertI32x4Low(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI64x2UConvertI32x4High(Node* node) {
+ UNIMPLEMENTED();
+}
+
+// TODO(v8:11002) Prototype i8x16.popcnt.
+void InstructionSelector::VisitI8x16Popcnt(Node* node) { UNIMPLEMENTED(); }
+
+// TODO(v8:11008) Prototype extended multiplication.
+void InstructionSelector::VisitI64x2ExtMulLowI32x4S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI64x2ExtMulHighI32x4S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI64x2ExtMulLowI32x4U(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI64x2ExtMulHighI32x4U(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI32x4ExtMulLowI16x8S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI32x4ExtMulHighI16x8S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI32x4ExtMulLowI16x8U(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI32x4ExtMulHighI16x8U(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI16x8ExtMulLowI8x16S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI16x8ExtMulHighI8x16S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI16x8ExtMulLowI8x16U(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI16x8ExtMulHighI8x16U(Node* node) {
+ UNIMPLEMENTED();
+}
+
+// TODO(v8:11086) Prototype extended pairwise add.
+void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8U(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16S(Node* node) {
+ UNIMPLEMENTED();
+}
+void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
+ UNIMPLEMENTED();
+}
+#endif // !V8_TARGET_ARCH_ARM64
+
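The stubs above only reserve selector entry points for instructions still being prototyped; their intended behavior follows the Wasm SIMD proposal. A scalar sketch of two of them, assuming the proposal's semantics (names and signatures are illustrative, not V8's):

#include <array>
#include <cstdint>

// i64x2.extmul_low_i32x4_s: widen the two low i32 lanes to i64, then
// multiply, so the product cannot overflow.
std::array<int64_t, 2> I64x2ExtMulLowI32x4S(const std::array<int32_t, 4>& a,
                                            const std::array<int32_t, 4>& b) {
  return {static_cast<int64_t>(a[0]) * b[0],
          static_cast<int64_t>(a[1]) * b[1]};
}

// i16x8.extadd_pairwise_i8x16_s: sum adjacent pairs of i8 lanes into
// widened i16 lanes.
std::array<int16_t, 8> I16x8ExtAddPairwiseI8x16S(
    const std::array<int8_t, 16>& a) {
  std::array<int16_t, 8> r{};
  for (int i = 0; i < 8; ++i) {
    r[i] = static_cast<int16_t>(a[2 * i]) + a[2 * i + 1];
  }
  return r;
}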
+#if !V8_TARGET_ARCH_X64
+// TODO(v8:10975): Prototyping load lane and store lane.
+void InstructionSelector::VisitLoadLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitStoreLane(Node* node) { UNIMPLEMENTED(); }
+
+// TODO(v8:10997) Prototype i64x2.bitmask.
+void InstructionSelector::VisitI64x2BitMask(Node* node) { UNIMPLEMENTED(); }
+
+// TODO(v8:10983) Prototyping sign select.
+void InstructionSelector::VisitI8x16SignSelect(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8SignSelect(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4SignSelect(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2SignSelect(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
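The load-lane and store-lane stubs directly above lower single-lane memory accesses. A scalar model of what they will eventually select code for, assuming the usual Wasm SIMD semantics (types and helper names are illustrative, not V8's):

#include <array>
#include <cstdint>
#include <cstring>

using Simd128 = std::array<uint8_t, 16>;

template <typename T>
Simd128 LoadLane(const void* mem, Simd128 v, int lane) {
  // Replace one T-sized lane of |v| with a (possibly unaligned) load from
  // |mem|; all other lanes pass through unchanged.
  std::memcpy(v.data() + lane * sizeof(T), mem, sizeof(T));
  return v;
}

template <typename T>
void StoreLane(void* mem, const Simd128& v, int lane) {
  // Write a single T-sized lane of |v| to memory.
  std::memcpy(mem, v.data() + lane * sizeof(T), sizeof(T));
}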
@@ -2720,6 +2838,7 @@ constexpr InstructionCode EncodeCallDescriptorFlags(
// Note: Not all bits of `flags` are preserved.
STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode ==
MiscField::kSize);
+ CONSTEXPR_DCHECK(Instruction::IsCallWithDescriptorFlags(opcode));
return opcode | MiscField::encode(flags & MiscField::kMax);
}
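For context, the function above packs the low bits of the call descriptor's flags into the instruction word's MiscField, where Instruction::HasCallDescriptorFlag (added further down in instruction.h) reads them back. A minimal free-standing sketch of that round trip; the field widths here are assumptions for illustration, not V8's actual layout:

#include <cassert>
#include <cstdint>

using InstructionCode = uint32_t;
constexpr int kOpcodeBits = 9;   // assumed width of the opcode field
constexpr int kFlagsBits = 10;   // mirrors kFlagsBitsEncodedInInstructionCode
constexpr uint32_t kFlagsMask = (1u << kFlagsBits) - 1;

constexpr InstructionCode Encode(InstructionCode opcode, uint32_t flags) {
  // Flags beyond the low kFlagsBits are dropped, as the note above says.
  return opcode | ((flags & kFlagsMask) << kOpcodeBits);
}

constexpr bool HasFlag(InstructionCode code, uint32_t flag) {
  return (((code >> kOpcodeBits) & kFlagsMask) & flag) != 0;
}

int main() {
  InstructionCode c = Encode(/*opcode=*/7, /*flags=*/0b0101);
  assert(HasFlag(c, 0b0100) && !HasFlag(c, 0b0001) && !HasFlag(c, 0b0010));
}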
@@ -2838,7 +2957,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
}
// Select the appropriate opcode based on the call type.
- InstructionCode opcode = kArchNop;
+ InstructionCode opcode;
switch (call_descriptor->kind()) {
case CallDescriptor::kCallAddress: {
int misc_field = static_cast<int>(call_descriptor->ParameterCount());
@@ -2921,7 +3040,6 @@ void InstructionSelector::VisitTailCall(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
int temps_count = GetTempsCountForTailCallFromJSFunction();
for (int i = 0; i < temps_count; i++) {
@@ -2940,7 +3058,6 @@ void InstructionSelector::VisitTailCall(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
}
opcode = EncodeCallDescriptorFlags(opcode, call_descriptor->flags());
@@ -3072,6 +3189,7 @@ void InstructionSelector::VisitUnreachable(Node* node) {
void InstructionSelector::VisitStaticAssert(Node* node) {
Node* asserted = node->InputAt(0);
+ UnparkedScopeIfNeeded scope(broker_);
AllowHandleDereference allow_handle_dereference;
asserted->Print(4);
FATAL(
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index 6452e3ec4c..fc16814d45 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -272,7 +272,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
- size_t* max_unoptimized_frame_height, size_t* max_pushed_argument_count,
+ JSHeapBroker* broker, size_t* max_unoptimized_frame_height,
+ size_t* max_pushed_argument_count,
SourcePositionMode source_position_mode = kCallSourcePositions,
Features features = SupportedFeatures(),
EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
@@ -708,6 +709,9 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
ZoneVector<std::pair<int, int>> instr_origins_;
EnableTraceTurboJson trace_turbo_;
TickCounter* const tick_counter_;
+  // The broker is only used to unpark the LocalHeap for diagnostic printing
+  // of failed StaticAsserts.
+ JSHeapBroker* const broker_;
// Store the maximal unoptimized frame height and a maximal number of pushed
// arguments (for calls). Later used to apply an offset to stack checks.
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 0419928792..55fce0aeeb 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -705,6 +705,9 @@ class V8_EXPORT_PRIVATE MoveOperands final
DCHECK(!source.IsInvalid() && !destination.IsInvalid());
}
+ MoveOperands(const MoveOperands&) = delete;
+ MoveOperands& operator=(const MoveOperands&) = delete;
+
const InstructionOperand& source() const { return source_; }
InstructionOperand& source() { return source_; }
void set_source(const InstructionOperand& operand) { source_ = operand; }
@@ -742,8 +745,6 @@ class V8_EXPORT_PRIVATE MoveOperands final
private:
InstructionOperand source_;
InstructionOperand destination_;
-
- DISALLOW_COPY_AND_ASSIGN(MoveOperands);
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, const MoveOperands&);
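The hunks in this file repeatedly replace the DISALLOW_COPY_AND_ASSIGN macro with explicitly deleted copy operations, a pure mechanical conversion with identical behavior. The pattern in isolation:

// Deleting the copy constructor and copy assignment in the public section
// makes the class non-copyable without any macro support.
class NonCopyable {
 public:
  NonCopyable() = default;
  NonCopyable(const NonCopyable&) = delete;
  NonCopyable& operator=(const NonCopyable&) = delete;
};

// NonCopyable a;
// NonCopyable b = a;  // error: use of deleted copy constructor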
@@ -753,6 +754,8 @@ class V8_EXPORT_PRIVATE ParallelMove final
public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit ParallelMove(Zone* zone) : ZoneVector<MoveOperands*>(zone) {}
+ ParallelMove(const ParallelMove&) = delete;
+ ParallelMove& operator=(const ParallelMove&) = delete;
MoveOperands* AddMove(const InstructionOperand& from,
const InstructionOperand& to) {
@@ -777,9 +780,6 @@ class V8_EXPORT_PRIVATE ParallelMove final
// to_eliminate must be Eliminated.
void PrepareInsertAfter(MoveOperands* move,
ZoneVector<MoveOperands*>* to_eliminate) const;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ParallelMove);
};
std::ostream& operator<<(std::ostream&, const ParallelMove&);
@@ -814,6 +814,9 @@ class InstructionBlock;
class V8_EXPORT_PRIVATE Instruction final {
public:
+ Instruction(const Instruction&) = delete;
+ Instruction& operator=(const Instruction&) = delete;
+
size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
const InstructionOperand* OutputAt(size_t i) const {
DCHECK_LT(i, OutputCount());
@@ -927,6 +930,23 @@ class V8_EXPORT_PRIVATE Instruction final {
return arch_opcode() == ArchOpcode::kArchThrowTerminator;
}
+ static constexpr bool IsCallWithDescriptorFlags(InstructionCode arch_opcode) {
+ return arch_opcode <= ArchOpcode::kArchCallBuiltinPointer;
+ }
+ bool IsCallWithDescriptorFlags() const {
+ return IsCallWithDescriptorFlags(arch_opcode());
+ }
+ bool HasCallDescriptorFlag(CallDescriptor::Flag flag) const {
+ DCHECK(IsCallWithDescriptorFlags());
+ STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode == 10);
+#ifdef DEBUG
+ static constexpr int kInstructionCodeFlagsMask =
+ ((1 << CallDescriptor::kFlagsBitsEncodedInInstructionCode) - 1);
+ DCHECK_EQ(static_cast<int>(flag) & kInstructionCodeFlagsMask, flag);
+#endif
+ return MiscField::decode(opcode()) & flag;
+ }
+
enum GapPosition {
START,
END,
@@ -990,8 +1010,6 @@ class V8_EXPORT_PRIVATE Instruction final {
ReferenceMap* reference_map_;
InstructionBlock* block_;
InstructionOperand operands_[1];
-
- DISALLOW_COPY_AND_ASSIGN(Instruction);
};
std::ostream& operator<<(std::ostream&, const Instruction&);
@@ -1514,6 +1532,8 @@ class V8_EXPORT_PRIVATE InstructionSequence final
const Schedule* schedule);
InstructionSequence(Isolate* isolate, Zone* zone,
InstructionBlocks* instruction_blocks);
+ InstructionSequence(const InstructionSequence&) = delete;
+ InstructionSequence& operator=(const InstructionSequence&) = delete;
int NextVirtualRegister();
int VirtualRegisterCount() const { return next_virtual_register_; }
@@ -1696,8 +1716,6 @@ class V8_EXPORT_PRIVATE InstructionSequence final
// Used at construction time
InstructionBlock* current_block_;
-
- DISALLOW_COPY_AND_ASSIGN(InstructionSequence);
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
diff --git a/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc b/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
index e033799cb9..43808526a8 100644
--- a/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
+++ b/deps/v8/src/compiler/backend/mid-tier-register-allocator.cc
@@ -23,6 +23,7 @@ namespace internal {
namespace compiler {
class RegisterState;
+class DeferredBlocksRegion;
// BlockState stores details associated with a particular basic block.
class BlockState final {
@@ -30,8 +31,10 @@ class BlockState final {
BlockState(int block_count, Zone* zone)
: general_registers_in_state_(nullptr),
double_registers_in_state_(nullptr),
+ deferred_blocks_region_(nullptr),
dominated_blocks_(block_count, zone),
- successors_phi_index_(-1) {}
+ successors_phi_index_(-1),
+ is_deferred_block_boundary_(false) {}
// Returns the RegisterState that applies to the input of this block. Can be
// |nullptr| if no registers of |kind| have been allocated up to this
@@ -51,14 +54,34 @@ class BlockState final {
successors_phi_index_ = index;
}
+  // If this block is deferred, this represents the region of deferred blocks
+ // that are directly reachable from this block.
+ DeferredBlocksRegion* deferred_blocks_region() const {
+ return deferred_blocks_region_;
+ }
+ void set_deferred_blocks_region(DeferredBlocksRegion* region) {
+ DCHECK_NULL(deferred_blocks_region_);
+ deferred_blocks_region_ = region;
+ }
+
+ // Returns true if this block represents either a transition from
+ // non-deferred to deferred or vice versa.
+ bool is_deferred_block_boundary() const {
+ return is_deferred_block_boundary_;
+ }
+ void MarkAsDeferredBlockBoundary() { is_deferred_block_boundary_ = true; }
+
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(BlockState);
private:
RegisterState* general_registers_in_state_;
RegisterState* double_registers_in_state_;
+ DeferredBlocksRegion* deferred_blocks_region_;
+
BitVector dominated_blocks_;
int successors_phi_index_;
+ bool is_deferred_block_boundary_;
};
RegisterState* BlockState::register_in_state(RegisterKind kind) {
@@ -145,8 +168,7 @@ const InstructionBlock* MidTierRegisterAllocationData::GetBlock(
}
const BitVector* MidTierRegisterAllocationData::GetBlocksDominatedBy(
- int instr_index) {
- const InstructionBlock* block = GetBlock(instr_index);
+ const InstructionBlock* block) {
return block_state(block->rpo_number()).dominated_blocks();
}
@@ -225,6 +247,32 @@ class Range {
int end_;
};
+// Represents a connected region of deferred basic blocks.
+class DeferredBlocksRegion final {
+ public:
+ explicit DeferredBlocksRegion(Zone* zone, int number_of_blocks)
+ : spilled_vregs_(zone), blocks_covered_(number_of_blocks, zone) {}
+
+ void AddBlock(RpoNumber block, MidTierRegisterAllocationData* data) {
+ DCHECK(data->GetBlock(block)->IsDeferred());
+ blocks_covered_.Add(block.ToInt());
+ data->block_state(block).set_deferred_blocks_region(this);
+ }
+
+  // Adds |vreg| to the set of virtual registers whose output may be deferred
+  // to a spill slot until entry to this deferred block region.
+ void DeferSpillOutputUntilEntry(int vreg) { spilled_vregs_.insert(vreg); }
+
+ ZoneSet<int>::iterator begin() const { return spilled_vregs_.begin(); }
+ ZoneSet<int>::iterator end() const { return spilled_vregs_.end(); }
+
+ const BitVector* blocks_covered() const { return &blocks_covered_; }
+
+ private:
+ ZoneSet<int> spilled_vregs_;
+ BitVector blocks_covered_;
+};
+
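DeferredBlocksRegion groups connected cold (deferred) blocks so that a value which is only spilled inside cold code can be spilled once, on entry to the region, rather than at its definition on the hot path. A simplified model of the decision this enables (compare VirtualRegisterData::CouldSpillOnEntryToDeferred further down; field names are illustrative):

struct VRegInfo {
  bool needs_spill_at_output;
  bool defined_in_deferred_block;
  bool is_constant;
};

// Only defer the spill when the spilling use sits in cold code and the value
// itself is defined (and not already spilled) in hot code.
bool CouldDeferSpill(const VRegInfo& vreg, bool use_block_is_deferred) {
  return !vreg.needs_spill_at_output && use_block_is_deferred &&
         !vreg.defined_in_deferred_block && !vreg.is_constant;
}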
// VirtualRegisterData stores data specific to a particular virtual register,
// and tracks spilled operands for that virtual register.
class VirtualRegisterData final {
@@ -233,11 +281,17 @@ class VirtualRegisterData final {
// Define VirtualRegisterData with the type of output that produces this
// virtual register.
- void DefineAsUnallocatedOperand(int virtual_register, int instr_index);
+ void DefineAsUnallocatedOperand(int virtual_register, int instr_index,
+ bool is_deferred_block,
+ bool is_exceptional_call_output);
void DefineAsFixedSpillOperand(AllocatedOperand* operand,
- int virtual_register, int instr_index);
- void DefineAsConstantOperand(ConstantOperand* operand, int instr_index);
- void DefineAsPhi(int virtual_register, int instr_index);
+ int virtual_register, int instr_index,
+ bool is_deferred_block,
+ bool is_exceptional_call_output);
+ void DefineAsConstantOperand(ConstantOperand* operand, int instr_index,
+ bool is_deferred_block);
+ void DefineAsPhi(int virtual_register, int instr_index,
+ bool is_deferred_block);
// Spill an operand that is assigned to this virtual register.
void SpillOperand(InstructionOperand* operand, int instr_index,
@@ -254,6 +308,12 @@ class VirtualRegisterData final {
void EmitGapMoveToSpillSlot(AllocatedOperand from_operand, int instr_index,
MidTierRegisterAllocationData* data);
+  // Adds pending spills for deferred blocks.
+ void AddDeferredSpillUse(int instr_index,
+ MidTierRegisterAllocationData* data);
+ void AddDeferredSpillOutput(AllocatedOperand allocated_op, int instr_index,
+ MidTierRegisterAllocationData* data);
+
// Accessors for spill operand, which may still be pending allocation.
bool HasSpillOperand() const { return spill_operand_ != nullptr; }
InstructionOperand* spill_operand() const {
@@ -271,7 +331,29 @@ class VirtualRegisterData final {
DCHECK_EQ(is_constant(), HasSpillOperand() && spill_operand_->IsConstant());
return is_constant();
}
- bool NeedsSpillAtOutput() const;
+
+ // Returns true if the virtual register should be spilled when it is output.
+ bool NeedsSpillAtOutput() const { return needs_spill_at_output_; }
+ void MarkAsNeedsSpillAtOutput() {
+ if (is_constant()) return;
+ needs_spill_at_output_ = true;
+ if (HasSpillRange()) spill_range()->ClearDeferredBlockSpills();
+ }
+
+ // Returns true if the virtual register should be spilled at entry to deferred
+  // blocks in which it is spilled (to avoid spilling the output in
+  // non-deferred blocks).
+ bool NeedsSpillAtDeferredBlocks() const;
+ void EmitDeferredSpillOutputs(MidTierRegisterAllocationData* data);
+
+ bool IsSpilledAt(int instr_index, MidTierRegisterAllocationData* data) {
+ DCHECK_GE(instr_index, output_instr_index());
+ if (NeedsSpillAtOutput() || HasConstantSpillOperand()) return true;
+ if (HasSpillOperand() && data->GetBlock(instr_index)->IsDeferred()) {
+ return true;
+ }
+ return false;
+ }
// Allocates pending spill operands to the |allocated| spill slot.
void AllocatePendingSpillOperand(const AllocatedOperand& allocated);
@@ -279,26 +361,44 @@ class VirtualRegisterData final {
int vreg() const { return vreg_; }
int output_instr_index() const { return output_instr_index_; }
bool is_constant() const { return is_constant_; }
-
bool is_phi() const { return is_phi_; }
- void set_is_phi(bool value) { is_phi_ = value; }
+ bool is_defined_in_deferred_block() const {
+ return is_defined_in_deferred_block_;
+ }
+ bool is_exceptional_call_output() const {
+ return is_exceptional_call_output_;
+ }
+
+ struct DeferredSpillSlotOutput {
+ public:
+ explicit DeferredSpillSlotOutput(int instr, AllocatedOperand op,
+ const BitVector* blocks)
+ : instr_index(instr), operand(op), live_blocks(blocks) {}
+
+ int instr_index;
+ AllocatedOperand operand;
+ const BitVector* live_blocks;
+ };
// Represents the range of instructions for which this virtual register needs
// to be spilled on the stack.
class SpillRange : public ZoneObject {
public:
// Defines a spill range for an output operand.
- SpillRange(int definition_instr_index, MidTierRegisterAllocationData* data)
+ SpillRange(int definition_instr_index,
+ const InstructionBlock* definition_block,
+ MidTierRegisterAllocationData* data)
: live_range_(definition_instr_index, definition_instr_index),
- live_blocks_(data->GetBlocksDominatedBy(definition_instr_index)) {}
+ live_blocks_(data->GetBlocksDominatedBy(definition_block)),
+ deferred_spill_outputs_(nullptr) {}
// Defines a spill range for a Phi variable.
SpillRange(const InstructionBlock* phi_block,
MidTierRegisterAllocationData* data)
: live_range_(phi_block->first_instruction_index(),
phi_block->first_instruction_index()),
- live_blocks_(data->GetBlocksDominatedBy(
- phi_block->first_instruction_index())) {
+ live_blocks_(data->GetBlocksDominatedBy(phi_block)),
+ deferred_spill_outputs_(nullptr) {
// For phis, add the gap move instructions in the predecessor blocks to
// the live range.
for (RpoNumber pred_rpo : phi_block->predecessors()) {
@@ -307,20 +407,63 @@ class VirtualRegisterData final {
}
}
+ SpillRange(const SpillRange&) = delete;
+ SpillRange& operator=(const SpillRange&) = delete;
+
bool IsLiveAt(int instr_index, InstructionBlock* block) {
- return live_range_.Contains(instr_index) &&
- live_blocks_->Contains(block->rpo_number().ToInt());
+ if (!live_range_.Contains(instr_index)) return false;
+
+ int block_rpo = block->rpo_number().ToInt();
+ if (!live_blocks_->Contains(block_rpo)) return false;
+
+ if (!HasDeferredBlockSpills()) {
+ return true;
+ } else {
+        // If this spill range is only output in deferred blocks, then the
+        // spill slot will only be live in those deferred blocks, not in all
+        // blocks in which the virtual register is live.
+ for (auto deferred_spill_output : *deferred_spill_outputs()) {
+ if (deferred_spill_output.live_blocks->Contains(block_rpo)) {
+ return true;
+ }
+ }
+ return false;
+ }
}
void ExtendRangeTo(int instr_index) { live_range_.AddInstr(instr_index); }
+ void AddDeferredSpillOutput(AllocatedOperand allocated_op, int instr_index,
+ MidTierRegisterAllocationData* data) {
+ if (deferred_spill_outputs_ == nullptr) {
+ Zone* zone = data->allocation_zone();
+ deferred_spill_outputs_ =
+ zone->New<ZoneVector<DeferredSpillSlotOutput>>(zone);
+ }
+ const InstructionBlock* block = data->GetBlock(instr_index);
+ DCHECK_EQ(block->first_instruction_index(), instr_index);
+ BlockState& block_state = data->block_state(block->rpo_number());
+ const BitVector* deferred_blocks =
+ block_state.deferred_blocks_region()->blocks_covered();
+ deferred_spill_outputs_->emplace_back(instr_index, allocated_op,
+ deferred_blocks);
+ }
+
+ void ClearDeferredBlockSpills() { deferred_spill_outputs_ = nullptr; }
+ bool HasDeferredBlockSpills() const {
+ return deferred_spill_outputs_ != nullptr;
+ }
+ const ZoneVector<DeferredSpillSlotOutput>* deferred_spill_outputs() const {
+ DCHECK(HasDeferredBlockSpills());
+ return deferred_spill_outputs_;
+ }
+
Range& live_range() { return live_range_; }
private:
Range live_range_;
const BitVector* live_blocks_;
-
- DISALLOW_COPY_AND_ASSIGN(SpillRange);
+ ZoneVector<DeferredSpillSlotOutput>* deferred_spill_outputs_;
};
bool HasSpillRange() const { return spill_range_ != nullptr; }
@@ -331,11 +474,14 @@ class VirtualRegisterData final {
private:
void Initialize(int virtual_register, InstructionOperand* spill_operand,
- int instr_index, bool is_phi, bool is_constant);
+ int instr_index, bool is_phi, bool is_constant,
+ bool is_defined_in_deferred_block,
+ bool is_exceptional_call_output);
- void AddPendingSpillOperand(PendingOperand* pending_operand);
void AddSpillUse(int instr_index, MidTierRegisterAllocationData* data);
+ void AddPendingSpillOperand(PendingOperand* pending_operand);
void EnsureSpillRange(MidTierRegisterAllocationData* data);
+ bool CouldSpillOnEntryToDeferred(const InstructionBlock* block);
InstructionOperand* spill_operand_;
SpillRange* spill_range_;
@@ -344,6 +490,9 @@ class VirtualRegisterData final {
int vreg_;
bool is_phi_ : 1;
bool is_constant_ : 1;
+ bool is_defined_in_deferred_block_ : 1;
+ bool needs_spill_at_output_ : 1;
+ bool is_exceptional_call_output_ : 1;
};
VirtualRegisterData& MidTierRegisterAllocationData::VirtualRegisterDataFor(
@@ -356,33 +505,45 @@ VirtualRegisterData& MidTierRegisterAllocationData::VirtualRegisterDataFor(
void VirtualRegisterData::Initialize(int virtual_register,
InstructionOperand* spill_operand,
int instr_index, bool is_phi,
- bool is_constant) {
+ bool is_constant,
+ bool is_defined_in_deferred_block,
+ bool is_exceptional_call_output) {
vreg_ = virtual_register;
spill_operand_ = spill_operand;
spill_range_ = nullptr;
output_instr_index_ = instr_index;
is_phi_ = is_phi;
is_constant_ = is_constant;
+ is_defined_in_deferred_block_ = is_defined_in_deferred_block;
+ needs_spill_at_output_ = !is_constant_ && spill_operand_ != nullptr;
+ is_exceptional_call_output_ = is_exceptional_call_output;
}
void VirtualRegisterData::DefineAsConstantOperand(ConstantOperand* operand,
- int instr_index) {
- Initialize(operand->virtual_register(), operand, instr_index, false, true);
+ int instr_index,
+ bool is_deferred_block) {
+ Initialize(operand->virtual_register(), operand, instr_index, false, true,
+ is_deferred_block, false);
}
-void VirtualRegisterData::DefineAsFixedSpillOperand(AllocatedOperand* operand,
- int virtual_register,
- int instr_index) {
- Initialize(virtual_register, operand, instr_index, false, false);
+void VirtualRegisterData::DefineAsFixedSpillOperand(
+ AllocatedOperand* operand, int virtual_register, int instr_index,
+ bool is_deferred_block, bool is_exceptional_call_output) {
+ Initialize(virtual_register, operand, instr_index, false, false,
+ is_deferred_block, is_exceptional_call_output);
}
-void VirtualRegisterData::DefineAsUnallocatedOperand(int virtual_register,
- int instr_index) {
- Initialize(virtual_register, nullptr, instr_index, false, false);
+void VirtualRegisterData::DefineAsUnallocatedOperand(
+ int virtual_register, int instr_index, bool is_deferred_block,
+ bool is_exceptional_call_output) {
+ Initialize(virtual_register, nullptr, instr_index, false, false,
+ is_deferred_block, is_exceptional_call_output);
}
-void VirtualRegisterData::DefineAsPhi(int virtual_register, int instr_index) {
- Initialize(virtual_register, nullptr, instr_index, true, false);
+void VirtualRegisterData::DefineAsPhi(int virtual_register, int instr_index,
+ bool is_deferred_block) {
+ Initialize(virtual_register, nullptr, instr_index, true, false,
+ is_deferred_block, false);
}
void VirtualRegisterData::EnsureSpillRange(
@@ -390,16 +551,27 @@ void VirtualRegisterData::EnsureSpillRange(
DCHECK(!is_constant());
if (HasSpillRange()) return;
+ const InstructionBlock* definition_block =
+ data->GetBlock(output_instr_index_);
if (is_phi()) {
// Define a spill slot that is defined for the phi's range.
- const InstructionBlock* definition_block =
- data->code()->InstructionAt(output_instr_index_)->block();
spill_range_ =
data->allocation_zone()->New<SpillRange>(definition_block, data);
} else {
+ if (is_exceptional_call_output()) {
+ // If this virtual register is output by a call which has an exception
+ // catch handler, then the output will only be live in the IfSuccess
+ // successor block, not the IfException side, so make the definition block
+ // the IfSuccess successor block explicitly.
+ DCHECK_EQ(output_instr_index_,
+ definition_block->last_instruction_index() - 1);
+ DCHECK_EQ(definition_block->SuccessorCount(), 2);
+ DCHECK(data->GetBlock(definition_block->successors()[1])->IsHandler());
+ definition_block = data->GetBlock(definition_block->successors()[0]);
+ }
// The spill slot will be defined after the instruction that outputs it.
- spill_range_ =
- data->allocation_zone()->New<SpillRange>(output_instr_index_ + 1, data);
+ spill_range_ = data->allocation_zone()->New<SpillRange>(
+ output_instr_index_ + 1, definition_block, data);
}
data->spilled_virtual_registers().Add(vreg());
}
@@ -407,8 +579,38 @@ void VirtualRegisterData::EnsureSpillRange(
void VirtualRegisterData::AddSpillUse(int instr_index,
MidTierRegisterAllocationData* data) {
if (is_constant()) return;
+
EnsureSpillRange(data);
spill_range_->ExtendRangeTo(instr_index);
+
+ const InstructionBlock* block = data->GetBlock(instr_index);
+ if (CouldSpillOnEntryToDeferred(block)) {
+ data->block_state(block->rpo_number())
+ .deferred_blocks_region()
+ ->DeferSpillOutputUntilEntry(vreg());
+ } else {
+ MarkAsNeedsSpillAtOutput();
+ }
+}
+
+void VirtualRegisterData::AddDeferredSpillUse(
+ int instr_index, MidTierRegisterAllocationData* data) {
+ DCHECK(data->GetBlock(instr_index)->IsDeferred());
+ DCHECK(!is_defined_in_deferred_block());
+ AddSpillUse(instr_index, data);
+}
+
+bool VirtualRegisterData::CouldSpillOnEntryToDeferred(
+ const InstructionBlock* block) {
+ return !NeedsSpillAtOutput() && block->IsDeferred() &&
+ !is_defined_in_deferred_block() && !is_constant();
+}
+
+void VirtualRegisterData::AddDeferredSpillOutput(
+ AllocatedOperand allocated_op, int instr_index,
+ MidTierRegisterAllocationData* data) {
+ DCHECK(!NeedsSpillAtOutput());
+ spill_range_->AddDeferredSpillOutput(allocated_op, instr_index, data);
}
void VirtualRegisterData::SpillOperand(InstructionOperand* operand,
@@ -424,8 +626,17 @@ void VirtualRegisterData::SpillOperand(InstructionOperand* operand,
}
}
-bool VirtualRegisterData::NeedsSpillAtOutput() const {
- return HasSpillOperand() && !is_constant();
+bool VirtualRegisterData::NeedsSpillAtDeferredBlocks() const {
+ return HasSpillRange() && spill_range()->HasDeferredBlockSpills();
+}
+
+void VirtualRegisterData::EmitDeferredSpillOutputs(
+ MidTierRegisterAllocationData* data) {
+ DCHECK(NeedsSpillAtDeferredBlocks());
+ for (auto deferred_spill : *spill_range()->deferred_spill_outputs()) {
+ EmitGapMoveToSpillSlot(deferred_spill.operand, deferred_spill.instr_index,
+ data);
+ }
}
void VirtualRegisterData::EmitGapMoveToInputFromSpillSlot(
@@ -511,17 +722,32 @@ class RegisterState final : public ZoneObject {
RegisterState(const RegisterState& other) V8_NOEXCEPT;
bool IsAllocated(RegisterIndex reg);
+ bool IsShared(RegisterIndex reg);
int VirtualRegisterForRegister(RegisterIndex reg);
// Commit the |reg| with the |allocated| operand.
void Commit(RegisterIndex reg, AllocatedOperand allocated,
InstructionOperand* operand, MidTierRegisterAllocationData* data);
+
// Spill the contents of |reg| for an instruction in |current_block| using
// the |allocated| operand to commit the spill gap move.
void Spill(RegisterIndex reg, AllocatedOperand allocated,
const InstructionBlock* current_block,
MidTierRegisterAllocationData* data);
+ // Add a pending spill of the contents of |reg| at the exit point of a
+  // deferred block at |instr_index|, using the |allocated| operand to commit
+ // spill gap move, if the register never gets spilled in a non-deferred block.
+ void SpillForDeferred(RegisterIndex reg, AllocatedOperand allocated,
+ int instr_index, MidTierRegisterAllocationData* data);
+
+ // Add a pending gap move from |reg| to |virtual_register|'s spill at the
+ // entry point of a deferred block at |instr_index|, if the |virtual_register|
+  // is never spilled in a non-deferred block.
+ void MoveToSpillSlotOnDeferred(RegisterIndex reg, int virtual_register,
+ int instr_index,
+ MidTierRegisterAllocationData* data);
+
// Allocate |reg| to |virtual_register| for the instruction at |instr_index|.
// If the register is later spilled, a gap move will be added immediately
// before |instr_index| to move |virtual_register| into this register.
@@ -583,18 +809,30 @@ class RegisterState final : public ZoneObject {
void Reset();
// Operations for committing, spilling and allocating uses of the register.
- void Commit(AllocatedOperand allocated_operand);
+ void Commit(AllocatedOperand allocated_operand,
+ MidTierRegisterAllocationData* data);
void Spill(AllocatedOperand allocated_op,
const InstructionBlock* current_block,
MidTierRegisterAllocationData* data);
void Use(int virtual_register, int instr_index);
void PendingUse(InstructionOperand* operand, int virtual_register,
int instr_index);
+ void SpillForDeferred(AllocatedOperand allocated, int instr_index,
+ MidTierRegisterAllocationData* data);
+ void MoveToSpillSlotOnDeferred(int virtual_register, int instr_index,
+ MidTierRegisterAllocationData* data);
// Mark register as holding a phi.
void MarkAsPhiMove();
bool is_phi_gap_move() const { return is_phi_gap_move_; }
+    // The register has deferred block spills that will be emitted if the
+    // register is committed without having been spilled in a non-deferred
+    // block.
+ void AddDeferredBlockSpill(int instr_index, bool on_exit, Zone* zone);
+ bool has_deferred_block_spills() const {
+ return deferred_block_spills_.has_value();
+ }
+
// Operations related to dealing with a Register that is shared across
// multiple basic blocks.
void CommitAtMerge();
@@ -627,6 +865,14 @@ class RegisterState final : public ZoneObject {
PendingOperand* pending_uses() const { return pending_uses_; }
private:
+ struct DeferredBlockSpill {
+ DeferredBlockSpill(int instr, bool on_exit)
+ : instr_index(instr), on_deferred_exit(on_exit) {}
+
+ int instr_index;
+ bool on_deferred_exit;
+ };
+
void SpillPendingUses(MidTierRegisterAllocationData* data);
void SpillPhiGapMove(AllocatedOperand allocated_op,
const InstructionBlock* block,
@@ -640,6 +886,7 @@ class RegisterState final : public ZoneObject {
int num_commits_required_;
int virtual_register_;
PendingOperand* pending_uses_;
+ base::Optional<ZoneVector<DeferredBlockSpill>> deferred_block_spills_;
};
void ResetDataFor(RegisterIndex reg);
@@ -667,6 +914,7 @@ void RegisterState::Register::Reset() {
num_commits_required_ = 0;
virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
pending_uses_ = nullptr;
+ deferred_block_spills_.reset();
}
void RegisterState::Register::Use(int virtual_register, int instr_index) {
@@ -689,7 +937,6 @@ void RegisterState::Register::PendingUse(InstructionOperand* operand,
num_commits_required_ = 1;
}
DCHECK_EQ(virtual_register_, virtual_register);
- DCHECK_GE(last_use_instr_index_, instr_index);
PendingOperand pending_op(pending_uses());
InstructionOperand::ReplaceWith(operand, &pending_op);
@@ -701,19 +948,31 @@ void RegisterState::Register::MarkAsPhiMove() {
is_phi_gap_move_ = true;
}
+void RegisterState::Register::AddDeferredBlockSpill(int instr_index,
+ bool on_exit, Zone* zone) {
+ DCHECK(is_allocated());
+ if (!deferred_block_spills_) {
+ deferred_block_spills_.emplace(zone);
+ }
+ deferred_block_spills_->emplace_back(instr_index, on_exit);
+}
+
void RegisterState::Register::AddSharedUses(int shared_use_count) {
is_shared_ = true;
num_commits_required_ += shared_use_count;
}
void RegisterState::Register::CommitAtMerge() {
+ DCHECK(is_shared());
+ DCHECK(is_allocated());
--num_commits_required_;
// We should still have commits required that will be resolved in the merge
// block.
DCHECK_GT(num_commits_required_, 0);
}
-void RegisterState::Register::Commit(AllocatedOperand allocated_op) {
+void RegisterState::Register::Commit(AllocatedOperand allocated_op,
+ MidTierRegisterAllocationData* data) {
DCHECK(is_allocated());
DCHECK_GT(num_commits_required_, 0);
@@ -728,6 +987,29 @@ void RegisterState::Register::Commit(AllocatedOperand allocated_op) {
pending_use = next;
}
pending_uses_ = nullptr;
+
+ VirtualRegisterData& vreg_data =
+ data->VirtualRegisterDataFor(virtual_register());
+
+ // If there are deferred block gap moves pending, emit them now that the
+ // register has been committed.
+ if (has_deferred_block_spills()) {
+ for (DeferredBlockSpill& spill : *deferred_block_spills_) {
+ if (spill.on_deferred_exit) {
+ vreg_data.EmitGapMoveToInputFromSpillSlot(allocated_op,
+ spill.instr_index, data);
+ } else if (!vreg_data.NeedsSpillAtOutput()) {
+ vreg_data.AddDeferredSpillOutput(allocated_op, spill.instr_index,
+ data);
+ }
+ }
+ }
+
+    // If this register was used as a phi gap move, then its commit is the
+    // point at which we have output the Phi.
+ if (is_phi_gap_move() && vreg_data.NeedsSpillAtDeferredBlocks()) {
+ vreg_data.EmitDeferredSpillOutputs(data);
+ }
}
DCHECK_IMPLIES(num_commits_required_ > 0, is_shared());
}
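A register that is shared across several predecessor blocks needs one commit per sharing block before its pending uses can be resolved, which is what num_commits_required_ tracks above. A toy model of that counting invariant (a sketch, not the real Register class):

struct SharedRegister {
  int num_commits_required = 1;  // the owning block's own commit
  bool is_shared = false;

  void AddSharedUses(int shared_use_count) {
    is_shared = true;
    num_commits_required += shared_use_count;
  }
  // Returns true when this commit was the last one outstanding.
  bool Commit() { return --num_commits_required == 0; }
};

int main() {
  SharedRegister r;
  r.AddSharedUses(2);   // shared with two other blocks
  bool a = r.Commit();  // false: two commits still outstanding
  bool b = r.Commit();  // false: one left
  bool c = r.Commit();  // true: last outstanding commit
  return (a || b || !c) ? 1 : 0;
}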
@@ -735,16 +1017,19 @@ void RegisterState::Register::Commit(AllocatedOperand allocated_op) {
void RegisterState::Register::Spill(AllocatedOperand allocated_op,
const InstructionBlock* current_block,
MidTierRegisterAllocationData* data) {
+ VirtualRegisterData& vreg_data =
+ data->VirtualRegisterDataFor(virtual_register());
+ SpillPendingUses(data);
if (is_phi_gap_move()) {
SpillPhiGapMove(allocated_op, current_block, data);
}
if (needs_gap_move_on_spill()) {
- VirtualRegisterData& vreg_data =
- data->VirtualRegisterDataFor(virtual_register());
vreg_data.EmitGapMoveToInputFromSpillSlot(allocated_op,
last_use_instr_index(), data);
}
- SpillPendingUses(data);
+ if (has_deferred_block_spills() || !current_block->IsDeferred()) {
+ vreg_data.MarkAsNeedsSpillAtOutput();
+ }
virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
}
@@ -784,6 +1069,30 @@ void RegisterState::Register::SpillPendingUses(
pending_uses_ = nullptr;
}
+void RegisterState::Register::SpillForDeferred(
+ AllocatedOperand allocated, int instr_index,
+ MidTierRegisterAllocationData* data) {
+ DCHECK(is_allocated());
+ DCHECK(is_shared());
+  // Add a pending deferred spill, then commit the register (with the commit
+  // being fulfilled by the deferred spill if the register is fully committed).
+ data->VirtualRegisterDataFor(virtual_register())
+ .AddDeferredSpillUse(instr_index, data);
+ AddDeferredBlockSpill(instr_index, true, data->allocation_zone());
+ Commit(allocated, data);
+}
+
+void RegisterState::Register::MoveToSpillSlotOnDeferred(
+ int virtual_register, int instr_index,
+ MidTierRegisterAllocationData* data) {
+ if (!is_allocated()) {
+ virtual_register_ = virtual_register;
+ last_use_instr_index_ = instr_index;
+ num_commits_required_ = 1;
+ }
+ AddDeferredBlockSpill(instr_index, false, data->allocation_zone());
+}
+
RegisterState::RegisterState(RegisterKind kind, int num_allocatable_registers,
Zone* zone)
: register_data_(num_allocatable_registers, zone), zone_(zone) {}
@@ -802,7 +1111,7 @@ int RegisterState::VirtualRegisterForRegister(RegisterIndex reg) {
}
bool RegisterState::IsPhiGapMove(RegisterIndex reg) {
- DCHECK(RegisterState::IsAllocated(reg));
+ DCHECK(IsAllocated(reg));
return reg_data(reg).is_phi_gap_move();
}
@@ -811,7 +1120,7 @@ void RegisterState::Commit(RegisterIndex reg, AllocatedOperand allocated,
MidTierRegisterAllocationData* data) {
InstructionOperand::ReplaceWith(operand, &allocated);
if (IsAllocated(reg)) {
- reg_data(reg).Commit(allocated);
+ reg_data(reg).Commit(allocated, data);
ResetDataFor(reg);
}
}
@@ -824,6 +1133,22 @@ void RegisterState::Spill(RegisterIndex reg, AllocatedOperand allocated,
ResetDataFor(reg);
}
+void RegisterState::SpillForDeferred(RegisterIndex reg,
+ AllocatedOperand allocated,
+ int instr_index,
+ MidTierRegisterAllocationData* data) {
+ DCHECK(IsAllocated(reg));
+ reg_data(reg).SpillForDeferred(allocated, instr_index, data);
+ ResetDataFor(reg);
+}
+
+void RegisterState::MoveToSpillSlotOnDeferred(
+ RegisterIndex reg, int virtual_register, int instr_index,
+ MidTierRegisterAllocationData* data) {
+ EnsureRegisterData(reg);
+ reg_data(reg).MoveToSpillSlotOnDeferred(virtual_register, instr_index, data);
+}
+
void RegisterState::AllocateUse(RegisterIndex reg, int virtual_register,
InstructionOperand* operand, int instr_index,
MidTierRegisterAllocationData* data) {
@@ -848,6 +1173,10 @@ RegisterState::Register& RegisterState::reg_data(RegisterIndex reg) {
return *register_data_[reg.ToInt()];
}
+bool RegisterState::IsShared(RegisterIndex reg) {
+ return HasRegisterData(reg) && reg_data(reg).is_shared();
+}
+
bool RegisterState::IsAllocated(RegisterIndex reg) {
return HasRegisterData(reg) && reg_data(reg).is_allocated();
}
@@ -908,6 +1237,50 @@ RegisterState* RegisterState::Clone() {
return zone_->New<RegisterState>(*this);
}
+class RegisterBitVector {
+ public:
+ RegisterBitVector() : bits_(0) {}
+
+ bool Contains(RegisterIndex reg, MachineRepresentation rep) const {
+ return bits_ & reg.ToBit(rep);
+ }
+
+ RegisterIndex GetFirstSet() const {
+ return RegisterIndex(base::bits::CountTrailingZeros(bits_));
+ }
+
+ RegisterIndex GetFirstCleared(int max_reg) const {
+ int reg_index = base::bits::CountTrailingZeros(~bits_);
+ if (reg_index < max_reg) {
+ return RegisterIndex(reg_index);
+ } else {
+ return RegisterIndex::Invalid();
+ }
+ }
+
+ void Add(RegisterIndex reg, MachineRepresentation rep) {
+ bits_ |= reg.ToBit(rep);
+ }
+
+ void Clear(RegisterIndex reg, MachineRepresentation rep) {
+ bits_ &= ~reg.ToBit(rep);
+ }
+
+ RegisterBitVector Union(const RegisterBitVector& other) {
+ return RegisterBitVector(bits_ | other.bits_);
+ }
+
+ void Reset() { bits_ = 0; }
+ bool IsEmpty() const { return bits_ == 0; }
+
+ private:
+ explicit RegisterBitVector(uintptr_t bits) : bits_(bits) {}
+
+ static_assert(RegisterConfiguration::kMaxRegisters <= sizeof(uintptr_t) * 8,
+ "Maximum registers must fit in uintptr_t bitmap");
+ uintptr_t bits_;
+};
+
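RegisterBitVector keeps all registers of one kind in a single machine word, so "first set" and "first free" are each one count-trailing-zeros instruction. A free-standing sketch of the same trick (C++20 <bit>; the real class also folds FP register aliasing into ToBit(rep), which this sketch ignores):

#include <bit>
#include <cstdint>

struct RegBits {
  uintptr_t bits = 0;

  void Add(int reg) { bits |= uintptr_t{1} << reg; }
  void Clear(int reg) { bits &= ~(uintptr_t{1} << reg); }
  int FirstSet() const { return std::countr_zero(bits); }
  int FirstCleared(int max_reg) const {
    // Scan the complement for the lowest zero bit, i.e. the first free reg.
    int reg = std::countr_zero(~bits);
    return reg < max_reg ? reg : -1;  // -1 stands in for RegisterIndex::Invalid()
  }
};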
// A SinglePassRegisterAllocator is a fast register allocator that does a single
// pass through the instruction stream without performing any live-range
// analysis beforehand. It deals with a single RegisterKind, either general or
@@ -953,6 +1326,11 @@ class SinglePassRegisterAllocator final {
void EndBlock(const InstructionBlock* block);
void EndInstruction();
+ void UpdateForDeferredBlock(int instr_index);
+ void AllocateDeferredBlockSpillOutput(int instr_index,
+ RpoNumber deferred_block,
+ int virtual_register);
+
RegisterKind kind() const { return kind_; }
BitVector* assigned_registers() const { return assigned_registers_; }
@@ -985,6 +1363,12 @@ class SinglePassRegisterAllocator final {
// state into the current block.
void SpillRegisterAtMerge(RegisterState* reg_state, RegisterIndex reg);
+ // Introduce a gap move to move |virtual_register| from reg |from| to reg |to|
+ // on entry to a |successor| block.
+ void MoveRegisterOnMerge(RegisterIndex from, RegisterIndex to,
+ int virtual_register, RpoNumber successor,
+ RegisterState* succ_state);
+
// Update the virtual register data with the data in register_state()
void UpdateVirtualRegisterState();
@@ -1017,6 +1401,10 @@ class SinglePassRegisterAllocator final {
void SpillRegister(RegisterIndex reg);
void SpillRegisterForVirtualRegister(int virtual_register);
+  // Pre-emptively spill the register at the exit of deferred blocks so that
+  // it does not need to be spilled in non-deferred blocks.
+ void SpillRegisterForDeferred(RegisterIndex reg, int instr_index);
+
// Returns an AllocatedOperand corresponding to the use of |reg| for
// |virtual_register|.
AllocatedOperand AllocatedOperandForReg(RegisterIndex reg,
@@ -1031,13 +1419,15 @@ class SinglePassRegisterAllocator final {
// Helper functions to choose the best register for a given operand.
V8_INLINE RegisterIndex
- ChooseRegisterFor(VirtualRegisterData& virtual_register, UsePosition pos,
- bool must_use_register);
+ ChooseRegisterFor(VirtualRegisterData& virtual_register, int instr_index,
+ UsePosition pos, bool must_use_register);
V8_INLINE RegisterIndex ChooseRegisterFor(MachineRepresentation rep,
UsePosition pos,
bool must_use_register);
V8_INLINE RegisterIndex ChooseFreeRegister(MachineRepresentation rep,
UsePosition pos);
+ V8_INLINE RegisterIndex ChooseFreeRegister(
+ const RegisterBitVector& allocated_regs, MachineRepresentation rep);
V8_INLINE RegisterIndex ChooseRegisterToSpill(MachineRepresentation rep,
UsePosition pos);
@@ -1048,7 +1438,7 @@ class SinglePassRegisterAllocator final {
V8_INLINE void FreeRegister(RegisterIndex reg, int virtual_register);
V8_INLINE void MarkRegisterUse(RegisterIndex reg, MachineRepresentation rep,
UsePosition pos);
- V8_INLINE uintptr_t InUseBitmap(UsePosition pos);
+ V8_INLINE RegisterBitVector InUseBitmap(UsePosition pos);
V8_INLINE bool IsValidForRep(RegisterIndex reg, MachineRepresentation rep);
// Return the register allocated to |virtual_register|, if any.
@@ -1063,6 +1453,10 @@ class SinglePassRegisterAllocator final {
bool VirtualRegisterIsUnallocatedOrInReg(int virtual_register,
RegisterIndex reg);
+ // Returns a RegisterBitVector representing the allocated registers in
+ // reg_state.
+ RegisterBitVector GetAllocatedRegBitVector(RegisterState* reg_state);
+
// Check the consistency of reg->vreg and vreg->reg mappings if a debug build.
void CheckConsistency();
@@ -1101,11 +1495,9 @@ class SinglePassRegisterAllocator final {
MidTierRegisterAllocationData* data_;
- static_assert(RegisterConfiguration::kMaxRegisters <= sizeof(uintptr_t) * 8,
- "Maximum registers must fit in uintptr_t bitmap");
- uintptr_t in_use_at_instr_start_bits_;
- uintptr_t in_use_at_instr_end_bits_;
- uintptr_t allocated_registers_bits_;
+ RegisterBitVector in_use_at_instr_start_bits_;
+ RegisterBitVector in_use_at_instr_end_bits_;
+ RegisterBitVector allocated_registers_bits_;
// These fields are only used when kSimpleFPAliasing == false.
base::Optional<ZoneVector<RegisterIndex>> float32_reg_code_to_index_;
@@ -1129,9 +1521,9 @@ SinglePassRegisterAllocator::SinglePassRegisterAllocator(
assigned_registers_(data->code_zone()->New<BitVector>(
GetRegisterCount(data->config(), kind), data->code_zone())),
data_(data),
- in_use_at_instr_start_bits_(0),
- in_use_at_instr_end_bits_(0),
- allocated_registers_bits_(0) {
+ in_use_at_instr_start_bits_(),
+ in_use_at_instr_end_bits_(),
+ allocated_registers_bits_() {
for (int i = 0; i < num_allocatable_registers_; i++) {
int reg_code = index_to_reg_code_[i];
reg_code_to_index_[reg_code] = RegisterIndex(i);
@@ -1189,17 +1581,24 @@ RegisterIndex SinglePassRegisterAllocator::RegisterForVirtualRegister(
return virtual_register_to_reg_[virtual_register];
}
+void SinglePassRegisterAllocator::UpdateForDeferredBlock(int instr_index) {
+ if (!HasRegisterState()) return;
+ for (RegisterIndex reg : *register_state()) {
+ SpillRegisterForDeferred(reg, instr_index);
+ }
+}
+
void SinglePassRegisterAllocator::EndInstruction() {
- in_use_at_instr_end_bits_ = 0;
- in_use_at_instr_start_bits_ = 0;
+ in_use_at_instr_end_bits_.Reset();
+ in_use_at_instr_start_bits_.Reset();
}
void SinglePassRegisterAllocator::StartBlock(const InstructionBlock* block) {
DCHECK(!HasRegisterState());
DCHECK_NULL(current_block_);
- DCHECK_EQ(in_use_at_instr_start_bits_, 0);
- DCHECK_EQ(in_use_at_instr_end_bits_, 0);
- DCHECK_EQ(allocated_registers_bits_, 0);
+ DCHECK(in_use_at_instr_start_bits_.IsEmpty());
+ DCHECK(in_use_at_instr_end_bits_.IsEmpty());
+ DCHECK(allocated_registers_bits_.IsEmpty());
// Update the current block we are processing.
current_block_ = block;
@@ -1216,8 +1615,8 @@ void SinglePassRegisterAllocator::StartBlock(const InstructionBlock* block) {
}
void SinglePassRegisterAllocator::EndBlock(const InstructionBlock* block) {
- DCHECK_EQ(in_use_at_instr_start_bits_, 0);
- DCHECK_EQ(in_use_at_instr_end_bits_, 0);
+ DCHECK(in_use_at_instr_start_bits_.IsEmpty());
+ DCHECK(in_use_at_instr_end_bits_.IsEmpty());
// If we didn't allocate any registers of this kind, or we have reached the
// start, nothing to do here.
@@ -1236,9 +1635,8 @@ void SinglePassRegisterAllocator::EndBlock(const InstructionBlock* block) {
// Remove virtual register to register mappings and clear register state.
// We will update the register state when starting the next block.
- while (allocated_registers_bits_ != 0) {
- RegisterIndex reg(
- base::bits::CountTrailingZeros(allocated_registers_bits_));
+ while (!allocated_registers_bits_.IsEmpty()) {
+ RegisterIndex reg = allocated_registers_bits_.GetFirstSet();
FreeRegister(reg, VirtualRegisterForRegister(reg));
}
current_block_ = nullptr;
@@ -1275,19 +1673,51 @@ void SinglePassRegisterAllocator::MergeStateFrom(
UpdateVirtualRegisterState();
} else {
// Otherwise try to merge our state with the existing state.
- for (RegisterIndex reg : *register_state()) {
+ RegisterBitVector processed_regs;
+ RegisterBitVector succ_allocated_regs =
+ GetAllocatedRegBitVector(successor_registers);
+ for (RegisterIndex reg : *successor_registers) {
+ // If |reg| isn't allocated in successor registers, nothing to do.
+ if (!successor_registers->IsAllocated(reg)) continue;
+
+ int virtual_register =
+ successor_registers->VirtualRegisterForRegister(reg);
+ MachineRepresentation rep = RepresentationFor(virtual_register);
+
+      // If we have already processed |reg|, e.g., by adding a gap move to
+      // that register, then we can continue.
+ if (processed_regs.Contains(reg, rep)) continue;
+ processed_regs.Add(reg, rep);
+
if (register_state()->IsAllocated(reg)) {
if (successor_registers->Equals(reg, register_state())) {
// Both match, keep the merged register data.
register_state()->CommitAtMerge(reg);
} else {
- // TODO(rmcilroy) consider adding a gap move to shuffle register
- // into the same as the target. For now just spill.
- SpillRegisterAtMerge(successor_registers, reg);
+ // Try to find a new register for this successor register in the
+ // merge block, and add a gap move on entry of the successor block.
+ RegisterIndex new_reg =
+ RegisterForVirtualRegister(virtual_register);
+ if (!new_reg.is_valid()) {
+ new_reg = ChooseFreeRegister(
+ allocated_registers_bits_.Union(succ_allocated_regs), rep);
+ } else if (new_reg != reg) {
+          // Spill the |new_reg| in the successor block to be able to use it
+          // for this gap move. It would be spilled anyway, since it holds a
+          // different virtual register than it does in the merge block.
+ SpillRegisterAtMerge(successor_registers, new_reg);
+ }
+
+ if (new_reg.is_valid()) {
+ MoveRegisterOnMerge(new_reg, reg, virtual_register, successor,
+ successor_registers);
+ processed_regs.Add(new_reg, rep);
+ } else {
+ SpillRegisterAtMerge(successor_registers, reg);
+ }
}
- } else if (successor_registers->IsAllocated(reg)) {
- int virtual_register =
- successor_registers->VirtualRegisterForRegister(reg);
+ } else {
+ DCHECK(successor_registers->IsAllocated(reg));
if (RegisterForVirtualRegister(virtual_register).is_valid()) {
// If we already hold the virtual register in a different register,
// then spill this register in the successor block to avoid
@@ -1298,7 +1728,6 @@ void SinglePassRegisterAllocator::MergeStateFrom(
// Register is free in our current register state, so merge the
// successor block's register details into it.
register_state()->CopyFrom(reg, successor_registers);
- int virtual_register = VirtualRegisterForRegister(reg);
AssignRegister(reg, virtual_register, UsePosition::kNone);
}
}
@@ -1307,6 +1736,18 @@ void SinglePassRegisterAllocator::MergeStateFrom(
}
}
+RegisterBitVector SinglePassRegisterAllocator::GetAllocatedRegBitVector(
+ RegisterState* reg_state) {
+ RegisterBitVector allocated_regs;
+ for (RegisterIndex reg : *reg_state) {
+ if (reg_state->IsAllocated(reg)) {
+ int virtual_register = reg_state->VirtualRegisterForRegister(reg);
+ allocated_regs.Add(reg, RepresentationFor(virtual_register));
+ }
+ }
+ return allocated_regs;
+}
+
void SinglePassRegisterAllocator::SpillRegisterAtMerge(RegisterState* reg_state,
RegisterIndex reg) {
DCHECK_NE(reg_state, register_state());
@@ -1317,6 +1758,17 @@ void SinglePassRegisterAllocator::SpillRegisterAtMerge(RegisterState* reg_state,
}
}
+void SinglePassRegisterAllocator::MoveRegisterOnMerge(
+ RegisterIndex from, RegisterIndex to, int virtual_register,
+ RpoNumber successor, RegisterState* succ_state) {
+ int instr_index = data()->GetBlock(successor)->first_instruction_index();
+ MoveOperands* move =
+ data()->AddPendingOperandGapMove(instr_index, Instruction::START);
+ succ_state->Commit(to, AllocatedOperandForReg(to, virtual_register),
+ &move->destination(), data());
+ AllocatePendingUse(from, virtual_register, &move->source(), instr_index);
+}
+
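MoveRegisterOnMerge above records a gap move at the successor's first instruction instead of spilling when the merged state holds a virtual register in a different register than the successor expects. A rough stand-alone sketch of that reconciliation, using hypothetical map-based types rather than V8's RegisterState:

#include <map>
#include <utility>
#include <vector>

using VReg = int;
using Reg = int;

// Returns the {from, to} register moves to emit at the successor's entry.
std::vector<std::pair<Reg, Reg>> MovesOnMerge(
    const std::map<VReg, Reg>& merged_state,
    const std::map<VReg, Reg>& successor_state) {
  std::vector<std::pair<Reg, Reg>> moves;
  for (const auto& [vreg, succ_reg] : successor_state) {
    auto it = merged_state.find(vreg);
    if (it == merged_state.end()) {
      // No copy in the merged state: the code above would first try
      // ChooseFreeRegister and fall back to SpillRegisterAtMerge.
      continue;
    }
    if (it->second != succ_reg) moves.push_back({it->second, succ_reg});
  }
  return moves;
}

The real code additionally commits the destination side against the successor's register state and leaves the source side pending until the merge block's operand is finalized.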
void SinglePassRegisterAllocator::UpdateVirtualRegisterState() {
// Update to the new register state, update the vreg_to_register map, and
// reset any shared registers that were spilled by another block.
@@ -1339,8 +1791,8 @@ void SinglePassRegisterAllocator::CheckConsistency() {
RegisterIndex reg = RegisterForVirtualRegister(virtual_register);
if (reg.is_valid()) {
CHECK_EQ(virtual_register, VirtualRegisterForRegister(reg));
- CHECK(allocated_registers_bits_ &
- reg.ToBit(RepresentationFor(virtual_register)));
+ CHECK(allocated_registers_bits_.Contains(
+ reg, RepresentationFor(virtual_register)));
}
}
@@ -1348,8 +1800,8 @@ void SinglePassRegisterAllocator::CheckConsistency() {
int virtual_register = VirtualRegisterForRegister(reg);
if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
CHECK_EQ(reg, RegisterForVirtualRegister(virtual_register));
- CHECK(allocated_registers_bits_ &
- reg.ToBit(RepresentationFor(virtual_register)));
+ CHECK(allocated_registers_bits_.Contains(
+ reg, RepresentationFor(virtual_register)));
}
}
#endif
@@ -1422,8 +1874,8 @@ void SinglePassRegisterAllocator::AssignRegister(RegisterIndex reg,
UsePosition pos) {
MachineRepresentation rep = RepresentationFor(virtual_register);
assigned_registers()->Add(ToRegCode(reg, rep));
+ allocated_registers_bits_.Add(reg, rep);
MarkRegisterUse(reg, rep, pos);
- allocated_registers_bits_ |= reg.ToBit(rep);
if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
virtual_register_to_reg_[virtual_register] = reg;
}
@@ -1433,30 +1885,31 @@ void SinglePassRegisterAllocator::MarkRegisterUse(RegisterIndex reg,
MachineRepresentation rep,
UsePosition pos) {
if (pos == UsePosition::kStart || pos == UsePosition::kAll) {
- in_use_at_instr_start_bits_ |= reg.ToBit(rep);
+ in_use_at_instr_start_bits_.Add(reg, rep);
}
if (pos == UsePosition::kEnd || pos == UsePosition::kAll) {
- in_use_at_instr_end_bits_ |= reg.ToBit(rep);
+ in_use_at_instr_end_bits_.Add(reg, rep);
}
}
void SinglePassRegisterAllocator::FreeRegister(RegisterIndex reg,
int virtual_register) {
- allocated_registers_bits_ &= ~reg.ToBit(RepresentationFor(virtual_register));
+ allocated_registers_bits_.Clear(reg, RepresentationFor(virtual_register));
if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
virtual_register_to_reg_[virtual_register] = RegisterIndex::Invalid();
}
}
RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
- VirtualRegisterData& virtual_register, UsePosition pos,
+ VirtualRegisterData& virtual_register, int instr_index, UsePosition pos,
bool must_use_register) {
// If register is already allocated to the virtual register, use that.
RegisterIndex reg = RegisterForVirtualRegister(virtual_register.vreg());
+
// If we don't need a register, only try to allocate one if the virtual
// register hasn't yet been spilled, to try to avoid spilling it.
- if (!reg.is_valid() &&
- (must_use_register || !virtual_register.HasSpillOperand())) {
+ if (!reg.is_valid() && (must_use_register ||
+ !virtual_register.IsSpilledAt(instr_index, data()))) {
reg = ChooseRegisterFor(RepresentationFor(virtual_register.vreg()), pos,
must_use_register);
}
@@ -1473,14 +1926,14 @@ RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
return reg;
}
-uintptr_t SinglePassRegisterAllocator::InUseBitmap(UsePosition pos) {
+RegisterBitVector SinglePassRegisterAllocator::InUseBitmap(UsePosition pos) {
switch (pos) {
case UsePosition::kStart:
return in_use_at_instr_start_bits_;
case UsePosition::kEnd:
return in_use_at_instr_end_bits_;
case UsePosition::kAll:
- return in_use_at_instr_start_bits_ | in_use_at_instr_end_bits_;
+ return in_use_at_instr_start_bits_.Union(in_use_at_instr_end_bits_);
case UsePosition::kNone:
UNREACHABLE();
}
@@ -1508,20 +1961,21 @@ RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister(
MachineRepresentation rep, UsePosition pos) {
// Take the first free, non-blocked register, if available.
// TODO(rmcilroy): Consider a better heuristic.
- uintptr_t allocated_or_in_use = InUseBitmap(pos) | allocated_registers_bits_;
+ RegisterBitVector allocated_or_in_use =
+ InUseBitmap(pos).Union(allocated_registers_bits_);
+ return ChooseFreeRegister(allocated_or_in_use, rep);
+}
+RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister(
+ const RegisterBitVector& allocated_regs, MachineRepresentation rep) {
RegisterIndex chosen_reg = RegisterIndex::Invalid();
if (kSimpleFPAliasing || kind() == RegisterKind::kGeneral) {
- int reg_index = base::bits::CountTrailingZeros(~allocated_or_in_use);
- if (reg_index < num_allocatable_registers()) {
- chosen_reg = RegisterIndex(reg_index);
- }
+ chosen_reg = allocated_regs.GetFirstCleared(num_allocatable_registers());
} else {
// If we don't have simple fp aliasing, we need to check each register
// individually to get one with the required representation.
for (RegisterIndex reg : *register_state()) {
- if (IsValidForRep(reg, rep) &&
- (allocated_or_in_use & reg.ToBit(rep)) == 0) {
+ if (IsValidForRep(reg, rep) && !allocated_regs.Contains(reg, rep)) {
chosen_reg = reg;
break;
}
@@ -1534,7 +1988,7 @@ RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister(
RegisterIndex SinglePassRegisterAllocator::ChooseRegisterToSpill(
MachineRepresentation rep, UsePosition pos) {
- uintptr_t in_use = InUseBitmap(pos);
+ RegisterBitVector in_use = InUseBitmap(pos);
// Choose a register that will need to be spilled. Preferentially choose:
// - A register with only pending uses, to avoid having to add a gap move for
@@ -1550,7 +2004,7 @@ RegisterIndex SinglePassRegisterAllocator::ChooseRegisterToSpill(
bool already_spilled = false;
for (RegisterIndex reg : *register_state()) {
// Skip if register is in use, or not valid for representation.
- if (!IsValidForRep(reg, rep) || (in_use & reg.ToBit(rep))) continue;
+ if (!IsValidForRep(reg, rep) || in_use.Contains(reg, rep)) continue;
VirtualRegisterData& vreg_data =
VirtualRegisterDataFor(VirtualRegisterForRegister(reg));
@@ -1610,6 +2064,45 @@ void SinglePassRegisterAllocator::SpillRegisterForVirtualRegister(
}
}
+void SinglePassRegisterAllocator::SpillRegisterForDeferred(RegisterIndex reg,
+ int instr_index) {
+  // Commit the output operation, mark the register use in this instruction,
+  // then mark the register as free going forward.
+ if (register_state()->IsAllocated(reg) && register_state()->IsShared(reg)) {
+ int virtual_register = VirtualRegisterForRegister(reg);
+ AllocatedOperand allocated = AllocatedOperandForReg(reg, virtual_register);
+ register_state()->SpillForDeferred(reg, allocated, instr_index, data());
+ FreeRegister(reg, virtual_register);
+ }
+ CheckConsistency();
+}
+
+void SinglePassRegisterAllocator::AllocateDeferredBlockSpillOutput(
+ int instr_index, RpoNumber deferred_block, int virtual_register) {
+ DCHECK(data()->GetBlock(deferred_block)->IsDeferred());
+ VirtualRegisterData& vreg_data =
+ data()->VirtualRegisterDataFor(virtual_register);
+ if (!vreg_data.NeedsSpillAtOutput() &&
+ !DefinedAfter(virtual_register, instr_index, UsePosition::kEnd)) {
+    // If a register has been assigned to the virtual register, and the
+    // virtual register still doesn't need to be spilled at its output, add a
+    // pending move to output the virtual register to its spill slot on entry
+    // of the deferred block (to avoid spilling it in non-deferred code).
+ // TODO(rmcilroy): Consider assigning a register even if the virtual
+ // register isn't yet assigned - currently doing this regresses performance.
+ RegisterIndex reg = RegisterForVirtualRegister(virtual_register);
+ if (reg.is_valid()) {
+ int deferred_block_start =
+ data()->GetBlock(deferred_block)->first_instruction_index();
+ register_state()->MoveToSpillSlotOnDeferred(reg, virtual_register,
+ deferred_block_start, data());
+ return;
+ } else {
+ vreg_data.MarkAsNeedsSpillAtOutput();
+ }
+ }
+}
+
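For intuition about AllocateDeferredBlockSpillOutput above, a schematic of the intended effect on a hypothetical instruction stream (block names and operations are illustrative, not taken from this patch):

B0 (hot):       v1 = ...                  // definition of v1
                branch-if-unlikely -> B1
B1 (deferred):  call into runtime; v1 must be reloadable from memory

Spilling v1 at its definition would put the store on the hot path. With the hook above, the store is instead emitted as a gap move on entry to the deferred block:

B0 (hot):       v1 = ...                  // v1 stays in a register
B1 (deferred):  gap: slot(v1) <- reg(v1)  // MoveToSpillSlotOnDeferred
                call into runtime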
AllocatedOperand SinglePassRegisterAllocator::AllocatedOperandForReg(
RegisterIndex reg, int virtual_register) {
MachineRepresentation rep = RepresentationFor(virtual_register);
@@ -1709,7 +2202,8 @@ void SinglePassRegisterAllocator::AllocateInput(UnallocatedOperand* operand,
bool must_use_register = operand->HasRegisterPolicy() ||
(vreg_data.is_constant() &&
!operand->HasRegisterOrSlotOrConstantPolicy());
- RegisterIndex reg = ChooseRegisterFor(vreg_data, pos, must_use_register);
+ RegisterIndex reg =
+ ChooseRegisterFor(vreg_data, instr_index, pos, must_use_register);
if (reg.is_valid()) {
if (must_use_register) {
@@ -1731,7 +2225,8 @@ void SinglePassRegisterAllocator::AllocateGapMoveInput(
// Gap move inputs should be unconstrained.
DCHECK(operand->HasRegisterOrSlotPolicy());
- RegisterIndex reg = ChooseRegisterFor(vreg_data, UsePosition::kStart, false);
+ RegisterIndex reg =
+ ChooseRegisterFor(vreg_data, instr_index, UsePosition::kStart, false);
if (reg.is_valid()) {
AllocatePendingUse(reg, virtual_register, operand, instr_index);
} else {
@@ -1769,7 +2264,8 @@ RegisterIndex SinglePassRegisterAllocator::AllocateOutput(
reg = FromRegCode(operand->fixed_register_index(),
RepresentationFor(virtual_register));
} else {
- reg = ChooseRegisterFor(vreg_data, pos, operand->HasRegisterPolicy());
+ reg = ChooseRegisterFor(vreg_data, instr_index, pos,
+ operand->HasRegisterPolicy());
}
// TODO(rmcilroy): support secondary storage.
@@ -1797,6 +2293,8 @@ RegisterIndex SinglePassRegisterAllocator::AllocateOutput(
vreg_data.EmitGapMoveFromOutputToSpillSlot(
*AllocatedOperand::cast(operand), current_block(), instr_index,
data());
+ } else if (vreg_data.NeedsSpillAtDeferredBlocks()) {
+ vreg_data.EmitDeferredSpillOutputs(data());
}
}
@@ -1965,8 +2463,12 @@ void SinglePassRegisterAllocator::AllocatePhi(int virtual_register,
SpillRegisterForVirtualRegister(virtual_register);
} else {
RegisterIndex reg = RegisterForVirtualRegister(virtual_register);
- DCHECK(reg.is_valid());
- register_state()->UseForPhiGapMove(reg);
+ if (reg.is_valid()) {
+      // If the register is valid, use it for the phi gap move, to be
+      // processed at the successor blocks. If neither a register nor a spill
+      // slot was used, the virtual register was never used.
+ register_state()->UseForPhiGapMove(reg);
+ }
}
}
@@ -1985,6 +2487,8 @@ class MidTierOutputProcessor final {
void DefineOutputs(const InstructionBlock* block);
private:
+ void PopulateDeferredBlockRegion(RpoNumber initial_block);
+
VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const {
return data()->VirtualRegisterDataFor(virtual_register);
}
@@ -1992,16 +2496,71 @@ class MidTierOutputProcessor final {
return data()->RepresentationFor(virtual_register);
}
+ bool IsDeferredBlockBoundary(const ZoneVector<RpoNumber>& blocks) {
+ return blocks.size() == 1 && !data()->GetBlock(blocks[0])->IsDeferred();
+ }
+
MidTierRegisterAllocationData* data() const { return data_; }
InstructionSequence* code() const { return data()->code(); }
- Zone* allocation_zone() const { return data()->allocation_zone(); }
+ Zone* zone() const { return data()->allocation_zone(); }
MidTierRegisterAllocationData* const data_;
+ ZoneQueue<RpoNumber> deferred_blocks_worklist_;
+ ZoneSet<RpoNumber> deferred_blocks_processed_;
};
MidTierOutputProcessor::MidTierOutputProcessor(
MidTierRegisterAllocationData* data)
- : data_(data) {}
+ : data_(data),
+ deferred_blocks_worklist_(data->allocation_zone()),
+ deferred_blocks_processed_(data->allocation_zone()) {}
+
+void MidTierOutputProcessor::PopulateDeferredBlockRegion(
+ RpoNumber initial_block) {
+ DeferredBlocksRegion* deferred_blocks_region =
+ zone()->New<DeferredBlocksRegion>(zone(),
+ code()->InstructionBlockCount());
+ DCHECK(deferred_blocks_worklist_.empty());
+ deferred_blocks_worklist_.push(initial_block);
+ deferred_blocks_processed_.insert(initial_block);
+ while (!deferred_blocks_worklist_.empty()) {
+ RpoNumber current = deferred_blocks_worklist_.front();
+ deferred_blocks_worklist_.pop();
+ deferred_blocks_region->AddBlock(current, data());
+
+ const InstructionBlock* curr_block = data()->GetBlock(current);
+    // Check whether the predecessor blocks are still deferred.
+ if (IsDeferredBlockBoundary(curr_block->predecessors())) {
+ // If not, mark the predecessor as having a deferred successor.
+ data()
+ ->block_state(curr_block->predecessors()[0])
+ .MarkAsDeferredBlockBoundary();
+ } else {
+ // Otherwise process predecessors.
+ for (RpoNumber pred : curr_block->predecessors()) {
+ if (deferred_blocks_processed_.count(pred) == 0) {
+ deferred_blocks_worklist_.push(pred);
+ deferred_blocks_processed_.insert(pred);
+ }
+ }
+ }
+
+    // Check whether the successor blocks are still deferred.
+    if (IsDeferredBlockBoundary(curr_block->successors())) {
+      // If not, mark this block as a deferred block boundary.
+ data()->block_state(current).MarkAsDeferredBlockBoundary();
+ } else {
+ // Otherwise process successors.
+ for (RpoNumber succ : curr_block->successors()) {
+ if (deferred_blocks_processed_.count(succ) == 0) {
+ deferred_blocks_worklist_.push(succ);
+ deferred_blocks_processed_.insert(succ);
+ }
+ }
+ }
+ }
+}
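PopulateDeferredBlockRegion is a plain worklist flood fill over the block graph. A self-contained sketch with generic types; the real code above additionally records the boundary block via MarkAsDeferredBlockBoundary and only treats a single non-deferred neighbor as a boundary:

#include <queue>
#include <set>
#include <vector>

struct Block {
  bool deferred;
  std::vector<int> predecessors;
  std::vector<int> successors;
};

std::set<int> CollectDeferredRegion(const std::vector<Block>& blocks,
                                    int initial_block) {
  std::set<int> region;
  std::queue<int> worklist;
  region.insert(initial_block);
  worklist.push(initial_block);
  while (!worklist.empty()) {
    int id = worklist.front();
    worklist.pop();
    const Block& current = blocks[id];
    auto visit = [&](const std::vector<int>& neighbors) {
      for (int n : neighbors) {
        // A non-deferred neighbor is a region boundary; don't cross it.
        if (!blocks[n].deferred) continue;
        if (region.insert(n).second) worklist.push(n);
      }
    };
    visit(current.predecessors);
    visit(current.successors);
  }
  return region;
}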
void MidTierOutputProcessor::InitializeBlockState(
const InstructionBlock* block) {
@@ -2013,8 +2572,13 @@ void MidTierOutputProcessor::InitializeBlockState(
}
}
- // Mark this block as dominating itself.
BlockState& block_state = data()->block_state(block->rpo_number());
+
+ if (block->IsDeferred() && !block_state.deferred_blocks_region()) {
+ PopulateDeferredBlockRegion(block->rpo_number());
+ }
+
+ // Mark this block as dominating itself.
block_state.dominated_blocks()->Add(block->rpo_number().ToInt());
if (block->dominator().IsValid()) {
@@ -2030,6 +2594,8 @@ void MidTierOutputProcessor::InitializeBlockState(
void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
int block_start = block->first_instruction_index();
+ bool is_deferred = block->IsDeferred();
+
for (int index = block->last_instruction_index(); index >= block_start;
index--) {
Instruction* instr = code()->InstructionAt(index);
@@ -2042,25 +2608,30 @@ void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
ConstantOperand* constant_operand = ConstantOperand::cast(output);
int virtual_register = constant_operand->virtual_register();
VirtualRegisterDataFor(virtual_register)
- .DefineAsConstantOperand(constant_operand, index);
+ .DefineAsConstantOperand(constant_operand, index, is_deferred);
} else {
DCHECK(output->IsUnallocated());
UnallocatedOperand* unallocated_operand =
UnallocatedOperand::cast(output);
int virtual_register = unallocated_operand->virtual_register();
+ bool is_exceptional_call_output =
+ instr->IsCallWithDescriptorFlags() &&
+ instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler);
if (unallocated_operand->HasFixedSlotPolicy()) {
// If output has a fixed slot policy, allocate its spill operand now
// so that the register allocator can use this knowledge.
MachineRepresentation rep = RepresentationFor(virtual_register);
- AllocatedOperand* fixed_spill_operand = AllocatedOperand::New(
- allocation_zone(), AllocatedOperand::STACK_SLOT, rep,
- unallocated_operand->fixed_slot_index());
+ AllocatedOperand* fixed_spill_operand =
+ AllocatedOperand::New(zone(), AllocatedOperand::STACK_SLOT, rep,
+ unallocated_operand->fixed_slot_index());
VirtualRegisterDataFor(virtual_register)
.DefineAsFixedSpillOperand(fixed_spill_operand, virtual_register,
- index);
+ index, is_deferred,
+ is_exceptional_call_output);
} else {
VirtualRegisterDataFor(virtual_register)
- .DefineAsUnallocatedOperand(virtual_register, index);
+ .DefineAsUnallocatedOperand(virtual_register, index, is_deferred,
+ is_exceptional_call_output);
}
}
}
@@ -2076,7 +2647,8 @@ void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
for (PhiInstruction* phi : block->phis()) {
int virtual_register = phi->virtual_register();
VirtualRegisterDataFor(virtual_register)
- .DefineAsPhi(virtual_register, block->first_instruction_index());
+ .DefineAsPhi(virtual_register, block->first_instruction_index(),
+ is_deferred);
}
}
@@ -2095,6 +2667,8 @@ void DefineOutputs(MidTierRegisterAllocationData* data) {
class MidTierRegisterAllocator final {
public:
explicit MidTierRegisterAllocator(MidTierRegisterAllocationData* data);
+ MidTierRegisterAllocator(const MidTierRegisterAllocator&) = delete;
+ MidTierRegisterAllocator& operator=(const MidTierRegisterAllocator&) = delete;
void AllocateRegisters(const InstructionBlock* block);
void UpdateSpillRangesForLoops();
@@ -2130,8 +2704,6 @@ class MidTierRegisterAllocator final {
MidTierRegisterAllocationData* const data_;
SinglePassRegisterAllocator general_reg_allocator_;
SinglePassRegisterAllocator double_reg_allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(MidTierRegisterAllocator);
};
MidTierRegisterAllocator::MidTierRegisterAllocator(
@@ -2142,9 +2714,31 @@ MidTierRegisterAllocator::MidTierRegisterAllocator(
void MidTierRegisterAllocator::AllocateRegisters(
const InstructionBlock* block) {
+ RpoNumber block_rpo = block->rpo_number();
+ bool is_deferred_block_boundary =
+ data()->block_state(block_rpo).is_deferred_block_boundary();
+
general_reg_allocator().StartBlock(block);
double_reg_allocator().StartBlock(block);
+  // If the block is not deferred but has deferred successors, then, for
+  // virtual registers that are only spilled in the deferred blocks, try to
+  // emit their spill-slot outputs at the start of those deferred blocks.
+  // This avoids spilling them at their output in non-deferred blocks.
+ if (is_deferred_block_boundary && !block->IsDeferred()) {
+ for (RpoNumber successor : block->successors()) {
+ if (!data()->GetBlock(successor)->IsDeferred()) continue;
+ DCHECK_GT(successor, block_rpo);
+ for (int virtual_register :
+ *data()->block_state(successor).deferred_blocks_region()) {
+ USE(virtual_register);
+ AllocatorFor(RepresentationFor(virtual_register))
+ .AllocateDeferredBlockSpillOutput(block->last_instruction_index(),
+ successor, virtual_register);
+ }
+ }
+ }
+
// Allocate registers for instructions in reverse, from the end of the block
// to the start.
int block_start = block->first_instruction_index();
@@ -2215,6 +2809,13 @@ void MidTierRegisterAllocator::AllocateRegisters(
// phi gap move operations that are needed to resolve phis in our successor.
if (instr_index == block->last_instruction_index()) {
AllocatePhiGapMoves(block);
+
+    // If this block is deferred but its successor isn't, update the state to
+    // limit spills to the deferred blocks where possible.
+ if (is_deferred_block_boundary && block->IsDeferred()) {
+ general_reg_allocator().UpdateForDeferredBlock(instr_index);
+ double_reg_allocator().UpdateForDeferredBlock(instr_index);
+ }
}
// Allocate any unallocated gap move inputs.
@@ -2385,6 +2986,9 @@ void AllocateRegisters(MidTierRegisterAllocationData* data) {
class MidTierSpillSlotAllocator final {
public:
explicit MidTierSpillSlotAllocator(MidTierRegisterAllocationData* data);
+ MidTierSpillSlotAllocator(const MidTierSpillSlotAllocator&) = delete;
+ MidTierSpillSlotAllocator& operator=(const MidTierSpillSlotAllocator&) =
+ delete;
void Allocate(VirtualRegisterData* virtual_register);
@@ -2407,14 +3011,14 @@ class MidTierSpillSlotAllocator final {
ZonePriorityQueue<SpillSlot*, OrderByLastUse> allocated_slots_;
ZoneLinkedList<SpillSlot*> free_slots_;
int position_;
-
- DISALLOW_COPY_AND_ASSIGN(MidTierSpillSlotAllocator);
};
class MidTierSpillSlotAllocator::SpillSlot : public ZoneObject {
public:
SpillSlot(int stack_slot, int byte_width)
: stack_slot_(stack_slot), byte_width_(byte_width), range_() {}
+ SpillSlot(const SpillSlot&) = delete;
+ SpillSlot& operator=(const SpillSlot&) = delete;
void AddRange(const Range& range) { range_.AddRange(range); }
@@ -2429,8 +3033,6 @@ class MidTierSpillSlotAllocator::SpillSlot : public ZoneObject {
int stack_slot_;
int byte_width_;
Range range_;
-
- DISALLOW_COPY_AND_ASSIGN(SpillSlot);
};
bool MidTierSpillSlotAllocator::OrderByLastUse::operator()(
@@ -2525,6 +3127,9 @@ void AllocateSpillSlots(MidTierRegisterAllocationData* data) {
class MidTierReferenceMapPopulator final {
public:
explicit MidTierReferenceMapPopulator(MidTierRegisterAllocationData* data);
+ MidTierReferenceMapPopulator(const MidTierReferenceMapPopulator&) = delete;
+ MidTierReferenceMapPopulator& operator=(const MidTierReferenceMapPopulator&) =
+ delete;
void RecordReferences(const VirtualRegisterData& virtual_register);
@@ -2533,8 +3138,6 @@ class MidTierReferenceMapPopulator final {
InstructionSequence* code() const { return data()->code(); }
MidTierRegisterAllocationData* const data_;
-
- DISALLOW_COPY_AND_ASSIGN(MidTierReferenceMapPopulator);
};
MidTierReferenceMapPopulator::MidTierReferenceMapPopulator(
diff --git a/deps/v8/src/compiler/backend/mid-tier-register-allocator.h b/deps/v8/src/compiler/backend/mid-tier-register-allocator.h
index 6d8006badf..2440115095 100644
--- a/deps/v8/src/compiler/backend/mid-tier-register-allocator.h
+++ b/deps/v8/src/compiler/backend/mid-tier-register-allocator.h
@@ -34,6 +34,9 @@ class MidTierRegisterAllocationData final : public RegisterAllocationData {
InstructionSequence* code,
TickCounter* tick_counter,
const char* debug_name = nullptr);
+ MidTierRegisterAllocationData(const MidTierRegisterAllocationData&) = delete;
+ MidTierRegisterAllocationData& operator=(
+ const MidTierRegisterAllocationData&) = delete;
static MidTierRegisterAllocationData* cast(RegisterAllocationData* data) {
DCHECK_EQ(data->type(), Type::kMidTier);
@@ -57,8 +60,8 @@ class MidTierRegisterAllocationData final : public RegisterAllocationData {
const InstructionBlock* GetBlock(int instr_index);
// Returns a bitvector representing all the blocks that are dominated by the
- // output of the instruction at |instr_index|.
- const BitVector* GetBlocksDominatedBy(int instr_index);
+ // output of the instruction in |block|.
+ const BitVector* GetBlocksDominatedBy(const InstructionBlock* block);
// List of all instruction indices that require a reference map.
ZoneVector<int>& reference_map_instructions() {
@@ -97,8 +100,6 @@ class MidTierRegisterAllocationData final : public RegisterAllocationData {
BitVector spilled_virtual_registers_;
TickCounter* const tick_counter_;
-
- DISALLOW_COPY_AND_ASSIGN(MidTierRegisterAllocationData);
};
// Phase 1: Process instruction outputs to determine how each virtual register
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 5457883fee..c8265d73ae 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -657,7 +657,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ Call(reg, reg, Code::kHeaderSize - kHeapObjectTag);
}
@@ -697,7 +697,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ Addu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
__ Jump(reg);
@@ -722,7 +722,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ Jump(reg);
frame_access_state()->ClearSPDelta();
@@ -868,8 +868,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- CodeGenResult result = AssembleDeoptimizerCall(exit);
- if (result != kSuccess) return result;
+ __ Branch(exit->label());
break;
}
case kArchRet:
@@ -1729,25 +1728,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
break;
}
- case kMipsS8x16LoadSplat: {
+ case kMipsS128Load8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ lb(kScratchReg, i.MemoryOperand());
__ fill_b(i.OutputSimd128Register(), kScratchReg);
break;
}
- case kMipsS16x8LoadSplat: {
+ case kMipsS128Load16Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ lh(kScratchReg, i.MemoryOperand());
__ fill_h(i.OutputSimd128Register(), kScratchReg);
break;
}
- case kMipsS32x4LoadSplat: {
+ case kMipsS128Load32Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ Lw(kScratchReg, i.MemoryOperand());
__ fill_w(i.OutputSimd128Register(), kScratchReg);
break;
}
- case kMipsS64x2LoadSplat: {
+ case kMipsS128Load64Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
MemOperand memLow = i.MemoryOperand();
@@ -1759,7 +1758,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_w(dst, kSimd128ScratchReg, dst);
break;
}
- case kMipsI16x8Load8x8S: {
+ case kMipsS128Load8x8S: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
MemOperand memLow = i.MemoryOperand();
@@ -1773,7 +1772,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_b(dst, kSimd128ScratchReg, dst);
break;
}
- case kMipsI16x8Load8x8U: {
+ case kMipsS128Load8x8U: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
MemOperand memLow = i.MemoryOperand();
@@ -1786,7 +1785,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_b(dst, kSimd128RegZero, dst);
break;
}
- case kMipsI32x4Load16x4S: {
+ case kMipsS128Load16x4S: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
MemOperand memLow = i.MemoryOperand();
@@ -1800,7 +1799,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_h(dst, kSimd128ScratchReg, dst);
break;
}
- case kMipsI32x4Load16x4U: {
+ case kMipsS128Load16x4U: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
MemOperand memLow = i.MemoryOperand();
@@ -1813,7 +1812,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_h(dst, kSimd128RegZero, dst);
break;
}
- case kMipsI64x2Load32x2S: {
+ case kMipsS128Load32x2S: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
MemOperand memLow = i.MemoryOperand();
@@ -1827,7 +1826,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_w(dst, kSimd128ScratchReg, dst);
break;
}
- case kMipsI64x2Load32x2U: {
+ case kMipsS128Load32x2U: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
MemOperand memLow = i.MemoryOperand();
@@ -2585,6 +2584,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ copy_u_b(dst, scratch0, 0);
break;
}
+ case kMipsI32x4DotI16x8S: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ dotp_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
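The new kMipsI32x4DotI16x8S case maps the Wasm i32x4.dot_i16x8_s operation onto MSA's dotp_s.w: each 32-bit output lane is the sum of the products of one pair of signed 16-bit lanes. A scalar reference of that semantics:

#include <cstdint>

void I32x4DotI16x8S(const int16_t a[8], const int16_t b[8], int32_t out[4]) {
  for (int i = 0; i < 4; i++) {
    int64_t sum = int64_t{a[2 * i]} * b[2 * i] +
                  int64_t{a[2 * i + 1]} * b[2 * i + 1];
    // Only (-2^15)*(-2^15) added twice can exceed int32 range; that single
    // case wraps rather than saturates.
    out[i] = static_cast<int32_t>(sum);
  }
}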
case kMipsI16x8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
@@ -2643,7 +2648,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMipsI16x8AddSaturateS: {
+ case kMipsI16x8AddSatS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2655,7 +2660,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMipsI16x8SubSaturateS: {
+ case kMipsI16x8SubSatS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2704,13 +2709,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
- case kMipsI16x8AddSaturateU: {
+ case kMipsI16x8AddSatU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kMipsI16x8SubSaturateU: {
+ case kMipsI16x8SubSatU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2821,7 +2826,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMipsI8x16AddSaturateS: {
+ case kMipsI8x16AddSatS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2833,7 +2838,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMipsI8x16SubSaturateS: {
+ case kMipsI8x16SubSatS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2888,13 +2893,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputInt3(1));
break;
}
- case kMipsI8x16AddSaturateU: {
+ case kMipsI8x16AddSatU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kMipsI8x16SubSaturateU: {
+ case kMipsI8x16SubSatU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -4026,9 +4031,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
- int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
@@ -4048,41 +4052,81 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
MipsOperandConverter g(this, nullptr);
+ const int parameter_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
+
+  // {additional_pop_count} is only greater than zero if
+  // {parameter_count} == 0. Check RawMachineAssembler::PopAndReturn.
+ if (parameter_count != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (__ emit_debug_code()) {
+ __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
+ g.ToRegister(additional_pop_count),
+ Operand(static_cast<int64_t>(0)));
+ }
+ }
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // Functions with JS linkage have at least one parameter (the receiver).
+ // If {parameter_count} == 0, it means it is a builtin with
+  // kDontAdaptArgumentsSentinel, which takes care of popping JS arguments
+  // itself.
+ const bool drop_jsargs = frame_access_state()->has_frame() &&
+ call_descriptor->IsJSFunctionCall() &&
+ parameter_count != 0;
+#else
+ const bool drop_jsargs = false;
+#endif
+
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now unless they have a variable
// number of stack slot pops.
- if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (additional_pop_count->IsImmediate() &&
+ g.ToConstant(additional_pop_count).ToInt32() == 0) {
if (return_label_.is_bound()) {
__ Branch(&return_label_);
return;
} else {
__ bind(&return_label_);
- AssembleDeconstructFrame();
}
- } else {
- AssembleDeconstructFrame();
}
+ if (drop_jsargs) {
+      // Get the actual argument count.
+ __ Lw(t0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ }
+ AssembleDeconstructFrame();
}
- if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
- pop_count += g.ToConstant(pop).ToInt32();
+
+ if (drop_jsargs) {
+ // We must pop all arguments from the stack (including the receiver). This
+ // number of arguments is given by max(1 + argc_reg, parameter_count).
+ __ Addu(t0, t0, Operand(1)); // Also pop the receiver.
+ if (parameter_count > 1) {
+ __ li(kScratchReg, parameter_count);
+ __ slt(kScratchReg2, t0, kScratchReg);
+ __ movn(t0, kScratchReg, kScratchReg2);
+ }
+ __ sll(t0, t0, kSystemPointerSizeLog2);
+ __ Addu(sp, sp, t0);
+ } else if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ __ Drop(parameter_count + additional_count);
} else {
- Register pop_reg = g.ToRegister(pop);
+ Register pop_reg = g.ToRegister(additional_pop_count);
+ __ Drop(parameter_count);
__ sll(pop_reg, pop_reg, kSystemPointerSizeLog2);
- __ Addu(sp, sp, Operand(pop_reg));
- }
- if (pop_count != 0) {
- __ DropAndRet(pop_count);
- } else {
- __ Ret();
+ __ Addu(sp, sp, pop_reg);
}
+ __ Ret();
}
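The drop_jsargs path above pops max(argc + 1, parameter_count) stack slots: argc is the dynamic argument count loaded from StandardFrameConstants::kArgCOffset, plus one for the receiver, clamped below by the static parameter count; the slt/movn pair is a branchless max. A scalar model of that computation:

#include <cstdint>

uint32_t PoppedBytes(uint32_t argc, uint32_t parameter_count,
                     uint32_t pointer_size) {
  uint32_t slots = argc + 1;  // also pop the receiver
  if (slots < parameter_count) slots = parameter_count;  // slt + movn
  return slots * pointer_size;  // sll by kSystemPointerSizeLog2
}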
void CodeGenerator::FinishCode() {}
-void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
index 46ce3d359a..47d439af58 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -217,6 +217,7 @@ namespace compiler {
V(MipsI32x4GeU) \
V(MipsI32x4Abs) \
V(MipsI32x4BitMask) \
+ V(MipsI32x4DotI16x8S) \
V(MipsI16x8Splat) \
V(MipsI16x8ExtractLaneU) \
V(MipsI16x8ExtractLaneS) \
@@ -226,10 +227,10 @@ namespace compiler {
V(MipsI16x8ShrS) \
V(MipsI16x8ShrU) \
V(MipsI16x8Add) \
- V(MipsI16x8AddSaturateS) \
+ V(MipsI16x8AddSatS) \
V(MipsI16x8AddHoriz) \
V(MipsI16x8Sub) \
- V(MipsI16x8SubSaturateS) \
+ V(MipsI16x8SubSatS) \
V(MipsI16x8Mul) \
V(MipsI16x8MaxS) \
V(MipsI16x8MinS) \
@@ -237,8 +238,8 @@ namespace compiler {
V(MipsI16x8Ne) \
V(MipsI16x8GtS) \
V(MipsI16x8GeS) \
- V(MipsI16x8AddSaturateU) \
- V(MipsI16x8SubSaturateU) \
+ V(MipsI16x8AddSatU) \
+ V(MipsI16x8SubSatU) \
V(MipsI16x8MaxU) \
V(MipsI16x8MinU) \
V(MipsI16x8GtU) \
@@ -254,9 +255,9 @@ namespace compiler {
V(MipsI8x16Shl) \
V(MipsI8x16ShrS) \
V(MipsI8x16Add) \
- V(MipsI8x16AddSaturateS) \
+ V(MipsI8x16AddSatS) \
V(MipsI8x16Sub) \
- V(MipsI8x16SubSaturateS) \
+ V(MipsI8x16SubSatS) \
V(MipsI8x16Mul) \
V(MipsI8x16MaxS) \
V(MipsI8x16MinS) \
@@ -265,8 +266,8 @@ namespace compiler {
V(MipsI8x16GtS) \
V(MipsI8x16GeS) \
V(MipsI8x16ShrU) \
- V(MipsI8x16AddSaturateU) \
- V(MipsI8x16SubSaturateU) \
+ V(MipsI8x16AddSatU) \
+ V(MipsI8x16SubSatU) \
V(MipsI8x16MaxU) \
V(MipsI8x16MinU) \
V(MipsI8x16GtU) \
@@ -313,16 +314,16 @@ namespace compiler {
V(MipsS8x8Reverse) \
V(MipsS8x4Reverse) \
V(MipsS8x2Reverse) \
- V(MipsS8x16LoadSplat) \
- V(MipsS16x8LoadSplat) \
- V(MipsS32x4LoadSplat) \
- V(MipsS64x2LoadSplat) \
- V(MipsI16x8Load8x8S) \
- V(MipsI16x8Load8x8U) \
- V(MipsI32x4Load16x4S) \
- V(MipsI32x4Load16x4U) \
- V(MipsI64x2Load32x2S) \
- V(MipsI64x2Load32x2U) \
+ V(MipsS128Load8Splat) \
+ V(MipsS128Load16Splat) \
+ V(MipsS128Load32Splat) \
+ V(MipsS128Load64Splat) \
+ V(MipsS128Load8x8S) \
+ V(MipsS128Load8x8U) \
+ V(MipsS128Load16x4S) \
+ V(MipsS128Load16x4U) \
+ V(MipsS128Load32x2S) \
+ V(MipsS128Load32x2U) \
V(MipsMsaLd) \
V(MipsMsaSt) \
V(MipsI32x4SConvertI16x8Low) \
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 64e78b8122..bf28eec443 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -118,8 +118,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsFloorWS:
case kMipsI16x8Add:
case kMipsI16x8AddHoriz:
- case kMipsI16x8AddSaturateS:
- case kMipsI16x8AddSaturateU:
+ case kMipsI16x8AddSatS:
+ case kMipsI16x8AddSatU:
case kMipsI16x8Eq:
case kMipsI16x8ExtractLaneU:
case kMipsI16x8ExtractLaneS:
@@ -144,8 +144,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI16x8ShrU:
case kMipsI16x8Splat:
case kMipsI16x8Sub:
- case kMipsI16x8SubSaturateS:
- case kMipsI16x8SubSaturateU:
+ case kMipsI16x8SubSatS:
+ case kMipsI16x8SubSatU:
case kMipsI16x8UConvertI32x4:
case kMipsI16x8UConvertI8x16High:
case kMipsI16x8UConvertI8x16Low:
@@ -180,9 +180,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI32x4UConvertI16x8Low:
case kMipsI32x4Abs:
case kMipsI32x4BitMask:
+ case kMipsI32x4DotI16x8S:
case kMipsI8x16Add:
- case kMipsI8x16AddSaturateS:
- case kMipsI8x16AddSaturateU:
+ case kMipsI8x16AddSatS:
+ case kMipsI8x16AddSatU:
case kMipsI8x16Eq:
case kMipsI8x16ExtractLaneU:
case kMipsI8x16ExtractLaneS:
@@ -205,8 +206,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI8x16ShrU:
case kMipsI8x16Splat:
case kMipsI8x16Sub:
- case kMipsI8x16SubSaturateS:
- case kMipsI8x16SubSaturateU:
+ case kMipsI8x16SubSatS:
+ case kMipsI8x16SubSatU:
case kMipsI8x16UConvertI16x8:
case kMipsI8x16Abs:
case kMipsI8x16BitMask:
@@ -315,16 +316,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsUlhu:
case kMipsUlw:
case kMipsUlwc1:
- case kMipsS8x16LoadSplat:
- case kMipsS16x8LoadSplat:
- case kMipsS32x4LoadSplat:
- case kMipsS64x2LoadSplat:
- case kMipsI16x8Load8x8S:
- case kMipsI16x8Load8x8U:
- case kMipsI32x4Load16x4S:
- case kMipsI32x4Load16x4U:
- case kMipsI64x2Load32x2S:
- case kMipsI64x2Load32x2U:
+ case kMipsS128Load8Splat:
+ case kMipsS128Load16Splat:
+ case kMipsS128Load32Splat:
+ case kMipsS128Load64Splat:
+ case kMipsS128Load8x8S:
+ case kMipsS128Load8x8U:
+ case kMipsS128Load16x4S:
+ case kMipsS128Load16x4U:
+ case kMipsS128Load32x2S:
+ case kMipsS128Load32x2U:
case kMipsWord32AtomicPairLoad:
return kIsLoadOperation;
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index b552b0dec1..9b6abc80fa 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -64,8 +64,8 @@ class MipsOperandGenerator final : public OperandGenerator {
bool CanBeImmediate(Node* node, InstructionCode opcode) {
Int32Matcher m(node);
- if (!m.HasValue()) return false;
- int32_t value = m.Value();
+ if (!m.HasResolvedValue()) return false;
+ int32_t value = m.ResolvedValue();
switch (ArchOpcodeField::decode(opcode)) {
case kMipsShl:
case kMipsSar:
@@ -292,35 +292,35 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
InstructionCode opcode = kArchNop;
switch (params.transformation) {
- case LoadTransformation::kS8x16LoadSplat:
- opcode = kMipsS8x16LoadSplat;
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kMipsS128Load8Splat;
break;
- case LoadTransformation::kS16x8LoadSplat:
- opcode = kMipsS16x8LoadSplat;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kMipsS128Load16Splat;
break;
- case LoadTransformation::kS32x4LoadSplat:
- opcode = kMipsS32x4LoadSplat;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kMipsS128Load32Splat;
break;
- case LoadTransformation::kS64x2LoadSplat:
- opcode = kMipsS64x2LoadSplat;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kMipsS128Load64Splat;
break;
- case LoadTransformation::kI16x8Load8x8S:
- opcode = kMipsI16x8Load8x8S;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kMipsS128Load8x8S;
break;
- case LoadTransformation::kI16x8Load8x8U:
- opcode = kMipsI16x8Load8x8U;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kMipsS128Load8x8U;
break;
- case LoadTransformation::kI32x4Load16x4S:
- opcode = kMipsI32x4Load16x4S;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kMipsS128Load16x4S;
break;
- case LoadTransformation::kI32x4Load16x4U:
- opcode = kMipsI32x4Load16x4U;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kMipsS128Load16x4U;
break;
- case LoadTransformation::kI64x2Load32x2S:
- opcode = kMipsI64x2Load32x2S;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kMipsS128Load32x2S;
break;
- case LoadTransformation::kI64x2Load32x2U:
- opcode = kMipsI64x2Load32x2U;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kMipsS128Load32x2U;
break;
default:
UNIMPLEMENTED();
@@ -431,7 +431,7 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kMipsSwc1;
@@ -460,7 +460,6 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
if (g.CanBeImmediate(index, opcode)) {
@@ -487,8 +486,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
- m.right().HasValue()) {
- uint32_t mask = m.right().Value();
+ m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
@@ -498,9 +497,9 @@ void InstructionSelector::VisitWord32And(Node* node) {
// Select Ext for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasResolvedValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
- uint32_t lsb = mleft.right().Value() & 0x1F;
+ uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;
// Ext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -520,8 +519,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
// Other cases fall through to the normal And operation.
}
}
- if (m.right().HasValue()) {
- uint32_t mask = m.right().Value();
+ if (m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
uint32_t shift = base::bits::CountPopulation(~mask);
uint32_t msb = base::bits::CountLeadingZeros32(~mask);
if (shift != 0 && shift != 32 && msb + shift == 32) {
@@ -544,7 +543,7 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
m.right().Is(-1)) {
Int32BinopMatcher mleft(m.left().node());
- if (!mleft.right().HasValue()) {
+ if (!mleft.right().HasResolvedValue()) {
MipsOperandGenerator g(this);
Emit(kMipsNor, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
@@ -570,12 +569,12 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
Int32BinopMatcher mleft(m.left().node());
// Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
// contiguous, and the shift immediate non-zero.
- if (mleft.right().HasValue()) {
- uint32_t mask = mleft.right().Value();
+ if (mleft.right().HasResolvedValue()) {
+ uint32_t mask = mleft.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
- uint32_t shift = m.right().Value();
+ uint32_t shift = m.right().ResolvedValue();
DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
DCHECK_NE(0u, shift);
if ((shift + mask_width) >= 32) {
@@ -594,13 +593,14 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
- if (m.left().IsWord32And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x1F;
+ if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && mleft.right().Value() != 0) {
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
// Select Ext for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
- uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+ uint32_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_msb + mask_width + lsb) == 32) {
@@ -621,10 +621,10 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
- if (m.right().HasValue() && mleft.right().HasValue()) {
+ if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
MipsOperandGenerator g(this);
- uint32_t sar = m.right().Value();
- uint32_t shl = mleft.right().Value();
+ uint32_t sar = m.right().ResolvedValue();
+ uint32_t shl = mleft.right().ResolvedValue();
if ((sar == shl) && (sar == 16)) {
Emit(kMipsSeh, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()));
@@ -685,7 +685,7 @@ static void VisitWord32PairShift(InstructionSelector* selector,
MipsOperandGenerator g(selector);
Int32Matcher m(node->InputAt(2));
InstructionOperand shift_operand;
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
shift_operand = g.UseImmediate(m.node());
} else {
shift_operand = g.UseUniqueRegister(m.node());
@@ -869,8 +869,9 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.right().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
- if (mright.right().HasValue() && !m.left().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+ if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mright.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMipsLsa, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
@@ -885,8 +886,9 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.left().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && !m.right().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+ if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mleft.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMipsLsa, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
@@ -908,8 +910,8 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
void InstructionSelector::VisitInt32Mul(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
- if (m.right().HasValue() && m.right().Value() > 0) {
- uint32_t value = static_cast<uint32_t>(m.right().Value());
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
if (base::bits::IsPowerOfTwo(value)) {
Emit(kMipsShl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
@@ -1386,7 +1388,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb;
@@ -1439,7 +1441,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
// TODO(mips): I guess this could be done in a better way.
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kMipsUswc1;
@@ -1887,7 +1889,7 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode =
@@ -1923,7 +1925,7 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kWord32AtomicStoreWord8;
@@ -1957,7 +1959,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
@@ -1971,7 +1973,6 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
- return;
}
AddressingMode addressing_mode = kMode_MRI;
@@ -1996,7 +1997,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* index = node->InputAt(1);
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
@@ -2010,7 +2011,6 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
- return;
}
AddressingMode addressing_mode = kMode_MRI;
@@ -2037,7 +2037,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = int8_op;
@@ -2051,7 +2051,6 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
opcode = word32_op;
} else {
UNREACHABLE();
- return;
}
AddressingMode addressing_mode = kMode_MRI;
@@ -2195,13 +2194,14 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4GeU, kMipsI32x4GeU) \
V(I32x4Abs, kMipsI32x4Abs) \
V(I32x4BitMask, kMipsI32x4BitMask) \
+ V(I32x4DotI16x8S, kMipsI32x4DotI16x8S) \
V(I16x8Add, kMipsI16x8Add) \
- V(I16x8AddSaturateS, kMipsI16x8AddSaturateS) \
- V(I16x8AddSaturateU, kMipsI16x8AddSaturateU) \
+ V(I16x8AddSatS, kMipsI16x8AddSatS) \
+ V(I16x8AddSatU, kMipsI16x8AddSatU) \
V(I16x8AddHoriz, kMipsI16x8AddHoriz) \
V(I16x8Sub, kMipsI16x8Sub) \
- V(I16x8SubSaturateS, kMipsI16x8SubSaturateS) \
- V(I16x8SubSaturateU, kMipsI16x8SubSaturateU) \
+ V(I16x8SubSatS, kMipsI16x8SubSatS) \
+ V(I16x8SubSatU, kMipsI16x8SubSatU) \
V(I16x8Mul, kMipsI16x8Mul) \
V(I16x8MaxS, kMipsI16x8MaxS) \
V(I16x8MinS, kMipsI16x8MinS) \
@@ -2219,11 +2219,11 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8Abs, kMipsI16x8Abs) \
V(I16x8BitMask, kMipsI16x8BitMask) \
V(I8x16Add, kMipsI8x16Add) \
- V(I8x16AddSaturateS, kMipsI8x16AddSaturateS) \
- V(I8x16AddSaturateU, kMipsI8x16AddSaturateU) \
+ V(I8x16AddSatS, kMipsI8x16AddSatS) \
+ V(I8x16AddSatU, kMipsI8x16AddSatU) \
V(I8x16Sub, kMipsI8x16Sub) \
- V(I8x16SubSaturateS, kMipsI8x16SubSaturateS) \
- V(I8x16SubSaturateU, kMipsI8x16SubSaturateU) \
+ V(I8x16SubSatS, kMipsI8x16SubSatS) \
+ V(I8x16SubSatU, kMipsI8x16SubSatU) \
V(I8x16Mul, kMipsI8x16Mul) \
V(I8x16MaxS, kMipsI8x16MaxS) \
V(I8x16MinS, kMipsI8x16MinS) \
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index bb01eab924..887b7e5740 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -628,7 +628,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
__ Call(reg);
@@ -675,7 +675,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
__ Jump(reg);
@@ -701,7 +701,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ Jump(reg);
frame_access_state()->ClearSPDelta();
@@ -847,8 +847,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- CodeGenResult result = AssembleDeoptimizerCall(exit);
- if (result != kSuccess) return result;
+ __ Branch(exit->label());
break;
}
case kArchRet:
@@ -1869,31 +1868,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
break;
}
- case kMips64S8x16LoadSplat: {
+ case kMips64S128Load8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ Lb(kScratchReg, i.MemoryOperand());
__ fill_b(i.OutputSimd128Register(), kScratchReg);
break;
}
- case kMips64S16x8LoadSplat: {
+ case kMips64S128Load16Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ Lh(kScratchReg, i.MemoryOperand());
__ fill_h(i.OutputSimd128Register(), kScratchReg);
break;
}
- case kMips64S32x4LoadSplat: {
+ case kMips64S128Load32Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ Lw(kScratchReg, i.MemoryOperand());
__ fill_w(i.OutputSimd128Register(), kScratchReg);
break;
}
- case kMips64S64x2LoadSplat: {
+ case kMips64S128Load64Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ Ld(kScratchReg, i.MemoryOperand());
__ fill_d(i.OutputSimd128Register(), kScratchReg);
break;
}
- case kMips64I16x8Load8x8S: {
+ case kMips64S128Load8x8S: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register scratch = kSimd128ScratchReg;
@@ -1903,7 +1902,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_b(dst, scratch, dst);
break;
}
- case kMips64I16x8Load8x8U: {
+ case kMips64S128Load8x8U: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
@@ -1912,7 +1911,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_b(dst, kSimd128RegZero, dst);
break;
}
- case kMips64I32x4Load16x4S: {
+ case kMips64S128Load16x4S: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register scratch = kSimd128ScratchReg;
@@ -1922,7 +1921,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_h(dst, scratch, dst);
break;
}
- case kMips64I32x4Load16x4U: {
+ case kMips64S128Load16x4U: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
@@ -1931,7 +1930,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_h(dst, kSimd128RegZero, dst);
break;
}
- case kMips64I64x2Load32x2S: {
+ case kMips64S128Load32x2S: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
Simd128Register scratch = kSimd128ScratchReg;
@@ -1941,7 +1940,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_w(dst, scratch, dst);
break;
}
- case kMips64I64x2Load32x2U: {
+ case kMips64S128Load32x2U: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
@@ -1950,6 +1949,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_w(dst, kSimd128RegZero, dst);
break;
}
+ case kMips64S128Load32Zero: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ xor_v(dst, dst, dst);
+ __ Lwu(kScratchReg, i.MemoryOperand());
+ __ insert_w(dst, 0, kScratchReg);
+ break;
+ }
+ case kMips64S128Load64Zero: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ xor_v(dst, dst, dst);
+ __ Ld(kScratchReg, i.MemoryOperand());
+ __ insert_d(dst, 0, kScratchReg);
+ break;
+ }
case kWord32AtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
break;
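The two new cases above implement the v128.load32_zero and v128.load64_zero Wasm load transforms. A rough scalar model of the intended semantics (an assumption drawn from the emitted xor_v/insert sequence, not V8 code): load one low lane from memory and leave every other byte of the vector zero.

#include <cstdint>
#include <cstring>

struct V128 { uint8_t bytes[16]; };

// Mirrors xor_v(dst, dst, dst) followed by Lwu + insert_w into lane 0.
inline V128 S128Load32Zero(const void* mem) {
  V128 v{};                      // all 16 bytes zeroed
  std::memcpy(v.bytes, mem, 4);  // 32-bit lane 0, little-endian lane order
  return v;
}

// Same shape for the 64-bit variant (Ld + insert_d into lane 0).
inline V128 S128Load64Zero(const void* mem) {
  V128 v{};
  std::memcpy(v.bytes, mem, 8);
  return v;
}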
@@ -2196,9 +2211,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register src1 = i.InputSimd128Register(1);
Simd128Register scratch0 = kSimd128RegZero;
Simd128Register scratch1 = kSimd128ScratchReg;
- // MSA follows IEEE 754-2008 comparision rules:
- // 1. All NaN-related comparsions get false.
- // 2. +0.0 equals to -0.0.
// If inputs are -0.0. and +0.0, then write -0.0 to scratch1.
// scratch1 = (src0 == src1) ? (src0 | src1) : (src1 | src1).
@@ -2208,9 +2220,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// scratch0 = isNaN(src0) ? src0 : scratch1.
__ fseq_d(scratch0, src0, src0);
__ bsel_v(scratch0, src0, scratch1);
- // dst = (src0 < scratch0) ? src0 : scratch0.
- __ fslt_d(dst, src0, scratch0);
- __ bsel_v(dst, scratch0, src0);
+ // scratch1 = (src0 < scratch0) ? src0 : scratch0.
+ __ fslt_d(scratch1, src0, scratch0);
+ __ bsel_v(scratch1, scratch0, src0);
+ // Canonicalize the result.
+ __ fmin_d(dst, scratch1, scratch1);
break;
}
case kMips64F64x2Max: {
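The rewritten F64x2Min/Max sequences select across the NaN and signed-zero cases and then run the result through fmin_d/fmax_d once more to canonicalize any NaN payload. A scalar sketch of the per-lane rule being implemented (assumed Wasm f64x2.min semantics, not the emitted code):

#include <cmath>
#include <limits>

double WasmF64Min(double a, double b) {
  // Any NaN input yields a canonical quiet NaN (the final fmin_d pass).
  if (std::isnan(a) || std::isnan(b)) {
    return std::numeric_limits<double>::quiet_NaN();
  }
  // min(-0.0, +0.0) must be -0.0, which plain '<' cannot distinguish.
  if (a == 0.0 && b == 0.0) return std::signbit(a) ? a : b;
  return a < b ? a : b;
}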
@@ -2220,9 +2234,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register src1 = i.InputSimd128Register(1);
Simd128Register scratch0 = kSimd128RegZero;
Simd128Register scratch1 = kSimd128ScratchReg;
- // MSA follows IEEE 754-2008 comparision rules:
- // 1. All NaN-related comparsions get false.
- // 2. +0.0 equals to -0.0.
// If inputs are -0.0. and +0.0, then write +0.0 to scratch1.
// scratch1 = (src0 == src1) ? (src0 & src1) : (src1 & src1).
@@ -2232,9 +2243,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// scratch0 = isNaN(src0) ? src0 : scratch1.
__ fseq_d(scratch0, src0, src0);
__ bsel_v(scratch0, src0, scratch1);
- // dst = (scratch0 < src0) ? src0 : scratch0.
- __ fslt_d(dst, scratch0, src0);
- __ bsel_v(dst, scratch0, src0);
+ // scratch1 = (scratch0 < src0) ? src0 : scratch0.
+ __ fslt_d(scratch1, scratch0, src0);
+ __ bsel_v(scratch1, scratch0, src0);
+ // Canonicalize the result.
+ __ fmax_d(dst, scratch1, scratch1);
break;
}
case kMips64F64x2Eq: {
@@ -2590,9 +2603,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register src1 = i.InputSimd128Register(1);
Simd128Register scratch0 = kSimd128RegZero;
Simd128Register scratch1 = kSimd128ScratchReg;
- // MSA follows IEEE 754-2008 comparision rules:
- // 1. All NaN-related comparsions get false.
- // 2. +0.0 equals to -0.0.
// If inputs are -0.0. and +0.0, then write +0.0 to scratch1.
// scratch1 = (src0 == src1) ? (src0 & src1) : (src1 & src1).
@@ -2602,9 +2612,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// scratch0 = isNaN(src0) ? src0 : scratch1.
__ fseq_w(scratch0, src0, src0);
__ bsel_v(scratch0, src0, scratch1);
- // dst = (scratch0 < src0) ? src0 : scratch0.
- __ fslt_w(dst, scratch0, src0);
- __ bsel_v(dst, scratch0, src0);
+ // scratch1 = (scratch0 < src0) ? src0 : scratch0.
+ __ fslt_w(scratch1, scratch0, src0);
+ __ bsel_v(scratch1, scratch0, src0);
+ // Canonicalize the result.
+ __ fmax_w(dst, scratch1, scratch1);
break;
}
case kMips64F32x4Min: {
@@ -2614,9 +2626,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register src1 = i.InputSimd128Register(1);
Simd128Register scratch0 = kSimd128RegZero;
Simd128Register scratch1 = kSimd128ScratchReg;
- // MSA follows IEEE 754-2008 comparision rules:
- // 1. All NaN-related comparsions get false.
- // 2. +0.0 equals to -0.0.
// If inputs are -0.0. and +0.0, then write -0.0 to scratch1.
// scratch1 = (src0 == src1) ? (src0 | src1) : (src1 | src1).
@@ -2626,9 +2635,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// scratch0 = isNaN(src0) ? src0 : scratch1.
__ fseq_w(scratch0, src0, src0);
__ bsel_v(scratch0, src0, scratch1);
- // dst = (src0 < scratch0) ? src0 : scratch0.
- __ fslt_w(dst, src0, scratch0);
- __ bsel_v(dst, scratch0, src0);
+ // scratch1 = (src0 < scratch0) ? src0 : scratch0.
+ __ fslt_w(scratch1, src0, scratch0);
+ __ bsel_v(scratch1, scratch0, src0);
+ // Canonicalize the result.
+ __ fmin_w(dst, scratch1, scratch1);
break;
}
case kMips64F32x4Eq: {
@@ -2767,6 +2778,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ copy_u_b(dst, scratch0, 0);
break;
}
+ case kMips64I32x4DotI16x8S: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ dotp_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
case kMips64I16x8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
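The kMips64I32x4DotI16x8S case above maps the Wasm i32x4.dot_i16x8_s operation onto a single dotp_s_w. A scalar sketch of the lane rule (assumed semantics; the one pairwise sum outside int32 range, 2 * (-32768)^2, wraps to INT32_MIN):

#include <cstdint>

void I32x4DotI16x8S(const int16_t a[8], const int16_t b[8], int32_t out[4]) {
  for (int i = 0; i < 4; ++i) {
    // Widen before multiplying, then wrap the pairwise sum to 32 bits.
    int64_t sum = int64_t{a[2 * i]} * b[2 * i] +
                  int64_t{a[2 * i + 1]} * b[2 * i + 1];
    out[i] = static_cast<int32_t>(static_cast<uint32_t>(sum));
  }
}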
@@ -2843,7 +2860,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMips64I16x8AddSaturateS: {
+ case kMips64I16x8AddSatS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2855,7 +2872,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMips64I16x8SubSaturateS: {
+ case kMips64I16x8SubSatS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2904,13 +2921,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
- case kMips64I16x8AddSaturateU: {
+ case kMips64I16x8AddSatU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kMips64I16x8SubSaturateU: {
+ case kMips64I16x8SubSatU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -3034,7 +3051,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMips64I8x16AddSaturateS: {
+ case kMips64I8x16AddSatS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -3046,7 +3063,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMips64I8x16SubSaturateS: {
+ case kMips64I8x16SubSatS: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -3107,13 +3124,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kMips64I8x16AddSaturateU: {
+ case kMips64I8x16AddSatU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kMips64I8x16SubSaturateU: {
+ case kMips64I8x16SubSatU: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
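The AddSaturate*/SubSaturate* to AddSat*/SubSat* renames throughout this file track the corresponding Wasm SIMD opcode renames (i8x16.add_sat_s and friends); the emitted MSA instructions are unchanged. For reference, a scalar model of the saturating lane arithmetic (a sketch, not V8 code):

#include <cstdint>
#include <limits>

int8_t AddSatS(int8_t a, int8_t b) {
  int32_t sum = int32_t{a} + int32_t{b};  // widen so the sum cannot wrap
  if (sum > std::numeric_limits<int8_t>::max())
    return std::numeric_limits<int8_t>::max();
  if (sum < std::numeric_limits<int8_t>::min())
    return std::numeric_limits<int8_t>::min();
  return static_cast<int8_t>(sum);
}

uint8_t SubSatU(uint8_t a, uint8_t b) {
  return a > b ? static_cast<uint8_t>(a - b) : 0;  // clamp at zero
}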
@@ -4297,7 +4314,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
const int returns = frame()->GetReturnSlotCount();
@@ -4318,41 +4335,81 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
MipsOperandConverter g(this, nullptr);
+
+ const int parameter_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
+
+  // {additional_pop_count} is only greater than zero if {parameter_count} == 0.
+ // Check RawMachineAssembler::PopAndReturn.
+ if (parameter_count != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (__ emit_debug_code()) {
+ __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
+ g.ToRegister(additional_pop_count),
+ Operand(static_cast<int64_t>(0)));
+ }
+ }
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // Functions with JS linkage have at least one parameter (the receiver).
+ // If {parameter_count} == 0, it means it is a builtin with
+ // kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
+ // itself.
+ const bool drop_jsargs = frame_access_state()->has_frame() &&
+ call_descriptor->IsJSFunctionCall() &&
+ parameter_count != 0;
+#else
+ const bool drop_jsargs = false;
+#endif
+
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a variable
// number of stack slot pops.
- if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (additional_pop_count->IsImmediate() &&
+ g.ToConstant(additional_pop_count).ToInt32() == 0) {
if (return_label_.is_bound()) {
__ Branch(&return_label_);
return;
} else {
__ bind(&return_label_);
- AssembleDeconstructFrame();
}
- } else {
- AssembleDeconstructFrame();
}
+ if (drop_jsargs) {
+ // Get the actual argument count
+ __ Ld(t0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ }
+ AssembleDeconstructFrame();
}
- int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
- if (pop->IsImmediate()) {
- pop_count += g.ToConstant(pop).ToInt32();
+ if (drop_jsargs) {
+ // We must pop all arguments from the stack (including the receiver). This
+ // number of arguments is given by max(1 + argc_reg, parameter_count).
+ __ Daddu(t0, t0, Operand(1)); // Also pop the receiver.
+ if (parameter_count > 1) {
+ __ li(kScratchReg, parameter_count);
+ __ slt(kScratchReg2, t0, kScratchReg);
+ __ movn(t0, kScratchReg, kScratchReg2);
+ }
+ __ dsll(t0, t0, kSystemPointerSizeLog2);
+ __ Daddu(sp, sp, t0);
+ } else if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ __ Drop(parameter_count + additional_count);
} else {
- Register pop_reg = g.ToRegister(pop);
+ Register pop_reg = g.ToRegister(additional_pop_count);
+ __ Drop(parameter_count);
__ dsll(pop_reg, pop_reg, kSystemPointerSizeLog2);
__ Daddu(sp, sp, pop_reg);
}
- if (pop_count != 0) {
- __ DropAndRet(pop_count);
- } else {
- __ Ret();
- }
+ __ Ret();
}
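For JS calls under V8_NO_ARGUMENTS_ADAPTOR, the epilogue above pops the larger of the actual and declared argument counts, plus the receiver. A sketch of the slot count that t0 ends up holding (illustrative names, not V8 code):

#include <algorithm>
#include <cstdint>

int64_t JSArgSlotsToDrop(int64_t argc_from_frame, int parameter_count) {
  // Daddu(t0, t0, 1) pops the receiver too; the slt/movn pair keeps at
  // least the declared parameter count when fewer arguments were passed.
  return std::max<int64_t>(argc_from_frame + 1, parameter_count);
}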
void CodeGenerator::FinishCode() {}
-void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index 577db6347c..18a8e616e7 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -252,6 +252,7 @@ namespace compiler {
V(Mips64I32x4GeU) \
V(Mips64I32x4Abs) \
V(Mips64I32x4BitMask) \
+ V(Mips64I32x4DotI16x8S) \
V(Mips64I16x8Splat) \
V(Mips64I16x8ExtractLaneU) \
V(Mips64I16x8ExtractLaneS) \
@@ -261,10 +262,10 @@ namespace compiler {
V(Mips64I16x8ShrS) \
V(Mips64I16x8ShrU) \
V(Mips64I16x8Add) \
- V(Mips64I16x8AddSaturateS) \
+ V(Mips64I16x8AddSatS) \
V(Mips64I16x8AddHoriz) \
V(Mips64I16x8Sub) \
- V(Mips64I16x8SubSaturateS) \
+ V(Mips64I16x8SubSatS) \
V(Mips64I16x8Mul) \
V(Mips64I16x8MaxS) \
V(Mips64I16x8MinS) \
@@ -272,8 +273,8 @@ namespace compiler {
V(Mips64I16x8Ne) \
V(Mips64I16x8GtS) \
V(Mips64I16x8GeS) \
- V(Mips64I16x8AddSaturateU) \
- V(Mips64I16x8SubSaturateU) \
+ V(Mips64I16x8AddSatU) \
+ V(Mips64I16x8SubSatU) \
V(Mips64I16x8MaxU) \
V(Mips64I16x8MinU) \
V(Mips64I16x8GtU) \
@@ -289,9 +290,9 @@ namespace compiler {
V(Mips64I8x16Shl) \
V(Mips64I8x16ShrS) \
V(Mips64I8x16Add) \
- V(Mips64I8x16AddSaturateS) \
+ V(Mips64I8x16AddSatS) \
V(Mips64I8x16Sub) \
- V(Mips64I8x16SubSaturateS) \
+ V(Mips64I8x16SubSatS) \
V(Mips64I8x16Mul) \
V(Mips64I8x16MaxS) \
V(Mips64I8x16MinS) \
@@ -300,8 +301,8 @@ namespace compiler {
V(Mips64I8x16GtS) \
V(Mips64I8x16GeS) \
V(Mips64I8x16ShrU) \
- V(Mips64I8x16AddSaturateU) \
- V(Mips64I8x16SubSaturateU) \
+ V(Mips64I8x16AddSatU) \
+ V(Mips64I8x16SubSatU) \
V(Mips64I8x16MaxU) \
V(Mips64I8x16MinU) \
V(Mips64I8x16GtU) \
@@ -348,16 +349,18 @@ namespace compiler {
V(Mips64S8x8Reverse) \
V(Mips64S8x4Reverse) \
V(Mips64S8x2Reverse) \
- V(Mips64S8x16LoadSplat) \
- V(Mips64S16x8LoadSplat) \
- V(Mips64S32x4LoadSplat) \
- V(Mips64S64x2LoadSplat) \
- V(Mips64I16x8Load8x8S) \
- V(Mips64I16x8Load8x8U) \
- V(Mips64I32x4Load16x4S) \
- V(Mips64I32x4Load16x4U) \
- V(Mips64I64x2Load32x2S) \
- V(Mips64I64x2Load32x2U) \
+ V(Mips64S128Load8Splat) \
+ V(Mips64S128Load16Splat) \
+ V(Mips64S128Load32Splat) \
+ V(Mips64S128Load64Splat) \
+ V(Mips64S128Load8x8S) \
+ V(Mips64S128Load8x8U) \
+ V(Mips64S128Load16x4S) \
+ V(Mips64S128Load16x4U) \
+ V(Mips64S128Load32x2S) \
+ V(Mips64S128Load32x2U) \
+ V(Mips64S128Load32Zero) \
+ V(Mips64S128Load64Zero) \
V(Mips64MsaLd) \
V(Mips64MsaSt) \
V(Mips64I32x4SConvertI16x8Low) \
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index caf472bf30..0cbaf0cc47 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -149,8 +149,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64FloorWS:
case kMips64I16x8Add:
case kMips64I16x8AddHoriz:
- case kMips64I16x8AddSaturateS:
- case kMips64I16x8AddSaturateU:
+ case kMips64I16x8AddSatS:
+ case kMips64I16x8AddSatU:
case kMips64I16x8Eq:
case kMips64I16x8ExtractLaneU:
case kMips64I16x8ExtractLaneS:
@@ -175,8 +175,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I16x8ShrU:
case kMips64I16x8Splat:
case kMips64I16x8Sub:
- case kMips64I16x8SubSaturateS:
- case kMips64I16x8SubSaturateU:
+ case kMips64I16x8SubSatS:
+ case kMips64I16x8SubSatU:
case kMips64I8x16UConvertI16x8:
case kMips64I16x8UConvertI32x4:
case kMips64I16x8UConvertI8x16High:
@@ -213,9 +213,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I32x4UConvertI16x8Low:
case kMips64I32x4Abs:
case kMips64I32x4BitMask:
+ case kMips64I32x4DotI16x8S:
case kMips64I8x16Add:
- case kMips64I8x16AddSaturateS:
- case kMips64I8x16AddSaturateU:
+ case kMips64I8x16AddSatS:
+ case kMips64I8x16AddSatU:
case kMips64I8x16Eq:
case kMips64I8x16ExtractLaneU:
case kMips64I8x16ExtractLaneS:
@@ -236,8 +237,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I8x16ShrU:
case kMips64I8x16Splat:
case kMips64I8x16Sub:
- case kMips64I8x16SubSaturateS:
- case kMips64I8x16SubSaturateU:
+ case kMips64I8x16SubSatS:
+ case kMips64I8x16SubSatU:
case kMips64I8x16RoundingAverageU:
case kMips64I8x16Abs:
case kMips64I8x16BitMask:
@@ -348,16 +349,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64Ulw:
case kMips64Ulwu:
case kMips64Ulwc1:
- case kMips64S8x16LoadSplat:
- case kMips64S16x8LoadSplat:
- case kMips64S32x4LoadSplat:
- case kMips64S64x2LoadSplat:
- case kMips64I16x8Load8x8S:
- case kMips64I16x8Load8x8U:
- case kMips64I32x4Load16x4S:
- case kMips64I32x4Load16x4U:
- case kMips64I64x2Load32x2S:
- case kMips64I64x2Load32x2U:
+ case kMips64S128Load8Splat:
+ case kMips64S128Load16Splat:
+ case kMips64S128Load32Splat:
+ case kMips64S128Load64Splat:
+ case kMips64S128Load8x8S:
+ case kMips64S128Load8x8U:
+ case kMips64S128Load16x4S:
+ case kMips64S128Load16x4U:
+ case kMips64S128Load32x2S:
+ case kMips64S128Load32x2U:
+ case kMips64S128Load32Zero:
+ case kMips64S128Load64Zero:
case kMips64Word64AtomicLoadUint8:
case kMips64Word64AtomicLoadUint16:
case kMips64Word64AtomicLoadUint32:
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 2c807b4183..216b83cdb2 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -386,35 +386,41 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
InstructionCode opcode = kArchNop;
switch (params.transformation) {
- case LoadTransformation::kS8x16LoadSplat:
- opcode = kMips64S8x16LoadSplat;
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kMips64S128Load8Splat;
break;
- case LoadTransformation::kS16x8LoadSplat:
- opcode = kMips64S16x8LoadSplat;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kMips64S128Load16Splat;
break;
- case LoadTransformation::kS32x4LoadSplat:
- opcode = kMips64S32x4LoadSplat;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kMips64S128Load32Splat;
break;
- case LoadTransformation::kS64x2LoadSplat:
- opcode = kMips64S64x2LoadSplat;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kMips64S128Load64Splat;
break;
- case LoadTransformation::kI16x8Load8x8S:
- opcode = kMips64I16x8Load8x8S;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kMips64S128Load8x8S;
break;
- case LoadTransformation::kI16x8Load8x8U:
- opcode = kMips64I16x8Load8x8U;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kMips64S128Load8x8U;
break;
- case LoadTransformation::kI32x4Load16x4S:
- opcode = kMips64I32x4Load16x4S;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kMips64S128Load16x4S;
break;
- case LoadTransformation::kI32x4Load16x4U:
- opcode = kMips64I32x4Load16x4U;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kMips64S128Load16x4U;
break;
- case LoadTransformation::kI64x2Load32x2S:
- opcode = kMips64I64x2Load32x2S;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kMips64S128Load32x2S;
break;
- case LoadTransformation::kI64x2Load32x2U:
- opcode = kMips64I64x2Load32x2U;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kMips64S128Load32x2U;
+ break;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kMips64S128Load32Zero;
+ break;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kMips64S128Load64Zero;
break;
default:
UNIMPLEMENTED();
@@ -504,7 +510,7 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kMips64Swc1;
@@ -535,7 +541,6 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
if (g.CanBeImmediate(index, opcode)) {
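The `opcode = kArchNop` dummy initializers and the `return` statements after UNREACHABLE() are dropped throughout this file. That is safe on the assumption that UNREACHABLE() is annotated noreturn, so the compiler can prove `opcode` is assigned on every path that continues. A minimal sketch of the pattern:

#include <cstdlib>

[[noreturn]] inline void Unreachable() { std::abort(); }  // stand-in for UNREACHABLE()

int PickOpcode(int rep) {
  int opcode;        // no dead kArchNop-style initializer required
  if (rep == 0) {
    opcode = 1;
  } else {
    Unreachable();   // noreturn: no fall-through, no trailing 'return;'
  }
  return opcode;
}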
@@ -562,8 +567,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
- m.right().HasValue()) {
- uint32_t mask = m.right().Value();
+ m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
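The popcount/leading-zero test here (and in the Dext and Ins matchers below) checks that the mask's set bits form one unbroken run anchored at bit 0 -- the shape Ext can extract. A standalone sketch of the check, with GCC/Clang builtins standing in for base::bits helpers:

#include <cstdint>

// True when mask == (1u << width) - 1 for some nonzero width, i.e. one
// contiguous run of ones starting at bit 0.
bool IsContiguousLowMask32(uint32_t mask, unsigned* width) {
  if (mask == 0) return false;
  *width = __builtin_popcount(mask);   // base::bits::CountPopulation
  unsigned msb = __builtin_clz(mask);  // base::bits::CountLeadingZeros32
  return msb + *width == 32;
}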
@@ -573,9 +578,9 @@ void InstructionSelector::VisitWord32And(Node* node) {
// Select Ext for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasResolvedValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
- uint32_t lsb = mleft.right().Value() & 0x1F;
+ uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;
// Ext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -591,8 +596,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
// Other cases fall through to the normal And operation.
}
}
- if (m.right().HasValue()) {
- uint32_t mask = m.right().Value();
+ if (m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
uint32_t shift = base::bits::CountPopulation(~mask);
uint32_t msb = base::bits::CountLeadingZeros32(~mask);
if (shift != 0 && shift != 32 && msb + shift == 32) {
@@ -611,8 +616,8 @@ void InstructionSelector::VisitWord64And(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
- m.right().HasValue()) {
- uint64_t mask = m.right().Value();
+ m.right().HasResolvedValue()) {
+ uint64_t mask = m.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
@@ -622,9 +627,10 @@ void InstructionSelector::VisitWord64And(Node* node) {
// Select Dext for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasResolvedValue()) {
// Any shift value can match; int64 shifts use `value % 64`.
- uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3F);
+ uint32_t lsb =
+ static_cast<uint32_t>(mleft.right().ResolvedValue() & 0x3F);
// Dext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -644,8 +650,8 @@ void InstructionSelector::VisitWord64And(Node* node) {
// Other cases fall through to the normal And operation.
}
}
- if (m.right().HasValue()) {
- uint64_t mask = m.right().Value();
+ if (m.right().HasResolvedValue()) {
+ uint64_t mask = m.right().ResolvedValue();
uint32_t shift = base::bits::CountPopulation(~mask);
uint32_t msb = base::bits::CountLeadingZeros64(~mask);
if (shift != 0 && shift < 32 && msb + shift == 64) {
@@ -674,7 +680,7 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
m.right().Is(-1)) {
Int32BinopMatcher mleft(m.left().node());
- if (!mleft.right().HasValue()) {
+ if (!mleft.right().HasResolvedValue()) {
Mips64OperandGenerator g(this);
Emit(kMips64Nor32, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
@@ -697,7 +703,7 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
m.right().Is(-1)) {
Int64BinopMatcher mleft(m.left().node());
- if (!mleft.right().HasValue()) {
+ if (!mleft.right().HasResolvedValue()) {
Mips64OperandGenerator g(this);
Emit(kMips64Nor, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
@@ -723,12 +729,12 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
Int32BinopMatcher mleft(m.left().node());
// Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
// contiguous, and the shift immediate non-zero.
- if (mleft.right().HasValue()) {
- uint32_t mask = mleft.right().Value();
+ if (mleft.right().HasResolvedValue()) {
+ uint32_t mask = mleft.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
- uint32_t shift = m.right().Value();
+ uint32_t shift = m.right().ResolvedValue();
DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
DCHECK_NE(0u, shift);
if ((shift + mask_width) >= 32) {
@@ -747,13 +753,14 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
- if (m.left().IsWord32And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x1F;
+ if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && mleft.right().Value() != 0) {
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
// Select Ext for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
- uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+ uint32_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_msb + mask_width + lsb) == 32) {
@@ -773,10 +780,10 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
- if (m.right().HasValue() && mleft.right().HasValue()) {
+ if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
Mips64OperandGenerator g(this);
- uint32_t sar = m.right().Value();
- uint32_t shl = mleft.right().Value();
+ uint32_t sar = m.right().ResolvedValue();
+ uint32_t shl = mleft.right().ResolvedValue();
if ((sar == shl) && (sar == 16)) {
Emit(kMips64Seh, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()));
@@ -812,12 +819,12 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
// Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is
// contiguous, and the shift immediate non-zero.
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
- uint64_t mask = mleft.right().Value();
+ if (mleft.right().HasResolvedValue()) {
+ uint64_t mask = mleft.right().ResolvedValue();
uint32_t mask_width = base::bits::CountPopulation(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
- uint64_t shift = m.right().Value();
+ uint64_t shift = m.right().ResolvedValue();
DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
DCHECK_NE(0u, shift);
@@ -837,13 +844,14 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
void InstructionSelector::VisitWord64Shr(Node* node) {
Int64BinopMatcher m(node);
- if (m.left().IsWord64And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x3F;
+ if (m.left().IsWord64And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x3F;
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && mleft.right().Value() != 0) {
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
// Select Dext for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
- uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
+ uint64_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
unsigned mask_width = base::bits::CountPopulation(mask);
unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_msb + mask_width + lsb) == 64) {
@@ -935,8 +943,9 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.right().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
- if (mright.right().HasValue() && !m.left().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+ if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mright.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMips64Lsa, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
@@ -951,8 +960,9 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.left().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && !m.right().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+ if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mleft.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMips64Lsa, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
@@ -976,8 +986,9 @@ void InstructionSelector::VisitInt64Add(Node* node) {
if (m.right().opcode() == IrOpcode::kWord64Shl &&
CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
Int64BinopMatcher mright(m.right().node());
- if (mright.right().HasValue() && !m.left().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+ if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mright.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMips64Dlsa, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
@@ -992,8 +1003,9 @@ void InstructionSelector::VisitInt64Add(Node* node) {
if (m.left().opcode() == IrOpcode::kWord64Shl &&
CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() && !m.right().HasValue()) {
- int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+ if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mleft.right().ResolvedValue());
if (shift_value > 0 && shift_value <= 31) {
Emit(kMips64Dlsa, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
@@ -1019,8 +1031,8 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
void InstructionSelector::VisitInt32Mul(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
- if (m.right().HasValue() && m.right().Value() > 0) {
- uint32_t value = static_cast<uint32_t>(m.right().Value());
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
if (base::bits::IsPowerOfTwo(value)) {
Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
@@ -1074,8 +1086,8 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
// TODO(dusmil): Add optimization for shifts larger than 32.
- if (m.right().HasValue() && m.right().Value() > 0) {
- uint32_t value = static_cast<uint32_t>(m.right().Value());
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
if (base::bits::IsPowerOfTwo(value)) {
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
@@ -1389,7 +1401,6 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
EmitLoad(this, value, opcode, node);
} else {
@@ -1746,7 +1757,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kMips64Ulwc1;
@@ -1799,7 +1810,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
Node* value = node->InputAt(2);
UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kMips64Uswc1;
@@ -2532,7 +2543,7 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode =
@@ -2553,7 +2564,7 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kWord32AtomicStoreWord8;
@@ -2573,7 +2584,7 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
opcode = kMips64Word64AtomicLoadUint8;
@@ -2595,7 +2606,7 @@ void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kMips64Word64AtomicStoreWord8;
@@ -2617,7 +2628,7 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
@@ -2631,14 +2642,13 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = kMips64Word64AtomicExchangeUint8;
@@ -2650,13 +2660,12 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
opcode = kMips64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
@@ -2670,14 +2679,13 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = kMips64Word64AtomicCompareExchangeUint8;
@@ -2689,14 +2697,13 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
opcode = kMips64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = int8_op;
@@ -2710,7 +2717,6 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
opcode = word32_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinop(this, node, opcode);
@@ -2733,7 +2739,7 @@ VISIT_ATOMIC_BINOP(Xor)
void InstructionSelector::VisitWord64AtomicBinaryOperation(
Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
ArchOpcode uint64_op) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = uint8_op;
@@ -2745,7 +2751,6 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
opcode = uint64_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinop(this, node, opcode);
}
@@ -2879,13 +2884,14 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4GeS, kMips64I32x4GeS) \
V(I32x4GtU, kMips64I32x4GtU) \
V(I32x4GeU, kMips64I32x4GeU) \
+ V(I32x4DotI16x8S, kMips64I32x4DotI16x8S) \
V(I16x8Add, kMips64I16x8Add) \
- V(I16x8AddSaturateS, kMips64I16x8AddSaturateS) \
- V(I16x8AddSaturateU, kMips64I16x8AddSaturateU) \
+ V(I16x8AddSatS, kMips64I16x8AddSatS) \
+ V(I16x8AddSatU, kMips64I16x8AddSatU) \
V(I16x8AddHoriz, kMips64I16x8AddHoriz) \
V(I16x8Sub, kMips64I16x8Sub) \
- V(I16x8SubSaturateS, kMips64I16x8SubSaturateS) \
- V(I16x8SubSaturateU, kMips64I16x8SubSaturateU) \
+ V(I16x8SubSatS, kMips64I16x8SubSatS) \
+ V(I16x8SubSatU, kMips64I16x8SubSatU) \
V(I16x8Mul, kMips64I16x8Mul) \
V(I16x8MaxS, kMips64I16x8MaxS) \
V(I16x8MinS, kMips64I16x8MinS) \
@@ -2901,11 +2907,11 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8SConvertI32x4, kMips64I16x8SConvertI32x4) \
V(I16x8UConvertI32x4, kMips64I16x8UConvertI32x4) \
V(I8x16Add, kMips64I8x16Add) \
- V(I8x16AddSaturateS, kMips64I8x16AddSaturateS) \
- V(I8x16AddSaturateU, kMips64I8x16AddSaturateU) \
+ V(I8x16AddSatS, kMips64I8x16AddSatS) \
+ V(I8x16AddSatU, kMips64I8x16AddSatU) \
V(I8x16Sub, kMips64I8x16Sub) \
- V(I8x16SubSaturateS, kMips64I8x16SubSaturateS) \
- V(I8x16SubSaturateU, kMips64I8x16SubSaturateU) \
+ V(I8x16SubSatS, kMips64I8x16SubSatS) \
+ V(I8x16SubSatU, kMips64I8x16SubSatU) \
V(I8x16Mul, kMips64I8x16Mul) \
V(I8x16MaxS, kMips64I8x16MaxS) \
V(I8x16MinS, kMips64I8x16MinS) \
diff --git a/deps/v8/src/compiler/backend/move-optimizer.h b/deps/v8/src/compiler/backend/move-optimizer.h
index ac3c407393..a63bd52d73 100644
--- a/deps/v8/src/compiler/backend/move-optimizer.h
+++ b/deps/v8/src/compiler/backend/move-optimizer.h
@@ -16,6 +16,9 @@ namespace compiler {
class V8_EXPORT_PRIVATE MoveOptimizer final {
public:
MoveOptimizer(Zone* local_zone, InstructionSequence* code);
+ MoveOptimizer(const MoveOptimizer&) = delete;
+ MoveOptimizer& operator=(const MoveOptimizer&) = delete;
+
void Run();
private:
@@ -57,8 +60,6 @@ class V8_EXPORT_PRIVATE MoveOptimizer final {
// at any given time, so we create two buffers.
ZoneVector<InstructionOperand> operand_buffer1;
ZoneVector<InstructionOperand> operand_buffer2;
-
- DISALLOW_COPY_AND_ASSIGN(MoveOptimizer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/backend/ppc/OWNERS b/deps/v8/src/compiler/backend/ppc/OWNERS
index 6edd45a6ef..02c2cd757c 100644
--- a/deps/v8/src/compiler/backend/ppc/OWNERS
+++ b/deps/v8/src/compiler/backend/ppc/OWNERS
@@ -2,3 +2,4 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
+vasili.skurydzin@ibm.com
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 767247b2fd..ee1ef6d939 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -877,7 +877,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasRegisterInput(instr, 0)) {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ CallCodeObject(reg);
} else {
@@ -925,7 +925,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasRegisterInput(instr, 0)) {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ JumpCodeObject(reg);
} else {
@@ -962,7 +962,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ Jump(reg);
frame_access_state()->ClearSPDelta();
@@ -1131,8 +1131,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- CodeGenResult result = AssembleDeoptimizerCall(exit);
- if (result != kSuccess) return result;
+ __ b(exit->label());
break;
}
case kArchRet:
@@ -1929,21 +1928,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
i.OutputRegister(0), kScratchDoubleReg);
#if V8_TARGET_ARCH_PPC64
- if (check_conversion) {
- // Set 2nd output to zero if conversion fails.
CRegister cr = cr7;
int crbit = v8::internal::Assembler::encode_crbit(
cr, static_cast<CRBit>(VXCVI % CRWIDTH));
__ mcrfs(cr, VXCVI); // extract FPSCR field containing VXCVI into cr7
+ // Handle conversion failures (such as overflow).
if (CpuFeatures::IsSupported(ISELECT)) {
- __ li(i.OutputRegister(1), Operand(1));
- __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
+ if (check_conversion) {
+ __ li(i.OutputRegister(1), Operand(1));
+ __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
+ } else {
+ __ isel(i.OutputRegister(0), r0, i.OutputRegister(0), crbit);
+ }
} else {
- __ li(i.OutputRegister(1), Operand::Zero());
- __ bc(v8::internal::kInstrSize * 2, BT, crbit);
- __ li(i.OutputRegister(1), Operand(1));
+ if (check_conversion) {
+ __ li(i.OutputRegister(1), Operand::Zero());
+ __ bc(v8::internal::kInstrSize * 2, BT, crbit);
+ __ li(i.OutputRegister(1), Operand(1));
+ } else {
+ __ mr(ip, i.OutputRegister(0));
+ __ li(i.OutputRegister(0), Operand::Zero());
+ __ bc(v8::internal::kInstrSize * 2, BT, crbit);
+ __ mr(i.OutputRegister(0), ip);
+ }
}
- }
#endif
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
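The reworked sequence handles invalid conversions on both variants: with check_conversion a second output receives a 0/1 success flag, and without it the result itself is forced to zero when FPSCR's VXCVI bit reports an invalid convert. A scalar sketch of the assumed behaviour (not the emitted code):

#include <cmath>
#include <cstdint>

bool ConvertF64ToI64(double d, int64_t* out) {
  // VXCVI is raised for NaN and for values outside [-2^63, 2^63).
  if (std::isnan(d) || d < -9223372036854775808.0 ||
      d >= 9223372036854775808.0) {
    *out = 0;      // the no-check path isel-selects 0 into the result
    return false;  // the check path writes this flag to output 1 instead
  }
  *out = static_cast<int64_t>(d);
  return true;
}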
@@ -2270,189 +2278,118 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vspltb(dst, dst, Operand(7));
break;
}
-#define SHIFT_TO_CORRECT_LANE(starting_lane_nummber, lane_input, \
- lane_width_in_bytes, input_register) \
- int shift_bits = abs(lane_input - starting_lane_nummber) * \
- lane_width_in_bytes * kBitsPerByte; \
- if (shift_bits > 0) { \
- __ li(ip, Operand(shift_bits)); \
- __ mtvsrd(kScratchDoubleReg, ip); \
- __ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7)); \
- if (lane_input < starting_lane_nummber) { \
- __ vsro(kScratchDoubleReg, input_register, kScratchDoubleReg); \
- } else { \
- DCHECK(lane_input > starting_lane_nummber); \
- __ vslo(kScratchDoubleReg, input_register, kScratchDoubleReg); \
- } \
- input_register = kScratchDoubleReg; \
- }
case kPPC_F64x2ExtractLane: {
- int32_t lane = 1 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(0, lane, 8, src);
- __ mfvsrd(kScratchReg, src);
+ constexpr int lane_width_in_bytes = 8;
+ __ vextractd(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(kScratchReg, kScratchDoubleReg);
__ MovInt64ToDouble(i.OutputDoubleRegister(), kScratchReg);
break;
}
case kPPC_F32x4ExtractLane: {
- int32_t lane = 3 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(1, lane, 4, src)
- __ mfvsrwz(kScratchReg, src);
+ constexpr int lane_width_in_bytes = 4;
+ __ vextractuw(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(kScratchReg, kScratchDoubleReg);
__ MovIntToFloat(i.OutputDoubleRegister(), kScratchReg);
break;
}
case kPPC_I64x2ExtractLane: {
- int32_t lane = 1 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(0, lane, 8, src)
- __ mfvsrd(i.OutputRegister(), src);
+ constexpr int lane_width_in_bytes = 8;
+ __ vextractd(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
break;
}
case kPPC_I32x4ExtractLane: {
- int32_t lane = 3 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(1, lane, 4, src)
- __ mfvsrwz(i.OutputRegister(), src);
+ constexpr int lane_width_in_bytes = 4;
+ __ vextractuw(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
break;
}
case kPPC_I16x8ExtractLaneU: {
- int32_t lane = 7 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(2, lane, 2, src)
- __ mfvsrwz(r0, src);
- __ li(ip, Operand(16));
- __ srd(i.OutputRegister(), r0, ip);
+ constexpr int lane_width_in_bytes = 2;
+ __ vextractuh(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((7 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
break;
}
case kPPC_I16x8ExtractLaneS: {
- int32_t lane = 7 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(2, lane, 2, src)
- __ mfvsrwz(kScratchReg, src);
- __ sradi(i.OutputRegister(), kScratchReg, 16);
+ constexpr int lane_width_in_bytes = 2;
+ __ vextractuh(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((7 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(kScratchReg, kScratchDoubleReg);
+ __ extsh(i.OutputRegister(), kScratchReg);
break;
}
case kPPC_I8x16ExtractLaneU: {
- int32_t lane = 15 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(4, lane, 1, src)
- __ mfvsrwz(r0, src);
- __ li(ip, Operand(24));
- __ srd(i.OutputRegister(), r0, ip);
+ __ vextractub(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand(15 - i.InputInt8(1)));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
break;
}
case kPPC_I8x16ExtractLaneS: {
- int32_t lane = 15 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(4, lane, 1, src)
- __ mfvsrwz(kScratchReg, src);
- __ sradi(i.OutputRegister(), kScratchReg, 24);
- break;
- }
-#undef SHIFT_TO_CORRECT_LANE
-#define GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane, \
- lane_width_in_bytes) \
- uint64_t mask = 0; \
- for (int i = 0, j = 0; i <= kSimd128Size - 1; i++) { \
- mask <<= kBitsPerByte; \
- if (i >= lane * lane_width_in_bytes && \
- i < lane * lane_width_in_bytes + lane_width_in_bytes) { \
- mask |= replacement_value_byte_lane + j; \
- j++; \
- } else { \
- mask |= i; \
- } \
- if (i == (kSimd128Size / 2) - 1) { \
- __ mov(r0, Operand(mask)); \
- mask = 0; \
- } else if (i >= kSimd128Size - 1) { \
- __ mov(ip, Operand(mask)); \
- } \
- } \
- /* Need to maintain 16 byte alignment for lvx */ \
- __ mr(kScratchReg, sp); \
- __ ClearRightImm(sp, sp, Operand(base::bits::WhichPowerOfTwo(16))); \
- __ addi(sp, sp, Operand(-16)); \
- __ StoreP(ip, MemOperand(sp, 0)); \
- __ StoreP(r0, MemOperand(sp, 8)); \
- __ lvx(kScratchDoubleReg, MemOperand(r0, sp)); \
- __ mr(sp, kScratchReg);
+ __ vextractub(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand(15 - i.InputInt8(1)));
+ __ mfvsrd(kScratchReg, kScratchDoubleReg);
+ __ extsb(i.OutputRegister(), kScratchReg);
+ break;
+ }
case kPPC_F64x2ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 1 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 16;
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 8;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
+ Simd128Register dst = i.OutputSimd128Register();
__ MovDoubleToInt64(r0, i.InputDoubleRegister(2));
- __ mtvsrd(dst, r0);
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ __ mtvsrd(kScratchDoubleReg, r0);
+ __ vinsertd(dst, kScratchDoubleReg,
+ Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
break;
}
case kPPC_F32x4ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 3 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 20;
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 4;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
- __ MovFloatToInt(kScratchReg, i.InputDoubleRegister(2));
- __ mtvsrd(dst, kScratchReg);
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ MovFloatToInt(r0, i.InputDoubleRegister(2));
+ __ mtvsrd(kScratchDoubleReg, r0);
+ __ vinsertw(dst, kScratchDoubleReg,
+ Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
break;
}
case kPPC_I64x2ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 1 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 16;
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 8;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
- __ mtvsrd(dst, i.InputRegister(2));
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ mtvsrd(kScratchDoubleReg, i.InputRegister(2));
+ __ vinsertd(dst, kScratchDoubleReg,
+ Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
break;
}
case kPPC_I32x4ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 3 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 20;
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 4;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
- __ mtvsrd(dst, i.InputRegister(2));
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ mtvsrd(kScratchDoubleReg, i.InputRegister(2));
+ __ vinsertw(dst, kScratchDoubleReg,
+ Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
break;
}
case kPPC_I16x8ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 7 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 22;
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 2;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
- __ mtvsrd(dst, i.InputRegister(2));
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ mtvsrd(kScratchDoubleReg, i.InputRegister(2));
+ __ vinserth(dst, kScratchDoubleReg,
+ Operand((7 - i.InputInt8(1)) * lane_width_in_bytes));
break;
}
case kPPC_I8x16ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 15 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 23;
- constexpr int lane_width_in_bytes = 1;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
- __ mtvsrd(dst, i.InputRegister(2));
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ __ mtvsrd(kScratchDoubleReg, i.InputRegister(2));
+ __ vinsertb(dst, kScratchDoubleReg, Operand(15 - i.InputInt8(1)));
break;
}
-#undef GENERATE_REPLACE_LANE_MASK
case kPPC_F64x2Add: {
__ xvadddp(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
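The lane extract/insert rework above uses the Power9 vextract*/vinsert* forms in place of the old shift-into-place macros. The (1 - lane), (3 - lane), (7 - lane) arithmetic converts Wasm's little-endian lane numbers into the element numbering the vector instructions use, scaled by the lane width in bytes. A compile-time sketch of that mapping (an illustration, not V8 code):

constexpr int ToVectorLane(int wasm_lane, int lanes_per_vector) {
  return (lanes_per_vector - 1) - wasm_lane;  // e.g. 1 - lane for f64x2
}
static_assert(ToVectorLane(0, 2) == 1, "f64x2 lane 0 is element 1");
static_assert(ToVectorLane(3, 4) == 0, "f32x4 lane 3 is element 0");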
@@ -3248,51 +3185,58 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vperm(dst, src0, src1, kScratchDoubleReg);
break;
}
- case kPPC_I16x8AddSaturateS: {
+ case kPPC_I16x8AddSatS: {
__ vaddshs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I16x8SubSaturateS: {
+ case kPPC_I16x8SubSatS: {
__ vsubshs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I16x8AddSaturateU: {
+ case kPPC_I16x8AddSatU: {
__ vadduhs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I16x8SubSaturateU: {
+ case kPPC_I16x8SubSatU: {
__ vsubuhs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I8x16AddSaturateS: {
+ case kPPC_I8x16AddSatS: {
__ vaddsbs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I8x16SubSaturateS: {
+ case kPPC_I8x16SubSatS: {
__ vsubsbs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I8x16AddSaturateU: {
+ case kPPC_I8x16AddSatU: {
__ vaddubs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I8x16SubSaturateU: {
+ case kPPC_I8x16SubSatU: {
__ vsububs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kPPC_I8x16Swizzle: {
- // Reverse the input to match IBM lane numbering.
- Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1),
+ tempFPReg1 = i.ToSimd128Register(instr->TempAt(0)),
+ tempFPReg2 = i.ToSimd128Register(instr->TempAt(1));
+ // Saturate the indices to 5 bits. Input indices more than 31 should
+ // return 0.
+ __ xxspltib(tempFPReg2, Operand(31));
+ __ vminub(tempFPReg2, src1, tempFPReg2);
__ addi(sp, sp, Operand(-16));
- __ stxvd(i.InputSimd128Register(0), MemOperand(r0, sp));
+ __ stxvd(src0, MemOperand(r0, sp));
__ ldbrx(r0, MemOperand(r0, sp));
__ li(ip, Operand(8));
__ ldbrx(ip, MemOperand(ip, sp));
@@ -3302,8 +3246,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lxvd(kScratchDoubleReg, MemOperand(r0, sp));
__ addi(sp, sp, Operand(16));
__ vxor(tempFPReg1, tempFPReg1, tempFPReg1);
- __ vperm(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
- i.InputSimd128Register(1));
+ __ vperm(dst, kScratchDoubleReg, tempFPReg1, tempFPReg2);
break;
}
case kPPC_F64x2Qfma: {
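Wasm i8x16.swizzle requires out-of-range indices to produce 0. vperm consumes 5-bit indices across two 16-byte inputs, so clamping indices to 31 with vminub and supplying a zeroed register (tempFPReg1) as the second input makes indices 16..31, and anything larger, land on zero bytes. A scalar model of the lane rule (assumed semantics):

#include <cstdint>

void I8x16Swizzle(const uint8_t src[16], const uint8_t idx[16],
                  uint8_t dst[16]) {
  for (int i = 0; i < 16; ++i) {
    dst[i] = idx[i] < 16 ? src[idx[i]] : 0;  // out-of-range selects zero
  }
}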
@@ -3438,6 +3381,42 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xvrspi(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+ case kPPC_I32x4BitMask: {
+ __ mov(kScratchReg,
+ Operand(0x8080808000204060)); // Select 0 for the high bits.
+ __ mtvsrd(kScratchDoubleReg, kScratchReg);
+ __ vbpermq(kScratchDoubleReg, i.InputSimd128Register(0),
+ kScratchDoubleReg);
+ __ vextractub(kScratchDoubleReg, kScratchDoubleReg, Operand(6));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I16x8BitMask: {
+ __ mov(kScratchReg, Operand(0x10203040506070));
+ __ mtvsrd(kScratchDoubleReg, kScratchReg);
+ __ vbpermq(kScratchDoubleReg, i.InputSimd128Register(0),
+ kScratchDoubleReg);
+ __ vextractub(kScratchDoubleReg, kScratchDoubleReg, Operand(6));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16BitMask: {
+ Register temp = i.ToRegister(instr->TempAt(0));
+ __ mov(temp, Operand(0x8101820283038));
+ __ mov(ip, Operand(0x4048505860687078));
+ __ mtvsrdd(kScratchDoubleReg, temp, ip);
+ __ vbpermq(kScratchDoubleReg, i.InputSimd128Register(0),
+ kScratchDoubleReg);
+ __ vextractuh(kScratchDoubleReg, kScratchDoubleReg, Operand(6));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
+ break;
+ }
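
How the selector constants in the three BitMask cases above are built: vbpermq interprets each selector byte as a bit index into the 128-bit source, numbering bit 0 as the most significant bit, and any index >= 128 (e.g. 0x80) reads a zero. The sign bit of 32-bit lane k therefore sits at bit index 32*k, giving 0x00, 0x20, 0x40, 0x60 for I32x4. A small illustrative program deriving that constant (not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t sel = 0;
  for (int lane = 0; lane < 4; ++lane)
    sel = (sel << 8) | static_cast<uint64_t>(32 * lane);
  sel |= 0x8080808000000000ULL;  // pad unused slots with "read a zero"
  std::printf("0x%016llx\n", static_cast<unsigned long long>(sel));
  // Prints 0x8080808000204060, matching the I32x4BitMask mask above.
  return 0;
}

The I16x8 and I8x16 constants follow the same recipe with strides of 16 and 8 bits per lane.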
+ case kPPC_I32x4DotI16x8S: {
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vmsumshm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ break;
+ }
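
Scalar reference for the new kPPC_I32x4DotI16x8S lowering (vmsumshm with a zeroed accumulator): each 32-bit output lane is the sum of products of one adjacent pair of signed 16-bit lanes. Illustrative model, not V8 code:

#include <cstdint>

void DotI16x8S(const int16_t a[8], const int16_t b[8], int32_t out[4]) {
  for (int i = 0; i < 4; ++i)
    out[i] = int32_t{a[2 * i]} * b[2 * i] +
             int32_t{a[2 * i + 1]} * b[2 * i + 1];
}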
case kPPC_StoreCompressTagged: {
ASSEMBLE_STORE_INTEGER(StoreTaggedField, StoreTaggedFieldX);
break;
@@ -3885,7 +3864,10 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
void CodeGenerator::FinishCode() {}
-void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {
+ // __ EmitConstantPool();
+}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index fb5151ebd4..a4cda21d48 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -287,6 +287,8 @@ namespace compiler {
V(PPC_I32x4SConvertI16x8High) \
V(PPC_I32x4UConvertI16x8Low) \
V(PPC_I32x4UConvertI16x8High) \
+ V(PPC_I32x4BitMask) \
+ V(PPC_I32x4DotI16x8S) \
V(PPC_F32x4Qfma) \
V(PPC_F32x4Qfms) \
V(PPC_I16x8Splat) \
@@ -318,11 +320,12 @@ namespace compiler {
V(PPC_I16x8SConvertI8x16High) \
V(PPC_I16x8UConvertI8x16Low) \
V(PPC_I16x8UConvertI8x16High) \
- V(PPC_I16x8AddSaturateS) \
- V(PPC_I16x8SubSaturateS) \
- V(PPC_I16x8AddSaturateU) \
- V(PPC_I16x8SubSaturateU) \
+ V(PPC_I16x8AddSatS) \
+ V(PPC_I16x8SubSatS) \
+ V(PPC_I16x8AddSatU) \
+ V(PPC_I16x8SubSatU) \
V(PPC_I16x8RoundingAverageU) \
+ V(PPC_I16x8BitMask) \
V(PPC_I8x16Splat) \
V(PPC_I8x16ExtractLaneU) \
V(PPC_I8x16ExtractLaneS) \
@@ -347,13 +350,14 @@ namespace compiler {
V(PPC_I8x16Abs) \
V(PPC_I8x16SConvertI16x8) \
V(PPC_I8x16UConvertI16x8) \
- V(PPC_I8x16AddSaturateS) \
- V(PPC_I8x16SubSaturateS) \
- V(PPC_I8x16AddSaturateU) \
- V(PPC_I8x16SubSaturateU) \
+ V(PPC_I8x16AddSatS) \
+ V(PPC_I8x16SubSatS) \
+ V(PPC_I8x16AddSatU) \
+ V(PPC_I8x16SubSatU) \
V(PPC_I8x16RoundingAverageU) \
V(PPC_I8x16Shuffle) \
V(PPC_I8x16Swizzle) \
+ V(PPC_I8x16BitMask) \
V(PPC_V64x2AnyTrue) \
V(PPC_V32x4AnyTrue) \
V(PPC_V16x8AnyTrue) \
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index 8beaa8539c..87ea3f3219 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -212,6 +212,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I32x4SConvertI16x8High:
case kPPC_I32x4UConvertI16x8Low:
case kPPC_I32x4UConvertI16x8High:
+ case kPPC_I32x4BitMask:
+ case kPPC_I32x4DotI16x8S:
case kPPC_I16x8Splat:
case kPPC_I16x8ExtractLaneU:
case kPPC_I16x8ExtractLaneS:
@@ -241,11 +243,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I16x8SConvertI8x16High:
case kPPC_I16x8UConvertI8x16Low:
case kPPC_I16x8UConvertI8x16High:
- case kPPC_I16x8AddSaturateS:
- case kPPC_I16x8SubSaturateS:
- case kPPC_I16x8AddSaturateU:
- case kPPC_I16x8SubSaturateU:
+ case kPPC_I16x8AddSatS:
+ case kPPC_I16x8SubSatS:
+ case kPPC_I16x8AddSatU:
+ case kPPC_I16x8SubSatU:
case kPPC_I16x8RoundingAverageU:
+ case kPPC_I16x8BitMask:
case kPPC_I8x16Splat:
case kPPC_I8x16ExtractLaneU:
case kPPC_I8x16ExtractLaneS:
@@ -270,13 +273,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I8x16Abs:
case kPPC_I8x16SConvertI16x8:
case kPPC_I8x16UConvertI16x8:
- case kPPC_I8x16AddSaturateS:
- case kPPC_I8x16SubSaturateS:
- case kPPC_I8x16AddSaturateU:
- case kPPC_I8x16SubSaturateU:
+ case kPPC_I8x16AddSatS:
+ case kPPC_I8x16SubSatS:
+ case kPPC_I8x16AddSatU:
+ case kPPC_I8x16SubSatU:
case kPPC_I8x16RoundingAverageU:
case kPPC_I8x16Shuffle:
case kPPC_I8x16Swizzle:
+ case kPPC_I8x16BitMask:
case kPPC_V64x2AnyTrue:
case kPPC_V32x4AnyTrue:
case kPPC_V16x8AnyTrue:
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index 0c61821cf5..9c66d6f733 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -314,7 +314,7 @@ void InstructionSelector::VisitStore(Node* node) {
CHECK_EQ(is_atomic, false);
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
ImmediateMode mode = kInt16Imm;
switch (rep) {
case MachineRepresentation::kFloat32:
@@ -359,7 +359,6 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
if (g.CanBeImmediate(offset, mode)) {
@@ -465,7 +464,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
Int32BinopMatcher m(node);
int mb = 0;
int me = 0;
- if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
+ if (m.right().HasResolvedValue() &&
+ IsContiguousMask32(m.right().ResolvedValue(), &mb, &me)) {
int sh = 0;
Node* left = m.left().node();
if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
@@ -474,7 +474,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().IsInRange(0, 31)) {
left = mleft.left().node();
- sh = mleft.right().Value();
+ sh = mleft.right().ResolvedValue();
if (m.left().IsWord32Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 31 - sh) mb = 31 - sh;
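
Background for the mask logic in these hunks: the HasValue/Value matcher API is being renamed to HasResolvedValue/ResolvedValue, and the absorbed "and" is only legal when the mask is one unbroken run of 1-bits, which is what IsContiguousMask32/64 test before folding the operation into a single rlwinm/rldic rotate-and-mask. A sketch of just the contiguity test (hypothetical helper; it does not reproduce the mb/me encoding the real helper returns):

#include <cstdint>

bool IsOneRunOfOnes(uint32_t value) {
  if (value == 0) return false;
  // Drop trailing zeros (GCC/Clang builtin); a single contiguous run
  // of 1-bits then has no internal holes, so run + 1 clears it entirely.
  uint32_t run = value >> __builtin_ctz(value);
  return (run & (run + 1)) == 0;
}

// IsOneRunOfOnes(0x00FF0000) -> true  (foldable into one rlwinm)
// IsOneRunOfOnes(0x00F0F000) -> false (two runs; keep the real "and")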
@@ -503,7 +503,8 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher m(node);
int mb = 0;
int me = 0;
- if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
+ if (m.right().HasResolvedValue() &&
+ IsContiguousMask64(m.right().ResolvedValue(), &mb, &me)) {
int sh = 0;
Node* left = m.left().node();
if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
@@ -512,7 +513,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().IsInRange(0, 63)) {
left = mleft.left().node();
- sh = mleft.right().Value();
+ sh = mleft.right().ResolvedValue();
if (m.left().IsWord64Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
@@ -626,11 +627,11 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
// Try to absorb logical-and into rlwinm
Int32BinopMatcher mleft(m.left().node());
- int sh = m.right().Value();
+ int sh = m.right().ResolvedValue();
int mb;
int me;
- if (mleft.right().HasValue() &&
- IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
+ if (mleft.right().HasResolvedValue() &&
+ IsContiguousMask32(mleft.right().ResolvedValue() << sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
if (mb >= me) {
@@ -652,11 +653,11 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
// Try to absorb logical-and into rldic
Int64BinopMatcher mleft(m.left().node());
- int sh = m.right().Value();
+ int sh = m.right().ResolvedValue();
int mb;
int me;
- if (mleft.right().HasValue() &&
- IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
+ if (mleft.right().HasResolvedValue() &&
+ IsContiguousMask64(mleft.right().ResolvedValue() << sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
if (mb >= me) {
@@ -695,11 +696,12 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
// Try to absorb logical-and into rlwinm
Int32BinopMatcher mleft(m.left().node());
- int sh = m.right().Value();
+ int sh = m.right().ResolvedValue();
int mb;
int me;
- if (mleft.right().HasValue() &&
- IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+ if (mleft.right().HasResolvedValue() &&
+ IsContiguousMask32((uint32_t)(mleft.right().ResolvedValue()) >> sh, &mb,
+ &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 31 - sh) mb = 31 - sh;
sh = (32 - sh) & 0x1F;
@@ -721,11 +723,12 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
// Try to absorb logical-and into rldic
Int64BinopMatcher mleft(m.left().node());
- int sh = m.right().Value();
+ int sh = m.right().ResolvedValue();
int mb;
int me;
- if (mleft.right().HasValue() &&
- IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+ if (mleft.right().HasResolvedValue() &&
+ IsContiguousMask64((uint64_t)(mleft.right().ResolvedValue()) >> sh, &mb,
+ &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
sh = (64 - sh) & 0x3F;
@@ -842,7 +845,7 @@ void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
// no register aliasing of input registers with output registers.
Int32Matcher m(node->InputAt(2));
InstructionOperand shift_operand;
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
shift_operand = g.UseImmediate(m.node());
} else {
shift_operand = g.UseUniqueRegister(m.node());
@@ -898,8 +901,8 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
Node* displacement = mleft.displacement();
if (displacement != nullptr) {
Int64Matcher mdisplacement(displacement);
- DCHECK(mdisplacement.HasValue());
- offset = mdisplacement.Value();
+ DCHECK(mdisplacement.HasResolvedValue());
+ offset = mdisplacement.ResolvedValue();
}
offset = SmiWordOffset(offset);
if (g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)) {
@@ -1951,7 +1954,7 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
@@ -1965,13 +1968,12 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
opcode = kPPC_AtomicExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = kPPC_AtomicExchangeUint8;
@@ -1983,7 +1985,6 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
opcode = kPPC_AtomicExchangeWord64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
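
On the repeated "ArchOpcode opcode = kArchNop;" -> "ArchOpcode opcode;" change in this file: UNREACHABLE() is a noreturn construct, so once the trailing "return" after it is gone the compiler can prove the variable is assigned on every path that reaches its use, and the dummy initializer only masked that guarantee. A standalone illustration (not V8 code):

#include <cstdlib>

[[noreturn]] void Unreachable() { std::abort(); }  // stand-in for UNREACHABLE()

int PickOpcode(bool byte_op) {
  int opcode;       // no dummy initializer needed
  if (byte_op) {
    opcode = 1;
  } else {
    Unreachable();  // control cannot fall out of this branch
  }
  return opcode;    // provably assigned on the only live path
}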
@@ -2015,7 +2016,7 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2028,14 +2029,13 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
opcode = kPPC_AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Uint8()) {
opcode = kPPC_AtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
@@ -2046,7 +2046,6 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
opcode = kPPC_AtomicCompareExchangeWord64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
@@ -2062,7 +2061,7 @@ void VisitAtomicBinaryOperation(InstructionSelector* selector, Node* node,
Node* value = node->InputAt(2);
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = int8_op;
@@ -2082,7 +2081,6 @@ void VisitAtomicBinaryOperation(InstructionSelector* selector, Node* node,
opcode = uint64_op;
} else {
UNREACHABLE();
- return;
}
AddressingMode addressing_mode = kMode_MRR;
@@ -2191,6 +2189,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4GeS) \
V(I32x4GtU) \
V(I32x4GeU) \
+ V(I32x4DotI16x8S) \
V(I16x8Add) \
V(I16x8AddHoriz) \
V(I16x8Sub) \
@@ -2207,10 +2206,10 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8GeU) \
V(I16x8SConvertI32x4) \
V(I16x8UConvertI32x4) \
- V(I16x8AddSaturateS) \
- V(I16x8SubSaturateS) \
- V(I16x8AddSaturateU) \
- V(I16x8SubSaturateU) \
+ V(I16x8AddSatS) \
+ V(I16x8SubSatS) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
V(I16x8RoundingAverageU) \
V(I8x16Add) \
V(I8x16Sub) \
@@ -2227,10 +2226,10 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16GeU) \
V(I8x16SConvertI16x8) \
V(I8x16UConvertI16x8) \
- V(I8x16AddSaturateS) \
- V(I8x16SubSaturateS) \
- V(I8x16AddSaturateU) \
- V(I8x16SubSaturateU) \
+ V(I8x16AddSatS) \
+ V(I8x16SubSatS) \
+ V(I8x16AddSatU) \
+ V(I8x16SubSatU) \
V(I8x16RoundingAverageU) \
V(I8x16Swizzle) \
V(S128And) \
@@ -2323,13 +2322,13 @@ SIMD_VISIT_EXTRACT_LANE(I8x16, U)
SIMD_VISIT_EXTRACT_LANE(I8x16, S)
#undef SIMD_VISIT_EXTRACT_LANE
-#define SIMD_VISIT_REPLACE_LANE(Type) \
- void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
- PPCOperandGenerator g(this); \
- int32_t lane = OpParameter<int32_t>(node->op()); \
- Emit(kPPC_##Type##ReplaceLane, g.DefineAsRegister(node), \
- g.UseUniqueRegister(node->InputAt(0)), g.UseImmediate(lane), \
- g.UseUniqueRegister(node->InputAt(1))); \
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ PPCOperandGenerator g(this); \
+ int32_t lane = OpParameter<int32_t>(node->op()); \
+ Emit(kPPC_##Type##ReplaceLane, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane), \
+ g.UseRegister(node->InputAt(1))); \
}
SIMD_TYPES(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE
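
The constraint change above, expanded for one type so the diff is easier to read (this is the literal macro expansion): the output is now tied to input 0 with DefineSameAsFirst because the new code-generator sequences (vinsertb above, and presumably its wider siblings for the other lane sizes) modify dst in place instead of building the result through vperm in a scratch register.

void InstructionSelector::VisitI8x16ReplaceLane(Node* node) {
  PPCOperandGenerator g(this);
  int32_t lane = OpParameter<int32_t>(node->op());
  Emit(kPPC_I8x16ReplaceLane, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
       g.UseRegister(node->InputAt(1)));
}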
@@ -2378,6 +2377,18 @@ SIMD_SHIFT_LIST(SIMD_VISIT_SHIFT)
SIMD_BOOL_LIST(SIMD_VISIT_BOOL)
#undef SIMD_VISIT_BOOL
#undef SIMD_BOOL_LIST
+
+#define SIMD_VISIT_BITMASK(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ PPCOperandGenerator g(this); \
+ InstructionOperand temps[] = {g.TempRegister()}; \
+ Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \
+ }
+SIMD_VISIT_BITMASK(I8x16BitMask)
+SIMD_VISIT_BITMASK(I16x8BitMask)
+SIMD_VISIT_BITMASK(I32x4BitMask)
+#undef SIMD_VISIT_BITMASK
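
Expanded form of SIMD_VISIT_BITMASK(I32x4BitMask), written out for readability (literal macro expansion):

void InstructionSelector::VisitI32x4BitMask(Node* node) {
  PPCOperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister()};
  Emit(kPPC_I32x4BitMask, g.DefineAsRegister(node),
       g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps);
}

Note that only the I8x16BitMask code-generator case consumes the temp register (via instr->TempAt(0)); the I16x8 and I32x4 cases get by with kScratchReg, but the shared macro reserves a temp for all three.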
#undef SIMD_TYPES
void InstructionSelector::VisitI8x16Shuffle(Node* node) {
@@ -2419,12 +2430,6 @@ void InstructionSelector::VisitS128Select(Node* node) {
void InstructionSelector::VisitS128Const(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16BitMask(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8BitMask(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4BitMask(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::EmitPrepareResults(
ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
Node* node) {
diff --git a/deps/v8/src/compiler/backend/register-allocator-verifier.h b/deps/v8/src/compiler/backend/register-allocator-verifier.h
index 6a99775e57..11bd4924f4 100644
--- a/deps/v8/src/compiler/backend/register-allocator-verifier.h
+++ b/deps/v8/src/compiler/backend/register-allocator-verifier.h
@@ -54,14 +54,14 @@ enum AssessmentKind { Final, Pending };
class Assessment : public ZoneObject {
public:
+ Assessment(const Assessment&) = delete;
+ Assessment& operator=(const Assessment&) = delete;
+
AssessmentKind kind() const { return kind_; }
protected:
explicit Assessment(AssessmentKind kind) : kind_(kind) {}
AssessmentKind kind_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(Assessment);
};
// PendingAssessments are associated to operands coming from the multiple
@@ -80,6 +80,9 @@ class PendingAssessment final : public Assessment {
operand_(operand),
aliases_(zone) {}
+ PendingAssessment(const PendingAssessment&) = delete;
+ PendingAssessment& operator=(const PendingAssessment&) = delete;
+
static const PendingAssessment* cast(const Assessment* assessment) {
CHECK(assessment->kind() == Pending);
return static_cast<const PendingAssessment*>(assessment);
@@ -99,8 +102,6 @@ class PendingAssessment final : public Assessment {
const InstructionBlock* const origin_;
InstructionOperand operand_;
ZoneSet<int> aliases_;
-
- DISALLOW_COPY_AND_ASSIGN(PendingAssessment);
};
// FinalAssessments are associated to operands that we know to be a certain
@@ -109,6 +110,8 @@ class FinalAssessment final : public Assessment {
public:
explicit FinalAssessment(int virtual_register)
: Assessment(Final), virtual_register_(virtual_register) {}
+ FinalAssessment(const FinalAssessment&) = delete;
+ FinalAssessment& operator=(const FinalAssessment&) = delete;
int virtual_register() const { return virtual_register_; }
static const FinalAssessment* cast(const Assessment* assessment) {
@@ -118,8 +121,6 @@ class FinalAssessment final : public Assessment {
private:
int virtual_register_;
-
- DISALLOW_COPY_AND_ASSIGN(FinalAssessment);
};
struct OperandAsKeyLess {
@@ -140,6 +141,9 @@ class BlockAssessments : public ZoneObject {
stale_ref_stack_slots_(zone),
spill_slot_delta_(spill_slot_delta),
zone_(zone) {}
+ BlockAssessments(const BlockAssessments&) = delete;
+ BlockAssessments& operator=(const BlockAssessments&) = delete;
+
void Drop(InstructionOperand operand) {
map_.erase(operand);
stale_ref_stack_slots_.erase(operand);
@@ -188,8 +192,6 @@ class BlockAssessments : public ZoneObject {
OperandSet stale_ref_stack_slots_;
int spill_slot_delta_;
Zone* zone_;
-
- DISALLOW_COPY_AND_ASSIGN(BlockAssessments);
};
class RegisterAllocatorVerifier final : public ZoneObject {
@@ -197,6 +199,9 @@ class RegisterAllocatorVerifier final : public ZoneObject {
RegisterAllocatorVerifier(Zone* zone, const RegisterConfiguration* config,
const InstructionSequence* sequence,
const Frame* frame);
+ RegisterAllocatorVerifier(const RegisterAllocatorVerifier&) = delete;
+ RegisterAllocatorVerifier& operator=(const RegisterAllocatorVerifier&) =
+ delete;
void VerifyAssignment(const char* caller_info);
void VerifyGapMoves();
@@ -290,8 +295,6 @@ class RegisterAllocatorVerifier final : public ZoneObject {
int spill_slot_delta_;
// TODO(chromium:725559): remove after we understand this bug's root cause.
const char* caller_info_ = nullptr;
-
- DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorVerifier);
};
} // namespace compiler
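
The pattern this header (and the register-allocator headers below) migrates to, in isolation: the DISALLOW_COPY_AND_ASSIGN macro declared a private copy constructor and copy assignment operator; C++11 deleted members state the same intent at the top of the class and produce clearer diagnostics at the use site. A minimal standalone example:

class NonCopyable {
 public:
  NonCopyable() = default;
  NonCopyable(const NonCopyable&) = delete;
  NonCopyable& operator=(const NonCopyable&) = delete;
};

// NonCopyable a;
// NonCopyable b = a;  // error: use of deleted copy constructor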
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index 30724647c6..c0905b945f 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -1001,8 +1001,8 @@ void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
// that each new use interval either precedes, intersects with or touches
// the last added interval.
DCHECK(start <= first_interval_->end());
- first_interval_->set_start(Min(start, first_interval_->start()));
- first_interval_->set_end(Max(end, first_interval_->end()));
+ first_interval_->set_start(std::min(start, first_interval_->start()));
+ first_interval_->set_end(std::max(end, first_interval_->end()));
}
}
}
@@ -3385,7 +3385,7 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
for (auto active : active_live_ranges()) {
split_conflicting(range, active, [this](LiveRange* updated) {
next_active_ranges_change_ =
- Min(updated->End(), next_active_ranges_change_);
+ std::min(updated->End(), next_active_ranges_change_);
});
}
for (int reg = 0; reg < num_registers(); ++reg) {
@@ -3396,7 +3396,7 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
for (auto inactive : inactive_live_ranges(reg)) {
split_conflicting(range, inactive, [this](LiveRange* updated) {
next_inactive_ranges_change_ =
- Min(updated->End(), next_inactive_ranges_change_);
+ std::min(updated->End(), next_inactive_ranges_change_);
});
}
}
@@ -4129,9 +4129,9 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
LifetimePosition::GapFromInstructionIndex(0);
} else {
use_pos[aliased_reg] =
- Min(block_pos[aliased_reg],
- range->NextLifetimePositionRegisterIsBeneficial(
- current->Start()));
+ std::min(block_pos[aliased_reg],
+ range->NextLifetimePositionRegisterIsBeneficial(
+ current->Start()));
}
}
}
@@ -4157,10 +4157,10 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
if (kSimpleFPAliasing || !check_fp_aliasing()) {
if (is_fixed) {
- block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
- use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+ block_pos[cur_reg] = std::min(block_pos[cur_reg], next_intersection);
+ use_pos[cur_reg] = std::min(block_pos[cur_reg], use_pos[cur_reg]);
} else {
- use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+ use_pos[cur_reg] = std::min(use_pos[cur_reg], next_intersection);
}
} else {
int alias_base_index = -1;
@@ -4171,11 +4171,12 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
int aliased_reg = alias_base_index + aliases;
if (is_fixed) {
block_pos[aliased_reg] =
- Min(block_pos[aliased_reg], next_intersection);
+ std::min(block_pos[aliased_reg], next_intersection);
use_pos[aliased_reg] =
- Min(block_pos[aliased_reg], use_pos[aliased_reg]);
+ std::min(block_pos[aliased_reg], use_pos[aliased_reg]);
} else {
- use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
+ use_pos[aliased_reg] =
+ std::min(use_pos[aliased_reg], next_intersection);
}
}
}
@@ -4206,8 +4207,9 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current,
if (spill_mode == SpillMode::kSpillDeferred) {
InstructionBlock* deferred_block =
code()->GetInstructionBlock(current->Start().ToInstructionIndex());
- new_end = Min(new_end, LifetimePosition::GapFromInstructionIndex(
- LastDeferredInstructionIndex(deferred_block)));
+ new_end =
+ std::min(new_end, LifetimePosition::GapFromInstructionIndex(
+ LastDeferredInstructionIndex(deferred_block)));
}
// We couldn't spill until the next register use. Split before the register
@@ -4315,7 +4317,7 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current,
if (next_pos == nullptr) {
SpillAfter(range, split_pos, spill_mode);
} else {
- next_intersection = Min(next_intersection, next_pos->pos());
+ next_intersection = std::min(next_intersection, next_pos->pos());
SpillBetween(range, split_pos, next_intersection, spill_mode);
}
it = InactiveToHandled(it);
@@ -4407,17 +4409,18 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
// second part, as that likely is the current position of the register
// allocator and we cannot add ranges to unhandled that start before
// the current position.
- LifetimePosition split_start = Max(second_part->Start().End(), until);
+ LifetimePosition split_start = std::max(second_part->Start().End(), until);
// If end is an actual use (which it typically is) we have to split
// so that there is a gap before so that we have space for moving the
// value into its position.
// However, if we have no choice, split right where asked.
- LifetimePosition third_part_end = Max(split_start, end.PrevStart().End());
+ LifetimePosition third_part_end =
+ std::max(split_start, end.PrevStart().End());
// Instead of splitting right after or even before the block boundary,
// split on the boundary to avoid extra moves.
if (data()->IsBlockBoundary(end.Start())) {
- third_part_end = Max(split_start, end.Start());
+ third_part_end = std::max(split_start, end.Start());
}
LiveRange* third_part =
diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h
index 87c0afbcfc..858fac8a4e 100644
--- a/deps/v8/src/compiler/backend/register-allocator.h
+++ b/deps/v8/src/compiler/backend/register-allocator.h
@@ -185,6 +185,10 @@ class TopLevelLiveRange;
class TopTierRegisterAllocationData final : public RegisterAllocationData {
public:
+ TopTierRegisterAllocationData(const TopTierRegisterAllocationData&) = delete;
+ TopTierRegisterAllocationData& operator=(
+ const TopTierRegisterAllocationData&) = delete;
+
static const TopTierRegisterAllocationData* cast(
const RegisterAllocationData* data) {
DCHECK_EQ(data->type(), Type::kTopTier);
@@ -374,8 +378,6 @@ class TopTierRegisterAllocationData final : public RegisterAllocationData {
ZoneVector<ZoneVector<LiveRange*>> spill_state_;
RegisterAllocationFlags flags_;
TickCounter* const tick_counter_;
-
- DISALLOW_COPY_AND_ASSIGN(TopTierRegisterAllocationData);
};
// Representation of the non-empty interval [start,end[.
@@ -385,6 +387,8 @@ class UseInterval final : public ZoneObject {
: start_(start), end_(end), next_(nullptr) {
DCHECK(start < end);
}
+ UseInterval(const UseInterval&) = delete;
+ UseInterval& operator=(const UseInterval&) = delete;
LifetimePosition start() const { return start_; }
void set_start(LifetimePosition start) { start_ = start; }
@@ -431,8 +435,6 @@ class UseInterval final : public ZoneObject {
LifetimePosition start_;
LifetimePosition end_;
UseInterval* next_;
-
- DISALLOW_COPY_AND_ASSIGN(UseInterval);
};
enum class UsePositionType : uint8_t {
@@ -456,6 +458,8 @@ class V8_EXPORT_PRIVATE UsePosition final
public:
UsePosition(LifetimePosition pos, InstructionOperand* operand, void* hint,
UsePositionHintType hint_type);
+ UsePosition(const UsePosition&) = delete;
+ UsePosition& operator=(const UsePosition&) = delete;
InstructionOperand* operand() const { return operand_; }
bool HasOperand() const { return operand_ != nullptr; }
@@ -507,8 +511,6 @@ class V8_EXPORT_PRIVATE UsePosition final
UsePosition* next_;
LifetimePosition const pos_;
uint32_t flags_;
-
- DISALLOW_COPY_AND_ASSIGN(UsePosition);
};
class SpillRange;
@@ -520,6 +522,9 @@ class LiveRangeBundle;
// intervals over the instruction ordering.
class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
public:
+ LiveRange(const LiveRange&) = delete;
+ LiveRange& operator=(const LiveRange&) = delete;
+
UseInterval* first_interval() const { return first_interval_; }
UsePosition* first_pos() const { return first_pos_; }
TopLevelLiveRange* TopLevel() { return top_level_; }
@@ -713,8 +718,6 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
LiveRangeBundle* bundle_ = nullptr;
// Next interval start, relative to the current linear scan position.
LifetimePosition next_start_;
-
- DISALLOW_COPY_AND_ASSIGN(LiveRange);
};
struct LiveRangeOrdering {
@@ -790,6 +793,9 @@ class LiveRangeBundle : public ZoneObject {
class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
public:
explicit TopLevelLiveRange(int vreg, MachineRepresentation rep);
+ TopLevelLiveRange(const TopLevelLiveRange&) = delete;
+ TopLevelLiveRange& operator=(const TopLevelLiveRange&) = delete;
+
int spill_start_index() const { return spill_start_index_; }
bool IsFixed() const { return vreg_ < 0; }
@@ -825,7 +831,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
bits_ = HasSlotUseField::update(bits_, SlotUseKind::kNoSlotUse);
}
void register_slot_use(SlotUseKind value) {
- bits_ = HasSlotUseField::update(bits_, Max(slot_use_kind(), value));
+ bits_ = HasSlotUseField::update(bits_, std::max(slot_use_kind(), value));
}
SlotUseKind slot_use_kind() const { return HasSlotUseField::decode(bits_); }
@@ -895,7 +901,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
InstructionOperand* operand);
void SetSpillOperand(InstructionOperand* operand);
void SetSpillStartIndex(int start) {
- spill_start_index_ = Min(start, spill_start_index_);
+ spill_start_index_ = std::min(start, spill_start_index_);
}
// Omits any moves from spill_move_insertion_locations_ that can be skipped.
@@ -1046,8 +1052,6 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
int spill_start_index_;
UsePosition* last_pos_;
LiveRange* last_child_covers_;
-
- DISALLOW_COPY_AND_ASSIGN(TopLevelLiveRange);
};
struct PrintableLiveRange {
@@ -1062,6 +1066,8 @@ class SpillRange final : public ZoneObject {
public:
static const int kUnassignedSlot = -1;
SpillRange(TopLevelLiveRange* range, Zone* zone);
+ SpillRange(const SpillRange&) = delete;
+ SpillRange& operator=(const SpillRange&) = delete;
UseInterval* interval() const { return use_interval_; }
@@ -1096,8 +1102,6 @@ class SpillRange final : public ZoneObject {
LifetimePosition end_position_;
int assigned_slot_;
int byte_width_;
-
- DISALLOW_COPY_AND_ASSIGN(SpillRange);
};
class LiveRangeBound {
@@ -1106,6 +1110,8 @@ class LiveRangeBound {
: range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
DCHECK(!range->IsEmpty());
}
+ LiveRangeBound(const LiveRangeBound&) = delete;
+ LiveRangeBound& operator=(const LiveRangeBound&) = delete;
bool CanCover(LifetimePosition position) {
return start_ <= position && position < end_;
@@ -1115,9 +1121,6 @@ class LiveRangeBound {
const LifetimePosition start_;
const LifetimePosition end_;
const bool skip_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
};
struct FindResult {
@@ -1128,6 +1131,9 @@ struct FindResult {
class LiveRangeBoundArray {
public:
LiveRangeBoundArray() : length_(0), start_(nullptr) {}
+ LiveRangeBoundArray(const LiveRangeBoundArray&) = delete;
+ LiveRangeBoundArray& operator=(const LiveRangeBoundArray&) = delete;
+
bool ShouldInitialize() { return start_ == nullptr; }
void Initialize(Zone* zone, TopLevelLiveRange* range);
LiveRangeBound* Find(const LifetimePosition position) const;
@@ -1140,14 +1146,15 @@ class LiveRangeBoundArray {
private:
size_t length_;
LiveRangeBound* start_;
-
- DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
};
class LiveRangeFinder {
public:
explicit LiveRangeFinder(const TopTierRegisterAllocationData* data,
Zone* zone);
+ LiveRangeFinder(const LiveRangeFinder&) = delete;
+ LiveRangeFinder& operator=(const LiveRangeFinder&) = delete;
+
LiveRangeBoundArray* ArrayFor(int operand_index);
private:
@@ -1155,13 +1162,13 @@ class LiveRangeFinder {
const int bounds_length_;
LiveRangeBoundArray* const bounds_;
Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
};
class ConstraintBuilder final : public ZoneObject {
public:
explicit ConstraintBuilder(TopTierRegisterAllocationData* data);
+ ConstraintBuilder(const ConstraintBuilder&) = delete;
+ ConstraintBuilder& operator=(const ConstraintBuilder&) = delete;
// Phase 1: insert moves to account for fixed register operands.
void MeetRegisterConstraints();
@@ -1185,14 +1192,14 @@ class ConstraintBuilder final : public ZoneObject {
void ResolvePhis(const InstructionBlock* block);
TopTierRegisterAllocationData* const data_;
-
- DISALLOW_COPY_AND_ASSIGN(ConstraintBuilder);
};
class LiveRangeBuilder final : public ZoneObject {
public:
explicit LiveRangeBuilder(TopTierRegisterAllocationData* data,
Zone* local_zone);
+ LiveRangeBuilder(const LiveRangeBuilder&) = delete;
+ LiveRangeBuilder& operator=(const LiveRangeBuilder&) = delete;
// Phase 3: compute liveness of all virtual registers.
void BuildLiveRanges();
@@ -1264,8 +1271,6 @@ class LiveRangeBuilder final : public ZoneObject {
}
TopTierRegisterAllocationData* const data_;
ZoneMap<InstructionOperand*, UsePosition*> phi_hints_;
-
- DISALLOW_COPY_AND_ASSIGN(LiveRangeBuilder);
};
class BundleBuilder final : public ZoneObject {
@@ -1284,6 +1289,8 @@ class BundleBuilder final : public ZoneObject {
class RegisterAllocator : public ZoneObject {
public:
RegisterAllocator(TopTierRegisterAllocationData* data, RegisterKind kind);
+ RegisterAllocator(const RegisterAllocator&) = delete;
+ RegisterAllocator& operator=(const RegisterAllocator&) = delete;
protected:
using SpillMode = TopTierRegisterAllocationData::SpillMode;
@@ -1352,14 +1359,14 @@ class RegisterAllocator : public ZoneObject {
private:
bool no_combining_;
-
- DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
};
class LinearScanAllocator final : public RegisterAllocator {
public:
LinearScanAllocator(TopTierRegisterAllocationData* data, RegisterKind kind,
Zone* local_zone);
+ LinearScanAllocator(const LinearScanAllocator&) = delete;
+ LinearScanAllocator& operator=(const LinearScanAllocator&) = delete;
// Phase 4: compute register assignments.
void AllocateRegisters();
@@ -1506,13 +1513,13 @@ class LinearScanAllocator final : public RegisterAllocator {
#ifdef DEBUG
LifetimePosition allocation_finger_;
#endif
-
- DISALLOW_COPY_AND_ASSIGN(LinearScanAllocator);
};
class OperandAssigner final : public ZoneObject {
public:
explicit OperandAssigner(TopTierRegisterAllocationData* data);
+ OperandAssigner(const OperandAssigner&) = delete;
+ OperandAssigner& operator=(const OperandAssigner&) = delete;
// Phase 5: final decision on spilling mode.
void DecideSpillingMode();
@@ -1527,13 +1534,13 @@ class OperandAssigner final : public ZoneObject {
TopTierRegisterAllocationData* data() const { return data_; }
TopTierRegisterAllocationData* const data_;
-
- DISALLOW_COPY_AND_ASSIGN(OperandAssigner);
};
class ReferenceMapPopulator final : public ZoneObject {
public:
explicit ReferenceMapPopulator(TopTierRegisterAllocationData* data);
+ ReferenceMapPopulator(const ReferenceMapPopulator&) = delete;
+ ReferenceMapPopulator& operator=(const ReferenceMapPopulator&) = delete;
// Phase 10: compute values for pointer maps.
void PopulateReferenceMaps();
@@ -1544,8 +1551,6 @@ class ReferenceMapPopulator final : public ZoneObject {
bool SafePointsAreInOrder() const;
TopTierRegisterAllocationData* const data_;
-
- DISALLOW_COPY_AND_ASSIGN(ReferenceMapPopulator);
};
class LiveRangeBoundArray;
@@ -1559,6 +1564,8 @@ class LiveRangeBoundArray;
class LiveRangeConnector final : public ZoneObject {
public:
explicit LiveRangeConnector(TopTierRegisterAllocationData* data);
+ LiveRangeConnector(const LiveRangeConnector&) = delete;
+ LiveRangeConnector& operator=(const LiveRangeConnector&) = delete;
// Phase 8: reconnect split ranges with moves, when the control flow
// between the ranges is trivial (no branches).
@@ -1587,8 +1594,6 @@ class LiveRangeConnector final : public ZoneObject {
Zone* temp_zone);
TopTierRegisterAllocationData* const data_;
-
- DISALLOW_COPY_AND_ASSIGN(LiveRangeConnector);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index f3ab25630f..4b51bb74b7 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -1378,7 +1378,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasRegisterInput(instr, 0)) {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ CallCodeObject(reg);
} else {
@@ -1424,7 +1424,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasRegisterInput(instr, 0)) {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ JumpCodeObject(reg);
} else {
@@ -1459,7 +1459,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ Jump(reg);
frame_access_state()->ClearSPDelta();
@@ -1588,8 +1588,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- CodeGenResult result = AssembleDeoptimizerCall(exit);
- if (result != kSuccess) return result;
+ __ b(exit->label());
break;
}
case kArchRet:
@@ -3422,24 +3421,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(3));
break;
}
- case kS390_I64x2MinS: {
- __ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(3));
- break;
- }
case kS390_I32x4MinS: {
__ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(2));
break;
}
- case kS390_I64x2MinU: {
- __ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(3));
- break;
- }
case kS390_I32x4MinU: {
__ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
@@ -3470,24 +3457,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0));
break;
}
- case kS390_I64x2MaxS: {
- __ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(3));
- break;
- }
case kS390_I32x4MaxS: {
__ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(2));
break;
}
- case kS390_I64x2MaxU: {
- __ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0),
- Condition(3));
- break;
- }
case kS390_I32x4MaxU: {
__ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
@@ -3552,14 +3527,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(0), Condition(2));
break;
}
- case kS390_I64x2Ne: {
- __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(3));
- __ vno(i.OutputSimd128Register(), i.OutputSimd128Register(),
- i.OutputSimd128Register(), Condition(0), Condition(0),
- Condition(3));
- break;
- }
case kS390_I32x4Ne: {
__ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
@@ -3596,25 +3563,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(2));
break;
}
- case kS390_I64x2GtS: {
- __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(3));
- break;
- }
case kS390_I32x4GtS: {
__ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
break;
}
- case kS390_I64x2GeS: {
- __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(3));
- __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(3));
- __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchDoubleReg, Condition(0), Condition(0), Condition(3));
- break;
- }
case kS390_I32x4GeS: {
__ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
@@ -3624,25 +3577,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchDoubleReg, Condition(0), Condition(0), Condition(2));
break;
}
- case kS390_I64x2GtU: {
- __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(3));
- break;
- }
case kS390_I32x4GtU: {
__ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
break;
}
- case kS390_I64x2GeU: {
- __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(3));
- __ vchl(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(3));
- __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchDoubleReg, Condition(0), Condition(0), Condition(3));
- break;
- }
case kS390_I32x4GeU: {
__ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
@@ -3867,7 +3806,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
// vector boolean unops
- case kS390_V64x2AnyTrue:
case kS390_V32x4AnyTrue:
case kS390_V16x8AnyTrue:
case kS390_V8x16AnyTrue: {
@@ -3893,10 +3831,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vtm(kScratchDoubleReg, kScratchDoubleReg, Condition(0), Condition(0), \
Condition(0)); \
__ locgr(Condition(8), dst, temp);
- case kS390_V64x2AllTrue: {
- SIMD_ALL_TRUE(3)
- break;
- }
case kS390_V32x4AllTrue: {
SIMD_ALL_TRUE(2)
break;
@@ -4070,12 +4004,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#undef VECTOR_UNPACK
case kS390_I16x8SConvertI32x4:
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ vpks(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Condition(0), Condition(2));
+#else
__ vpks(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(2));
+#endif
break;
case kS390_I8x16SConvertI16x8:
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ vpks(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Condition(0), Condition(1));
+#else
__ vpks(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(1));
+#endif
break;
#define VECTOR_PACK_UNSIGNED(mode) \
Simd128Register tempFPReg = i.ToSimd128Register(instr->TempAt(0)); \
@@ -4084,17 +4028,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vmx(tempFPReg, i.InputSimd128Register(0), kScratchDoubleReg, \
Condition(0), Condition(0), Condition(mode)); \
__ vmx(kScratchDoubleReg, i.InputSimd128Register(1), kScratchDoubleReg, \
- Condition(0), Condition(0), Condition(mode)); \
- __ vpkls(i.OutputSimd128Register(), tempFPReg, kScratchDoubleReg, \
- Condition(0), Condition(mode));
+ Condition(0), Condition(0), Condition(mode));
case kS390_I16x8UConvertI32x4: {
// treat inputs as signed, and saturate to unsigned (negative to 0)
VECTOR_PACK_UNSIGNED(2)
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg,
+ Condition(0), Condition(2));
+#else
+ __ vpkls(i.OutputSimd128Register(), tempFPReg, kScratchDoubleReg,
+ Condition(0), Condition(2));
+#endif
break;
}
case kS390_I8x16UConvertI16x8: {
// treat inputs as signed, and saturate to unsigned (negative to 0)
VECTOR_PACK_UNSIGNED(1)
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg,
+ Condition(0), Condition(1));
+#else
+ __ vpkls(i.OutputSimd128Register(), tempFPReg, kScratchDoubleReg,
+ Condition(0), Condition(1));
+#endif
break;
}
#undef VECTOR_PACK_UNSIGNED
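
Why the big-endian build swaps the vpks/vpkls operands in these cases: the pack instructions narrow two source vectors into one result, and which source lands in the low half of the result depends on the machine's element order, so the operand order must flip on big-endian targets to preserve wasm lane order. A scalar model of pack-with-signed-saturation (illustrative, not V8 code):

#include <algorithm>
#include <cstdint>

void PackSatS32To16(const int32_t lo[4], const int32_t hi[4],
                    int16_t out[8]) {
  for (int i = 0; i < 4; ++i) {
    out[i] = static_cast<int16_t>(
        std::clamp(lo[i], int32_t{INT16_MIN}, int32_t{INT16_MAX}));
    out[i + 4] = static_cast<int16_t>(
        std::clamp(hi[i], int32_t{INT16_MIN}, int32_t{INT16_MAX}));
  }
}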
@@ -4115,25 +4071,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(mode)); \
__ op(tempFPReg1, tempFPReg1, tempFPReg2, Condition(0), Condition(0), \
Condition(mode + 1));
- case kS390_I16x8AddSaturateS: {
+ case kS390_I16x8AddSatS: {
BINOP_EXTRACT(va, vuph, vupl, 1)
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(2));
+#else
+ __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
+ Condition(0), Condition(2));
+#endif
break;
}
- case kS390_I16x8SubSaturateS: {
+ case kS390_I16x8SubSatS: {
BINOP_EXTRACT(vs, vuph, vupl, 1)
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(2));
+#else
+ __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
+ Condition(0), Condition(2));
+#endif
break;
}
- case kS390_I16x8AddSaturateU: {
+ case kS390_I16x8AddSatU: {
BINOP_EXTRACT(va, vuplh, vupll, 1)
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(2));
+#else
+ __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
+ Condition(0), Condition(2));
+#endif
break;
}
- case kS390_I16x8SubSaturateU: {
+ case kS390_I16x8SubSatU: {
BINOP_EXTRACT(vs, vuplh, vupll, 1)
// negative to 0
__ vx(tempFPReg2, tempFPReg2, tempFPReg2, Condition(0), Condition(0),
@@ -4142,29 +4113,49 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(2));
__ vmx(tempFPReg1, tempFPReg2, tempFPReg1, Condition(0), Condition(0),
Condition(2));
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(2));
+#else
+ __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
+ Condition(0), Condition(2));
+#endif
break;
}
- case kS390_I8x16AddSaturateS: {
+ case kS390_I8x16AddSatS: {
BINOP_EXTRACT(va, vuph, vupl, 0)
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(1));
+#else
+ __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
+ Condition(0), Condition(1));
+#endif
break;
}
- case kS390_I8x16SubSaturateS: {
+ case kS390_I8x16SubSatS: {
BINOP_EXTRACT(vs, vuph, vupl, 0)
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpks(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(1));
+#else
+ __ vpks(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
+ Condition(0), Condition(1));
+#endif
break;
}
- case kS390_I8x16AddSaturateU: {
+ case kS390_I8x16AddSatU: {
BINOP_EXTRACT(va, vuplh, vupll, 0)
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(1));
+#else
+ __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
+ Condition(0), Condition(1));
+#endif
break;
}
- case kS390_I8x16SubSaturateU: {
+ case kS390_I8x16SubSatU: {
BINOP_EXTRACT(vs, vuplh, vupll, 0)
// negative to 0
__ vx(tempFPReg2, tempFPReg2, tempFPReg2, Condition(0), Condition(0),
@@ -4173,8 +4164,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(1));
__ vmx(tempFPReg1, tempFPReg2, tempFPReg1, Condition(0), Condition(0),
Condition(1));
+#ifdef V8_TARGET_BIG_ENDIAN
__ vpkls(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
Condition(0), Condition(1));
+#else
+ __ vpkls(i.OutputSimd128Register(), tempFPReg1, kScratchDoubleReg,
+ Condition(0), Condition(1));
+#endif
break;
}
#undef BINOP_EXTRACT
@@ -4202,20 +4199,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
+ Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
+ // Saturate the indices to 5 bits. Input indices greater than 31
+ // should return 0.
+ __ vrepi(kScratchDoubleReg, Operand(31), Condition(0));
+ __ vmnl(tempFPReg1, src1, kScratchDoubleReg, Condition(0), Condition(0),
+ Condition(0));
#ifdef V8_TARGET_BIG_ENDIAN
// input needs to be reversed
__ vlgv(r0, src0, MemOperand(r0, 0), Condition(3));
__ vlgv(r1, src0, MemOperand(r0, 1), Condition(3));
__ lrvgr(r0, r0);
__ lrvgr(r1, r1);
- __ vlvgp(kScratchDoubleReg, r1, r0);
- // clear scr0
- __ vx(src0, src0, src0, Condition(0), Condition(0), Condition(0));
- __ vperm(dst, kScratchDoubleReg, src0, src1, Condition(0), Condition(0));
+ __ vlvgp(dst, r1, r0);
+ // clear scratch
+ __ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(0));
+ __ vperm(dst, dst, kScratchDoubleReg, tempFPReg1, Condition(0),
+ Condition(0));
#else
__ vx(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg,
Condition(0), Condition(0), Condition(0));
- __ vperm(dst, src0, kScratchDoubleReg, src1, Condition(0), Condition(0));
+ __ vperm(dst, src0, kScratchDoubleReg, tempFPReg1, Condition(0),
+ Condition(0));
#endif
break;
}
@@ -4743,7 +4749,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
void CodeGenerator::FinishCode() {}
-void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
diff --git a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
index ab7973c089..f7d3370e50 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -261,15 +261,6 @@ namespace compiler {
V(S390_I64x2ReplaceLane) \
V(S390_I64x2ExtractLane) \
V(S390_I64x2Eq) \
- V(S390_I64x2Ne) \
- V(S390_I64x2GtS) \
- V(S390_I64x2GeS) \
- V(S390_I64x2GtU) \
- V(S390_I64x2GeU) \
- V(S390_I64x2MinS) \
- V(S390_I64x2MinU) \
- V(S390_I64x2MaxS) \
- V(S390_I64x2MaxU) \
V(S390_I32x4Splat) \
V(S390_I32x4ExtractLane) \
V(S390_I32x4ReplaceLane) \
@@ -328,10 +319,10 @@ namespace compiler {
V(S390_I16x8SConvertI8x16High) \
V(S390_I16x8UConvertI8x16Low) \
V(S390_I16x8UConvertI8x16High) \
- V(S390_I16x8AddSaturateS) \
- V(S390_I16x8SubSaturateS) \
- V(S390_I16x8AddSaturateU) \
- V(S390_I16x8SubSaturateU) \
+ V(S390_I16x8AddSatS) \
+ V(S390_I16x8SubSatS) \
+ V(S390_I16x8AddSatU) \
+ V(S390_I16x8SubSatU) \
V(S390_I16x8RoundingAverageU) \
V(S390_I16x8Abs) \
V(S390_I16x8BitMask) \
@@ -358,20 +349,18 @@ namespace compiler {
V(S390_I8x16Neg) \
V(S390_I8x16SConvertI16x8) \
V(S390_I8x16UConvertI16x8) \
- V(S390_I8x16AddSaturateS) \
- V(S390_I8x16SubSaturateS) \
- V(S390_I8x16AddSaturateU) \
- V(S390_I8x16SubSaturateU) \
+ V(S390_I8x16AddSatS) \
+ V(S390_I8x16SubSatS) \
+ V(S390_I8x16AddSatU) \
+ V(S390_I8x16SubSatU) \
V(S390_I8x16RoundingAverageU) \
V(S390_I8x16Abs) \
V(S390_I8x16BitMask) \
V(S390_I8x16Shuffle) \
V(S390_I8x16Swizzle) \
- V(S390_V64x2AnyTrue) \
V(S390_V32x4AnyTrue) \
V(S390_V16x8AnyTrue) \
V(S390_V8x16AnyTrue) \
- V(S390_V64x2AllTrue) \
V(S390_V32x4AllTrue) \
V(S390_V16x8AllTrue) \
V(S390_V8x16AllTrue) \
diff --git a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
index c0a854b7f1..be0b14c796 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
@@ -207,15 +207,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I64x2ReplaceLane:
case kS390_I64x2ExtractLane:
case kS390_I64x2Eq:
- case kS390_I64x2Ne:
- case kS390_I64x2GtS:
- case kS390_I64x2GeS:
- case kS390_I64x2GtU:
- case kS390_I64x2GeU:
- case kS390_I64x2MinS:
- case kS390_I64x2MinU:
- case kS390_I64x2MaxS:
- case kS390_I64x2MaxU:
case kS390_I32x4Splat:
case kS390_I32x4ExtractLane:
case kS390_I32x4ReplaceLane:
@@ -274,10 +265,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I16x8SConvertI8x16High:
case kS390_I16x8UConvertI8x16Low:
case kS390_I16x8UConvertI8x16High:
- case kS390_I16x8AddSaturateS:
- case kS390_I16x8SubSaturateS:
- case kS390_I16x8AddSaturateU:
- case kS390_I16x8SubSaturateU:
+ case kS390_I16x8AddSatS:
+ case kS390_I16x8SubSatS:
+ case kS390_I16x8AddSatU:
+ case kS390_I16x8SubSatU:
case kS390_I16x8RoundingAverageU:
case kS390_I16x8Abs:
case kS390_I16x8BitMask:
@@ -304,20 +295,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I8x16Neg:
case kS390_I8x16SConvertI16x8:
case kS390_I8x16UConvertI16x8:
- case kS390_I8x16AddSaturateS:
- case kS390_I8x16SubSaturateS:
- case kS390_I8x16AddSaturateU:
- case kS390_I8x16SubSaturateU:
+ case kS390_I8x16AddSatS:
+ case kS390_I8x16SubSatS:
+ case kS390_I8x16AddSatU:
+ case kS390_I8x16SubSatU:
case kS390_I8x16RoundingAverageU:
case kS390_I8x16Abs:
case kS390_I8x16BitMask:
case kS390_I8x16Shuffle:
case kS390_I8x16Swizzle:
- case kS390_V64x2AnyTrue:
case kS390_V32x4AnyTrue:
case kS390_V16x8AnyTrue:
case kS390_V8x16AnyTrue:
- case kS390_V64x2AllTrue:
case kS390_V32x4AllTrue:
case kS390_V16x8AllTrue:
case kS390_V8x16AllTrue:
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index ee3e996169..124193f50b 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -272,7 +272,7 @@ bool S390OpcodeOnlySupport12BitDisp(InstructionCode op) {
ArchOpcode SelectLoadOpcode(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kS390_LoadFloat32;
@@ -747,7 +747,7 @@ static void VisitGeneralStore(
code |= MiscField::encode(static_cast<int>(record_write_mode));
selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
NodeMatcher m(value);
switch (rep) {
case MachineRepresentation::kFloat32:
@@ -799,7 +799,6 @@ static void VisitGeneralStore(
break;
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
InstructionOperand inputs[4];
size_t input_count = 0;
@@ -899,7 +898,8 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher m(node);
int mb = 0;
int me = 0;
- if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
+ if (m.right().HasResolvedValue() &&
+ IsContiguousMask64(m.right().ResolvedValue(), &mb, &me)) {
int sh = 0;
Node* left = m.left().node();
if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
@@ -907,7 +907,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().IsInRange(0, 63)) {
left = mleft.left().node();
- sh = mleft.right().Value();
+ sh = mleft.right().ResolvedValue();
if (m.left().IsWord64Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
@@ -951,11 +951,11 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
// TODO(mbrandy): eliminate left sign extension if right >= 32
if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
Int64BinopMatcher mleft(m.left().node());
- int sh = m.right().Value();
+ int sh = m.right().ResolvedValue();
int mb;
int me;
- if (mleft.right().HasValue() &&
- IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
+ if (mleft.right().HasResolvedValue() &&
+ IsContiguousMask64(mleft.right().ResolvedValue() << sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
if (mb >= me) {
@@ -992,11 +992,12 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
Int64BinopMatcher m(node);
if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
Int64BinopMatcher mleft(m.left().node());
- int sh = m.right().Value();
+ int sh = m.right().ResolvedValue();
int mb;
int me;
- if (mleft.right().HasValue() &&
- IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+ if (mleft.right().HasResolvedValue() &&
+ IsContiguousMask64((uint64_t)(mleft.right().ResolvedValue()) >> sh, &mb,
+ &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
sh = (64 - sh) & 0x3F;
@@ -1120,7 +1121,7 @@ void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
// no register aliasing of input registers with output registers.
Int32Matcher m(node->InputAt(2));
InstructionOperand shift_operand;
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
shift_operand = g.UseImmediate(m.node());
} else {
shift_operand = g.UseUniqueRegister(m.node());
@@ -2302,7 +2303,7 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
@@ -2316,13 +2317,12 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = kS390_Word64AtomicExchangeUint8;
@@ -2334,7 +2334,6 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
opcode = kS390_Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
@@ -2372,7 +2371,7 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2385,14 +2384,13 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Uint8()) {
opcode = kS390_Word64AtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
@@ -2403,7 +2401,6 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
opcode = kS390_Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
@@ -2447,7 +2444,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = int8_op;
@@ -2461,7 +2458,6 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
opcode = word32_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinop(this, node, opcode);
}
@@ -2484,7 +2480,7 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode word32_op,
ArchOpcode word64_op) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Uint8()) {
opcode = uint8_op;
@@ -2496,7 +2492,6 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
opcode = word64_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinop(this, node, opcode);
}
@@ -2559,15 +2554,6 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I64x2Sub) \
V(I64x2Mul) \
V(I64x2Eq) \
- V(I64x2Ne) \
- V(I64x2GtS) \
- V(I64x2GeS) \
- V(I64x2GtU) \
- V(I64x2GeU) \
- V(I64x2MinS) \
- V(I64x2MinU) \
- V(I64x2MaxS) \
- V(I64x2MaxU) \
V(I32x4Add) \
V(I32x4AddHoriz) \
V(I32x4Sub) \
@@ -2582,6 +2568,7 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I32x4GeS) \
V(I32x4GtU) \
V(I32x4GeU) \
+ V(I32x4DotI16x8S) \
V(I16x8Add) \
V(I16x8AddHoriz) \
V(I16x8Sub) \
@@ -2598,10 +2585,10 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I16x8GeU) \
V(I16x8SConvertI32x4) \
V(I16x8UConvertI32x4) \
- V(I16x8AddSaturateS) \
- V(I16x8SubSaturateS) \
- V(I16x8AddSaturateU) \
- V(I16x8SubSaturateU) \
+ V(I16x8AddSatS) \
+ V(I16x8SubSatS) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
V(I16x8RoundingAverageU) \
V(I8x16Add) \
V(I8x16Sub) \
@@ -2618,10 +2605,10 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I8x16GeU) \
V(I8x16SConvertI16x8) \
V(I8x16UConvertI16x8) \
- V(I8x16AddSaturateS) \
- V(I8x16SubSaturateS) \
- V(I8x16AddSaturateU) \
- V(I8x16SubSaturateU) \
+ V(I8x16AddSatS) \
+ V(I8x16SubSatS) \
+ V(I8x16AddSatU) \
+ V(I8x16SubSatU) \
V(I8x16RoundingAverageU) \
V(S128And) \
V(S128Or) \
@@ -2677,11 +2664,9 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I8x16ShrU)
#define SIMD_BOOL_LIST(V) \
- V(V64x2AnyTrue) \
V(V32x4AnyTrue) \
V(V16x8AnyTrue) \
V(V8x16AnyTrue) \
- V(V64x2AllTrue) \
V(V32x4AllTrue) \
V(V16x8AllTrue) \
V(V8x16AllTrue)
@@ -2855,9 +2840,10 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
void InstructionSelector::VisitI8x16Swizzle(Node* node) {
S390OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
Emit(kS390_I8x16Swizzle, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)));
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
void InstructionSelector::VisitS128Const(Node* node) {
diff --git a/deps/v8/src/compiler/backend/spill-placer.h b/deps/v8/src/compiler/backend/spill-placer.h
index 99181d074e..94a5358384 100644
--- a/deps/v8/src/compiler/backend/spill-placer.h
+++ b/deps/v8/src/compiler/backend/spill-placer.h
@@ -80,6 +80,9 @@ class SpillPlacer {
~SpillPlacer();
+ SpillPlacer(const SpillPlacer&) = delete;
+ SpillPlacer& operator=(const SpillPlacer&) = delete;
+
// Adds the given TopLevelLiveRange to the SpillPlacer's state. Will
// eventually commit spill moves for that range and mark the range to indicate
// whether its value is spilled at the definition or some later point, so that
@@ -158,8 +161,6 @@ class SpillPlacer {
// additional work.
RpoNumber first_block_ = RpoNumber::Invalid();
RpoNumber last_block_ = RpoNumber::Invalid();
-
- DISALLOW_COPY_AND_ASSIGN(SpillPlacer);
};
} // namespace compiler
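
The `SpillPlacer` hunk swaps the old `DISALLOW_COPY_AND_ASSIGN` macro for explicitly deleted special members, part of a wider cleanup visible elsewhere in this commit. The idiom, on a hypothetical class:

    // Deleted copy operations now sit next to the other special member
    // functions instead of being hidden at the end behind a macro.
    class NonCopyableSketch {
     public:
      NonCopyableSketch() = default;
      NonCopyableSketch(const NonCopyableSketch&) = delete;
      NonCopyableSketch& operator=(const NonCopyableSketch&) = delete;
    };
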
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index e0cf602b11..df1d6de835 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -487,13 +487,19 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} \
} while (false)
-#define ASSEMBLE_SSE_BINOP(asm_instr) \
- do { \
- if (instr->InputAt(1)->IsFPRegister()) { \
- __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
- } else { \
- __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1)); \
- } \
+#define ASSEMBLE_SSE_BINOP(asm_instr) \
+ do { \
+ if (HasAddressingMode(instr)) { \
+ size_t index = 1; \
+ Operand right = i.MemoryOperand(&index); \
+ __ asm_instr(i.InputDoubleRegister(0), right); \
+ } else { \
+ if (instr->InputAt(1)->IsFPRegister()) { \
+ __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+ } else { \
+ __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1)); \
+ } \
+ } \
} while (false)
#define ASSEMBLE_SSE_UNOP(asm_instr) \
@@ -505,16 +511,22 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} \
} while (false)
-#define ASSEMBLE_AVX_BINOP(asm_instr) \
- do { \
- CpuFeatureScope avx_scope(tasm(), AVX); \
- if (instr->InputAt(1)->IsFPRegister()) { \
- __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
- i.InputDoubleRegister(1)); \
- } else { \
- __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
- i.InputOperand(1)); \
- } \
+#define ASSEMBLE_AVX_BINOP(asm_instr) \
+ do { \
+ CpuFeatureScope avx_scope(tasm(), AVX); \
+ if (HasAddressingMode(instr)) { \
+ size_t index = 1; \
+ Operand right = i.MemoryOperand(&index); \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), right); \
+ } else { \
+ if (instr->InputAt(1)->IsFPRegister()) { \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ } else { \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+ i.InputOperand(1)); \
+ } \
+ } \
} while (false)
#define ASSEMBLE_IEEE754_BINOP(name) \
@@ -553,6 +565,21 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ j(not_equal, &binop); \
} while (false)
+// Handles both SSE and AVX codegen. For SSE we use DefineSameAsFirst, so the
+// dst and first src will be the same. For AVX we don't restrict it that way, so
+// we will omit unnecessary moves.
+#define ASSEMBLE_SIMD_BINOP(opcode) \
+ do { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(tasm(), AVX); \
+ __ v##opcode(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputSimd128Register(1)); \
+ } else { \
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
+ __ opcode(i.OutputSimd128Register(), i.InputSimd128Register(1)); \
+ } \
+ } while (false)
+
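
The comment above `ASSEMBLE_SIMD_BINOP` summarizes the dispatch: three-operand AVX encodings let the destination differ from both sources, while the destructive two-operand SSE encodings rely on the register allocator's `DefineSameAsFirst` so that `dst == src0` on entry. A scalar sketch of that dispatch, with hypothetical emit helpers standing in for the assembler calls and ints playing the role of register codes:

    #include <cassert>
    #include <cstdio>

    void EmitAvx(int dst, int src0, int src1) {
      std::printf("vaddps xmm%d, xmm%d, xmm%d\n", dst, src0, src1);
    }
    void EmitSse(int dst, int src1) {
      std::printf("addps xmm%d, xmm%d\n", dst, src1);
    }

    void EmitSimdBinop(bool avx_supported, int dst, int src0, int src1) {
      if (avx_supported) {
        EmitAvx(dst, src0, src1);  // Three operands: no move needed.
      } else {
        assert(dst == src0);  // SSE is destructive; the allocator pinned
        EmitSse(dst, src1);   // dst to the first input.
      }
    }
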
#define ASSEMBLE_SIMD_INSTR(opcode, dst_operand, index) \
do { \
if (instr->InputAt(index)->IsSimd128Register()) { \
@@ -603,21 +630,53 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
// This macro will directly emit the opcode if the shift is an immediate - the
// shift value will be taken modulo 2^width. Otherwise, it will emit code to
// perform the modulus operation.
-#define ASSEMBLE_SIMD_SHIFT(opcode, width) \
- do { \
- XMMRegister dst = i.OutputSimd128Register(); \
- DCHECK_EQ(dst, i.InputSimd128Register(0)); \
- if (HasImmediateInput(instr, 1)) { \
- __ opcode(dst, byte{i.InputInt##width(1)}); \
- } else { \
- XMMRegister tmp = i.TempSimd128Register(0); \
- Register tmp_shift = i.TempRegister(1); \
- constexpr int mask = (1 << width) - 1; \
- __ movq(tmp_shift, i.InputRegister(1)); \
- __ andq(tmp_shift, Immediate(mask)); \
- __ Movq(tmp, tmp_shift); \
- __ opcode(dst, tmp); \
- } \
+#define ASSEMBLE_SIMD_SHIFT(opcode, width) \
+ do { \
+ XMMRegister dst = i.OutputSimd128Register(); \
+ if (HasImmediateInput(instr, 1)) { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(tasm(), AVX); \
+ __ v##opcode(dst, i.InputSimd128Register(0), \
+ byte{i.InputInt##width(1)}); \
+ } else { \
+ DCHECK_EQ(dst, i.InputSimd128Register(0)); \
+ __ opcode(dst, byte{i.InputInt##width(1)}); \
+ } \
+ } else { \
+ XMMRegister tmp = i.TempSimd128Register(0); \
+ Register tmp_shift = i.TempRegister(1); \
+ constexpr int mask = (1 << width) - 1; \
+ __ movq(tmp_shift, i.InputRegister(1)); \
+ __ andq(tmp_shift, Immediate(mask)); \
+ __ Movq(tmp, tmp_shift); \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(tasm(), AVX); \
+ __ v##opcode(dst, i.InputSimd128Register(0), tmp); \
+ } else { \
+ DCHECK_EQ(dst, i.InputSimd128Register(0)); \
+ __ opcode(dst, tmp); \
+ } \
+ } \
+ } while (false)
+
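
`ASSEMBLE_SIMD_SHIFT` masks a register shift count down to the lane width before moving it into an XMM register, matching Wasm's modulo semantics for SIMD shifts; the `width` argument is log2 of the lane width in bits. A sketch of the masking step:

    #include <cstdint>

    // For 64-bit lanes width == 6, so the mask is 0x3F.
    uint64_t MaskShiftCount(uint64_t count, int width) {
      const uint64_t mask = (uint64_t{1} << width) - 1;
      return count & mask;  // e.g. MaskShiftCount(70, 6) == 6
    }
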
+#define ASSEMBLE_PINSR(ASM_INSTR) \
+ do { \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
+ XMMRegister dst = i.OutputSimd128Register(); \
+ XMMRegister src = i.InputSimd128Register(0); \
+ uint8_t laneidx = i.InputUint8(1); \
+ if (HasAddressingMode(instr)) { \
+ __ ASM_INSTR(dst, src, i.MemoryOperand(2), laneidx); \
+ break; \
+ } \
+ if (instr->InputAt(2)->IsFPRegister()) { \
+ __ Movq(kScratchRegister, i.InputDoubleRegister(2)); \
+ __ ASM_INSTR(dst, src, kScratchRegister, laneidx); \
+ } else if (instr->InputAt(2)->IsRegister()) { \
+ __ ASM_INSTR(dst, src, i.InputRegister(2), laneidx); \
+ } else { \
+ __ ASM_INSTR(dst, src, i.InputOperand(2), laneidx); \
+ } \
} while (false)
void CodeGenerator::AssembleDeconstructFrame() {
@@ -664,7 +723,7 @@ void AdjustStackPointerForTailCall(Instruction* instr,
int new_slot_above_sp,
bool allow_shrinkage = true) {
int stack_slot_delta;
- if (HasCallDescriptorFlag(instr, CallDescriptor::kIsTailCallForTierUp)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kIsTailCallForTierUp)) {
// For this special tail-call mode, the callee has the same arguments and
// linkage as the caller, and arguments adapter frames must be preserved.
// Thus we simply have reset the stack pointer register to its original
@@ -710,7 +769,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
if (!pushes.empty() &&
(LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
first_unused_stack_slot)) {
- DCHECK(!HasCallDescriptorFlag(instr, CallDescriptor::kIsTailCallForTierUp));
+ DCHECK(!instr->HasCallDescriptorFlag(CallDescriptor::kIsTailCallForTierUp));
X64OperandConverter g(this, instr);
for (auto move : pushes) {
LocationOperand destination_location(
@@ -800,10 +859,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
} else {
__ call(reg);
@@ -828,7 +887,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
__ near_call(wasm_code, constant.rmode());
} else {
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(wasm_code, constant.rmode());
} else {
__ Call(wasm_code, constant.rmode());
@@ -836,7 +895,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
} else {
Register reg = i.InputRegister(0);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineCall(reg);
} else {
__ call(reg);
@@ -847,7 +906,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchTailCallCodeObjectFromJSFunction:
- if (!HasCallDescriptorFlag(instr, CallDescriptor::kIsTailCallForTierUp)) {
+ if (!instr->HasCallDescriptorFlag(CallDescriptor::kIsTailCallForTierUp)) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
i.TempRegister(2));
@@ -860,10 +919,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
@@ -886,7 +945,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
} else {
Register reg = i.InputRegister(0);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
@@ -901,9 +960,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
__ RetpolineJump(reg);
} else {
__ jmp(reg);
@@ -1040,9 +1099,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- CodeGenResult result = AssembleDeoptimizerCall(exit);
- if (result != kSuccess) return result;
- unwinding_info_writer_.MarkBlockWillExit();
+ __ jmp(exit->label());
break;
}
case kArchRet:
@@ -2327,15 +2384,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kX64F64x2ReplaceLane: {
- if (instr->InputAt(2)->IsFPRegister()) {
- __ Movq(kScratchRegister, i.InputDoubleRegister(2));
- __ Pinsrq(i.OutputSimd128Register(), kScratchRegister, i.InputInt8(1));
- } else {
- __ Pinsrq(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
- }
- break;
- }
case kX64F64x2ExtractLane: {
__ Pextrq(kScratchRegister, i.InputSimd128Register(0), i.InputInt8(1));
__ Movq(i.OutputDoubleRegister(), kScratchRegister);
@@ -2346,19 +2394,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F64x2Add: {
- ASSEMBLE_SSE_BINOP(Addpd);
+ ASSEMBLE_SIMD_BINOP(addpd);
break;
}
case kX64F64x2Sub: {
- ASSEMBLE_SSE_BINOP(Subpd);
+ ASSEMBLE_SIMD_BINOP(subpd);
break;
}
case kX64F64x2Mul: {
- ASSEMBLE_SSE_BINOP(Mulpd);
+ ASSEMBLE_SIMD_BINOP(mulpd);
break;
}
case kX64F64x2Div: {
- ASSEMBLE_SSE_BINOP(Divpd);
+ ASSEMBLE_SIMD_BINOP(divpd);
break;
}
case kX64F64x2Min: {
@@ -2401,23 +2449,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F64x2Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(cmpeqpd);
break;
}
case kX64F64x2Ne: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(cmpneqpd);
break;
}
case kX64F64x2Lt: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(cmpltpd);
break;
}
case kX64F64x2Le: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(cmplepd);
break;
}
case kX64F64x2Qfma: {
@@ -2446,20 +2490,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- // TODO(gdeepti): Get rid of redundant moves for F32x4Splat/Extract below
case kX64F32x4Splat: {
- XMMRegister dst = i.OutputSimd128Register();
- if (instr->InputAt(0)->IsFPRegister()) {
- __ Movss(dst, i.InputDoubleRegister(0));
- } else {
- __ Movss(dst, i.InputOperand(0));
- }
- __ Shufps(dst, dst, byte{0x0});
+ __ Shufps(i.OutputSimd128Register(), i.InputDoubleRegister(0), 0);
break;
}
case kX64F32x4ExtractLane: {
- __ Extractps(kScratchRegister, i.InputSimd128Register(0), i.InputInt8(1));
- __ Movd(i.OutputDoubleRegister(), kScratchRegister);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister src = i.InputSimd128Register(0);
+        // vshufps the lane into place, leaving junk in the 3 high lanes.
+ __ vshufps(i.OutputDoubleRegister(), src, src, i.InputInt8(1));
+ } else {
+ __ extractps(kScratchRegister, i.InputSimd128Register(0),
+ i.InputUint8(1));
+ __ movd(i.OutputDoubleRegister(), kScratchRegister);
+ }
break;
}
case kX64F32x4ReplaceLane: {
@@ -2533,8 +2578,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F32x4Add: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Addps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(addps);
break;
}
case kX64F32x4AddHoriz: {
@@ -2543,18 +2587,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F32x4Sub: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Subps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(subps);
break;
}
case kX64F32x4Mul: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Mulps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(mulps);
break;
}
case kX64F32x4Div: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Divps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(divps);
break;
}
case kX64F32x4Min: {
@@ -2597,25 +2638,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F32x4Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Cmpps(i.OutputSimd128Register(), i.InputSimd128Register(1),
- int8_t{0x0});
+ ASSEMBLE_SIMD_BINOP(cmpeqps);
break;
}
case kX64F32x4Ne: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Cmpps(i.OutputSimd128Register(), i.InputSimd128Register(1),
- int8_t{0x4});
+ ASSEMBLE_SIMD_BINOP(cmpneqps);
break;
}
case kX64F32x4Lt: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Cmpltps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(cmpltps);
break;
}
case kX64F32x4Le: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(cmpleps);
break;
}
case kX64F32x4Qfma: {
@@ -2694,15 +2729,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pextrq(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
- case kX64I64x2ReplaceLane: {
- if (HasRegisterInput(instr, 2)) {
- __ Pinsrq(i.OutputSimd128Register(), i.InputRegister(2),
- i.InputInt8(1));
- } else {
- __ Pinsrq(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
- }
- break;
- }
case kX64I64x2Neg: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
@@ -2714,9 +2740,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Psubq(dst, src);
break;
}
+ case kX64I64x2BitMask: {
+ __ Movmskpd(i.OutputRegister(), i.InputSimd128Register(0));
+ break;
+ }
case kX64I64x2Shl: {
// Take shift value modulo 2^6.
- ASSEMBLE_SIMD_SHIFT(Psllq, 6);
+ ASSEMBLE_SIMD_SHIFT(psllq, 6);
break;
}
case kX64I64x2ShrS: {
@@ -2730,22 +2760,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// lower quadword
__ Pextrq(tmp, src, int8_t{0x0});
__ sarq_cl(tmp);
- __ Pinsrq(dst, tmp, int8_t{0x0});
+ __ Pinsrq(dst, tmp, uint8_t{0x0});
// upper quadword
__ Pextrq(tmp, src, int8_t{0x1});
__ sarq_cl(tmp);
- __ Pinsrq(dst, tmp, int8_t{0x1});
+ __ Pinsrq(dst, tmp, uint8_t{0x1});
break;
}
case kX64I64x2Add: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Paddq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(paddq);
break;
}
case kX64I64x2Sub: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Psubq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(psubq);
break;
}
case kX64I64x2Mul: {
@@ -2773,177 +2801,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Paddq(left, tmp2); // left == dst
break;
}
- case kX64I64x2MinS: {
- if (CpuFeatures::IsSupported(SSE4_2)) {
- CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src0 = i.InputSimd128Register(0);
- XMMRegister src1 = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
- DCHECK_EQ(tmp, xmm0);
-
- __ movaps(tmp, src1);
- __ pcmpgtq(tmp, src0);
- __ movaps(dst, src1);
- __ blendvpd(dst, src0); // implicit use of xmm0 as mask
- } else {
- CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
- Register tmp1 = i.TempRegister(1);
- Register tmp2 = i.TempRegister(2);
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // backup src since we cannot change it
- __ movaps(tmp, src);
-
- // compare the lower quardwords
- __ movq(tmp1, dst);
- __ movq(tmp2, tmp);
- __ cmpq(tmp1, tmp2);
- // tmp2 now has the min of lower quadwords
- __ cmovq(less_equal, tmp2, tmp1);
- // tmp1 now has the higher quadword
- // must do this before movq, movq clears top quadword
- __ pextrq(tmp1, dst, 1);
- // save tmp2 into dst
- __ movq(dst, tmp2);
- // tmp2 now has the higher quadword
- __ pextrq(tmp2, tmp, 1);
- // compare higher quadwords
- __ cmpq(tmp1, tmp2);
- // tmp2 now has the min of higher quadwords
- __ cmovq(less_equal, tmp2, tmp1);
- __ movq(tmp, tmp2);
- // dst = [tmp[0], dst[0]]
- __ punpcklqdq(dst, tmp);
- }
- break;
- }
- case kX64I64x2MaxS: {
- CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- DCHECK_EQ(tmp, xmm0);
-
- __ movaps(tmp, src);
- __ pcmpgtq(tmp, dst);
- __ blendvpd(dst, src); // implicit use of xmm0 as mask
- break;
- }
case kX64I64x2Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
- break;
- }
- case kX64I64x2Ne: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister tmp = i.TempSimd128Register(0);
- __ Pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
- __ Pcmpeqq(tmp, tmp);
- __ Pxor(i.OutputSimd128Register(), tmp);
- break;
- }
- case kX64I64x2GtS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Pcmpgtq(i.OutputSimd128Register(), i.InputSimd128Register(1));
- break;
- }
- case kX64I64x2GeS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
-
- __ Movaps(tmp, src);
- __ Pcmpgtq(tmp, dst);
- __ Pcmpeqd(dst, dst);
- __ Pxor(dst, tmp);
+ ASSEMBLE_SIMD_BINOP(pcmpeqq);
break;
}
case kX64I64x2ShrU: {
// Take shift value modulo 2^6.
- ASSEMBLE_SIMD_SHIFT(Psrlq, 6);
- break;
- }
- case kX64I64x2MinU: {
- CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2);
- CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src0 = i.InputSimd128Register(0);
- XMMRegister src1 = i.InputSimd128Register(1);
- XMMRegister tmp0 = i.TempSimd128Register(0);
- XMMRegister tmp1 = i.TempSimd128Register(1);
- DCHECK_EQ(tmp1, xmm0);
-
- __ movaps(dst, src1);
- __ movaps(tmp0, src0);
-
- __ pcmpeqd(tmp1, tmp1);
- __ psllq(tmp1, 63);
-
- __ pxor(tmp0, tmp1);
- __ pxor(tmp1, dst);
-
- __ pcmpgtq(tmp1, tmp0);
- __ blendvpd(dst, src0); // implicit use of xmm0 as mask
- break;
- }
- case kX64I64x2MaxU: {
- CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2);
- CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- XMMRegister dst_tmp = i.TempSimd128Register(0);
- XMMRegister tmp = i.TempSimd128Register(1);
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- DCHECK_EQ(tmp, xmm0);
-
- __ movaps(dst_tmp, dst);
-
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 63);
-
- __ pxor(dst_tmp, tmp);
- __ pxor(tmp, src);
-
- __ pcmpgtq(tmp, dst_tmp);
- __ blendvpd(dst, src); // implicit use of xmm0 as mask
- break;
- }
- case kX64I64x2GtU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
-
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psllq(kScratchDoubleReg, 63);
-
- __ Movaps(tmp, src);
- __ Pxor(tmp, kScratchDoubleReg);
- __ Pxor(dst, kScratchDoubleReg);
- __ Pcmpgtq(dst, tmp);
- break;
- }
- case kX64I64x2GeU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_2);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(1);
- XMMRegister tmp = i.TempSimd128Register(0);
-
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psllq(kScratchDoubleReg, 63);
-
- __ Movaps(tmp, src);
- __ Pxor(dst, kScratchDoubleReg);
- __ Pxor(tmp, kScratchDoubleReg);
- __ Pcmpgtq(tmp, dst);
- __ Pcmpeqd(dst, dst);
- __ Pxor(dst, tmp);
+ ASSEMBLE_SIMD_SHIFT(psrlq, 6);
break;
}
case kX64I32x4Splat: {
@@ -2960,15 +2824,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
- case kX64I32x4ReplaceLane: {
- if (HasRegisterInput(instr, 2)) {
- __ Pinsrd(i.OutputSimd128Register(), i.InputRegister(2),
- i.InputInt8(1));
- } else {
- __ Pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
- }
- break;
- }
case kX64I32x4SConvertF32x4: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister dst = i.OutputSimd128Register();
@@ -3012,40 +2867,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I32x4Shl: {
// Take shift value modulo 2^5.
- ASSEMBLE_SIMD_SHIFT(Pslld, 5);
+ ASSEMBLE_SIMD_SHIFT(pslld, 5);
break;
}
case kX64I32x4ShrS: {
// Take shift value modulo 2^5.
- ASSEMBLE_SIMD_SHIFT(Psrad, 5);
+ ASSEMBLE_SIMD_SHIFT(psrad, 5);
break;
}
case kX64I32x4Add: {
- __ Paddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(paddd);
break;
}
case kX64I32x4AddHoriz: {
- __ Phaddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(phaddd);
break;
}
case kX64I32x4Sub: {
- __ Psubd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(psubd);
break;
}
case kX64I32x4Mul: {
- __ Pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmulld);
break;
}
case kX64I32x4MinS: {
- __ Pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pminsd);
break;
}
case kX64I32x4MaxS: {
- __ Pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmaxsd);
break;
}
case kX64I32x4Eq: {
- __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pcmpeqd);
break;
}
case kX64I32x4Ne: {
@@ -3056,7 +2911,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4GtS: {
- __ Pcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pcmpgtd);
break;
}
case kX64I32x4GeS: {
@@ -3076,8 +2931,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Maxps(dst, tmp2);
// scratch: float representation of max_signed
__ Pcmpeqd(tmp2, tmp2);
- __ Psrld(tmp2, uint8_t{1}); // 0x7fffffff
- __ Cvtdq2ps(tmp2, tmp2); // 0x4f000000
+ __ Psrld(tmp2, uint8_t{1}); // 0x7fffffff
+ __ Cvtdq2ps(tmp2, tmp2); // 0x4f000000
// tmp: convert (src-max_signed).
// Positive overflow lanes -> 0x7FFFFFFF
// Negative lanes -> 0
@@ -3106,15 +2961,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I32x4ShrU: {
// Take shift value modulo 2^5.
- ASSEMBLE_SIMD_SHIFT(Psrld, 5);
+ ASSEMBLE_SIMD_SHIFT(psrld, 5);
break;
}
case kX64I32x4MinU: {
- __ Pminud(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pminud);
break;
}
case kX64I32x4MaxU: {
- __ Pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmaxud);
break;
}
case kX64I32x4GtU: {
@@ -3143,7 +2998,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4DotI16x8S: {
- __ Pmaddwd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmaddwd);
break;
}
case kX64S128Const: {
@@ -3159,7 +3014,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64S128Zero: {
XMMRegister dst = i.OutputSimd128Register();
- __ Xorps(dst, dst);
+ __ Pxor(dst, dst);
break;
}
case kX64S128AllOnes: {
@@ -3178,26 +3033,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pshufd(dst, dst, uint8_t{0x0});
break;
}
- case kX64I16x8ExtractLaneU: {
- Register dst = i.OutputRegister();
- __ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
- break;
- }
case kX64I16x8ExtractLaneS: {
Register dst = i.OutputRegister();
- __ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ __ Pextrw(dst, i.InputSimd128Register(0), i.InputUint8(1));
__ movsxwl(dst, dst);
break;
}
- case kX64I16x8ReplaceLane: {
- if (HasRegisterInput(instr, 2)) {
- __ Pinsrw(i.OutputSimd128Register(), i.InputRegister(2),
- i.InputInt8(1));
- } else {
- __ Pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
- }
- break;
- }
case kX64I16x8SConvertI8x16Low: {
__ Pmovsxbw(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
@@ -3222,53 +3063,52 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8Shl: {
// Take shift value modulo 2^4.
- ASSEMBLE_SIMD_SHIFT(Psllw, 4);
+ ASSEMBLE_SIMD_SHIFT(psllw, 4);
break;
}
case kX64I16x8ShrS: {
// Take shift value modulo 2^4.
- ASSEMBLE_SIMD_SHIFT(Psraw, 4);
+ ASSEMBLE_SIMD_SHIFT(psraw, 4);
break;
}
case kX64I16x8SConvertI32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Packssdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(packssdw);
break;
}
case kX64I16x8Add: {
- __ Paddw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(paddw);
break;
}
- case kX64I16x8AddSaturateS: {
- __ Paddsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ case kX64I16x8AddSatS: {
+ ASSEMBLE_SIMD_BINOP(paddsw);
break;
}
case kX64I16x8AddHoriz: {
- __ Phaddw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(phaddw);
break;
}
case kX64I16x8Sub: {
- __ Psubw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(psubw);
break;
}
- case kX64I16x8SubSaturateS: {
- __ Psubsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ case kX64I16x8SubSatS: {
+ ASSEMBLE_SIMD_BINOP(psubsw);
break;
}
case kX64I16x8Mul: {
- __ Pmullw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmullw);
break;
}
case kX64I16x8MinS: {
- __ Pminsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pminsw);
break;
}
case kX64I16x8MaxS: {
- __ Pmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmaxsw);
break;
}
case kX64I16x8Eq: {
- __ Pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pcmpeqw);
break;
}
case kX64I16x8Ne: {
@@ -3279,7 +3119,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8GtS: {
- __ Pcmpgtw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pcmpgtw);
break;
}
case kX64I16x8GeS: {
@@ -3301,28 +3141,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8ShrU: {
// Take shift value modulo 2^4.
- ASSEMBLE_SIMD_SHIFT(Psrlw, 4);
+ ASSEMBLE_SIMD_SHIFT(psrlw, 4);
break;
}
case kX64I16x8UConvertI32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Packusdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(packusdw);
break;
}
- case kX64I16x8AddSaturateU: {
- __ Paddusw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ case kX64I16x8AddSatU: {
+ ASSEMBLE_SIMD_BINOP(paddusw);
break;
}
- case kX64I16x8SubSaturateU: {
- __ Psubusw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ case kX64I16x8SubSatU: {
+ ASSEMBLE_SIMD_BINOP(psubusw);
break;
}
case kX64I16x8MinU: {
- __ Pminuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pminuw);
break;
}
case kX64I16x8MaxU: {
- __ Pmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmaxuw);
break;
}
case kX64I16x8GtU: {
@@ -3343,7 +3182,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8RoundingAverageU: {
- __ Pavgw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pavgw);
break;
}
case kX64I16x8Abs: {
@@ -3369,29 +3208,56 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pshufb(dst, kScratchDoubleReg);
break;
}
- case kX64I8x16ExtractLaneU: {
- Register dst = i.OutputRegister();
- __ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ case kX64Pextrb: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ size_t index = 0;
+ if (HasAddressingMode(instr)) {
+ Operand operand = i.MemoryOperand(&index);
+ __ Pextrb(operand, i.InputSimd128Register(index),
+ i.InputUint8(index + 1));
+ } else {
+ __ Pextrb(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputUint8(1));
+ }
+ break;
+ }
+ case kX64Pextrw: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ size_t index = 0;
+ if (HasAddressingMode(instr)) {
+ Operand operand = i.MemoryOperand(&index);
+ __ Pextrw(operand, i.InputSimd128Register(index),
+ i.InputUint8(index + 1));
+ } else {
+ __ Pextrw(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputUint8(1));
+ }
break;
}
case kX64I8x16ExtractLaneS: {
Register dst = i.OutputRegister();
- __ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ __ Pextrb(dst, i.InputSimd128Register(0), i.InputUint8(1));
__ movsxbl(dst, dst);
break;
}
- case kX64I8x16ReplaceLane: {
- if (HasRegisterInput(instr, 2)) {
- __ Pinsrb(i.OutputSimd128Register(), i.InputRegister(2),
- i.InputInt8(1));
- } else {
- __ Pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
- }
+ case kX64Pinsrb: {
+ ASSEMBLE_PINSR(Pinsrb);
+ break;
+ }
+ case kX64Pinsrw: {
+ ASSEMBLE_PINSR(Pinsrw);
+ break;
+ }
+ case kX64Pinsrd: {
+ ASSEMBLE_PINSR(Pinsrd);
+ break;
+ }
+ case kX64Pinsrq: {
+ ASSEMBLE_PINSR(Pinsrq);
break;
}
case kX64I8x16SConvertI16x8: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Packsswb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(packsswb);
break;
}
case kX64I8x16Neg: {
@@ -3472,19 +3338,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16Add: {
- __ Paddb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(paddb);
break;
}
- case kX64I8x16AddSaturateS: {
- __ Paddsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ case kX64I8x16AddSatS: {
+ ASSEMBLE_SIMD_BINOP(paddsb);
break;
}
case kX64I8x16Sub: {
- __ Psubb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(psubb);
break;
}
- case kX64I8x16SubSaturateS: {
- __ Psubsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ case kX64I8x16SubSatS: {
+ ASSEMBLE_SIMD_BINOP(psubsb);
break;
}
case kX64I8x16Mul: {
@@ -3521,15 +3387,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16MinS: {
- __ Pminsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pminsb);
break;
}
case kX64I8x16MaxS: {
- __ Pmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmaxsb);
break;
}
case kX64I8x16Eq: {
- __ Pcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pcmpeqb);
break;
}
case kX64I8x16Ne: {
@@ -3540,7 +3406,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16GtS: {
- __ Pcmpgtb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pcmpgtb);
break;
}
case kX64I8x16GeS: {
@@ -3551,8 +3417,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16UConvertI16x8: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ Packuswb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(packuswb);
break;
}
case kX64I8x16ShrU: {
@@ -3588,20 +3453,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kX64I8x16AddSaturateU: {
- __ Paddusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ case kX64I8x16AddSatU: {
+ ASSEMBLE_SIMD_BINOP(paddusb);
break;
}
- case kX64I8x16SubSaturateU: {
- __ Psubusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ case kX64I8x16SubSatU: {
+ ASSEMBLE_SIMD_BINOP(psubusb);
break;
}
case kX64I8x16MinU: {
- __ Pminub(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pminub);
break;
}
case kX64I8x16MaxU: {
- __ Pmaxub(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pmaxub);
break;
}
case kX64I8x16GtU: {
@@ -3622,7 +3487,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16RoundingAverageU: {
- __ Pavgb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pavgb);
break;
}
case kX64I8x16Abs: {
@@ -3633,16 +3498,50 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pmovmskb(i.OutputRegister(), i.InputSimd128Register(0));
break;
}
+ case kX64I8x16SignSelect: {
+ __ Pblendvb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2));
+ break;
+ }
+ case kX64I16x8SignSelect: {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpsraw(kScratchDoubleReg, i.InputSimd128Register(2), 15);
+ __ vpblendvb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ } else {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ XMMRegister mask = i.InputSimd128Register(2);
+ DCHECK_EQ(xmm0, mask);
+ __ movapd(kScratchDoubleReg, mask);
+ __ pxor(mask, mask);
+ __ pcmpgtw(mask, kScratchDoubleReg);
+ __ pblendvb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ // Restore mask.
+ __ movapd(mask, kScratchDoubleReg);
+ }
+ break;
+ }
+ case kX64I32x4SignSelect: {
+ __ Blendvps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2));
+ break;
+ }
+ case kX64I64x2SignSelect: {
+ __ Blendvpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2));
+ break;
+ }
case kX64S128And: {
- __ Pand(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pand);
break;
}
case kX64S128Or: {
- __ Por(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(por);
break;
}
case kX64S128Xor: {
- __ Pxor(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ ASSEMBLE_SIMD_BINOP(pxor);
break;
}
case kX64S128Not: {
@@ -3734,76 +3633,93 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kX64S8x16LoadSplat: {
+ case kX64S128Load8Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- __ Pinsrb(i.OutputSimd128Register(), i.MemoryOperand(), 0);
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Pinsrb(dst, dst, i.MemoryOperand(), 0);
__ Pxor(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(i.OutputSimd128Register(), kScratchDoubleReg);
+ __ Pshufb(dst, kScratchDoubleReg);
break;
}
- case kX64S16x8LoadSplat: {
+ case kX64S128Load16Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- __ Pinsrw(i.OutputSimd128Register(), i.MemoryOperand(), 0);
- __ Pshuflw(i.OutputSimd128Register(), i.OutputSimd128Register(),
- uint8_t{0});
- __ Punpcklqdq(i.OutputSimd128Register(), i.OutputSimd128Register());
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Pinsrw(dst, dst, i.MemoryOperand(), 0);
+ __ Pshuflw(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
break;
}
- case kX64S32x4LoadSplat: {
+ case kX64S128Load32Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vbroadcastss(i.OutputSimd128Register(), i.MemoryOperand());
} else {
- __ Movss(i.OutputSimd128Register(), i.MemoryOperand());
- __ Shufps(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ __ movss(i.OutputSimd128Register(), i.MemoryOperand());
+ __ shufps(i.OutputSimd128Register(), i.OutputSimd128Register(),
byte{0});
}
break;
}
- case kX64S64x2LoadSplat: {
+ case kX64S128Load64Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Movddup(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kX64I16x8Load8x8S: {
+ case kX64S128Load8x8S: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovsxbw(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kX64I16x8Load8x8U: {
+ case kX64S128Load8x8U: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovzxbw(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kX64I32x4Load16x4S: {
+ case kX64S128Load16x4S: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovsxwd(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kX64I32x4Load16x4U: {
+ case kX64S128Load16x4U: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovzxwd(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kX64I64x2Load32x2S: {
+ case kX64S128Load32x2S: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovsxdq(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kX64I64x2Load32x2U: {
+ case kX64S128Load32x2U: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Pmovzxdq(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
- case kX64S128LoadMem32Zero: {
+ case kX64S128Store32Lane: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- __ Movd(i.OutputSimd128Register(), i.MemoryOperand());
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ uint8_t lane = i.InputUint8(index + 1);
+ if (lane == 0) {
+ __ Movss(operand, i.InputSimd128Register(index));
+ } else {
+ DCHECK_GE(3, lane);
+ __ Extractps(operand, i.InputSimd128Register(index), lane);
+ }
break;
}
- case kX64S128LoadMem64Zero: {
+ case kX64S128Store64Lane: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- __ Movq(i.OutputSimd128Register(), i.MemoryOperand());
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ uint8_t lane = i.InputUint8(index + 1);
+ if (lane == 0) {
+ __ Movlps(operand, i.InputSimd128Register(index));
+ } else {
+ DCHECK_EQ(1, lane);
+ __ Movhps(operand, i.InputSimd128Register(index));
+ }
break;
}
case kX64S32x4Swizzle: {
@@ -4005,7 +3921,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Por(dst, kScratchDoubleReg);
break;
}
- case kX64V64x2AnyTrue:
case kX64V32x4AnyTrue:
case kX64V16x8AnyTrue:
case kX64V8x16AnyTrue: {
@@ -4021,10 +3936,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
// 0x00ff, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
// respectively.
- case kX64V64x2AllTrue: {
- ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqq);
- break;
- }
case kX64V32x4AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
break;
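
The comment above explains why the comparison width in `ASSEMBLE_SIMD_ALL_TRUE` must match the lane width: a lane can be nonzero as a whole while still containing a zero byte. A scalar worked example for a single 16-bit element:

    #include <cstdio>

    int main() {
      unsigned lane = 0xff00;
      // pcmpeqw against zero: the whole 16-bit lane is nonzero -> 0x0000.
      unsigned eq_w = (lane == 0) ? 0xFFFFu : 0x0000u;
      // pcmpeqb against zero: only the zero low byte becomes all-ones.
      unsigned eq_b = ((lane & 0xFFu) == 0 ? 0x00FFu : 0u) |
                      (((lane >> 8) & 0xFFu) == 0 ? 0xFF00u : 0u);
      // ptest sets ZF iff the compare result is all zero; AllTrue reads ZF.
      std::printf("pcmpeqw -> 0x%04x, ZF=%d\n", eq_w, eq_w == 0);  // ZF=1
      std::printf("pcmpeqb -> 0x%04x, ZF=%d\n", eq_b, eq_b == 0);  // ZF=0
    }
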
@@ -4592,18 +4503,25 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
unwinding_info_writer_.MarkBlockWillExit();
- // We might need rcx and rdx for scratch.
+ // We might need rcx and r10 for scratch.
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & rcx.bit());
- DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & rdx.bit());
+ DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & r10.bit());
+ X64OperandConverter g(this, nullptr);
int parameter_count =
static_cast<int>(call_descriptor->StackParameterCount());
- X64OperandConverter g(this, nullptr);
- Register pop_reg = additional_pop_count->IsImmediate()
- ? rcx
- : g.ToRegister(additional_pop_count);
- Register scratch_reg = pop_reg == rcx ? rdx : rcx;
- Register argc_reg =
- additional_pop_count->IsImmediate() ? pop_reg : scratch_reg;
+
+  // {additional_pop_count} is only greater than zero if {parameter_count == 0}.
+ // Check RawMachineAssembler::PopAndReturn.
+ if (parameter_count != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (__ emit_debug_code()) {
+ __ cmpq(g.ToRegister(additional_pop_count), Immediate(0));
+ __ Assert(equal, AbortReason::kUnexpectedAdditionalPopValue);
+ }
+ }
+
+ Register argc_reg = rcx;
#ifdef V8_NO_ARGUMENTS_ADAPTOR
// Functions with JS linkage have at least one parameter (the receiver).
// If {parameter_count} == 0, it means it is a builtin with
@@ -4636,41 +4554,33 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
if (drop_jsargs) {
- // In addition to the slots given by {additional_pop_count}, we must pop all
- // arguments from the stack (including the receiver). This number of
- // arguments is given by max(1 + argc_reg, parameter_count).
- Label argc_reg_has_final_count;
- // Exclude the receiver to simplify the computation. We'll account for it at
- // the end.
- int parameter_count_withouth_receiver = parameter_count - 1;
- if (parameter_count_withouth_receiver != 0) {
- __ cmpq(argc_reg, Immediate(parameter_count_withouth_receiver));
- __ j(greater_equal, &argc_reg_has_final_count, Label::kNear);
- __ movq(argc_reg, Immediate(parameter_count_withouth_receiver));
- __ bind(&argc_reg_has_final_count);
- }
- // Add additional pop count.
- if (additional_pop_count->IsImmediate()) {
- DCHECK_EQ(pop_reg, argc_reg);
- int additional_count = g.ToConstant(additional_pop_count).ToInt32();
- if (additional_count != 0) {
- __ addq(pop_reg, Immediate(additional_count));
- }
- } else {
- __ addq(pop_reg, argc_reg);
- }
+ // We must pop all arguments from the stack (including the receiver). This
+ // number of arguments is given by max(1 + argc_reg, parameter_count).
+ int parameter_count_without_receiver =
+ parameter_count - 1; // Exclude the receiver to simplify the
+ // computation. We'll account for it at the end.
+ Label mismatch_return;
+ Register scratch_reg = r10;
+ DCHECK_NE(argc_reg, scratch_reg);
+ __ cmpq(argc_reg, Immediate(parameter_count_without_receiver));
+ __ j(greater, &mismatch_return, Label::kNear);
+ __ Ret(parameter_count * kSystemPointerSize, scratch_reg);
+ __ bind(&mismatch_return);
__ PopReturnAddressTo(scratch_reg);
- __ leaq(rsp, Operand(rsp, pop_reg, times_system_pointer_size,
+ __ leaq(rsp, Operand(rsp, argc_reg, times_system_pointer_size,
kSystemPointerSize)); // Also pop the receiver.
// We use a return instead of a jump for better return address prediction.
__ PushReturnAddressFrom(scratch_reg);
__ Ret();
} else if (additional_pop_count->IsImmediate()) {
+ Register scratch_reg = r10;
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
size_t pop_size = (parameter_count + additional_count) * kSystemPointerSize;
CHECK_LE(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
__ Ret(static_cast<int>(pop_size), scratch_reg);
} else {
+ Register pop_reg = g.ToRegister(additional_pop_count);
+ Register scratch_reg = pop_reg == r10 ? rcx : r10;
int pop_size = static_cast<int>(parameter_count * kSystemPointerSize);
__ PopReturnAddressTo(scratch_reg);
__ leaq(rsp, Operand(rsp, pop_reg, times_system_pointer_size,
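
The rewritten epilogue pops max(argc + 1, parameter_count) slots when dropping JS arguments: the fast path issues a fixed `Ret(parameter_count * kSystemPointerSize)` when argc does not exceed the declared parameters, and the mismatch path rebuilds the drop from `argc_reg`. A sketch of the slot arithmetic, with hypothetical names:

    #include <algorithm>
    #include <cstdio>

    // Slots to pop, receiver included.
    int SlotsToPop(int argc_without_receiver, int parameter_count) {
      return std::max(argc_without_receiver + 1, parameter_count);
    }

    int main() {
      std::printf("%d\n", SlotsToPop(1, 3));  // under-application: pop 3
      std::printf("%d\n", SlotsToPop(5, 3));  // over-application: pop 6
    }
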
@@ -4682,7 +4592,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
void CodeGenerator::FinishCode() { tasm()->PatchConstPool(); }
-void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {}
void CodeGenerator::IncrementStackAccessCounter(
InstructionOperand* source, InstructionOperand* destination) {
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 7312121a0a..f1958e8141 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -156,7 +156,6 @@ namespace compiler {
V(X64Peek) \
V(X64F64x2Splat) \
V(X64F64x2ExtractLane) \
- V(X64F64x2ReplaceLane) \
V(X64F64x2Abs) \
V(X64F64x2Neg) \
V(X64F64x2Sqrt) \
@@ -203,27 +202,18 @@ namespace compiler {
V(X64F32x4Round) \
V(X64I64x2Splat) \
V(X64I64x2ExtractLane) \
- V(X64I64x2ReplaceLane) \
V(X64I64x2Neg) \
+ V(X64I64x2BitMask) \
V(X64I64x2Shl) \
V(X64I64x2ShrS) \
V(X64I64x2Add) \
V(X64I64x2Sub) \
V(X64I64x2Mul) \
- V(X64I64x2MinS) \
- V(X64I64x2MaxS) \
V(X64I64x2Eq) \
- V(X64I64x2Ne) \
- V(X64I64x2GtS) \
- V(X64I64x2GeS) \
V(X64I64x2ShrU) \
- V(X64I64x2MinU) \
- V(X64I64x2MaxU) \
- V(X64I64x2GtU) \
- V(X64I64x2GeU) \
+ V(X64I64x2SignSelect) \
V(X64I32x4Splat) \
V(X64I32x4ExtractLane) \
- V(X64I32x4ReplaceLane) \
V(X64I32x4SConvertF32x4) \
V(X64I32x4SConvertI16x8Low) \
V(X64I32x4SConvertI16x8High) \
@@ -251,10 +241,9 @@ namespace compiler {
V(X64I32x4Abs) \
V(X64I32x4BitMask) \
V(X64I32x4DotI16x8S) \
+ V(X64I32x4SignSelect) \
V(X64I16x8Splat) \
- V(X64I16x8ExtractLaneU) \
V(X64I16x8ExtractLaneS) \
- V(X64I16x8ReplaceLane) \
V(X64I16x8SConvertI8x16Low) \
V(X64I16x8SConvertI8x16High) \
V(X64I16x8Neg) \
@@ -262,10 +251,10 @@ namespace compiler {
V(X64I16x8ShrS) \
V(X64I16x8SConvertI32x4) \
V(X64I16x8Add) \
- V(X64I16x8AddSaturateS) \
+ V(X64I16x8AddSatS) \
V(X64I16x8AddHoriz) \
V(X64I16x8Sub) \
- V(X64I16x8SubSaturateS) \
+ V(X64I16x8SubSatS) \
V(X64I16x8Mul) \
V(X64I16x8MinS) \
V(X64I16x8MaxS) \
@@ -277,8 +266,8 @@ namespace compiler {
V(X64I16x8UConvertI8x16High) \
V(X64I16x8ShrU) \
V(X64I16x8UConvertI32x4) \
- V(X64I16x8AddSaturateU) \
- V(X64I16x8SubSaturateU) \
+ V(X64I16x8AddSatU) \
+ V(X64I16x8SubSatU) \
V(X64I16x8MinU) \
V(X64I16x8MaxU) \
V(X64I16x8GtU) \
@@ -286,18 +275,23 @@ namespace compiler {
V(X64I16x8RoundingAverageU) \
V(X64I16x8Abs) \
V(X64I16x8BitMask) \
+ V(X64I16x8SignSelect) \
V(X64I8x16Splat) \
- V(X64I8x16ExtractLaneU) \
V(X64I8x16ExtractLaneS) \
- V(X64I8x16ReplaceLane) \
+ V(X64Pinsrb) \
+ V(X64Pinsrw) \
+ V(X64Pinsrd) \
+ V(X64Pinsrq) \
+ V(X64Pextrb) \
+ V(X64Pextrw) \
V(X64I8x16SConvertI16x8) \
V(X64I8x16Neg) \
V(X64I8x16Shl) \
V(X64I8x16ShrS) \
V(X64I8x16Add) \
- V(X64I8x16AddSaturateS) \
+ V(X64I8x16AddSatS) \
V(X64I8x16Sub) \
- V(X64I8x16SubSaturateS) \
+ V(X64I8x16SubSatS) \
V(X64I8x16Mul) \
V(X64I8x16MinS) \
V(X64I8x16MaxS) \
@@ -306,8 +300,8 @@ namespace compiler {
V(X64I8x16GtS) \
V(X64I8x16GeS) \
V(X64I8x16UConvertI16x8) \
- V(X64I8x16AddSaturateU) \
- V(X64I8x16SubSaturateU) \
+ V(X64I8x16AddSatU) \
+ V(X64I8x16SubSatU) \
V(X64I8x16ShrU) \
V(X64I8x16MinU) \
V(X64I8x16MaxU) \
@@ -316,6 +310,7 @@ namespace compiler {
V(X64I8x16RoundingAverageU) \
V(X64I8x16Abs) \
V(X64I8x16BitMask) \
+ V(X64I8x16SignSelect) \
V(X64S128Const) \
V(X64S128Zero) \
V(X64S128AllOnes) \
@@ -327,18 +322,18 @@ namespace compiler {
V(X64S128AndNot) \
V(X64I8x16Swizzle) \
V(X64I8x16Shuffle) \
- V(X64S8x16LoadSplat) \
- V(X64S16x8LoadSplat) \
- V(X64S32x4LoadSplat) \
- V(X64S64x2LoadSplat) \
- V(X64I16x8Load8x8S) \
- V(X64I16x8Load8x8U) \
- V(X64I32x4Load16x4S) \
- V(X64I32x4Load16x4U) \
- V(X64I64x2Load32x2S) \
- V(X64I64x2Load32x2U) \
- V(X64S128LoadMem32Zero) \
- V(X64S128LoadMem64Zero) \
+ V(X64S128Load8Splat) \
+ V(X64S128Load16Splat) \
+ V(X64S128Load32Splat) \
+ V(X64S128Load64Splat) \
+ V(X64S128Load8x8S) \
+ V(X64S128Load8x8U) \
+ V(X64S128Load16x4S) \
+ V(X64S128Load16x4U) \
+ V(X64S128Load32x2S) \
+ V(X64S128Load32x2U) \
+ V(X64S128Store32Lane) \
+ V(X64S128Store64Lane) \
V(X64S32x4Swizzle) \
V(X64S32x4Shuffle) \
V(X64S16x8Blend) \
@@ -364,8 +359,6 @@ namespace compiler {
V(X64S8x8Reverse) \
V(X64S8x4Reverse) \
V(X64S8x2Reverse) \
- V(X64V64x2AnyTrue) \
- V(X64V64x2AllTrue) \
V(X64V32x4AnyTrue) \
V(X64V32x4AllTrue) \
V(X64V16x8AnyTrue) \
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 169753b40e..2af0877e53 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -126,9 +126,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Lea:
case kX64Dec32:
case kX64Inc32:
+ case kX64Pinsrb:
+ case kX64Pinsrw:
+ case kX64Pinsrd:
+ case kX64Pinsrq:
case kX64F64x2Splat:
case kX64F64x2ExtractLane:
- case kX64F64x2ReplaceLane:
case kX64F64x2Abs:
case kX64F64x2Neg:
case kX64F64x2Sqrt:
@@ -175,27 +178,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4Round:
case kX64I64x2Splat:
case kX64I64x2ExtractLane:
- case kX64I64x2ReplaceLane:
case kX64I64x2Neg:
+ case kX64I64x2BitMask:
case kX64I64x2Shl:
case kX64I64x2ShrS:
case kX64I64x2Add:
case kX64I64x2Sub:
case kX64I64x2Mul:
- case kX64I64x2MinS:
- case kX64I64x2MaxS:
case kX64I64x2Eq:
- case kX64I64x2Ne:
- case kX64I64x2GtS:
- case kX64I64x2GeS:
case kX64I64x2ShrU:
- case kX64I64x2MinU:
- case kX64I64x2MaxU:
- case kX64I64x2GtU:
- case kX64I64x2GeU:
+ case kX64I64x2SignSelect:
case kX64I32x4Splat:
case kX64I32x4ExtractLane:
- case kX64I32x4ReplaceLane:
case kX64I32x4SConvertF32x4:
case kX64I32x4SConvertI16x8Low:
case kX64I32x4SConvertI16x8High:
@@ -223,10 +217,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4Abs:
case kX64I32x4BitMask:
case kX64I32x4DotI16x8S:
+ case kX64I32x4SignSelect:
case kX64I16x8Splat:
- case kX64I16x8ExtractLaneU:
case kX64I16x8ExtractLaneS:
- case kX64I16x8ReplaceLane:
case kX64I16x8SConvertI8x16Low:
case kX64I16x8SConvertI8x16High:
case kX64I16x8Neg:
@@ -234,10 +227,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I16x8ShrS:
case kX64I16x8SConvertI32x4:
case kX64I16x8Add:
- case kX64I16x8AddSaturateS:
+ case kX64I16x8AddSatS:
case kX64I16x8AddHoriz:
case kX64I16x8Sub:
- case kX64I16x8SubSaturateS:
+ case kX64I16x8SubSatS:
case kX64I16x8Mul:
case kX64I16x8MinS:
case kX64I16x8MaxS:
@@ -249,8 +242,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I16x8UConvertI8x16High:
case kX64I16x8UConvertI32x4:
case kX64I16x8ShrU:
- case kX64I16x8AddSaturateU:
- case kX64I16x8SubSaturateU:
+ case kX64I16x8AddSatU:
+ case kX64I16x8SubSatU:
case kX64I16x8MinU:
case kX64I16x8MaxU:
case kX64I16x8GtU:
@@ -258,18 +251,17 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I16x8RoundingAverageU:
case kX64I16x8Abs:
case kX64I16x8BitMask:
+ case kX64I16x8SignSelect:
case kX64I8x16Splat:
- case kX64I8x16ExtractLaneU:
case kX64I8x16ExtractLaneS:
- case kX64I8x16ReplaceLane:
case kX64I8x16SConvertI16x8:
case kX64I8x16Neg:
case kX64I8x16Shl:
case kX64I8x16ShrS:
case kX64I8x16Add:
- case kX64I8x16AddSaturateS:
+ case kX64I8x16AddSatS:
case kX64I8x16Sub:
- case kX64I8x16SubSaturateS:
+ case kX64I8x16SubSatS:
case kX64I8x16Mul:
case kX64I8x16MinS:
case kX64I8x16MaxS:
@@ -278,8 +270,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I8x16GtS:
case kX64I8x16GeS:
case kX64I8x16UConvertI16x8:
- case kX64I8x16AddSaturateU:
- case kX64I8x16SubSaturateU:
+ case kX64I8x16AddSatU:
+ case kX64I8x16SubSatU:
case kX64I8x16ShrU:
case kX64I8x16MinU:
case kX64I8x16MaxU:
@@ -288,6 +280,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I8x16RoundingAverageU:
case kX64I8x16Abs:
case kX64I8x16BitMask:
+ case kX64I8x16SignSelect:
case kX64S128And:
case kX64S128Or:
case kX64S128Xor:
@@ -297,8 +290,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S128Zero:
case kX64S128AllOnes:
case kX64S128AndNot:
- case kX64V64x2AnyTrue:
- case kX64V64x2AllTrue:
case kX64V32x4AnyTrue:
case kX64V32x4AllTrue:
case kX64V16x8AnyTrue:
@@ -359,8 +350,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Movb:
case kX64Movw:
+ case kX64S128Store32Lane:
+ case kX64S128Store64Lane:
return kHasSideEffect;
+ case kX64Pextrb:
+ case kX64Pextrw:
case kX64Movl:
if (instr->HasOutput()) {
DCHECK_LE(1, instr->InputCount());
@@ -378,18 +373,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Movsd:
case kX64Movss:
case kX64Movdqu:
- case kX64S8x16LoadSplat:
- case kX64S16x8LoadSplat:
- case kX64S32x4LoadSplat:
- case kX64S64x2LoadSplat:
- case kX64I16x8Load8x8S:
- case kX64I16x8Load8x8U:
- case kX64I32x4Load16x4S:
- case kX64I32x4Load16x4U:
- case kX64I64x2Load32x2S:
- case kX64I64x2Load32x2U:
- case kX64S128LoadMem32Zero:
- case kX64S128LoadMem64Zero:
+ case kX64S128Load8Splat:
+ case kX64S128Load16Splat:
+ case kX64S128Load32Splat:
+ case kX64S128Load64Splat:
+ case kX64S128Load8x8S:
+ case kX64S128Load8x8U:
+ case kX64S128Load16x4S:
+ case kX64S128Load16x4U:
+ case kX64S128Load32x2S:
+ case kX64S128Load32x2U:
return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
case kX64Peek:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index db212677ea..7a8a2b4aa6 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -5,8 +5,11 @@
#include <algorithm>
#include "src/base/iterator.h"
+#include "src/base/logging.h"
#include "src/base/overflowing-math.h"
+#include "src/codegen/machine-type.h"
#include "src/compiler/backend/instruction-selector-impl.h"
+#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/roots/roots-inl.h"
@@ -90,6 +93,16 @@ class X64OperandGenerator final : public OperandGenerator {
return rep == MachineRepresentation::kWord32 ||
(COMPRESS_POINTERS_BOOL &&
(IsAnyTagged(rep) || IsAnyCompressed(rep)));
+ case kAVXFloat64Add:
+ case kAVXFloat64Sub:
+ case kAVXFloat64Mul:
+ DCHECK_EQ(MachineRepresentation::kFloat64, rep);
+ return true;
+ case kAVXFloat32Add:
+ case kAVXFloat32Sub:
+ case kAVXFloat32Mul:
+ DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ return true;
case kX64Cmp16:
case kX64Test16:
return rep == MachineRepresentation::kWord16;
@@ -178,12 +191,13 @@ class X64OperandGenerator final : public OperandGenerator {
size_t* input_count) {
{
LoadMatcher<ExternalReferenceMatcher> m(operand);
- if (m.index().HasValue() && m.object().HasValue() &&
- selector()->CanAddressRelativeToRootsRegister(m.object().Value())) {
+ if (m.index().HasResolvedValue() && m.object().HasResolvedValue() &&
+ selector()->CanAddressRelativeToRootsRegister(
+ m.object().ResolvedValue())) {
ptrdiff_t const delta =
- m.index().Value() +
+ m.index().ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
- selector()->isolate(), m.object().Value());
+ selector()->isolate(), m.object().ResolvedValue());
if (is_int32(delta)) {
inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
return kMode_Root;
@@ -229,7 +243,7 @@ class X64OperandGenerator final : public OperandGenerator {
namespace {
ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kX64Movss;
@@ -332,53 +346,93 @@ void InstructionSelector::VisitAbortCSAAssert(Node* node) {
Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
}
+void InstructionSelector::VisitLoadLane(Node* node) {
+ LoadLaneParameters params = LoadLaneParametersOf(node->op());
+ InstructionCode opcode = kArchNop;
+ if (params.rep == MachineType::Int8()) {
+ opcode = kX64Pinsrb;
+ } else if (params.rep == MachineType::Int16()) {
+ opcode = kX64Pinsrw;
+ } else if (params.rep == MachineType::Int32()) {
+ opcode = kX64Pinsrd;
+ } else if (params.rep == MachineType::Int64()) {
+ opcode = kX64Pinsrq;
+ } else {
+ UNREACHABLE();
+ }
+
+ X64OperandGenerator g(this);
+ InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+  // Input 0 is the value node, input 1 is the lane index, and
+  // GetEffectiveAddressMemoryOperand appends up to 3 more inputs. This
+  // ordering is consistent with other operations that use the same opcode.
+ InstructionOperand inputs[5];
+ size_t input_count = 0;
+
+ inputs[input_count++] = g.UseRegister(node->InputAt(2));
+ inputs[input_count++] = g.UseImmediate(params.laneidx);
+
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ opcode |= AddressingModeField::encode(mode);
+
+ DCHECK_GE(5, input_count);
+
+ // x64 supports unaligned loads.
+ DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
+ if (params.kind == MemoryAccessKind::kProtected) {
+ opcode |= MiscField::encode(kMemoryAccessProtected);
+ }
+ Emit(opcode, 1, outputs, input_count, inputs);
+}
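
The selection above maps each lane width onto the x64 pinsr family. As a minimal scalar sketch of the semantics being selected (illustrative only; the names below are hypothetical, not V8 code):

#include <array>
#include <cstdint>
#include <cstring>

// Hypothetical scalar model of a 32-bit lane load (kX64Pinsrd): read one
// scalar from memory and replace a single lane of a 128-bit vector.
std::array<uint32_t, 4> LoadLane32(std::array<uint32_t, 4> v, const void* mem,
                                   int laneidx) {
  uint32_t scalar;
  std::memcpy(&scalar, mem, sizeof(scalar));  // x64 tolerates unaligned loads
  v[static_cast<size_t>(laneidx)] = scalar;   // pinsrd xmm, [mem], laneidx
  return v;
}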
+
void InstructionSelector::VisitLoadTransform(Node* node) {
LoadTransformParameters params = LoadTransformParametersOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (params.transformation) {
- case LoadTransformation::kS8x16LoadSplat:
- opcode = kX64S8x16LoadSplat;
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kX64S128Load8Splat;
break;
- case LoadTransformation::kS16x8LoadSplat:
- opcode = kX64S16x8LoadSplat;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kX64S128Load16Splat;
break;
- case LoadTransformation::kS32x4LoadSplat:
- opcode = kX64S32x4LoadSplat;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kX64S128Load32Splat;
break;
- case LoadTransformation::kS64x2LoadSplat:
- opcode = kX64S64x2LoadSplat;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kX64S128Load64Splat;
break;
- case LoadTransformation::kI16x8Load8x8S:
- opcode = kX64I16x8Load8x8S;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kX64S128Load8x8S;
break;
- case LoadTransformation::kI16x8Load8x8U:
- opcode = kX64I16x8Load8x8U;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kX64S128Load8x8U;
break;
- case LoadTransformation::kI32x4Load16x4S:
- opcode = kX64I32x4Load16x4S;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kX64S128Load16x4S;
break;
- case LoadTransformation::kI32x4Load16x4U:
- opcode = kX64I32x4Load16x4U;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kX64S128Load16x4U;
break;
- case LoadTransformation::kI64x2Load32x2S:
- opcode = kX64I64x2Load32x2S;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kX64S128Load32x2S;
break;
- case LoadTransformation::kI64x2Load32x2U:
- opcode = kX64I64x2Load32x2U;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kX64S128Load32x2U;
break;
- case LoadTransformation::kS128LoadMem32Zero:
- opcode = kX64S128LoadMem32Zero;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kX64Movss;
break;
- case LoadTransformation::kS128LoadMem64Zero:
- opcode = kX64S128LoadMem64Zero;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kX64Movsd;
break;
default:
UNREACHABLE();
}
  // x64 supports unaligned loads.
- DCHECK_NE(params.kind, LoadKind::kUnaligned);
+ DCHECK_NE(params.kind, MemoryAccessKind::kUnaligned);
InstructionCode code = opcode;
- if (params.kind == LoadKind::kProtected) {
+ if (params.kind == MemoryAccessKind::kProtected) {
code |= MiscField::encode(kMemoryAccessProtected);
}
VisitLoad(node, node, code);
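
Note how S128Load32Zero and S128Load64Zero above need no dedicated opcodes: a memory-source movss loads 32 bits into lane 0 and zeroes the remaining lanes, and movsd does the same for 64 bits. A sketch with intrinsics (an illustration, not V8 code):

#include <immintrin.h>

// movss from memory: lane 0 gets the scalar, lanes 1-3 are cleared.
__m128 Load32ZeroModel(const float* p) { return _mm_load_ss(p); }
// movsd from memory: lane 0 gets the scalar, lane 1 is cleared.
__m128d Load64ZeroModel(const double* p) { return _mm_load_sd(p); }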
@@ -486,6 +540,40 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitStoreLane(Node* node) {
+ X64OperandGenerator g(this);
+
+ StoreLaneParameters params = StoreLaneParametersOf(node->op());
+ InstructionCode opcode = kArchNop;
+ if (params.rep == MachineRepresentation::kWord8) {
+ opcode = kX64Pextrb;
+ } else if (params.rep == MachineRepresentation::kWord16) {
+ opcode = kX64Pextrw;
+ } else if (params.rep == MachineRepresentation::kWord32) {
+ opcode = kX64S128Store32Lane;
+ } else if (params.rep == MachineRepresentation::kWord64) {
+ opcode = kX64S128Store64Lane;
+ } else {
+ UNREACHABLE();
+ }
+
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ opcode |= AddressingModeField::encode(addressing_mode);
+
+ if (params.kind == MemoryAccessKind::kProtected) {
+ opcode |= MiscField::encode(kMemoryAccessProtected);
+ }
+
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(2));
+ inputs[input_count++] = value_operand;
+ inputs[input_count++] = g.UseImmediate(params.laneidx);
+ DCHECK_GE(4, input_count);
+ Emit(opcode, 0, nullptr, input_count, inputs);
+}
+
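
VisitStoreLane is the mirror image: extract one lane and write only that scalar to memory. A scalar sketch of the 32-bit case (hypothetical names, not V8 code):

#include <array>
#include <cstdint>
#include <cstring>

// Hypothetical scalar model of kX64S128Store32Lane: write a single lane of a
// 128-bit vector to memory, leaving the vector itself untouched.
void StoreLane32(const std::array<uint32_t, 4>& v, void* mem, int laneidx) {
  std::memcpy(mem, &v[static_cast<size_t>(laneidx)], sizeof(uint32_t));
}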
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
@@ -635,7 +723,7 @@ bool TryMergeTruncateInt64ToInt32IntoLoad(InstructionSelector* selector,
if (load->opcode() == IrOpcode::kLoad && selector->CanCover(node, load)) {
LoadRepresentation load_rep = LoadRepresentationOf(load->op());
MachineRepresentation rep = load_rep.representation();
- InstructionCode opcode = kArchNop;
+ InstructionCode opcode;
switch (rep) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
@@ -648,7 +736,7 @@ bool TryMergeTruncateInt64ToInt32IntoLoad(InstructionSelector* selector,
case MachineRepresentation::kWord64:
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTagged:
- case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
opcode = kX64Movl;
break;
default:
@@ -1035,12 +1123,14 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
// {EmitIdentity} reuses the virtual register of the first input
// for the output. This is exactly what we want here.
EmitIdentity(node);
- } else if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
+ } else if (m.right().HasResolvedValue() &&
+ g.CanBeImmediate(m.right().node())) {
// Turn subtractions of constant values into immediate "leal" instructions
// by negating the value.
- Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.TempImmediate(base::NegateWithWraparound(m.right().Value())));
+ Emit(
+ kX64Lea32 | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::NegateWithWraparound(m.right().ResolvedValue())));
} else {
VisitBinop(this, node, kX64Sub32);
}
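
The negation trick above rewrites x - c as a single leal computing x + (-c). A scalar model of the wraparound negation (the helper name below is an illustrative stand-in for base::NegateWithWraparound):

#include <cstdint>

// Two's-complement negation that is well defined even for INT32_MIN
// (which wraps back to itself), matching what the emitted leal computes.
int32_t NegateWithWraparoundModel(int32_t c) {
  return static_cast<int32_t>(0u - static_cast<uint32_t>(c));
}
// x - c is then emitted as: leal dst, [x + NegateWithWraparoundModel(c)]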
@@ -1052,12 +1142,12 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
if (m.left().Is(0)) {
Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
} else {
- if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
+ if (m.right().HasResolvedValue() && g.CanBeImmediate(m.right().node())) {
// Turn subtractions of constant values into immediate "leaq" instructions
// by negating the value.
Emit(kX64Lea | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.TempImmediate(-static_cast<int32_t>(m.right().Value())));
+ g.TempImmediate(-static_cast<int32_t>(m.right().ResolvedValue())));
return;
}
VisitBinop(this, node, kX64Sub);
@@ -1269,7 +1359,7 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
MachineRepresentation rep = load_rep.representation();
- InstructionCode opcode = kArchNop;
+ InstructionCode opcode;
switch (rep) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
@@ -1283,7 +1373,6 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
size_t input_count = 0;
@@ -1401,14 +1490,60 @@ void VisitRRO(InstructionSelector* selector, Node* node,
}
void VisitFloatBinop(InstructionSelector* selector, Node* node,
- ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ InstructionCode avx_opcode, InstructionCode sse_opcode) {
X64OperandGenerator g(selector);
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- InstructionOperand operand1 = g.Use(node->InputAt(1));
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ InstructionOperand inputs[8];
+ size_t input_count = 0;
+ InstructionOperand outputs[1];
+ size_t output_count = 0;
+
+ if (left == right) {
+ // If both inputs refer to the same operand, enforce allocating a register
+ // for both of them to ensure that we don't end up generating code like
+ // this:
+ //
+    //   movss xmm0, [rbp-0x10]
+    //   addss xmm0, [rbp-0x10]
+ InstructionOperand const input = g.UseRegister(left);
+ inputs[input_count++] = input;
+ inputs[input_count++] = input;
+ } else {
+ int effect_level = selector->GetEffectLevel(node);
+ if (node->op()->HasProperty(Operator::kCommutative) &&
+ (g.CanBeBetterLeftOperand(right) ||
+ g.CanBeMemoryOperand(avx_opcode, node, left, effect_level)) &&
+ (!g.CanBeBetterLeftOperand(left) ||
+ !g.CanBeMemoryOperand(avx_opcode, node, right, effect_level))) {
+ std::swap(left, right);
+ }
+ if (g.CanBeMemoryOperand(avx_opcode, node, right, effect_level)) {
+ inputs[input_count++] = g.UseRegister(left);
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(right, inputs, &input_count);
+ avx_opcode |= AddressingModeField::encode(addressing_mode);
+ sse_opcode |= AddressingModeField::encode(addressing_mode);
+ } else {
+ inputs[input_count++] = g.UseRegister(left);
+ inputs[input_count++] = g.Use(right);
+ }
+ }
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+
if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
+ outputs[output_count++] = g.DefineAsRegister(node);
+ DCHECK_EQ(1u, output_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+ selector->Emit(avx_opcode, output_count, outputs, input_count, inputs);
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ DCHECK_EQ(1u, output_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+ selector->Emit(sse_opcode, output_count, outputs, input_count, inputs);
}
}
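
The AVX/SSE split above reflects an encoding difference: legacy SSE arithmetic overwrites its first source (hence DefineSameAsFirst), while VEX-encoded AVX forms take a separate destination (hence DefineAsRegister). A sketch with intrinsics, assuming a compiler targeting each ISA:

#include <immintrin.h>

__m128 SseShape(__m128 a, __m128 b) {
  a = _mm_add_ps(a, b);     // addps a, b: destination must alias input 0
  return a;
}

__m128 AvxShape(__m128 a, __m128 b) {
  return _mm_add_ps(a, b);  // with AVX: vaddps dst, a, b (three operands)
}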
@@ -1902,8 +2037,8 @@ void VisitWord64EqualImpl(InstructionSelector* selector, Node* node,
const RootsTable& roots_table = selector->isolate()->roots_table();
RootIndex root_index;
HeapObjectBinopMatcher m(node);
- if (m.right().HasValue() &&
- roots_table.IsRootHandle(m.right().Value(), &root_index)) {
+ if (m.right().HasResolvedValue() &&
+ roots_table.IsRootHandle(m.right().ResolvedValue(), &root_index)) {
InstructionCode opcode =
kX64Cmp | AddressingModeField::encode(kMode_Root);
return VisitCompare(
@@ -1929,14 +2064,14 @@ void VisitWord32EqualImpl(InstructionSelector* selector, Node* node,
// present.
{
CompressedHeapObjectBinopMatcher m(node);
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
left = m.left().node();
- right = m.right().Value();
+ right = m.right().ResolvedValue();
} else {
HeapObjectBinopMatcher m2(node);
- if (m2.right().HasValue()) {
+ if (m2.right().HasResolvedValue()) {
left = m2.left().node();
- right = m2.right().Value();
+ right = m2.right().ResolvedValue();
}
}
}
@@ -2442,7 +2577,8 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
Float64Matcher mleft(left);
- if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
+ if (mleft.HasResolvedValue() &&
+ (bit_cast<uint64_t>(mleft.ResolvedValue()) >> 32) == 0u) {
Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
return;
}
@@ -2486,7 +2622,7 @@ void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kWord32AtomicExchangeInt8;
@@ -2505,7 +2641,7 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
opcode = kX64Word64AtomicExchangeUint8;
@@ -2527,7 +2663,7 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2540,14 +2676,13 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
opcode = kWord32AtomicExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Uint8()) {
opcode = kX64Word64AtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
@@ -2558,14 +2693,13 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
opcode = kX64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2578,14 +2712,13 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Uint8()) {
opcode = kX64Word64AtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
@@ -2596,7 +2729,6 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
opcode = kX64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
- return;
}
VisitAtomicCompareExchange(this, node, opcode);
}
@@ -2605,7 +2737,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Int8()) {
opcode = int8_op;
} else if (type == MachineType::Uint8()) {
@@ -2618,7 +2750,6 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
opcode = word32_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinop(this, node, opcode);
}
@@ -2641,7 +2772,7 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
ArchOpcode word64_op) {
MachineType type = AtomicOpType(node->op());
- ArchOpcode opcode = kArchNop;
+ ArchOpcode opcode;
if (type == MachineType::Uint8()) {
opcode = uint8_op;
} else if (type == MachineType::Uint16()) {
@@ -2652,7 +2783,6 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
opcode = word64_op;
} else {
UNREACHABLE();
- return;
}
VisitAtomicBinop(this, node, opcode);
}
@@ -2670,96 +2800,87 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
-#define SIMD_TYPES(V) \
- V(F64x2) \
- V(F32x4) \
- V(I64x2) \
- V(I32x4) \
- V(I16x8) \
- V(I8x16)
+#define SIMD_BINOP_SSE_AVX_LIST(V) \
+ V(F64x2Add) \
+ V(F64x2Sub) \
+ V(F64x2Mul) \
+ V(F64x2Div) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Lt) \
+ V(F64x2Le) \
+ V(F32x4Add) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Div) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Eq) \
+ V(I32x4Add) \
+ V(I32x4AddHoriz) \
+ V(I32x4Sub) \
+ V(I32x4Mul) \
+ V(I32x4MinS) \
+ V(I32x4MaxS) \
+ V(I32x4Eq) \
+ V(I32x4GtS) \
+ V(I32x4MinU) \
+ V(I32x4MaxU) \
+ V(I32x4DotI16x8S) \
+ V(I16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4) \
+ V(I16x8Add) \
+ V(I16x8AddSatS) \
+ V(I16x8AddHoriz) \
+ V(I16x8Sub) \
+ V(I16x8SubSatS) \
+ V(I16x8Mul) \
+ V(I16x8MinS) \
+ V(I16x8MaxS) \
+ V(I16x8Eq) \
+ V(I16x8GtS) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
+ V(I16x8MinU) \
+ V(I16x8MaxU) \
+ V(I16x8RoundingAverageU) \
+ V(I8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8) \
+ V(I8x16Add) \
+ V(I8x16AddSatS) \
+ V(I8x16Sub) \
+ V(I8x16SubSatS) \
+ V(I8x16MinS) \
+ V(I8x16MaxS) \
+ V(I8x16Eq) \
+ V(I8x16GtS) \
+ V(I8x16AddSatU) \
+ V(I8x16SubSatU) \
+ V(I8x16MinU) \
+ V(I8x16MaxU) \
+ V(I8x16RoundingAverageU) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor)
#define SIMD_BINOP_LIST(V) \
- V(F64x2Add) \
- V(F64x2Sub) \
- V(F64x2Mul) \
- V(F64x2Div) \
V(F64x2Min) \
V(F64x2Max) \
- V(F64x2Eq) \
- V(F64x2Ne) \
- V(F64x2Lt) \
- V(F64x2Le) \
- V(F32x4Add) \
V(F32x4AddHoriz) \
- V(F32x4Sub) \
- V(F32x4Mul) \
- V(F32x4Div) \
V(F32x4Min) \
V(F32x4Max) \
- V(F32x4Eq) \
- V(F32x4Ne) \
- V(F32x4Lt) \
- V(F32x4Le) \
- V(I64x2Add) \
- V(I64x2Sub) \
- V(I64x2Eq) \
- V(I64x2GtS) \
- V(I32x4Add) \
- V(I32x4AddHoriz) \
- V(I32x4Sub) \
- V(I32x4Mul) \
- V(I32x4MinS) \
- V(I32x4MaxS) \
- V(I32x4Eq) \
- V(I32x4GtS) \
V(I32x4GeS) \
- V(I32x4MinU) \
- V(I32x4MaxU) \
V(I32x4GeU) \
- V(I32x4DotI16x8S) \
- V(I16x8SConvertI32x4) \
- V(I16x8Add) \
- V(I16x8AddSaturateS) \
- V(I16x8AddHoriz) \
- V(I16x8Sub) \
- V(I16x8SubSaturateS) \
- V(I16x8Mul) \
- V(I16x8MinS) \
- V(I16x8MaxS) \
- V(I16x8Eq) \
- V(I16x8GtS) \
V(I16x8GeS) \
- V(I16x8AddSaturateU) \
- V(I16x8SubSaturateU) \
- V(I16x8MinU) \
- V(I16x8MaxU) \
V(I16x8GeU) \
- V(I16x8RoundingAverageU) \
- V(I8x16SConvertI16x8) \
- V(I8x16Add) \
- V(I8x16AddSaturateS) \
- V(I8x16Sub) \
- V(I8x16SubSaturateS) \
- V(I8x16MinS) \
- V(I8x16MaxS) \
- V(I8x16Eq) \
- V(I8x16GtS) \
V(I8x16GeS) \
- V(I8x16AddSaturateU) \
- V(I8x16SubSaturateU) \
- V(I8x16MinU) \
- V(I8x16MaxU) \
- V(I8x16GeU) \
- V(I8x16RoundingAverageU) \
- V(S128And) \
- V(S128Or) \
- V(S128Xor)
+ V(I8x16GeU)
#define SIMD_BINOP_ONE_TEMP_LIST(V) \
- V(I64x2Ne) \
- V(I64x2GeS) \
- V(I64x2GtU) \
- V(I64x2GeU) \
V(I32x4Ne) \
V(I32x4GtU) \
V(I16x8Ne) \
@@ -2776,6 +2897,7 @@ VISIT_ATOMIC_BINOP(Xor)
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
V(I64x2Neg) \
+ V(I64x2BitMask) \
V(I32x4SConvertI16x8Low) \
V(I32x4SConvertI16x8High) \
V(I32x4Neg) \
@@ -2809,13 +2931,11 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16ShrU)
#define SIMD_ANYTRUE_LIST(V) \
- V(V64x2AnyTrue) \
V(V32x4AnyTrue) \
V(V16x8AnyTrue) \
V(V8x16AnyTrue)
#define SIMD_ALLTRUE_LIST(V) \
- V(V64x2AllTrue) \
V(V32x4AllTrue) \
V(V16x8AllTrue) \
V(V8x16AllTrue)
@@ -2845,56 +2965,97 @@ void InstructionSelector::VisitS128Zero(Node* node) {
Emit(kX64S128Zero, g.DefineAsRegister(node));
}
-#define VISIT_SIMD_SPLAT(Type) \
- void InstructionSelector::Visit##Type##Splat(Node* node) { \
- X64OperandGenerator g(this); \
- Emit(kX64##Type##Splat, g.DefineAsRegister(node), \
- g.Use(node->InputAt(0))); \
+#define SIMD_TYPES_FOR_SPLAT(V) \
+ V(I64x2) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+// Splat with an optimization for const 0.
+#define VISIT_SIMD_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ X64OperandGenerator g(this); \
+ Node* input = node->InputAt(0); \
+ if (g.CanBeImmediate(input) && g.GetImmediateIntegerValue(input) == 0) { \
+ Emit(kX64S128Zero, g.DefineAsRegister(node)); \
+ } else { \
+ Emit(kX64##Type##Splat, g.DefineAsRegister(node), g.Use(input)); \
+ } \
}
-SIMD_TYPES(VISIT_SIMD_SPLAT)
+SIMD_TYPES_FOR_SPLAT(VISIT_SIMD_SPLAT)
#undef VISIT_SIMD_SPLAT
+#undef SIMD_TYPES_FOR_SPLAT
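
The const-0 special case above works because a splat replicates one scalar into every lane, so splatting zero yields the all-zeroes vector, which kX64S128Zero materializes cheaply (typically an xor of the register with itself). A scalar sketch:

#include <array>
#include <cstdint>

// Splat replicates the scalar into all lanes; Splat32(0) == {0, 0, 0, 0}.
std::array<int32_t, 4> Splat32(int32_t v) { return {v, v, v, v}; }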
-#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
- void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
- X64OperandGenerator g(this); \
- int32_t lane = OpParameter<int32_t>(node->op()); \
- Emit(kX64##Type##ExtractLane##Sign, g.DefineAsRegister(node), \
- g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
- }
-SIMD_VISIT_EXTRACT_LANE(F64x2, )
-SIMD_VISIT_EXTRACT_LANE(F32x4, )
-SIMD_VISIT_EXTRACT_LANE(I64x2, )
-SIMD_VISIT_EXTRACT_LANE(I32x4, )
-SIMD_VISIT_EXTRACT_LANE(I16x8, U)
-SIMD_VISIT_EXTRACT_LANE(I16x8, S)
-SIMD_VISIT_EXTRACT_LANE(I8x16, U)
-SIMD_VISIT_EXTRACT_LANE(I8x16, S)
+void InstructionSelector::VisitF64x2Splat(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64F64x2Splat, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitF32x4Splat(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand dst =
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+ Emit(kX64F32x4Splat, dst, g.UseRegister(node->InputAt(0)));
+}
+
+#define SIMD_VISIT_EXTRACT_LANE(Type, Sign, Op) \
+ void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
+ X64OperandGenerator g(this); \
+ int32_t lane = OpParameter<int32_t>(node->op()); \
+ Emit(kX64##Op, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), \
+ g.UseImmediate(lane)); \
+ }
+SIMD_VISIT_EXTRACT_LANE(F64x2, , F64x2ExtractLane)
+SIMD_VISIT_EXTRACT_LANE(F32x4, , F32x4ExtractLane)
+SIMD_VISIT_EXTRACT_LANE(I64x2, , I64x2ExtractLane)
+SIMD_VISIT_EXTRACT_LANE(I32x4, , I32x4ExtractLane)
+SIMD_VISIT_EXTRACT_LANE(I16x8, S, I16x8ExtractLaneS)
+SIMD_VISIT_EXTRACT_LANE(I16x8, U, Pextrw)
+SIMD_VISIT_EXTRACT_LANE(I8x16, S, I8x16ExtractLaneS)
+SIMD_VISIT_EXTRACT_LANE(I8x16, U, Pextrb)
#undef SIMD_VISIT_EXTRACT_LANE
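
The unsigned extracts above can map straight to the raw kX64Pextrb/kX64Pextrw because the hardware extract zero-extends into the destination register; the signed variants keep dedicated opcodes since they need an extra sign-extension. A scalar sketch of the two 8-bit cases (illustrative names, not V8 code):

#include <array>
#include <cstdint>

uint32_t ExtractLaneU8(const std::array<uint8_t, 16>& v, int lane) {
  return v[static_cast<size_t>(lane)];  // pextrb zero-extends by itself
}

int32_t ExtractLaneS8(const std::array<uint8_t, 16>& v, int lane) {
  // Needs an additional sign-extend after the raw extract.
  return static_cast<int8_t>(v[static_cast<size_t>(lane)]);
}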
-#define VISIT_SIMD_REPLACE_LANE(Type) \
- void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
- X64OperandGenerator g(this); \
- int32_t lane = OpParameter<int32_t>(node->op()); \
- Emit(kX64##Type##ReplaceLane, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0)), g.UseImmediate(lane), \
- g.Use(node->InputAt(1))); \
+void InstructionSelector::VisitF32x4ReplaceLane(Node* node) {
+ X64OperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node->op());
+ Emit(kX64F32x4ReplaceLane, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
+ g.Use(node->InputAt(1)));
+}
+
+#define VISIT_SIMD_REPLACE_LANE(TYPE, OPCODE) \
+ void InstructionSelector::Visit##TYPE##ReplaceLane(Node* node) { \
+ X64OperandGenerator g(this); \
+ int32_t lane = OpParameter<int32_t>(node->op()); \
+ Emit(OPCODE, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), \
+ g.UseImmediate(lane), g.Use(node->InputAt(1))); \
}
-SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
+
+#define SIMD_TYPES_FOR_REPLACE_LANE(V) \
+ V(F64x2, kX64Pinsrq) \
+ V(I64x2, kX64Pinsrq) \
+ V(I32x4, kX64Pinsrd) \
+ V(I16x8, kX64Pinsrw) \
+ V(I8x16, kX64Pinsrb)
+
+SIMD_TYPES_FOR_REPLACE_LANE(VISIT_SIMD_REPLACE_LANE)
+#undef SIMD_TYPES_FOR_REPLACE_LANE
#undef VISIT_SIMD_REPLACE_LANE
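
Note that F64x2 shares kX64Pinsrq with I64x2 above: inserting the 64 raw bits of a double into a lane is bit-identical to inserting an int64, so no floating-point-specific opcode is needed, while F32x4 keeps its own visitor (presumably because 32-bit float insertion goes through a different instruction such as insertps). A sketch of the bit reinterpretation involved:

#include <cstdint>
#include <cstring>

// Reinterpreting a double as its 64 raw bits; inserting these via pinsrq is
// equivalent to a dedicated F64x2 lane replace.
uint64_t DoubleBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return bits;
}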
-#define VISIT_SIMD_SHIFT(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- if (g.CanBeImmediate(node->InputAt(1))) { \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1))); \
- } else { \
- InstructionOperand temps[] = {g.TempSimd128Register(), \
- g.TempRegister()}; \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseUniqueRegister(node->InputAt(0)), \
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
- } \
+#define VISIT_SIMD_SHIFT(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ InstructionOperand dst = IsSupported(AVX) ? g.DefineAsRegister(node) \
+ : g.DefineSameAsFirst(node); \
+ if (g.CanBeImmediate(node->InputAt(1))) { \
+ Emit(kX64##Opcode, dst, g.UseRegister(node->InputAt(0)), \
+ g.UseImmediate(node->InputAt(1))); \
+ } else { \
+ InstructionOperand temps[] = {g.TempSimd128Register(), \
+ g.TempRegister()}; \
+ Emit(kX64##Opcode, dst, g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
+ } \
}
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
@@ -2938,6 +3099,21 @@ SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
+#define VISIT_SIMD_BINOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ if (IsSupported(AVX)) { \
+ Emit(kX64##Opcode, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
+ } else { \
+ Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
+ } \
+ }
+SIMD_BINOP_SSE_AVX_LIST(VISIT_SIMD_BINOP)
+#undef VISIT_SIMD_BINOP
+#undef SIMD_BINOP_SSE_AVX_LIST
+
#define VISIT_SIMD_BINOP_ONE_TEMP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
X64OperandGenerator g(this); \
@@ -2970,7 +3146,6 @@ SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE)
SIMD_ALLTRUE_LIST(VISIT_SIMD_ALLTRUE)
#undef VISIT_SIMD_ALLTRUE
#undef SIMD_ALLTRUE_LIST
-#undef SIMD_TYPES
void InstructionSelector::VisitS128Select(Node* node) {
X64OperandGenerator g(this);
@@ -2979,6 +3154,40 @@ void InstructionSelector::VisitS128Select(Node* node) {
g.UseRegister(node->InputAt(2)));
}
+namespace {
+void VisitSignSelect(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ X64OperandGenerator g(selector);
+ // signselect(x, y, -1) = x
+ // pblendvb(dst, x, y, -1) = dst <- y, so we need to swap x and y.
+ if (selector->IsSupported(AVX)) {
+ selector->Emit(
+ opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(2)));
+ } else {
+ selector->Emit(
+ opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(2), xmm0));
+ }
+}
+} // namespace
+
+void InstructionSelector::VisitI8x16SignSelect(Node* node) {
+ VisitSignSelect(this, node, kX64I8x16SignSelect);
+}
+
+void InstructionSelector::VisitI16x8SignSelect(Node* node) {
+ VisitSignSelect(this, node, kX64I16x8SignSelect);
+}
+
+void InstructionSelector::VisitI32x4SignSelect(Node* node) {
+ VisitSignSelect(this, node, kX64I32x4SignSelect);
+}
+
+void InstructionSelector::VisitI64x2SignSelect(Node* node) {
+ VisitSignSelect(this, node, kX64I64x2SignSelect);
+}
+
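
Per lane, signselect picks from the first operand where the mask lane is negative (sign bit set) and from the second otherwise; since pblendvb selects its second source under a set sign bit, the visitors above pass the inputs swapped. A scalar sketch:

#include <cstdint>

int8_t SignSelectLane(int8_t x, int8_t y, int8_t mask) {
  return mask < 0 ? x : y;  // mask < 0 <=> sign bit set
}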
void InstructionSelector::VisitS128AndNot(Node* node) {
X64OperandGenerator g(this);
// andnps a b does ~a & b, but we want a & !b, so flip the input.
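
A scalar sketch of the operand flip described in the comment above: the machine instruction computes (~first) & second, while the wasm-level S128AndNot is a & ~b, so feeding the operands in reverse order yields the wanted result.

#include <cstdint>

uint32_t S128AndNotLane(uint32_t a, uint32_t b) {
  return a & ~b;  // == andnps(b, a), i.e. (~b) & a
}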
@@ -3045,48 +3254,6 @@ void InstructionSelector::VisitI64x2Mul(Node* node) {
g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
-void InstructionSelector::VisitI64x2MinS(Node* node) {
- X64OperandGenerator g(this);
- if (this->IsSupported(SSE4_2)) {
- InstructionOperand temps[] = {g.TempFpRegister(xmm0)};
- Emit(kX64I64x2MinS, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
- } else {
- InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister(),
- g.TempRegister()};
- Emit(kX64I64x2MinS, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
- arraysize(temps), temps);
- }
-}
-
-void InstructionSelector::VisitI64x2MaxS(Node* node) {
- X64OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempFpRegister(xmm0)};
- Emit(kX64I64x2MaxS, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- arraysize(temps), temps);
-}
-
-void InstructionSelector::VisitI64x2MinU(Node* node) {
- X64OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register(),
- g.TempFpRegister(xmm0)};
- Emit(kX64I64x2MinU, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
-}
-
-void InstructionSelector::VisitI64x2MaxU(Node* node) {
- X64OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register(),
- g.TempFpRegister(xmm0)};
- Emit(kX64I64x2MaxU, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- arraysize(temps), temps);
-}
-
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register()};
@@ -3102,12 +3269,6 @@ void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
}
-void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64I16x8UConvertI32x4, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
-}
-
void InstructionSelector::VisitI16x8BitMask(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register()};
@@ -3115,12 +3276,6 @@ void InstructionSelector::VisitI16x8BitMask(Node* node) {
g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps);
}
-void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64I8x16UConvertI16x8, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
-}
-
void InstructionSelector::VisitI8x16Mul(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register()};
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
index 32c5168466..d7eaa137f1 100644
--- a/deps/v8/src/compiler/bytecode-analysis.h
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -100,6 +100,8 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
public:
BytecodeAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone,
BailoutId osr_bailout_id, bool analyze_liveness);
+ BytecodeAnalysis(const BytecodeAnalysis&) = delete;
+ BytecodeAnalysis& operator=(const BytecodeAnalysis&) = delete;
// Return true if the given offset is a loop header
bool IsLoopHeader(int offset) const;
@@ -166,8 +168,6 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
ZoneMap<int, LoopInfo> header_to_info_;
int osr_entry_point_;
BytecodeLivenessMap liveness_map_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeAnalysis);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 7855bc4c44..14d014bca6 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -35,13 +35,16 @@ class BytecodeGraphBuilder {
BytecodeGraphBuilder(JSHeapBroker* broker, Zone* local_zone,
NativeContextRef const& native_context,
SharedFunctionInfoRef const& shared_info,
- FeedbackVectorRef const& feedback_vector,
+ FeedbackCellRef const& feedback_cell,
BailoutId osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions, int inlining_id,
CodeKind code_kind, BytecodeGraphBuilderFlags flags,
TickCounter* tick_counter);
+ BytecodeGraphBuilder(const BytecodeGraphBuilder&) = delete;
+ BytecodeGraphBuilder& operator=(const BytecodeGraphBuilder&) = delete;
+
// Creates a graph by visiting bytecodes.
void CreateGraph();
@@ -67,6 +70,7 @@ class BytecodeGraphBuilder {
bool native_context_independent() const {
return CodeKindIsNativeContextIndependentJSFunction(code_kind_);
}
+ bool is_turboprop() const { return code_kind_ == CodeKind::TURBOPROP; }
bool generate_full_feedback_collection() const {
// NCI code currently collects full feedback.
DCHECK_IMPLIES(native_context_independent(),
@@ -117,10 +121,16 @@ class BytecodeGraphBuilder {
// Checks the optimization marker and potentially triggers compilation or
// installs the finished code object.
- // Only relevant for specific code kinds (see
- // CodeKindChecksOptimizationMarker).
+ // Only relevant for specific code kinds (see CodeKindCanTierUp).
void MaybeBuildTierUpCheck();
+ // Like bytecode, NCI code must collect call feedback to preserve proper
+ // behavior of inlining heuristics when tiering up to Turbofan in the future.
+ // The invocation count (how often a particular JSFunction has been called)
+ // is tracked by the callee. For bytecode, this happens in the
+ // InterpreterEntryTrampoline, for NCI code it happens here in the prologue.
+ void MaybeBuildIncrementInvocationCount();
+
  // Builder for loading a native context field.
Node* BuildLoadNativeContextField(int index);
@@ -254,7 +264,7 @@ class BytecodeGraphBuilder {
const Operator* op, Node* receiver, FeedbackSlot load_slot,
FeedbackSlot call_slot);
JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadNamed(
- const Operator* op, Node* receiver, FeedbackSlot slot);
+ const Operator* op, FeedbackSlot slot);
JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadKeyed(
const Operator* op, Node* receiver, Node* key, FeedbackSlot slot);
JSTypeHintLowering::LoweringResult TryBuildSimplifiedStoreNamed(
@@ -277,7 +287,7 @@ class BytecodeGraphBuilder {
uint32_t depth);
// Helper function to create for-in mode from the recorded type feedback.
- ForInMode GetForInMode(int operand_index);
+ ForInMode GetForInMode(FeedbackSlot slot);
// Helper function to compute call frequency from the recorded type
// feedback. Returns unknown if invocation count is unknown. Returns 0 if
@@ -415,6 +425,7 @@ class BytecodeGraphBuilder {
// The native context for which we optimize.
NativeContextRef const native_context_;
SharedFunctionInfoRef const shared_info_;
+ FeedbackCellRef const feedback_cell_;
FeedbackVectorRef const feedback_vector_;
CallFrequency const invocation_frequency_;
JSTypeHintLowering const type_hint_lowering_;
@@ -480,8 +491,6 @@ class BytecodeGraphBuilder {
static constexpr int kCompareOperationHintIndex = 1;
static constexpr int kCountOperationHintIndex = 0;
static constexpr int kUnaryOperationHintIndex = 0;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeGraphBuilder);
};
// The abstract execution environment simulates the content of the interpreter
@@ -980,7 +989,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
JSHeapBroker* broker, Zone* local_zone,
NativeContextRef const& native_context,
SharedFunctionInfoRef const& shared_info,
- FeedbackVectorRef const& feedback_vector, BailoutId osr_offset,
+ FeedbackCellRef const& feedback_cell, BailoutId osr_offset,
JSGraph* jsgraph, CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions, int inlining_id, CodeKind code_kind,
BytecodeGraphBuilderFlags flags, TickCounter* tick_counter)
@@ -989,10 +998,11 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
jsgraph_(jsgraph),
native_context_(native_context),
shared_info_(shared_info),
- feedback_vector_(feedback_vector),
+ feedback_cell_(feedback_cell),
+ feedback_vector_(feedback_cell.value().AsFeedbackVector()),
invocation_frequency_(invocation_frequency),
type_hint_lowering_(
- broker, jsgraph, feedback_vector,
+ broker, jsgraph, feedback_vector_,
(flags & BytecodeGraphBuilderFlag::kBailoutOnUninitialized)
? JSTypeHintLowering::kBailoutOnUninitialized
: JSTypeHintLowering::kNoFlags),
@@ -1046,23 +1056,17 @@ void BytecodeGraphBuilder::CreateFeedbackCellNode() {
DCHECK_NULL(feedback_cell_node_);
if (native_context_independent()) {
feedback_cell_node_ = BuildLoadFeedbackCell();
+ } else if (is_turboprop()) {
+ feedback_cell_node_ = jsgraph()->Constant(feedback_cell_);
}
}
Node* BytecodeGraphBuilder::BuildLoadFeedbackCell() {
DCHECK(native_context_independent());
DCHECK_NULL(feedback_cell_node_);
-
- Environment* env = environment();
- Node* control = env->GetControlDependency();
- Node* effect = env->GetEffectDependency();
-
- Node* feedback_cell = effect = graph()->NewNode(
+ return NewNode(
simplified()->LoadField(AccessBuilder::ForJSFunctionFeedbackCell()),
- GetFunctionClosure(), effect, control);
-
- env->UpdateEffectDependency(effect);
- return feedback_cell;
+ GetFunctionClosure());
}
void BytecodeGraphBuilder::CreateFeedbackVectorNode() {
@@ -1079,38 +1083,22 @@ Node* BytecodeGraphBuilder::BuildLoadFeedbackVector() {
// The feedback vector must exist and remain live while the generated code
// lives. Specifically that means it must be created when NCI code is
// installed, and must not be flushed.
-
- Environment* env = environment();
- Node* control = env->GetControlDependency();
- Node* effect = env->GetEffectDependency();
-
- Node* vector = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFeedbackCellValue()),
- feedback_cell_node(), effect, control);
-
- env->UpdateEffectDependency(effect);
- return vector;
+ return NewNode(simplified()->LoadField(AccessBuilder::ForFeedbackCellValue()),
+ feedback_cell_node());
}
Node* BytecodeGraphBuilder::BuildLoadFeedbackCell(int index) {
if (native_context_independent()) {
- Environment* env = environment();
- Node* control = env->GetControlDependency();
- Node* effect = env->GetEffectDependency();
-
// TODO(jgruber,v8:8888): Assumes that the feedback vector has been
// allocated.
- Node* closure_feedback_cell_array = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForFeedbackVectorClosureFeedbackCellArray()),
- feedback_vector_node(), effect, control);
+ Node* closure_feedback_cell_array =
+ NewNode(simplified()->LoadField(
+ AccessBuilder::ForFeedbackVectorClosureFeedbackCellArray()),
+ feedback_vector_node());
- Node* feedback_cell = effect = graph()->NewNode(
+ return NewNode(
simplified()->LoadField(AccessBuilder::ForFixedArraySlot(index)),
- closure_feedback_cell_array, effect, control);
-
- env->UpdateEffectDependency(effect);
- return feedback_cell;
+ closure_feedback_cell_array);
} else {
return jsgraph()->Constant(feedback_vector().GetClosureFeedbackCell(index));
}
@@ -1126,34 +1114,49 @@ void BytecodeGraphBuilder::CreateNativeContextNode() {
Node* BytecodeGraphBuilder::BuildLoadNativeContext() {
DCHECK(native_context_independent());
DCHECK_NULL(native_context_node_);
-
- Environment* env = environment();
- Node* control = env->GetControlDependency();
- Node* effect = env->GetEffectDependency();
- Node* context = env->Context();
-
- Node* context_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- context, effect, control);
- Node* native_context = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapNativeContext()),
- context_map, effect, control);
-
- env->UpdateEffectDependency(effect);
- return native_context;
+ Node* context_map = NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ environment()->Context());
+ return NewNode(simplified()->LoadField(AccessBuilder::ForMapNativeContext()),
+ context_map);
}
void BytecodeGraphBuilder::MaybeBuildTierUpCheck() {
- if (!CodeKindChecksOptimizationMarker(code_kind())) return;
+  // For OSR we don't tier up, so we don't need to build this check. Also,
+  // tiering up currently tail calls to IET, and tail calls aren't supported
+  // with OSR. See AdjustStackPointerForTailCall.
+ if (!CodeKindCanTierUp(code_kind()) || osr_) return;
+
+ int parameter_count = bytecode_array().parameter_count();
+ Node* target = GetFunctionClosure();
+ Node* new_target = graph()->NewNode(
+ common()->Parameter(
+ Linkage::GetJSCallNewTargetParamIndex(parameter_count),
+ "%new.target"),
+ graph()->start());
+ Node* argc = graph()->NewNode(
+ common()->Parameter(Linkage::GetJSCallArgCountParamIndex(parameter_count),
+ "%argc"),
+ graph()->start());
+ DCHECK_EQ(environment()->Context()->opcode(), IrOpcode::kParameter);
+ Node* context = environment()->Context();
- Environment* env = environment();
- Node* control = env->GetControlDependency();
- Node* effect = env->GetEffectDependency();
+ NewNode(simplified()->TierUpCheck(), feedback_vector_node(), target,
+ new_target, argc, context);
+}
- effect = graph()->NewNode(simplified()->TierUpCheck(), feedback_vector_node(),
- effect, control);
+void BytecodeGraphBuilder::MaybeBuildIncrementInvocationCount() {
+ if (!generate_full_feedback_collection()) return;
- env->UpdateEffectDependency(effect);
+ Node* current_invocation_count =
+ NewNode(simplified()->LoadField(
+ AccessBuilder::ForFeedbackVectorInvocationCount()),
+ feedback_vector_node());
+ Node* next_invocation_count =
+ NewNode(simplified()->NumberAdd(), current_invocation_count,
+ jsgraph()->SmiConstant(1));
+ NewNode(simplified()->StoreField(
+ AccessBuilder::ForFeedbackVectorInvocationCount()),
+ feedback_vector_node(), next_invocation_count);
}
Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
@@ -1190,6 +1193,7 @@ void BytecodeGraphBuilder::CreateGraph() {
CreateFeedbackCellNode();
CreateFeedbackVectorNode();
MaybeBuildTierUpCheck();
+ MaybeBuildIncrementInvocationCount();
CreateNativeContextNode();
VisitBytecodes();
@@ -1828,7 +1832,7 @@ BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::CheckContextExtensions(
// in the same scope as the variable itself has no way of shadowing it.
Environment* slow_environment = nullptr;
for (uint32_t d = 0; d < depth; d++) {
- if (scope_info.HasContextExtension()) {
+ if (scope_info.HasContextExtensionSlot()) {
slow_environment = CheckContextExtensionAtDepth(slow_environment, d);
}
DCHECK_IMPLIES(!scope_info.HasOuterScopeInfo(), d + 1 == depth);
@@ -2015,7 +2019,7 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
const Operator* op = javascript()->LoadNamed(name.object(), feedback);
JSTypeHintLowering::LoweringResult lowering =
- TryBuildSimplifiedLoadNamed(op, object, feedback.slot);
+ TryBuildSimplifiedLoadNamed(op, feedback.slot);
if (lowering.IsExit()) return;
Node* node = nullptr;
@@ -2048,10 +2052,24 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyFromSuper() {
Node* home_object = environment()->LookupAccumulator();
NameRef name(broker(),
bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
- const Operator* op = javascript()->LoadNamedFromSuper(name.object());
- // TODO(marja, v8:9237): Use lowering.
- Node* node = NewNode(op, receiver, home_object);
+ FeedbackSource feedback =
+ CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
+ const Operator* op =
+ javascript()->LoadNamedFromSuper(name.object(), feedback);
+
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedLoadNamed(op, feedback.slot);
+ if (lowering.IsExit()) return;
+
+ Node* node = nullptr;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
+ } else {
+ DCHECK(!lowering.Changed());
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
+ node = NewNode(op, receiver, home_object, feedback_vector_node());
+ }
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -2903,6 +2921,31 @@ void BytecodeGraphBuilder::VisitThrowSuperAlreadyCalledIfNotHole() {
Runtime::kThrowSuperAlreadyCalledError);
}
+void BytecodeGraphBuilder::VisitThrowIfNotSuperConstructor() {
+ Node* constructor =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* check_is_constructor =
+ NewNode(simplified()->ObjectIsConstructor(), constructor);
+ NewBranch(check_is_constructor, BranchHint::kTrue);
+ {
+ SubEnvironment sub_environment(this);
+ NewIfFalse();
+ BuildLoopExitsForFunctionExit(bytecode_analysis().GetInLivenessFor(
+ bytecode_iterator().current_offset()));
+ Node* node =
+ NewNode(javascript()->CallRuntime(Runtime::kThrowNotSuperConstructor),
+ constructor, GetFunctionClosure());
+ environment()->RecordAfterState(node, Environment::kAttachFrameState);
+ Node* control = NewNode(common()->Throw());
+ MergeControlToLeaveFunction(control);
+ }
+ NewIfTrue();
+
+ constructor = NewNode(common()->TypeGuard(Type::Callable()), constructor);
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0),
+ constructor);
+}
+
void BytecodeGraphBuilder::BuildUnaryOp(const Operator* op) {
DCHECK(JSOperator::IsUnaryWithFeedback(op->opcode()));
PrepareEagerCheckpoint();
@@ -2952,8 +2995,7 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
}
// Helper function to create for-in mode from the recorded type feedback.
-ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
- FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
+ForInMode BytecodeGraphBuilder::GetForInMode(FeedbackSlot slot) {
FeedbackSource source(feedback_vector(), slot);
switch (broker()->GetFeedbackForForIn(source)) {
case ForInHint::kNone:
@@ -3610,7 +3652,9 @@ void BytecodeGraphBuilder::VisitForInPrepare() {
TryBuildSimplifiedForInPrepare(enumerator, slot);
if (lowering.IsExit()) return;
DCHECK(!lowering.Changed());
- Node* node = NewNode(javascript()->ForInPrepare(GetForInMode(1)), enumerator);
+ FeedbackSource feedback = CreateFeedbackSource(slot);
+ Node* node = NewNode(javascript()->ForInPrepare(GetForInMode(slot), feedback),
+ enumerator, feedback_vector_node());
environment()->BindRegistersToProjections(
bytecode_iterator().GetRegisterOperand(0), node);
}
@@ -3639,12 +3683,9 @@ void BytecodeGraphBuilder::VisitForInNext() {
Node* cache_array = environment()->LookupRegister(
interpreter::Register(catch_reg_pair_index + 1));
- // We need to rename the {index} here, as in case of OSR we loose the
+ // We need to rename the {index} here, as in case of OSR we lose the
// information that the {index} is always a valid unsigned Smi value.
- index = graph()->NewNode(common()->TypeGuard(Type::UnsignedSmall()), index,
- environment()->GetEffectDependency(),
- environment()->GetControlDependency());
- environment()->UpdateEffectDependency(index);
+ index = NewNode(common()->TypeGuard(Type::UnsignedSmall()), index);
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(3);
JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedForInNext(
@@ -3652,8 +3693,10 @@ void BytecodeGraphBuilder::VisitForInNext() {
if (lowering.IsExit()) return;
DCHECK(!lowering.Changed());
- Node* node = NewNode(javascript()->ForInNext(GetForInMode(3)), receiver,
- cache_array, cache_type, index);
+ FeedbackSource feedback = CreateFeedbackSource(slot);
+ Node* node =
+ NewNode(javascript()->ForInNext(GetForInMode(slot), feedback), receiver,
+ cache_array, cache_type, index, feedback_vector_node());
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -4071,13 +4114,13 @@ void BytecodeGraphBuilder::BuildJumpIfJSReceiver() {
}
void BytecodeGraphBuilder::BuildUpdateInterruptBudget(int delta) {
- if (native_context_independent()) {
- // Keep uses of this in sync with Ignition's UpdateInterruptBudget.
- int delta_with_current_bytecode =
- delta - bytecode_iterator().current_bytecode_size();
- NewNode(simplified()->UpdateInterruptBudget(delta_with_current_bytecode),
- feedback_cell_node());
- }
+ if (!CodeKindCanTierUp(code_kind())) return;
+
+ // Keep uses of this in sync with Ignition's UpdateInterruptBudget.
+ int delta_with_current_bytecode =
+ delta - bytecode_iterator().current_bytecode_size();
+ NewNode(simplified()->UpdateInterruptBudget(delta_with_current_bytecode),
+ feedback_cell_node());
}
JSTypeHintLowering::LoweringResult
@@ -4193,14 +4236,12 @@ BytecodeGraphBuilder::TryBuildSimplifiedGetIterator(const Operator* op,
JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedLoadNamed(const Operator* op,
- Node* receiver,
FeedbackSlot slot) {
if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult early_reduction =
- type_hint_lowering().ReduceLoadNamedOperation(op, receiver, effect,
- control, slot);
+ type_hint_lowering().ReduceLoadNamedOperation(op, effect, control, slot);
ApplyEarlyReduction(early_reduction);
return early_reduction;
}
@@ -4472,17 +4513,18 @@ void BytecodeGraphBuilder::UpdateSourcePosition(int offset) {
void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
SharedFunctionInfoRef const& shared_info,
- FeedbackVectorRef const& feedback_vector,
+ FeedbackCellRef const& feedback_cell,
BailoutId osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
int inlining_id, CodeKind code_kind,
BytecodeGraphBuilderFlags flags,
TickCounter* tick_counter) {
- DCHECK(broker->IsSerializedForCompilation(shared_info, feedback_vector));
+ DCHECK(broker->IsSerializedForCompilation(
+ shared_info, feedback_cell.value().AsFeedbackVector()));
BytecodeGraphBuilder builder(
broker, local_zone, broker->target_native_context(), shared_info,
- feedback_vector, osr_offset, jsgraph, invocation_frequency,
+ feedback_cell, osr_offset, jsgraph, invocation_frequency,
source_positions, inlining_id, code_kind, flags, tick_counter);
builder.CreateGraph();
}
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index a8423904f8..501451ec55 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -41,7 +41,7 @@ using BytecodeGraphBuilderFlags = base::Flags<BytecodeGraphBuilderFlag>;
// on AIX (v8:8193).
void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
SharedFunctionInfoRef const& shared_info,
- FeedbackVectorRef const& feedback_vector,
+ FeedbackCellRef const& feedback_cell,
BailoutId osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
diff --git a/deps/v8/src/compiler/bytecode-liveness-map.h b/deps/v8/src/compiler/bytecode-liveness-map.h
index b377b55ecb..c68492d8bf 100644
--- a/deps/v8/src/compiler/bytecode-liveness-map.h
+++ b/deps/v8/src/compiler/bytecode-liveness-map.h
@@ -20,6 +20,8 @@ class BytecodeLivenessState : public ZoneObject {
public:
BytecodeLivenessState(int register_count, Zone* zone)
: bit_vector_(register_count + 1, zone) {}
+ BytecodeLivenessState(const BytecodeLivenessState&) = delete;
+ BytecodeLivenessState& operator=(const BytecodeLivenessState&) = delete;
const BitVector& bit_vector() const { return bit_vector_; }
@@ -71,8 +73,6 @@ class BytecodeLivenessState : public ZoneObject {
private:
BitVector bit_vector_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeLivenessState);
};
struct BytecodeLiveness {
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 5b395067f0..2c5338b0d7 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -4,9 +4,8 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
-
+#include "src/compiler/globals.h"
#include "src/compiler/linkage.h"
-
#include "src/zone/zone.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 273058ba25..185a2d0670 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -318,17 +318,18 @@ TNode<Float64T> CodeAssembler::Float64Constant(double value) {
bool CodeAssembler::ToInt32Constant(Node* node, int32_t* out_value) {
{
Int64Matcher m(node);
- if (m.HasValue() && m.IsInRange(std::numeric_limits<int32_t>::min(),
- std::numeric_limits<int32_t>::max())) {
- *out_value = static_cast<int32_t>(m.Value());
+ if (m.HasResolvedValue() &&
+ m.IsInRange(std::numeric_limits<int32_t>::min(),
+ std::numeric_limits<int32_t>::max())) {
+ *out_value = static_cast<int32_t>(m.ResolvedValue());
return true;
}
}
{
Int32Matcher m(node);
- if (m.HasValue()) {
- *out_value = m.Value();
+ if (m.HasResolvedValue()) {
+ *out_value = m.ResolvedValue();
return true;
}
}
@@ -338,8 +339,8 @@ bool CodeAssembler::ToInt32Constant(Node* node, int32_t* out_value) {
bool CodeAssembler::ToInt64Constant(Node* node, int64_t* out_value) {
Int64Matcher m(node);
- if (m.HasValue()) *out_value = m.Value();
- return m.HasValue();
+ if (m.HasResolvedValue()) *out_value = m.ResolvedValue();
+ return m.HasResolvedValue();
}
bool CodeAssembler::ToSmiConstant(Node* node, Smi* out_value) {
@@ -347,8 +348,8 @@ bool CodeAssembler::ToSmiConstant(Node* node, Smi* out_value) {
node = node->InputAt(0);
}
IntPtrMatcher m(node);
- if (m.HasValue()) {
- intptr_t value = m.Value();
+ if (m.HasResolvedValue()) {
+ intptr_t value = m.ResolvedValue();
// Make sure that the value is actually a smi
CHECK_EQ(0, value & ((static_cast<intptr_t>(1) << kSmiShiftSize) - 1));
*out_value = Smi(static_cast<Address>(value));
@@ -363,8 +364,8 @@ bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t* out_value) {
node = node->InputAt(0);
}
IntPtrMatcher m(node);
- if (m.HasValue()) *out_value = m.Value();
- return m.HasValue();
+ if (m.HasResolvedValue()) *out_value = m.ResolvedValue();
+ return m.HasResolvedValue();
}
bool CodeAssembler::IsUndefinedConstant(TNode<Object> node) {
@@ -377,7 +378,7 @@ bool CodeAssembler::IsNullConstant(TNode<Object> node) {
return m.Is(isolate()->factory()->null_value());
}
-Node* CodeAssembler::Parameter(int index) {
+Node* CodeAssembler::UntypedParameter(int index) {
if (index == kTargetParameterIndex) return raw_assembler()->TargetParameter();
return raw_assembler()->Parameter(index);
}
@@ -390,8 +391,8 @@ bool CodeAssembler::IsJSFunctionCall() const {
TNode<Context> CodeAssembler::GetJSContextParameter() {
auto call_descriptor = raw_assembler()->call_descriptor();
DCHECK(call_descriptor->IsJSFunctionCall());
- return CAST(Parameter(Linkage::GetJSCallContextParamIndex(
- static_cast<int>(call_descriptor->JSParameterCount()))));
+ return Parameter<Context>(Linkage::GetJSCallContextParamIndex(
+ static_cast<int>(call_descriptor->JSParameterCount())));
}
void CodeAssembler::Return(TNode<Object> value) {
@@ -802,19 +803,21 @@ Node* CodeAssembler::AtomicStore(MachineRepresentation rep, Node* base,
return raw_assembler()->AtomicStore(rep, base, offset, value, value_high);
}
-#define ATOMIC_FUNCTION(name) \
- Node* CodeAssembler::Atomic##name(MachineType type, Node* base, \
- Node* offset, Node* value, \
- Node* value_high) { \
- return raw_assembler()->Atomic##name(type, base, offset, value, \
- value_high); \
+#define ATOMIC_FUNCTION(name) \
+ Node* CodeAssembler::Atomic##name( \
+ MachineType type, TNode<RawPtrT> base, TNode<UintPtrT> offset, \
+ Node* value, base::Optional<TNode<UintPtrT>> value_high) { \
+ Node* value_high_node = nullptr; \
+ if (value_high) value_high_node = *value_high; \
+ return raw_assembler()->Atomic##name(type, base, offset, value, \
+ value_high_node); \
}
-ATOMIC_FUNCTION(Exchange)
ATOMIC_FUNCTION(Add)
ATOMIC_FUNCTION(Sub)
ATOMIC_FUNCTION(And)
ATOMIC_FUNCTION(Or)
ATOMIC_FUNCTION(Xor)
+ATOMIC_FUNCTION(Exchange)
#undef ATOMIC_FUNCTION
Node* CodeAssembler::AtomicCompareExchange(MachineType type, Node* base,
@@ -835,10 +838,6 @@ Node* CodeAssembler::StoreRoot(RootIndex root_index, Node* value) {
value);
}
-Node* CodeAssembler::Retain(Node* value) {
- return raw_assembler()->Retain(value);
-}
-
Node* CodeAssembler::Projection(int index, Node* value) {
DCHECK_LT(index, value->op()->ValueOutputCount());
return raw_assembler()->Projection(index, value);
@@ -894,7 +893,7 @@ class NodeArray {
};
} // namespace
-TNode<Object> CodeAssembler::CallRuntimeImpl(
+Node* CodeAssembler::CallRuntimeImpl(
Runtime::FunctionId function, TNode<Object> context,
std::initializer_list<TNode<Object>> args) {
int result_size = Runtime::FunctionForId(function)->result_size;
@@ -924,7 +923,7 @@ TNode<Object> CodeAssembler::CallRuntimeImpl(
raw_assembler()->CallN(call_descriptor, inputs.size(), inputs.data());
HandleException(return_value);
CallEpilogue();
- return UncheckedCast<Object>(return_value);
+ return return_value;
}
void CodeAssembler::TailCallRuntimeImpl(
@@ -955,8 +954,7 @@ void CodeAssembler::TailCallRuntimeImpl(
Node* CodeAssembler::CallStubN(StubCallMode call_mode,
const CallInterfaceDescriptor& descriptor,
- size_t result_size, int input_count,
- Node* const* inputs) {
+ int input_count, Node* const* inputs) {
DCHECK(call_mode == StubCallMode::kCallCodeObject ||
call_mode == StubCallMode::kCallBuiltinPointer);
@@ -974,7 +972,6 @@ Node* CodeAssembler::CallStubN(StubCallMode call_mode,
// Extra arguments not mentioned in the descriptor are passed on the stack.
int stack_parameter_count = argc - descriptor.GetRegisterParameterCount();
DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
- DCHECK_EQ(result_size, descriptor.GetReturnCount());
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), descriptor, stack_parameter_count, CallDescriptor::kNoFlags,
@@ -1010,8 +1007,7 @@ void CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode,
const CallInterfaceDescriptor& descriptor,
- size_t result_size, TNode<Object> target,
- TNode<Object> context,
+ TNode<Object> target, TNode<Object> context,
std::initializer_list<Node*> args) {
DCHECK(call_mode == StubCallMode::kCallCodeObject ||
call_mode == StubCallMode::kCallBuiltinPointer);
@@ -1026,8 +1022,7 @@ Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode,
inputs.Add(context);
}
- return CallStubN(call_mode, descriptor, result_size, inputs.size(),
- inputs.data());
+ return CallStubN(call_mode, descriptor, inputs.size(), inputs.data());
}
Node* CodeAssembler::CallJSStubImpl(const CallInterfaceDescriptor& descriptor,
@@ -1049,7 +1044,7 @@ Node* CodeAssembler::CallJSStubImpl(const CallInterfaceDescriptor& descriptor,
if (descriptor.HasContextParameter()) {
inputs.Add(context);
}
- return CallStubN(StubCallMode::kCallCodeObject, descriptor, 1, inputs.size(),
+ return CallStubN(StubCallMode::kCallCodeObject, descriptor, inputs.size(),
inputs.data());
}
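Every matcher call site in this file follows the same rename; the accessors changed name but not meaning. A hedged before/after sketch, assuming node is in scope:

  Int64Matcher m(node);
  if (m.HasResolvedValue()) {        // previously: m.HasValue()
    int64_t v = m.ResolvedValue();   // previously: m.Value()
    // ... use v ...
  }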
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 203e1eea37..792ecd385c 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -8,9 +8,11 @@
#include <initializer_list>
#include <map>
#include <memory>
+#include <sstream>
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
+#include "include/cppgc/source-location.h"
#include "src/base/macros.h"
#include "src/base/type-traits.h"
#include "src/builtins/builtins.h"
@@ -376,10 +378,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
explicit CodeAssembler(CodeAssemblerState* state) : state_(state) {}
~CodeAssembler();
+ CodeAssembler(const CodeAssembler&) = delete;
+ CodeAssembler& operator=(const CodeAssembler&) = delete;
+
static Handle<Code> GenerateCode(CodeAssemblerState* state,
const AssemblerOptions& options,
const ProfileDataFromFile* profile_data);
-
bool Is64() const;
bool Is32() const;
bool IsFloat64RoundUpSupported() const;
@@ -566,7 +570,30 @@ class V8_EXPORT_PRIVATE CodeAssembler {
static constexpr int kTargetParameterIndex = -1;
- Node* Parameter(int value);
+ template <class T>
+ TNode<T> Parameter(
+ int value, cppgc::SourceLocation loc = cppgc::SourceLocation::Current()) {
+ static_assert(
+ std::is_convertible<TNode<T>, TNode<Object>>::value,
+ "Parameter is only for tagged types. Use UncheckedParameter instead.");
+ std::stringstream message;
+ message << "Parameter " << value;
+ if (loc.FileName()) {
+ message << " at " << loc.FileName() << ":" << loc.Line();
+ }
+ size_t buf_size = message.str().size() + 1;
+ char* message_dup = zone()->NewArray<char>(buf_size);
+ snprintf(message_dup, buf_size, "%s", message.str().c_str());
+
+ return Cast(UntypedParameter(value), message_dup);
+ }
+
+ template <class T>
+ TNode<T> UncheckedParameter(int value) {
+ return UncheckedCast<T>(UntypedParameter(value));
+ }
+
+ Node* UntypedParameter(int value);
TNode<Context> GetJSContextParameter();
void Return(TNode<Object> value);
@@ -758,31 +785,31 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* AtomicStore(MachineRepresentation rep, Node* base, Node* offset,
Node* value, Node* value_high = nullptr);
+ Node* AtomicAdd(MachineType type, TNode<RawPtrT> base, TNode<UintPtrT> offset,
+ Node* value, base::Optional<TNode<UintPtrT>> value_high);
+
+ Node* AtomicSub(MachineType type, TNode<RawPtrT> base, TNode<UintPtrT> offset,
+ Node* value, base::Optional<TNode<UintPtrT>> value_high);
+
+ Node* AtomicAnd(MachineType type, TNode<RawPtrT> base, TNode<UintPtrT> offset,
+ Node* value, base::Optional<TNode<UintPtrT>> value_high);
+
+ Node* AtomicOr(MachineType type, TNode<RawPtrT> base, TNode<UintPtrT> offset,
+ Node* value, base::Optional<TNode<UintPtrT>> value_high);
+
+ Node* AtomicXor(MachineType type, TNode<RawPtrT> base, TNode<UintPtrT> offset,
+ Node* value, base::Optional<TNode<UintPtrT>> value_high);
+
// Exchange value at raw memory location
- Node* AtomicExchange(MachineType type, Node* base, Node* offset, Node* value,
- Node* value_high = nullptr);
+ Node* AtomicExchange(MachineType type, TNode<RawPtrT> base,
+ TNode<UintPtrT> offset, Node* value,
+ base::Optional<TNode<UintPtrT>> value_high);
// Compare and Exchange value at raw memory location
Node* AtomicCompareExchange(MachineType type, Node* base, Node* offset,
Node* old_value, Node* new_value,
Node* old_value_high = nullptr,
Node* new_value_high = nullptr);
-
- Node* AtomicAdd(MachineType type, Node* base, Node* offset, Node* value,
- Node* value_high = nullptr);
-
- Node* AtomicSub(MachineType type, Node* base, Node* offset, Node* value,
- Node* value_high = nullptr);
-
- Node* AtomicAnd(MachineType type, Node* base, Node* offset, Node* value,
- Node* value_high = nullptr);
-
- Node* AtomicOr(MachineType type, Node* base, Node* offset, Node* value,
- Node* value_high = nullptr);
-
- Node* AtomicXor(MachineType type, Node* base, Node* offset, Node* value,
- Node* value_high = nullptr);
-
// Store a value to the root array.
Node* StoreRoot(RootIndex root_index, Node* value);
@@ -964,10 +991,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// kSetOverflowToMin.
TNode<Int32T> TruncateFloat32ToInt32(SloppyTNode<Float32T> value);
- // No-op that guarantees that the value is kept alive till this point even
- // if GC happens.
- Node* Retain(Node* value);
-
// Projections
Node* Projection(int index, Node* value);
@@ -980,11 +1003,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
}
// Calls
- template <class... TArgs>
- TNode<Object> CallRuntime(Runtime::FunctionId function, TNode<Object> context,
- TArgs... args) {
- return CallRuntimeImpl(function, context,
- {implicit_cast<TNode<Object>>(args)...});
+ template <class T = Object, class... TArgs>
+ TNode<T> CallRuntime(Runtime::FunctionId function, TNode<Object> context,
+ TArgs... args) {
+ return UncheckedCast<T>(CallRuntimeImpl(
+ function, context, {implicit_cast<TNode<Object>>(args)...}));
}
template <class... TArgs>
@@ -1018,27 +1041,15 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<T> CallStub(const CallInterfaceDescriptor& descriptor,
TNode<Code> target, TNode<Object> context, TArgs... args) {
return UncheckedCast<T>(CallStubR(StubCallMode::kCallCodeObject, descriptor,
- 1, target, context, args...));
+ target, context, args...));
}
- template <class... TArgs>
- Node* CallStubR(StubCallMode call_mode,
- const CallInterfaceDescriptor& descriptor, size_t result_size,
- TNode<Object> target, TNode<Object> context, TArgs... args) {
- return CallStubRImpl(call_mode, descriptor, result_size, target, context,
- {args...});
- }
-
- Node* CallStubN(StubCallMode call_mode,
- const CallInterfaceDescriptor& descriptor, size_t result_size,
- int input_count, Node* const* inputs);
-
template <class T = Object, class... TArgs>
TNode<T> CallBuiltinPointer(const CallInterfaceDescriptor& descriptor,
TNode<BuiltinPtr> target, TNode<Object> context,
TArgs... args) {
return UncheckedCast<T>(CallStubR(StubCallMode::kCallBuiltinPointer,
- descriptor, 1, target, context, args...));
+ descriptor, target, context, args...));
}
template <class... TArgs>
@@ -1185,9 +1196,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* function, MachineType return_type, SaveFPRegsMode mode,
std::initializer_list<CFunctionArg> args);
- TNode<Object> CallRuntimeImpl(Runtime::FunctionId function,
- TNode<Object> context,
- std::initializer_list<TNode<Object>> args);
+ Node* CallRuntimeImpl(Runtime::FunctionId function, TNode<Object> context,
+ std::initializer_list<TNode<Object>> args);
void TailCallRuntimeImpl(Runtime::FunctionId function, TNode<Int32T> arity,
TNode<Object> context,
@@ -1201,16 +1211,27 @@ class V8_EXPORT_PRIVATE CodeAssembler {
const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
std::initializer_list<Node*> args);
+ template <class... TArgs>
+ Node* CallStubR(StubCallMode call_mode,
+ const CallInterfaceDescriptor& descriptor,
+ TNode<Object> target, TNode<Object> context, TArgs... args) {
+ return CallStubRImpl(call_mode, descriptor, target, context, {args...});
+ }
+
Node* CallStubRImpl(StubCallMode call_mode,
const CallInterfaceDescriptor& descriptor,
- size_t result_size, TNode<Object> target,
- TNode<Object> context, std::initializer_list<Node*> args);
+ TNode<Object> target, TNode<Object> context,
+ std::initializer_list<Node*> args);
Node* CallJSStubImpl(const CallInterfaceDescriptor& descriptor,
TNode<Object> target, TNode<Object> context,
TNode<Object> function, TNode<Object> new_target,
TNode<Int32T> arity, std::initializer_list<Node*> args);
+ Node* CallStubN(StubCallMode call_mode,
+ const CallInterfaceDescriptor& descriptor, int input_count,
+ Node* const* inputs);
+
// These two don't have definitions and are here only for catching use cases
// where the cast is not necessary.
TNode<Int32T> Signed(TNode<Int32T> x);
@@ -1224,8 +1245,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void CallEpilogue();
CodeAssemblerState* state_;
-
- DISALLOW_COPY_AND_ASSIGN(CodeAssembler);
};
// TODO(solanes, v8:6949): this class should be merged into
@@ -1233,6 +1252,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// CodeAssemblerVariableLists.
class V8_EXPORT_PRIVATE CodeAssemblerVariable {
public:
+ CodeAssemblerVariable(const CodeAssemblerVariable&) = delete;
+ CodeAssemblerVariable& operator=(const CodeAssemblerVariable&) = delete;
+
Node* value() const;
MachineRepresentation rep() const;
bool IsBound() const;
@@ -1264,7 +1286,6 @@ class V8_EXPORT_PRIVATE CodeAssemblerVariable {
};
Impl* impl_;
CodeAssemblerState* state_;
- DISALLOW_COPY_AND_ASSIGN(CodeAssemblerVariable);
};
std::ostream& operator<<(std::ostream&, const CodeAssemblerVariable&);
@@ -1331,6 +1352,11 @@ class V8_EXPORT_PRIVATE CodeAssemblerLabel {
: CodeAssemblerLabel(assembler, 1, &merged_variable, type) {}
~CodeAssemblerLabel();
+ // Cannot be copied because the destructor explicitly calls the destructor of
+ // the underlying {RawMachineLabel}, hence only one pointer can point to it.
+ CodeAssemblerLabel(const CodeAssemblerLabel&) = delete;
+ CodeAssemblerLabel& operator=(const CodeAssemblerLabel&) = delete;
+
inline bool is_bound() const { return bound_; }
inline bool is_used() const { return merge_count_ != 0; }
@@ -1358,10 +1384,6 @@ class V8_EXPORT_PRIVATE CodeAssemblerLabel {
std::map<CodeAssemblerVariable::Impl*, std::vector<Node*>,
CodeAssemblerVariable::ImplComparator>
variable_merges_;
-
- // Cannot be copied because the destructor explicitly calls the destructor of
- // the underlying {RawMachineLabel}, hence only one pointer can point to it.
- DISALLOW_COPY_AND_ASSIGN(CodeAssemblerLabel);
};
class CodeAssemblerParameterizedLabelBase {
@@ -1442,6 +1464,9 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
~CodeAssemblerState();
+ CodeAssemblerState(const CodeAssemblerState&) = delete;
+ CodeAssemblerState& operator=(const CodeAssemblerState&) = delete;
+
const char* name() const { return name_; }
int parameter_count() const;
@@ -1485,8 +1510,6 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
std::vector<FileAndLine> macro_call_stack_;
VariableId NextVariableId() { return next_variable_id_++; }
-
- DISALLOW_COPY_AND_ASSIGN(CodeAssemblerState);
};
class V8_EXPORT_PRIVATE ScopedExceptionHandler {
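Taken together, these header changes replace the untyped Parameter(int) with a checked Parameter<T> that records the caller's file and line in the cast message, add UncheckedParameter<T> for untagged values (the static_assert rejects non-tagged T in the checked variant), and let CallRuntime state its expected result type. A hedged usage sketch; the descriptor constants and runtime function id are hypothetical:

  TNode<Object> receiver = Parameter<Object>(Descriptor::kReceiver);
  TNode<IntPtrT> slot = UncheckedParameter<IntPtrT>(Descriptor::kSlot);
  TNode<Smi> result = CallRuntime<Smi>(Runtime::kSomeSmiReturningFunction,
                                       context, receiver);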
diff --git a/deps/v8/src/compiler/common-node-cache.h b/deps/v8/src/compiler/common-node-cache.h
index b1a8370a7f..561f5e61f4 100644
--- a/deps/v8/src/compiler/common-node-cache.h
+++ b/deps/v8/src/compiler/common-node-cache.h
@@ -36,6 +36,9 @@ class CommonNodeCache final {
relocatable_int64_constants_(zone) {}
~CommonNodeCache() = default;
+ CommonNodeCache(const CommonNodeCache&) = delete;
+ CommonNodeCache& operator=(const CommonNodeCache&) = delete;
+
Node** FindInt32Constant(int32_t value) {
return int32_constants_.Find(value);
}
@@ -94,8 +97,6 @@ class CommonNodeCache final {
IntPtrNodeCache heap_constants_;
RelocInt32NodeCache relocatable_int32_constants_;
RelocInt64NodeCache relocatable_int64_constants_;
-
- DISALLOW_COPY_AND_ASSIGN(CommonNodeCache);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index c04617c244..70f4bbf47b 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -20,18 +20,15 @@ namespace compiler {
namespace {
Decision DecideCondition(JSHeapBroker* broker, Node* const cond) {
- switch (cond->opcode()) {
- case IrOpcode::kFoldConstant: {
- return DecideCondition(broker, cond->InputAt(1));
- }
+ Node* unwrapped = SkipValueIdentities(cond);
+ switch (unwrapped->opcode()) {
case IrOpcode::kInt32Constant: {
- Int32Matcher mcond(cond);
- return mcond.Value() ? Decision::kTrue : Decision::kFalse;
+ Int32Matcher m(unwrapped);
+ return m.ResolvedValue() ? Decision::kTrue : Decision::kFalse;
}
case IrOpcode::kHeapConstant: {
- HeapObjectMatcher mcond(cond);
- return mcond.Ref(broker).BooleanValue() ? Decision::kTrue
- : Decision::kFalse;
+ HeapObjectMatcher m(unwrapped);
+ return m.Ref(broker).BooleanValue() ? Decision::kTrue : Decision::kFalse;
}
default:
return Decision::kUnknown;
@@ -436,7 +433,7 @@ Reduction CommonOperatorReducer::ReduceSwitch(Node* node) {
// non-matching cases as dead code (same for an unused IfDefault), because the
// Switch itself will be marked as dead code.
Int32Matcher mswitched(switched_value);
- if (mswitched.HasValue()) {
+ if (mswitched.HasResolvedValue()) {
bool matched = false;
size_t const projection_count = node->op()->ControlOutputCount();
@@ -447,7 +444,7 @@ Reduction CommonOperatorReducer::ReduceSwitch(Node* node) {
Node* if_value = projections[i];
DCHECK_EQ(IrOpcode::kIfValue, if_value->opcode());
const IfValueParameters& p = IfValueParametersOf(if_value->op());
- if (p.value() == mswitched.Value()) {
+ if (p.value() == mswitched.ResolvedValue()) {
matched = true;
Replace(if_value, control);
break;
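DecideCondition no longer unwraps kFoldConstant itself; it first strips value identities, and the escape-analysis change later in this patch retires a local SkipTypeGuards helper in favor of the same function. Read together, the two call sites imply the helper skips wrapper nodes such as TypeGuard and FoldConstant. A hedged sketch of that behavior (the real helper lives in node-matchers.h; this is illustrative only):

  Node* SkipValueIdentitiesSketch(Node* node) {
    for (;;) {
      switch (node->opcode()) {
        case IrOpcode::kTypeGuard:
          node = NodeProperties::GetValueInput(node, 0);  // unwrap the guard
          break;
        case IrOpcode::kFoldConstant:
          node = node->InputAt(1);  // mirrors the old DecideCondition case
          break;
        default:
          return node;
      }
    }
  }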
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index a125113b6b..8a1bfca8c7 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -569,7 +569,7 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
V(TrapDivUnrepresentable) \
V(TrapRemByZero) \
V(TrapFloatUnrepresentable) \
- V(TrapFuncInvalid) \
+ V(TrapTableOutOfBounds) \
V(TrapFuncSigMismatch)
#define CACHED_PARAMETER_LIST(V) \
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index f68780394a..b6cede1cc5 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -459,6 +459,8 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
: public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit CommonOperatorBuilder(Zone* zone);
+ CommonOperatorBuilder(const CommonOperatorBuilder&) = delete;
+ CommonOperatorBuilder& operator=(const CommonOperatorBuilder&) = delete;
const Operator* Dead();
const Operator* DeadValue(MachineRepresentation rep);
@@ -563,8 +565,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const CommonOperatorGlobalCache& cache_;
Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(CommonOperatorBuilder);
};
// Node wrappers.
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index 263a5a5f1e..5fc6007114 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -170,8 +170,9 @@ class FieldRepresentationDependency final : public CompilationDependency {
bool IsValid() const override {
DisallowHeapAllocation no_heap_allocation;
Handle<Map> owner = owner_.object();
- return representation_.Equals(
- owner->instance_descriptors().GetDetails(descriptor_).representation());
+ return representation_.Equals(owner->instance_descriptors(kRelaxedLoad)
+ .GetDetails(descriptor_)
+ .representation());
}
void Install(const MaybeObjectHandle& code) const override {
@@ -208,7 +209,8 @@ class FieldTypeDependency final : public CompilationDependency {
DisallowHeapAllocation no_heap_allocation;
Handle<Map> owner = owner_.object();
Handle<Object> type = type_.object();
- return *type == owner->instance_descriptors().GetFieldType(descriptor_);
+ return *type ==
+ owner->instance_descriptors(kRelaxedLoad).GetFieldType(descriptor_);
}
void Install(const MaybeObjectHandle& code) const override {
@@ -236,7 +238,9 @@ class FieldConstnessDependency final : public CompilationDependency {
DisallowHeapAllocation no_heap_allocation;
Handle<Map> owner = owner_.object();
return PropertyConstness::kConst ==
- owner->instance_descriptors().GetDetails(descriptor_).constness();
+ owner->instance_descriptors(kRelaxedLoad)
+ .GetDetails(descriptor_)
+ .constness();
}
void Install(const MaybeObjectHandle& code) const override {
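All three dependency validators now pass an explicit memory-order tag when reading the descriptor array, which background compilation threads may observe while the main thread mutates it. A hedged sketch of the tagged access pattern, using the names from the hunks above:

  DescriptorArray descriptors = owner->instance_descriptors(kRelaxedLoad);
  PropertyDetails details = descriptors.GetDetails(descriptor_);
  Representation rep = details.representation();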
diff --git a/deps/v8/src/compiler/compiler-source-position-table.h b/deps/v8/src/compiler/compiler-source-position-table.h
index 6c3ab684a8..9974a2daad 100644
--- a/deps/v8/src/compiler/compiler-source-position-table.h
+++ b/deps/v8/src/compiler/compiler-source-position-table.h
@@ -30,6 +30,8 @@ class V8_EXPORT_PRIVATE SourcePositionTable final
Init(source_positions_->GetSourcePosition(node));
}
~Scope() { source_positions_->current_position_ = prev_position_; }
+ Scope(const Scope&) = delete;
+ Scope& operator=(const Scope&) = delete;
private:
void Init(SourcePosition position) {
@@ -38,10 +40,11 @@ class V8_EXPORT_PRIVATE SourcePositionTable final
SourcePositionTable* const source_positions_;
SourcePosition const prev_position_;
- DISALLOW_COPY_AND_ASSIGN(Scope);
};
explicit SourcePositionTable(Graph* graph);
+ SourcePositionTable(const SourcePositionTable&) = delete;
+ SourcePositionTable& operator=(const SourcePositionTable&) = delete;
void AddDecorator();
void RemoveDecorator();
@@ -63,8 +66,6 @@ class V8_EXPORT_PRIVATE SourcePositionTable final
Decorator* decorator_;
SourcePosition current_position_;
NodeAuxData<SourcePosition, SourcePosition::Unknown> table_;
-
- DISALLOW_COPY_AND_ASSIGN(SourcePositionTable);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/constant-folding-reducer.h b/deps/v8/src/compiler/constant-folding-reducer.h
index f98ab0595e..88f0cb6a57 100644
--- a/deps/v8/src/compiler/constant-folding-reducer.h
+++ b/deps/v8/src/compiler/constant-folding-reducer.h
@@ -20,6 +20,8 @@ class V8_EXPORT_PRIVATE ConstantFoldingReducer final
ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph,
JSHeapBroker* broker);
~ConstantFoldingReducer() final;
+ ConstantFoldingReducer(const ConstantFoldingReducer&) = delete;
+ ConstantFoldingReducer& operator=(const ConstantFoldingReducer&) = delete;
const char* reducer_name() const override { return "ConstantFoldingReducer"; }
@@ -31,8 +33,6 @@ class V8_EXPORT_PRIVATE ConstantFoldingReducer final
JSGraph* const jsgraph_;
JSHeapBroker* const broker_;
-
- DISALLOW_COPY_AND_ASSIGN(ConstantFoldingReducer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/control-flow-optimizer.cc b/deps/v8/src/compiler/control-flow-optimizer.cc
index 930976fbed..5bcee55285 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.cc
+++ b/deps/v8/src/compiler/control-flow-optimizer.cc
@@ -79,8 +79,8 @@ bool ControlFlowOptimizer::TryBuildSwitch(Node* node) {
if (cond->opcode() != IrOpcode::kWord32Equal) return false;
Int32BinopMatcher m(cond);
Node* index = m.left().node();
- if (!m.right().HasValue()) return false;
- int32_t value = m.right().Value();
+ if (!m.right().HasResolvedValue()) return false;
+ int32_t value = m.right().ResolvedValue();
ZoneSet<int32_t> values(zone());
values.insert(value);
@@ -104,8 +104,8 @@ bool ControlFlowOptimizer::TryBuildSwitch(Node* node) {
if (cond1->opcode() != IrOpcode::kWord32Equal) break;
Int32BinopMatcher m1(cond1);
if (m1.left().node() != index) break;
- if (!m1.right().HasValue()) break;
- int32_t value1 = m1.right().Value();
+ if (!m1.right().HasResolvedValue()) break;
+ int32_t value1 = m1.right().ResolvedValue();
if (values.find(value1) != values.end()) break;
DCHECK_NE(value, value1);
diff --git a/deps/v8/src/compiler/control-flow-optimizer.h b/deps/v8/src/compiler/control-flow-optimizer.h
index 07fc9e6fc2..060fed8274 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.h
+++ b/deps/v8/src/compiler/control-flow-optimizer.h
@@ -27,6 +27,8 @@ class V8_EXPORT_PRIVATE ControlFlowOptimizer final {
ControlFlowOptimizer(Graph* graph, CommonOperatorBuilder* common,
MachineOperatorBuilder* machine,
TickCounter* tick_counter, Zone* zone);
+ ControlFlowOptimizer(const ControlFlowOptimizer&) = delete;
+ ControlFlowOptimizer& operator=(const ControlFlowOptimizer&) = delete;
void Optimize();
@@ -50,8 +52,6 @@ class V8_EXPORT_PRIVATE ControlFlowOptimizer final {
NodeMarker<bool> queued_;
Zone* const zone_;
TickCounter* const tick_counter_;
-
- DISALLOW_COPY_AND_ASSIGN(ControlFlowOptimizer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/csa-load-elimination.cc b/deps/v8/src/compiler/csa-load-elimination.cc
index c29a472364..17250bba5e 100644
--- a/deps/v8/src/compiler/csa-load-elimination.cc
+++ b/deps/v8/src/compiler/csa-load-elimination.cc
@@ -94,13 +94,13 @@ bool OffsetMayAlias(Node* offset1, MachineRepresentation repr1, Node* offset2,
IntPtrMatcher matcher1(offset1);
IntPtrMatcher matcher2(offset2);
// If either of the offsets is variable, accesses may alias
- if (!matcher1.HasValue() || !matcher2.HasValue()) {
+ if (!matcher1.HasResolvedValue() || !matcher2.HasResolvedValue()) {
return true;
}
// Otherwise, we return whether accesses overlap
- intptr_t start1 = matcher1.Value();
+ intptr_t start1 = matcher1.ResolvedValue();
intptr_t end1 = start1 + ElementSizeInBytes(repr1);
- intptr_t start2 = matcher2.Value();
+ intptr_t start2 = matcher2.ResolvedValue();
intptr_t end2 = start2 + ElementSizeInBytes(repr2);
return !(end1 <= start2 || end2 <= start1);
}
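The final line is a standard interval-disjointness test; a worked example in comments, with both representations 4 bytes wide (kWord32):

  // offset1 = 0, offset2 = 4: end1 = 4, start2 = 4, so end1 <= start2;
  //   the byte ranges [0,4) and [4,8) are disjoint -> no alias.
  // offset1 = 0, offset2 = 2: end1 = 4 > start2 = 2 and end2 = 6 > start1 = 0;
  //   the ranges overlap -> the accesses may alias.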
diff --git a/deps/v8/src/compiler/csa-load-elimination.h b/deps/v8/src/compiler/csa-load-elimination.h
index 9460858d04..f738475a94 100644
--- a/deps/v8/src/compiler/csa-load-elimination.h
+++ b/deps/v8/src/compiler/csa-load-elimination.h
@@ -36,6 +36,8 @@ class V8_EXPORT_PRIVATE CsaLoadElimination final
jsgraph_(jsgraph),
zone_(zone) {}
~CsaLoadElimination() final = default;
+ CsaLoadElimination(const CsaLoadElimination&) = delete;
+ CsaLoadElimination& operator=(const CsaLoadElimination&) = delete;
const char* reducer_name() const override { return "CsaLoadElimination"; }
@@ -107,8 +109,6 @@ class V8_EXPORT_PRIVATE CsaLoadElimination final
NodeAuxData<AbstractState const*> node_states_;
JSGraph* const jsgraph_;
Zone* zone_;
-
- DISALLOW_COPY_AND_ASSIGN(CsaLoadElimination);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/dead-code-elimination.h b/deps/v8/src/compiler/dead-code-elimination.h
index 5f2ba329e2..7fb22838c7 100644
--- a/deps/v8/src/compiler/dead-code-elimination.h
+++ b/deps/v8/src/compiler/dead-code-elimination.h
@@ -42,6 +42,8 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
DeadCodeElimination(Editor* editor, Graph* graph,
CommonOperatorBuilder* common, Zone* temp_zone);
~DeadCodeElimination() final = default;
+ DeadCodeElimination(const DeadCodeElimination&) = delete;
+ DeadCodeElimination& operator=(const DeadCodeElimination&) = delete;
const char* reducer_name() const override { return "DeadCodeElimination"; }
@@ -76,8 +78,6 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
CommonOperatorBuilder* const common_;
Node* const dead_;
Zone* zone_;
-
- DISALLOW_COPY_AND_ASSIGN(DeadCodeElimination);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/decompression-optimizer.h b/deps/v8/src/compiler/decompression-optimizer.h
index 1d94739e45..330202d4c2 100644
--- a/deps/v8/src/compiler/decompression-optimizer.h
+++ b/deps/v8/src/compiler/decompression-optimizer.h
@@ -39,6 +39,8 @@ class V8_EXPORT_PRIVATE DecompressionOptimizer final {
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine);
~DecompressionOptimizer() = default;
+ DecompressionOptimizer(const DecompressionOptimizer&) = delete;
+ DecompressionOptimizer& operator=(const DecompressionOptimizer&) = delete;
// Assign States to the nodes, and then change the node's Operator to use the
// compressed version if possible.
@@ -114,8 +116,6 @@ class V8_EXPORT_PRIVATE DecompressionOptimizer final {
// themselves. In a way, it functions as a NodeSet since each node will be
// contained at most once. It's a Vector since we care about insertion speed.
NodeVector compressed_candidate_nodes_;
-
- DISALLOW_COPY_AND_ASSIGN(DecompressionOptimizer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 98ca00c78b..015f1cce6f 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -14,6 +14,7 @@
#include "src/compiler/feedback-source.h"
#include "src/compiler/graph-assembler.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-origin-table.h"
@@ -37,7 +38,8 @@ class EffectControlLinearizer {
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
MaskArrayIndexEnable mask_array_index,
- MaintainSchedule maintain_schedule)
+ MaintainSchedule maintain_schedule,
+ JSHeapBroker* broker)
: js_graph_(js_graph),
schedule_(schedule),
temp_zone_(temp_zone),
@@ -45,9 +47,11 @@ class EffectControlLinearizer {
maintain_schedule_(maintain_schedule),
source_positions_(source_positions),
node_origins_(node_origins),
+ broker_(broker),
graph_assembler_(js_graph, temp_zone, base::nullopt,
should_maintain_schedule() ? schedule : nullptr),
- frame_state_zapper_(nullptr) {}
+ frame_state_zapper_(nullptr),
+ fast_api_call_stack_slot_(nullptr) {}
void Run();
@@ -284,17 +288,11 @@ class EffectControlLinearizer {
DeoptimizeReason reason);
// Helper functions used in LowerDynamicCheckMaps
- void CheckPolymorphic(Node* expected_polymorphic_array, Node* actual_map,
- Node* actual_handler, GraphAssemblerLabel<0>* done,
- Node* frame_state);
- void ProcessMonomorphic(Node* handler, GraphAssemblerLabel<0>* done,
- Node* frame_state, int slot, Node* vector);
- void BranchOnICState(int slot_index, Node* vector, Node* value_map,
- Node* frame_state, GraphAssemblerLabel<0>* monomorphic,
- GraphAssemblerLabel<0>* maybe_poly,
- GraphAssemblerLabel<0>* migrate, Node** strong_feedback,
- Node** poly_array);
-
+ void BuildCallDynamicMapChecksBuiltin(Node* actual_value,
+ Node* actual_handler,
+ int feedback_slot_index,
+ GraphAssemblerLabel<0>* done,
+ Node* frame_state);
bool should_maintain_schedule() const {
return maintain_schedule_ == MaintainSchedule::kMaintain;
}
@@ -311,6 +309,7 @@ class EffectControlLinearizer {
}
MachineOperatorBuilder* machine() const { return js_graph_->machine(); }
JSGraphAssembler* gasm() { return &graph_assembler_; }
+ JSHeapBroker* broker() const { return broker_; }
JSGraph* js_graph_;
Schedule* schedule_;
@@ -320,8 +319,11 @@ class EffectControlLinearizer {
RegionObservability region_observability_ = RegionObservability::kObservable;
SourcePositionTable* source_positions_;
NodeOriginTable* node_origins_;
+ JSHeapBroker* broker_;
JSGraphAssembler graph_assembler_;
Node* frame_state_zapper_; // For tracking down compiler::Node::New crashes.
+ Node* fast_api_call_stack_slot_; // For caching the stack slot allocated for
+ // fast API calls.
};
namespace {
@@ -1887,230 +1889,65 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
}
}
-void EffectControlLinearizer::CheckPolymorphic(Node* expected_polymorphic_array,
- Node* actual_map,
- Node* actual_handler,
- GraphAssemblerLabel<0>* done,
- Node* frame_state) {
- Node* expected_polymorphic_array_map =
- __ LoadField(AccessBuilder::ForMap(), expected_polymorphic_array);
- Node* is_weak_fixed_array = __ TaggedEqual(expected_polymorphic_array_map,
- __ WeakFixedArrayMapConstant());
- __ DeoptimizeIfNot(DeoptimizeReason::kTransitionedToMegamorphicIC,
- FeedbackSource(), is_weak_fixed_array, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
-
- Node* polymorphic_array = expected_polymorphic_array;
-
- // This is now a weak pointer that we're holding in the register, we
- // need to be careful about spilling and reloading it (as it could
- // get cleared in between). There's no runtime call here that could
- // cause a spill so we should be safe.
- Node* weak_actual_map = MakeWeakForComparison(actual_map);
- Node* length = ChangeSmiToInt32(__ LoadField(
- AccessBuilder::ForWeakFixedArrayLength(), polymorphic_array));
- auto do_handler_check = __ MakeLabel(MachineRepresentation::kWord32);
-
- GraphAssemblerLabel<0> labels[] = {__ MakeLabel(), __ MakeLabel(),
- __ MakeLabel(), __ MakeLabel()};
-
- STATIC_ASSERT(FLAG_max_minimorphic_map_checks == arraysize(labels));
- DCHECK_GE(FLAG_max_minimorphic_map_checks,
- FLAG_max_valid_polymorphic_map_count);
-
- // The following generates a switch based on the length of the
- // array:
- //
- // if length >= 4: goto labels[3]
- // if length == 3: goto labels[2]
- // if length == 2: goto labels[1]
- // if length == 1: goto labels[0]
- __ GotoIf(__ Int32LessThanOrEqual(
- __ Int32Constant(FeedbackIterator::SizeFor(4)), length),
- &labels[3]);
- __ GotoIf(
- __ Word32Equal(length, __ Int32Constant(FeedbackIterator::SizeFor(3))),
- &labels[2]);
- __ GotoIf(
- __ Word32Equal(length, __ Int32Constant(FeedbackIterator::SizeFor(2))),
- &labels[1]);
- __ GotoIf(
- __ Word32Equal(length, __ Int32Constant(FeedbackIterator::SizeFor(1))),
- &labels[0]);
-
- // We should never have a polymorphic feedback array of size 0.
+void EffectControlLinearizer::BuildCallDynamicMapChecksBuiltin(
+ Node* actual_value, Node* actual_handler, int feedback_slot_index,
+ GraphAssemblerLabel<0>* done, Node* frame_state) {
+ Node* slot_index = __ IntPtrConstant(feedback_slot_index);
+ Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+ auto builtin = Builtins::kDynamicMapChecks;
+ Node* result = CallBuiltin(builtin, properties, slot_index, actual_value,
+ actual_handler);
+ __ GotoIf(__ WordEqual(result, __ IntPtrConstant(static_cast<int>(
+ DynamicMapChecksStatus::kSuccess))),
+ done);
+ __ DeoptimizeIf(DeoptimizeKind::kBailout, DeoptimizeReason::kMissingMap,
+ FeedbackSource(),
+ __ WordEqual(result, __ IntPtrConstant(static_cast<int>(
+ DynamicMapChecksStatus::kBailout))),
+ frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ __ DeoptimizeIf(DeoptimizeReason::kWrongHandler, FeedbackSource(),
+ __ WordEqual(result, __ IntPtrConstant(static_cast<int>(
+ DynamicMapChecksStatus::kDeopt))),
+ frame_state, IsSafetyCheck::kCriticalSafetyCheck);
__ Unreachable(done);
-
- // This loop generates code like this to do the dynamic map check:
- //
- // labels[3]:
- // maybe_map = load(polymorphic_array, i)
- // if weak_actual_map == maybe_map goto handler_check
- // goto labels[2]
- // labels[2]:
- // maybe_map = load(polymorphic_array, i - 1)
- // if weak_actual_map == maybe_map goto handler_check
- // goto labels[1]
- // labels[1]:
- // maybe_map = load(polymorphic_array, i - 2)
- // if weak_actual_map == maybe_map goto handler_check
- // goto labels[0]
- // labels[0]:
- // maybe_map = load(polymorphic_array, i - 3)
- // if weak_actual_map == maybe_map goto handler_check
- // bailout
- for (int i = arraysize(labels) - 1; i >= 0; i--) {
- __ Bind(&labels[i]);
- Node* maybe_map = __ LoadField(AccessBuilder::ForWeakFixedArraySlot(
- FeedbackIterator::MapIndexForEntry(i)),
- polymorphic_array);
- Node* map_check = __ TaggedEqual(maybe_map, weak_actual_map);
-
- int handler_index = FeedbackIterator::HandlerIndexForEntry(i);
- __ GotoIf(map_check, &do_handler_check, __ Int32Constant(handler_index));
- if (i > 0) {
- __ Goto(&labels[i - 1]);
- } else {
- // TODO(turbofan): Add support for gasm->Deoptimize.
- __ DeoptimizeIf(DeoptimizeKind::kBailout, DeoptimizeReason::kMissingMap,
- FeedbackSource(), __ IntPtrConstant(1),
- FrameState(frame_state));
- __ Unreachable(done);
- }
- }
-
- __ Bind(&do_handler_check);
- Node* handler_index = do_handler_check.PhiAt(0);
- Node* maybe_handler =
- __ LoadElement(AccessBuilder::ForWeakFixedArrayElement(),
- polymorphic_array, handler_index);
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongHandler, FeedbackSource(),
- __ TaggedEqual(maybe_handler, actual_handler), frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
- __ Goto(done);
-}
-
-void EffectControlLinearizer::ProcessMonomorphic(Node* handler,
- GraphAssemblerLabel<0>* done,
- Node* frame_state, int slot,
- Node* vector) {
- Node* feedback_slot_handler =
- __ LoadField(AccessBuilder::ForFeedbackVectorSlot(slot + 1), vector);
- Node* handler_check = __ TaggedEqual(handler, feedback_slot_handler);
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongHandler, FeedbackSource(),
- handler_check, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
- __ Goto(done);
-}
-
-void EffectControlLinearizer::BranchOnICState(
- int slot_index, Node* vector, Node* value_map, Node* frame_state,
- GraphAssemblerLabel<0>* monomorphic, GraphAssemblerLabel<0>* maybe_poly,
- GraphAssemblerLabel<0>* migrate, Node** strong_feedback,
- Node** poly_array) {
- Node* feedback =
- __ LoadField(AccessBuilder::ForFeedbackVectorSlot(slot_index), vector);
-
- Node* mono_check = BuildIsWeakReferenceTo(feedback, value_map);
- __ GotoIf(mono_check, monomorphic);
-
- Node* is_strong_ref = BuildIsStrongReference(feedback);
- if (migrate != nullptr) {
- auto check_poly = __ MakeLabel();
-
- __ GotoIf(is_strong_ref, &check_poly);
- Node* is_cleared = BuildIsClearedWeakReference(feedback);
- __ DeoptimizeIf(DeoptimizeKind::kBailout, DeoptimizeReason::kMissingMap,
- FeedbackSource(), is_cleared, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
- *strong_feedback = BuildStrongReferenceFromWeakReference(feedback);
- __ Goto(migrate);
-
- __ Bind(&check_poly);
- } else {
- __ DeoptimizeIfNot(DeoptimizeKind::kBailout, DeoptimizeReason::kMissingMap,
- FeedbackSource(), is_strong_ref, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
- }
-
- *poly_array = feedback;
- __ Goto(maybe_poly);
}
void EffectControlLinearizer::LowerDynamicCheckMaps(Node* node,
Node* frame_state) {
DynamicCheckMapsParameters const& p =
DynamicCheckMapsParametersOf(node->op());
- Node* value = node->InputAt(0);
+ Node* actual_value = node->InputAt(0);
FeedbackSource const& feedback = p.feedback();
- Node* vector = __ HeapConstant(feedback.vector);
- Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* handler = p.handler()->IsSmi()
- ? __ SmiConstant(Smi::ToInt(*p.handler()))
- : __ HeapConstant(Handle<HeapObject>::cast(p.handler()));
+ Node* actual_value_map = __ LoadField(AccessBuilder::ForMap(), actual_value);
+ Node* actual_handler =
+ p.handler()->IsSmi()
+ ? __ SmiConstant(Smi::ToInt(*p.handler()))
+ : __ HeapConstant(Handle<HeapObject>::cast(p.handler()));
auto done = __ MakeLabel();
+ auto call_builtin = __ MakeDeferredLabel();
- // Emit monomorphic checks only if current state is monomorphic. In
- // case the current state is polymorphic, and if we ever go back to
- // monomorphic start, we will deopt and reoptimize the code.
- if (p.state() == DynamicCheckMapsParameters::kMonomorphic) {
- auto monomorphic_map_match = __ MakeLabel();
- auto maybe_poly = __ MakeLabel();
- Node* strong_feedback;
- Node* poly_array;
-
- if (p.flags() & CheckMapsFlag::kTryMigrateInstance) {
- auto map_check_failed = __ MakeDeferredLabel();
- BranchOnICState(feedback.index(), vector, value_map, frame_state,
- &monomorphic_map_match, &maybe_poly, &map_check_failed,
- &strong_feedback, &poly_array);
-
- __ Bind(&map_check_failed);
- {
- MigrateInstanceOrDeopt(value, value_map, frame_state, FeedbackSource(),
- DeoptimizeReason::kMissingMap);
-
- // Check if new map matches.
- Node* new_value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* mono_check = __ TaggedEqual(strong_feedback, new_value_map);
- __ DeoptimizeIfNot(DeoptimizeKind::kBailout,
- DeoptimizeReason::kMissingMap, FeedbackSource(),
- mono_check, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
- ProcessMonomorphic(handler, &done, frame_state, feedback.index(),
- vector);
- }
+ ZoneHandleSet<Map> maps = p.maps();
+ size_t const map_count = maps.size();
+ for (size_t i = 0; i < map_count; ++i) {
+ Node* map = __ HeapConstant(maps[i]);
+ Node* check = __ TaggedEqual(actual_value_map, map);
+ if (i == map_count - 1) {
+ __ BranchWithCriticalSafetyCheck(check, &done, &call_builtin);
} else {
- BranchOnICState(feedback.index(), vector, value_map, frame_state,
- &monomorphic_map_match, &maybe_poly, nullptr,
- &strong_feedback, &poly_array);
+ auto next_map = __ MakeLabel();
+ __ BranchWithCriticalSafetyCheck(check, &done, &next_map);
+ __ Bind(&next_map);
}
+ }
- __ Bind(&monomorphic_map_match);
- ProcessMonomorphic(handler, &done, frame_state, feedback.index(), vector);
-
- __ Bind(&maybe_poly);
- // TODO(mythria): ICs don't drop deprecated maps from feedback vector.
- // So it is not required to migrate the instance for the polymorphic case.
- // When we change dynamic map checks to check only four maps re-evaluate
- // if this is required.
- CheckPolymorphic(poly_array, value_map, handler, &done, frame_state);
- } else {
- DCHECK_EQ(p.state(), DynamicCheckMapsParameters::kPolymorphic);
- Node* feedback_slot = __ LoadField(
- AccessBuilder::ForFeedbackVectorSlot(feedback.index()), vector);
- // If the IC state at code generation time is not monomorphic, we don't
- // handle monomorphic states and just deoptimize if IC transitions to
- // monomorphic. For polymorphic ICs it is not required to migrate deprecated
- // maps since ICs don't discard deprecated maps from feedback.
- Node* is_poly_or_megamorphic = BuildIsStrongReference(feedback_slot);
- __ DeoptimizeIfNot(DeoptimizeReason::kTransitionedToMonomorphicIC,
- FeedbackSource(), is_poly_or_megamorphic, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
- CheckPolymorphic(feedback_slot, value_map, handler, &done, frame_state);
+ __ Bind(&call_builtin);
+ {
+ BuildCallDynamicMapChecksBuiltin(actual_value, actual_handler,
+ feedback.index(), &done, frame_state);
}
+
__ Bind(&done);
}
@@ -2310,7 +2147,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
// are all zero, and if so we know that we can perform a division
// safely (and fast by doing an arithmetic - aka sign preserving -
// right shift on {lhs}).
- int32_t divisor = m.Value();
+ int32_t divisor = m.ResolvedValue();
Node* mask = __ Int32Constant(divisor - 1);
Node* shift = __ Int32Constant(base::bits::WhichPowerOfTwo(divisor));
Node* check = __ Word32Equal(__ Word32And(lhs, mask), zero);
@@ -2532,7 +2369,7 @@ Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
// are all zero, and if so we know that we can perform a division
// safely (and fast by doing a logical - aka zero extending - right
// shift on {lhs}).
- uint32_t divisor = m.Value();
+ uint32_t divisor = m.ResolvedValue();
Node* mask = __ Uint32Constant(divisor - 1);
Node* shift = __ Uint32Constant(base::bits::WhichPowerOfTwo(divisor));
Node* check = __ Word32Equal(__ Word32And(lhs, mask), zero);
@@ -3742,36 +3579,26 @@ void EffectControlLinearizer::LowerTierUpCheck(Node* node) {
TierUpCheckNode n(node);
TNode<FeedbackVector> vector = n.feedback_vector();
- Node* optimization_marker = __ LoadField(
- AccessBuilder::ForFeedbackVectorOptimizedCodeWeakOrSmi(), vector);
+ Node* optimization_state =
+ __ LoadField(AccessBuilder::ForFeedbackVectorFlags(), vector);
// TODO(jgruber): The branch introduces a sequence of spills before the
// branch (and restores at `fallthrough`) that are completely unnecessary
// since the IfFalse continuation ends in a tail call. Investigate how to
// avoid these and fix it.
- // TODO(jgruber): Combine the checks below for none/queued, e.g. by
- // reorganizing OptimizationMarker values such that the least significant bit
- // says whether the value is interesting or not. Also update the related
- // check in the InterpreterEntryTrampoline.
-
auto fallthrough = __ MakeLabel();
- auto optimization_marker_is_not_none = __ MakeDeferredLabel();
- auto optimization_marker_is_neither_none_nor_queued = __ MakeDeferredLabel();
- __ BranchWithHint(
- __ TaggedEqual(optimization_marker, __ SmiConstant(static_cast<int>(
- OptimizationMarker::kNone))),
- &fallthrough, &optimization_marker_is_not_none, BranchHint::kTrue);
-
- __ Bind(&optimization_marker_is_not_none);
+ auto has_optimized_code_or_marker = __ MakeDeferredLabel();
__ BranchWithHint(
- __ TaggedEqual(optimization_marker,
- __ SmiConstant(static_cast<int>(
- OptimizationMarker::kInOptimizationQueue))),
- &fallthrough, &optimization_marker_is_neither_none_nor_queued,
- BranchHint::kNone);
+ __ Word32Equal(
+ __ Word32And(optimization_state,
+ __ Uint32Constant(
+ FeedbackVector::
+ kHasNoTopTierCodeOrCompileOptimizedMarkerMask)),
+ __ Int32Constant(0)),
+ &fallthrough, &has_optimized_code_or_marker, BranchHint::kTrue);
- __ Bind(&optimization_marker_is_neither_none_nor_queued);
+ __ Bind(&has_optimized_code_or_marker);
// The optimization marker field contains a non-trivial value, and some
// action has to be taken. For example, perhaps tier-up has been requested
@@ -3781,17 +3608,8 @@ void EffectControlLinearizer::LowerTierUpCheck(Node* node) {
// Currently we delegate these tasks to the InterpreterEntryTrampoline.
// TODO(jgruber,v8:8888): Consider a dedicated builtin instead.
- const int parameter_count =
- StartNode{graph()->start()}.FormalParameterCount();
TNode<HeapObject> code =
__ HeapConstant(BUILTIN_CODE(isolate(), InterpreterEntryTrampoline));
- Node* target = __ Parameter(Linkage::kJSCallClosureParamIndex);
- Node* new_target =
- __ Parameter(Linkage::GetJSCallNewTargetParamIndex(parameter_count));
- Node* argc =
- __ Parameter(Linkage::GetJSCallArgCountParamIndex(parameter_count));
- Node* context =
- __ Parameter(Linkage::GetJSCallContextParamIndex(parameter_count));
JSTrampolineDescriptor descriptor;
CallDescriptor::Flags flags = CallDescriptor::kFixedTargetRegister |
@@ -3799,8 +3617,8 @@ void EffectControlLinearizer::LowerTierUpCheck(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), descriptor, descriptor.GetStackParameterCount(), flags,
Operator::kNoProperties);
- Node* nodes[] = {code, target, new_target, argc,
- context, __ effect(), __ control()};
+ Node* nodes[] = {code, n.target(), n.new_target(), n.input_count(),
+ n.context(), __ effect(), __ control()};
#ifdef DEBUG
static constexpr int kCodeContextEffectControl = 4;
@@ -5235,13 +5053,21 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
CHECK_EQ(FastApiCallNode::ArityForArgc(c_arg_count, js_arg_count),
value_input_count);
- // Add the { has_error } output parameter.
- int kAlign = 4;
- int kSize = 4;
- Node* has_error = __ StackSlot(kSize, kAlign);
- // Generate the store to `has_error`.
+ if (fast_api_call_stack_slot_ == nullptr) {
+ // Add the { fallback } output parameter.
+ int kAlign = 4;
+ int kSize = sizeof(v8::FastApiCallbackOptions);
+ // If this check fails, probably you've added new fields to
+ // v8::FastApiCallbackOptions, which means you'll need to write code
+ // that initializes and reads from them too (see the Store and Load to
+ // fast_api_call_stack_slot_ below).
+ CHECK_EQ(kSize, 1);
+ fast_api_call_stack_slot_ = __ StackSlot(kSize, kAlign);
+ }
+
+ // Generate the store to `fast_api_call_stack_slot_`.
__ Store(StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
- has_error, 0, jsgraph()->ZeroConstant());
+ fast_api_call_stack_slot_, 0, jsgraph()->ZeroConstant());
MachineSignature::Builder builder(
graph()->zone(), 1, c_arg_count + FastApiCallNode::kHasErrorInputCount);
@@ -5252,7 +5078,7 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
MachineTypeFor(c_signature->ArgumentInfo(i).GetType());
builder.AddParam(machine_type);
}
- builder.AddParam(MachineType::Pointer()); // has_error
+ builder.AddParam(MachineType::Pointer()); // fast_api_call_stack_slot_
CallDescriptor* call_descriptor =
Linkage::GetSimplifiedCDescriptor(graph()->zone(), builder.Build());
@@ -5261,19 +5087,26 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
Node** const inputs = graph()->zone()->NewArray<Node*>(
c_arg_count + FastApiCallNode::kFastCallExtraInputCount);
- for (int i = 0; i < c_arg_count + FastApiCallNode::kFastTargetInputCount;
- ++i) {
- inputs[i] = NodeProperties::GetValueInput(node, i);
+ inputs[0] = NodeProperties::GetValueInput(node, 0); // the target
+ for (int i = FastApiCallNode::kFastTargetInputCount;
+ i < c_arg_count + FastApiCallNode::kFastTargetInputCount; ++i) {
+ if (c_signature->ArgumentInfo(i - 1).GetType() ==
+ CTypeInfo::Type::kFloat32) {
+ inputs[i] =
+ __ TruncateFloat64ToFloat32(NodeProperties::GetValueInput(node, i));
+ } else {
+ inputs[i] = NodeProperties::GetValueInput(node, i);
+ }
}
- inputs[c_arg_count + 1] = has_error;
+ inputs[c_arg_count + 1] = fast_api_call_stack_slot_;
inputs[c_arg_count + 2] = __ effect();
inputs[c_arg_count + 3] = __ control();
__ Call(call_descriptor,
c_arg_count + FastApiCallNode::kFastCallExtraInputCount, inputs);
- // Generate the load from `has_error`.
- Node* load = __ Load(MachineType::Int32(), has_error, 0);
+ // Generate the load from `fast_api_call_stack_slot_`.
+ Node* load = __ Load(MachineType::Int32(), fast_api_call_stack_slot_, 0);
TNode<Boolean> cond =
TNode<Boolean>::UncheckedCast(__ Word32Equal(load, __ Int32Constant(0)));
@@ -6527,9 +6360,9 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntryForInt32Key(
auto if_match = __ MakeLabel();
auto if_notmatch = __ MakeLabel();
auto if_notsmi = __ MakeDeferredLabel();
- __ GotoIfNot(ObjectIsSmi(candidate_key), &if_notsmi);
- __ Branch(__ Word32Equal(ChangeSmiToInt32(candidate_key), key), &if_match,
- &if_notmatch);
+ __ GotoIfNot(ObjectIsSmi(candidate_key), &if_notsmi);
+ __ Branch(__ Word32Equal(ChangeSmiToInt32(candidate_key), key), &if_match,
+ &if_notmatch);
__ Bind(&if_notsmi);
__ GotoIfNot(
@@ -6627,10 +6460,11 @@ void LinearizeEffectControl(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
MaskArrayIndexEnable mask_array_index,
- MaintainSchedule maintain_schedule) {
- EffectControlLinearizer linearizer(graph, schedule, temp_zone,
- source_positions, node_origins,
- mask_array_index, maintain_schedule);
+ MaintainSchedule maintain_schedule,
+ JSHeapBroker* broker) {
+ EffectControlLinearizer linearizer(
+ graph, schedule, temp_zone, source_positions, node_origins,
+ mask_array_index, maintain_schedule, broker);
linearizer.Run();
}
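The rewritten LowerDynamicCheckMaps drops the open-coded monomorphic/polymorphic feedback walk in favor of a compact shape: inline map compares against the maps recorded at compile time, with a deferred call to the DynamicMapChecks builtin on a miss. The emitted control flow, sketched in pseudocode comments:

  // for each map_i in p.maps():
  //   if (map(actual_value) == map_i) goto done;          // fast path
  // call_builtin (deferred):
  //   status = DynamicMapChecks(slot_index, actual_value, actual_handler);
  //   if (status == kSuccess) goto done;
  //   if (status == kBailout) Deoptimize(kBailout, kMissingMap);
  //   if (status == kDeopt)   Deoptimize(kWrongHandler);
  //   Unreachable();
  // done: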
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index d747da1676..fbfd3046dc 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -21,6 +21,7 @@ class JSGraph;
class NodeOriginTable;
class Schedule;
class SourcePositionTable;
+class JSHeapBroker;
enum class MaskArrayIndexEnable { kDoNotMaskArrayIndex, kMaskArrayIndex };
@@ -29,7 +30,8 @@ enum class MaintainSchedule { kMaintain, kDiscard };
V8_EXPORT_PRIVATE void LinearizeEffectControl(
JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- MaskArrayIndexEnable mask_array_index, MaintainSchedule maintain_schedule);
+ MaskArrayIndexEnable mask_array_index, MaintainSchedule maintain_schedule,
+ JSHeapBroker* broker);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index 89a8d4e118..f4ab1c9709 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/escape-analysis-reducer.h"
#include "src/compiler/all-nodes.h"
+#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/execution/frame-constants.h"
@@ -68,17 +69,6 @@ Reduction EscapeAnalysisReducer::ReplaceNode(Node* original,
return NoChange();
}
-namespace {
-
-Node* SkipTypeGuards(Node* node) {
- while (node->opcode() == IrOpcode::kTypeGuard) {
- node = NodeProperties::GetValueInput(node, 0);
- }
- return node;
-}
-
-} // namespace
-
Node* EscapeAnalysisReducer::ObjectIdNode(const VirtualObject* vobject) {
VirtualObject::Id id = vobject->id();
if (id >= object_id_cache_.size()) object_id_cache_.resize(id + 1);
@@ -185,8 +175,8 @@ Node* EscapeAnalysisReducer::ReduceDeoptState(Node* node, Node* effect,
i);
}
return new_node.Get();
- } else if (const VirtualObject* vobject =
- analysis_result().GetVirtualObject(SkipTypeGuards(node))) {
+ } else if (const VirtualObject* vobject = analysis_result().GetVirtualObject(
+ SkipValueIdentities(node))) {
if (vobject->HasEscaped()) return node;
if (deduplicator->SeenBefore(vobject)) {
return ObjectIdNode(vobject);
@@ -315,7 +305,6 @@ void EscapeAnalysisReducer::Finalize() {
formal_parameter_count,
Type::Constant(params.formal_parameter_count(),
jsgraph()->graph()->zone()));
-#ifdef V8_REVERSE_JSARGS
Node* offset_to_first_elem = jsgraph()->Constant(
CommonFrameConstants::kFixedSlotCountAboveFp);
if (!NodeProperties::IsTyped(offset_to_first_elem)) {
@@ -337,22 +326,6 @@ void EscapeAnalysisReducer::Finalize() {
jsgraph()->simplified()->NumberAdd(), offset,
formal_parameter_count);
}
-#else
- // {offset} is a reverted index starting from 1. The base address is
- // adapted to allow offsets starting from 1.
- Node* offset = jsgraph()->graph()->NewNode(
- jsgraph()->simplified()->NumberSubtract(), arguments_length,
- index);
- if (type == CreateArgumentsType::kRestParameter) {
- // In the case of rest parameters we should skip the formal
- // parameters.
- NodeProperties::SetType(offset,
- TypeCache::Get()->kArgumentsLengthType);
- offset = jsgraph()->graph()->NewNode(
- jsgraph()->simplified()->NumberSubtract(), offset,
- formal_parameter_count);
- }
-#endif
NodeProperties::SetType(offset,
TypeCache::Get()->kArgumentsLengthType);
NodeProperties::ReplaceValueInput(load, arguments_frame, 0);
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index 1c1267b3c7..49b672a26b 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -85,6 +85,8 @@ class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
public:
EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
EscapeAnalysisResult analysis_result, Zone* zone);
+ EscapeAnalysisReducer(const EscapeAnalysisReducer&) = delete;
+ EscapeAnalysisReducer& operator=(const EscapeAnalysisReducer&) = delete;
Reduction Reduce(Node* node) override;
const char* reducer_name() const override { return "EscapeAnalysisReducer"; }
@@ -111,8 +113,6 @@ class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
NodeHashCache node_cache_;
ZoneSet<Node*> arguments_elements_;
Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(EscapeAnalysisReducer);
};
} // namespace compiler
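
The hunks above, and many more below, replace the DISALLOW_COPY_AND_ASSIGN macro with copy operations explicitly deleted in the public section. A minimal sketch of the new idiom, with an illustrative class name:

class NonCopyable {
 public:
  NonCopyable() = default;
  // Deleting the copy constructor and copy assignment operator makes the
  // class non-copyable; any copy attempt fails to compile at the call site.
  NonCopyable(const NonCopyable&) = delete;
  NonCopyable& operator=(const NonCopyable&) = delete;
};

void Use(NonCopyable& a) {
  // NonCopyable b = a;  // would not compile: copy constructor is deleted
}

The deleted declarations behave like the macro's expansion but drop the dependency on the macros header and sit where readers expect special members to be declared.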
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 2a096b6933..07587524a4 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -119,6 +119,9 @@ class VariableTracker {
public:
VariableTracker(JSGraph* graph, EffectGraphReducer* reducer, Zone* zone);
+ VariableTracker(const VariableTracker&) = delete;
+ VariableTracker& operator=(const VariableTracker&) = delete;
+
Variable NewVariable() { return Variable(next_variable_++); }
Node* Get(Variable var, Node* effect) { return table_.Get(effect).Get(var); }
Zone* zone() { return zone_; }
@@ -155,8 +158,6 @@ class VariableTracker {
EffectGraphReducer* reducer_;
int next_variable_ = 0;
TickCounter* const tick_counter_;
-
- DISALLOW_COPY_AND_ASSIGN(VariableTracker);
};
// Encapsulates the current state of the escape analysis reducer to preserve
@@ -170,6 +171,8 @@ class EscapeAnalysisTracker : public ZoneObject {
variable_states_(jsgraph, reducer, zone),
jsgraph_(jsgraph),
zone_(zone) {}
+ EscapeAnalysisTracker(const EscapeAnalysisTracker&) = delete;
+ EscapeAnalysisTracker& operator=(const EscapeAnalysisTracker&) = delete;
class Scope : public VariableTracker::Scope {
public:
@@ -276,8 +279,6 @@ class EscapeAnalysisTracker : public ZoneObject {
VirtualObject::Id next_object_id_ = 0;
JSGraph* const jsgraph_;
Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(EscapeAnalysisTracker);
};
EffectGraphReducer::EffectGraphReducer(
@@ -559,9 +560,9 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
switch (op->opcode()) {
case IrOpcode::kAllocate: {
NumberMatcher size(current->ValueInput(0));
- if (!size.HasValue()) break;
- int size_int = static_cast<int>(size.Value());
- if (size_int != size.Value()) break;
+ if (!size.HasResolvedValue()) break;
+ int size_int = static_cast<int>(size.ResolvedValue());
+ if (size_int != size.ResolvedValue()) break;
if (const VirtualObject* vobject = current->InitVirtualObject(size_int)) {
// Initialize with dead nodes as a sentinel for uninitialized memory.
for (Variable field : *vobject) {
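
This file, and the other compiler files below, rename the matcher API from HasValue()/Value() to HasResolvedValue()/ResolvedValue(). A standalone analogue of the matcher contract, simplified to plain C++ (the real NumberMatcher operates on graph nodes, which are omitted here):

#include <optional>

// Simplified stand-in for a node matcher: holds an optional constant.
class NumberMatcherSketch {
 public:
  explicit NumberMatcherSketch(std::optional<double> c) : constant_(c) {}
  // Renamed API: "resolved" emphasizes that a constant could be determined.
  bool HasResolvedValue() const { return constant_.has_value(); }
  double ResolvedValue() const { return *constant_; }

 private:
  std::optional<double> constant_;
};

// Usage mirroring the kAllocate case above: bail out unless the size is a
// known constant that round-trips through int.
bool TryGetAllocationSize(const NumberMatcherSketch& size, int* out) {
  if (!size.HasResolvedValue()) return false;
  int size_int = static_cast<int>(size.ResolvedValue());
  if (size_int != size.ResolvedValue()) return false;
  *out = size_int;
  return true;
}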
diff --git a/deps/v8/src/compiler/feedback-source.cc b/deps/v8/src/compiler/feedback-source.cc
index 8c3d175c28..a8a67f786f 100644
--- a/deps/v8/src/compiler/feedback-source.cc
+++ b/deps/v8/src/compiler/feedback-source.cc
@@ -17,9 +17,6 @@ FeedbackSource::FeedbackSource(Handle<FeedbackVector> vector_,
FeedbackSource::FeedbackSource(FeedbackVectorRef vector_, FeedbackSlot slot_)
: FeedbackSource(vector_.object(), slot_) {}
-FeedbackSource::FeedbackSource(FeedbackNexus const& nexus)
- : FeedbackSource(nexus.vector_handle(), nexus.slot()) {}
-
int FeedbackSource::index() const {
CHECK(IsValid());
return FeedbackVector::GetIndex(slot);
diff --git a/deps/v8/src/compiler/feedback-source.h b/deps/v8/src/compiler/feedback-source.h
index 8484acb455..29c22cde9c 100644
--- a/deps/v8/src/compiler/feedback-source.h
+++ b/deps/v8/src/compiler/feedback-source.h
@@ -17,7 +17,6 @@ struct FeedbackSource {
V8_EXPORT_PRIVATE FeedbackSource(Handle<FeedbackVector> vector_,
FeedbackSlot slot_);
FeedbackSource(FeedbackVectorRef vector_, FeedbackSlot slot_);
- explicit FeedbackSource(FeedbackNexus const& nexus);
bool IsValid() const { return !vector.is_null() && !slot.IsInvalid(); }
int index() const;
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index 18f0df8c80..7d1a9dfb3a 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -89,6 +89,8 @@ class CallDescriptor;
class V8_EXPORT_PRIVATE Frame : public ZoneObject {
public:
explicit Frame(int fixed_frame_size_in_slots);
+ Frame(const Frame&) = delete;
+ Frame& operator=(const Frame&) = delete;
inline int GetTotalFrameSlotCount() const { return frame_slot_count_; }
inline int GetFixedSlotCount() const { return fixed_slot_count_; }
@@ -173,8 +175,6 @@ class V8_EXPORT_PRIVATE Frame : public ZoneObject {
int return_slot_count_;
BitVector* allocated_registers_;
BitVector* allocated_double_registers_;
-
- DISALLOW_COPY_AND_ASSIGN(Frame);
};
// Represents an offset from either the stack pointer or frame pointer.
diff --git a/deps/v8/src/compiler/globals.h b/deps/v8/src/compiler/globals.h
index fe96783c23..ff5b5a5732 100644
--- a/deps/v8/src/compiler/globals.h
+++ b/deps/v8/src/compiler/globals.h
@@ -71,4 +71,13 @@ inline std::ostream& operator<<(std::ostream& os,
} // namespace internal
} // namespace v8
+// Support for floating point parameters in calls to C.
+// It's currently enabled only for the platforms listed below. We don't plan
+// to add support for IA32, because it has a totally different approach
+// (using the FP stack). As support is added to more platforms, please make sure
+// to list them here in order to enable tests of this functionality.
+#if defined(V8_TARGET_ARCH_X64)
+#define V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+#endif
+
#endif // V8_COMPILER_GLOBALS_H_
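
Per the comment, V8_ENABLE_FP_PARAMS_IN_C_LINKAGE is a pure platform gate: it is defined only where the C calling convention for float/double parameters is supported, currently just x64. A sketch of how such a gate is typically consumed (the function name here is illustrative; the real consumer in this diff is CanOptimizeFastCall in js-call-reducer.cc below):

// Hypothetical consumer of the platform gate defined above.
bool PlatformSupportsFpParams() {
#ifdef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
  return true;   // x64 today; more architectures as support lands
#else
  return false;  // signatures with float/double parameters must be rejected
#endif
}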
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 975efedf0f..aaa0644da6 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -351,6 +351,10 @@ Node* GraphAssembler::IntPtrConstant(intptr_t value) {
return AddClonedNode(mcgraph()->IntPtrConstant(value));
}
+Node* GraphAssembler::UintPtrConstant(uintptr_t value) {
+ return AddClonedNode(mcgraph()->UintPtrConstant(value));
+}
+
Node* GraphAssembler::Int32Constant(int32_t value) {
return AddClonedNode(mcgraph()->Int32Constant(value));
}
@@ -709,6 +713,18 @@ Node* GraphAssembler::LoadUnaligned(MachineType type, Node* object,
return AddNode(graph()->NewNode(op, object, offset, effect(), control()));
}
+Node* GraphAssembler::ProtectedStore(MachineRepresentation rep, Node* object,
+ Node* offset, Node* value) {
+ return AddNode(graph()->NewNode(machine()->ProtectedStore(rep), object,
+ offset, value, effect(), control()));
+}
+
+Node* GraphAssembler::ProtectedLoad(MachineType type, Node* object,
+ Node* offset) {
+ return AddNode(graph()->NewNode(machine()->ProtectedLoad(type), object,
+ offset, effect(), control()));
+}
+
Node* GraphAssembler::Retain(Node* buffer) {
return AddNode(graph()->NewNode(common()->Retain(), buffer, effect()));
}
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 1be52317c0..eb7f6cc3c0 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -47,6 +47,7 @@ class BasicBlock;
V(Float64ExtractLowWord32) \
V(Float64SilenceNaN) \
V(RoundFloat64ToInt32) \
+ V(TruncateFloat64ToFloat32) \
V(TruncateFloat64ToInt64) \
V(TruncateFloat64ToWord32) \
V(TruncateInt64ToInt32) \
@@ -236,6 +237,7 @@ class V8_EXPORT_PRIVATE GraphAssembler {
// Value creation.
Node* IntPtrConstant(intptr_t value);
+ Node* UintPtrConstant(uintptr_t value);
Node* Uint32Constant(uint32_t value);
Node* Int32Constant(int32_t value);
Node* Int64Constant(int64_t value);
@@ -303,6 +305,10 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* value);
Node* LoadUnaligned(MachineType type, Node* object, Node* offset);
+ Node* ProtectedStore(MachineRepresentation rep, Node* object, Node* offset,
+ Node* value);
+ Node* ProtectedLoad(MachineType type, Node* object, Node* offset);
+
Node* Retain(Node* buffer);
Node* UnsafePointerAdd(Node* base, Node* external);
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 95454098d5..171033fe53 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -139,6 +139,9 @@ class V8_EXPORT_PRIVATE GraphReducer
JSHeapBroker* broker, Node* dead = nullptr);
~GraphReducer() override;
+ GraphReducer(const GraphReducer&) = delete;
+ GraphReducer& operator=(const GraphReducer&) = delete;
+
Graph* graph() const { return graph_; }
void AddReducer(Reducer* reducer);
@@ -190,8 +193,6 @@ class V8_EXPORT_PRIVATE GraphReducer
ZoneStack<NodeState> stack_;
TickCounter* const tick_counter_;
JSHeapBroker* const broker_;
-
- DISALLOW_COPY_AND_ASSIGN(GraphReducer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/graph-trimmer.h b/deps/v8/src/compiler/graph-trimmer.h
index 5a5f525ef4..8e420226d1 100644
--- a/deps/v8/src/compiler/graph-trimmer.h
+++ b/deps/v8/src/compiler/graph-trimmer.h
@@ -20,6 +20,8 @@ class V8_EXPORT_PRIVATE GraphTrimmer final {
public:
GraphTrimmer(Zone* zone, Graph* graph);
~GraphTrimmer();
+ GraphTrimmer(const GraphTrimmer&) = delete;
+ GraphTrimmer& operator=(const GraphTrimmer&) = delete;
// Trim nodes in the {graph} that are not reachable from {graph->end()}.
void TrimGraph();
@@ -50,8 +52,6 @@ class V8_EXPORT_PRIVATE GraphTrimmer final {
Graph* const graph_;
NodeMarker<bool> is_live_;
NodeVector live_;
-
- DISALLOW_COPY_AND_ASSIGN(GraphTrimmer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 36372f5d02..c633f4fa62 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -277,6 +277,8 @@ class JSONGraphNodeWriter {
positions_(positions),
origins_(origins),
first_node_(true) {}
+ JSONGraphNodeWriter(const JSONGraphNodeWriter&) = delete;
+ JSONGraphNodeWriter& operator=(const JSONGraphNodeWriter&) = delete;
void Print() {
for (Node* const node : all_.reachable) PrintNode(node);
@@ -349,8 +351,6 @@ class JSONGraphNodeWriter {
const SourcePositionTable* positions_;
const NodeOriginTable* origins_;
bool first_node_;
-
- DISALLOW_COPY_AND_ASSIGN(JSONGraphNodeWriter);
};
@@ -358,6 +358,8 @@ class JSONGraphEdgeWriter {
public:
JSONGraphEdgeWriter(std::ostream& os, Zone* zone, const Graph* graph)
: os_(os), all_(zone, graph, false), first_edge_(true) {}
+ JSONGraphEdgeWriter(const JSONGraphEdgeWriter&) = delete;
+ JSONGraphEdgeWriter& operator=(const JSONGraphEdgeWriter&) = delete;
void Print() {
for (Node* const node : all_.reachable) PrintEdges(node);
@@ -400,8 +402,6 @@ class JSONGraphEdgeWriter {
std::ostream& os_;
AllNodes all_;
bool first_edge_;
-
- DISALLOW_COPY_AND_ASSIGN(JSONGraphEdgeWriter);
};
std::ostream& operator<<(std::ostream& os, const GraphAsJSON& ad) {
@@ -420,6 +420,8 @@ std::ostream& operator<<(std::ostream& os, const GraphAsJSON& ad) {
class GraphC1Visualizer {
public:
GraphC1Visualizer(std::ostream& os, Zone* zone); // NOLINT
+ GraphC1Visualizer(const GraphC1Visualizer&) = delete;
+ GraphC1Visualizer& operator=(const GraphC1Visualizer&) = delete;
void PrintCompilation(const OptimizedCompilationInfo* info);
void PrintSchedule(const char* phase, const Schedule* schedule,
@@ -470,8 +472,6 @@ class GraphC1Visualizer {
std::ostream& os_;
int indent_;
Zone* zone_;
-
- DISALLOW_COPY_AND_ASSIGN(GraphC1Visualizer);
};
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index 8d2acfded7..c8af078895 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -34,6 +34,8 @@ using NodeId = uint32_t;
class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit Graph(Zone* zone);
+ Graph(const Graph&) = delete;
+ Graph& operator=(const Graph&) = delete;
// Scope used when creating a subgraph for inlining. Automatically preserves
// the original start and end nodes of the graph, and resets them when you
@@ -46,13 +48,13 @@ class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) {
graph_->SetStart(start_);
graph_->SetEnd(end_);
}
+ SubgraphScope(const SubgraphScope&) = delete;
+ SubgraphScope& operator=(const SubgraphScope&) = delete;
private:
Graph* const graph_;
Node* const start_;
Node* const end_;
-
- DISALLOW_COPY_AND_ASSIGN(SubgraphScope);
};
// Base implementation used by all factory methods.
@@ -105,8 +107,6 @@ class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) {
Mark mark_max_;
NodeId next_node_id_;
ZoneVector<GraphDecorator*> decorators_;
-
- DISALLOW_COPY_AND_ASSIGN(Graph);
};
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index f66b678632..b268593a48 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -13,6 +13,7 @@
#include "src/utils/boxed-float.h"
namespace v8 {
+
class CFunctionInfo;
namespace internal {
@@ -32,6 +33,7 @@ class NativeContext;
class ScriptContextTable;
namespace compiler {
+
// Whether we are loading a property or storing to a property.
// For a store during literal creation, do not walk up the prototype chain.
enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };
@@ -58,9 +60,13 @@ enum class OddballType : uint8_t {
#define HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(V) \
/* Subtypes of FixedArray */ \
V(ObjectBoilerplateDescription) \
+ V(ScopeInfo) \
+ /* Subtypes of Name */ \
+ V(Symbol) \
/* Subtypes of HeapObject */ \
V(AccessorInfo) \
V(ArrayBoilerplateDescription) \
+ V(CallHandlerInfo) \
V(Cell) \
V(TemplateObjectDescription)
@@ -80,7 +86,6 @@ enum class OddballType : uint8_t {
V(NativeContext) \
/* Subtypes of FixedArray */ \
V(Context) \
- V(ScopeInfo) \
V(ScriptContextTable) \
/* Subtypes of FixedArrayBase */ \
V(BytecodeArray) \
@@ -89,13 +94,11 @@ enum class OddballType : uint8_t {
/* Subtypes of Name */ \
V(InternalizedString) \
V(String) \
- V(Symbol) \
/* Subtypes of JSReceiver */ \
V(JSObject) \
/* Subtypes of HeapObject */ \
V(AllocationSite) \
V(BigInt) \
- V(CallHandlerInfo) \
V(Code) \
V(DescriptorArray) \
V(FeedbackCell) \
@@ -316,7 +319,7 @@ class JSBoundFunctionRef : public JSObjectRef {
Handle<JSBoundFunction> object() const;
- void Serialize();
+ bool Serialize();
bool serialized() const;
// The following are available only after calling Serialize().
@@ -347,6 +350,7 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
NativeContextRef native_context() const;
SharedFunctionInfoRef shared() const;
FeedbackVectorRef feedback_vector() const;
+ FeedbackCellRef raw_feedback_cell() const;
CodeRef code() const;
int InitialMapInstanceSizeWithMinSlack() const;
};
@@ -772,8 +776,7 @@ class ScopeInfoRef : public HeapObjectRef {
int ContextLength() const;
bool HasOuterScopeInfo() const;
- int Flags() const;
- bool HasContextExtension() const;
+ bool HasContextExtensionSlot() const;
// Only serialized via SerializeScopeInfoChain.
ScopeInfoRef OuterScopeInfo() const;
@@ -791,8 +794,6 @@ class ScopeInfoRef : public HeapObjectRef {
V(bool, HasBuiltinId) \
V(bool, construct_as_builtin) \
V(bool, HasBytecodeArray) \
- V(bool, is_safe_to_skip_arguments_adaptor) \
- V(SharedFunctionInfo::Inlineability, GetInlineability) \
V(int, StartPosition) \
V(bool, is_compiled) \
V(bool, IsUserJavaScript)
@@ -806,6 +807,7 @@ class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
int builtin_id() const;
int context_header_size() const;
BytecodeArrayRef GetBytecodeArray() const;
+ SharedFunctionInfo::Inlineability GetInlineability() const;
#define DECL_ACCESSOR(type, name) type name() const;
BROKER_SFI_FIELDS(DECL_ACCESSOR)
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index a6bbd563a0..2ef7d8af32 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -680,9 +680,9 @@ void Int64Lowering::LowerNode(Node* node) {
? GetReplacementLow(node->InputAt(1))
: node->InputAt(1);
Int32Matcher m(shift);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
// Precondition: 0 <= shift < 64.
- int32_t shift_value = m.Value() & 0x3F;
+ int32_t shift_value = m.ResolvedValue() & 0x3F;
if (shift_value == 0) {
ReplaceNode(node, GetReplacementLow(input),
GetReplacementHigh(input));
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 94a6b3a7c7..7088fb0d43 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -157,6 +157,9 @@ class JSCallReducerAssembler : public JSGraphAssembler {
gasm_->Bind(&merge);
}
+ IfBuilder0(const IfBuilder0&) = delete;
+ IfBuilder0& operator=(const IfBuilder0&) = delete;
+
private:
JSGraphAssembler* const gasm_;
const TNode<Boolean> cond_;
@@ -166,8 +169,6 @@ class JSCallReducerAssembler : public JSGraphAssembler {
BranchHint hint_ = BranchHint::kNone;
VoidGenerator0 then_body_;
VoidGenerator0 else_body_;
-
- DISALLOW_COPY_AND_ASSIGN(IfBuilder0);
};
IfBuilder0 If(TNode<Boolean> cond) { return {this, cond, false}; }
@@ -882,14 +883,13 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
class FastApiCallReducerAssembler : public JSCallReducerAssembler {
public:
FastApiCallReducerAssembler(
- JSCallReducer* reducer, Node* node, Address c_function,
- const CFunctionInfo* c_signature,
+ JSCallReducer* reducer, Node* node,
const FunctionTemplateInfoRef function_template_info, Node* receiver,
Node* holder, const SharedFunctionInfoRef shared, Node* target,
const int arity, Node* effect)
: JSCallReducerAssembler(reducer, node),
- c_function_(c_function),
- c_signature_(c_signature),
+ c_function_(function_template_info.c_function()),
+ c_signature_(function_template_info.c_signature()),
function_template_info_(function_template_info),
receiver_(receiver),
holder_(holder),
@@ -2640,8 +2640,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
// recomputed even if the actual value of the object changes.
// This mirrors the checks done in builtins-function-gen.cc at
// runtime otherwise.
- int minimum_nof_descriptors = i::Max(JSFunction::kLengthDescriptorIndex,
- JSFunction::kNameDescriptorIndex) +
+ int minimum_nof_descriptors = std::max({JSFunction::kLengthDescriptorIndex,
+ JSFunction::kNameDescriptorIndex}) +
1;
if (receiver_map.NumberOfOwnDescriptors() < minimum_nof_descriptors) {
return inference.NoChange();
@@ -2725,7 +2725,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
// to ensure any exception is thrown in the correct context.
Node* context;
HeapObjectMatcher m(target);
- if (m.HasValue()) {
+ if (m.HasResolvedValue() && m.Ref(broker()).IsJSFunction()) {
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
if (should_disallow_heap_access() && !function.serialized()) {
TRACE_BROKER_MISSING(broker(), "Serialize call on function " << function);
@@ -2901,10 +2901,10 @@ Reduction JSCallReducer::ReduceObjectPrototypeHasOwnProperty(Node* node) {
// Object.prototype.hasOwnProperty does an implicit ToObject anyway, and
// these operations are not observable.
if (name->opcode() == IrOpcode::kJSForInNext) {
- ForInMode const mode = ForInModeOf(name->op());
- if (mode != ForInMode::kGeneric) {
- Node* object = NodeProperties::GetValueInput(name, 0);
- Node* cache_type = NodeProperties::GetValueInput(name, 2);
+ JSForInNextNode n(name);
+ if (n.Parameters().mode() != ForInMode::kGeneric) {
+ Node* object = n.receiver();
+ Node* cache_type = n.cache_type();
if (object->opcode() == IrOpcode::kJSToObject) {
object = NodeProperties::GetValueInput(object, 0);
}
@@ -3453,6 +3453,54 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
return ReplaceWithSubgraph(&a, subgraph);
}
+#ifndef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+namespace {
+bool HasFPParamsInSignature(const CFunctionInfo* c_signature) {
+ for (unsigned int i = 0; i < c_signature->ArgumentCount(); ++i) {
+ if (c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kFloat32 ||
+ c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kFloat64) {
+ return true;
+ }
+ }
+ return false;
+}
+} // namespace
+#endif
+
+#ifndef V8_TARGET_ARCH_64_BIT
+namespace {
+bool Has64BitIntegerParamsInSignature(const CFunctionInfo* c_signature) {
+ for (unsigned int i = 0; i < c_signature->ArgumentCount(); ++i) {
+ if (c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kInt64 ||
+ c_signature->ArgumentInfo(i).GetType() == CTypeInfo::Type::kUint64) {
+ return true;
+ }
+ }
+ return false;
+}
+} // namespace
+#endif
+
+bool CanOptimizeFastCall(
+ const FunctionTemplateInfoRef& function_template_info) {
+ const CFunctionInfo* c_signature = function_template_info.c_signature();
+
+ bool optimize_to_fast_call =
+ FLAG_turbo_fast_api_calls &&
+ function_template_info.c_function() != kNullAddress;
+#ifndef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+ optimize_to_fast_call =
+ optimize_to_fast_call && !HasFPParamsInSignature(c_signature);
+#else
+ USE(c_signature);
+#endif
+#ifndef V8_TARGET_ARCH_64_BIT
+ optimize_to_fast_call =
+ optimize_to_fast_call && !Has64BitIntegerParamsInSignature(c_signature);
+#endif
+ return optimize_to_fast_call;
+}
+
Reduction JSCallReducer::ReduceCallApiFunction(
Node* node, const SharedFunctionInfoRef& shared) {
DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
@@ -3624,13 +3672,9 @@ Reduction JSCallReducer::ReduceCallApiFunction(
return NoChange();
}
- Address c_function = function_template_info.c_function();
-
- if (FLAG_turbo_fast_api_calls && c_function != kNullAddress) {
- const CFunctionInfo* c_signature = function_template_info.c_signature();
- FastApiCallReducerAssembler a(this, node, c_function, c_signature,
- function_template_info, receiver, holder,
- shared, target, argc, effect);
+ if (CanOptimizeFastCall(function_template_info)) {
+ FastApiCallReducerAssembler a(this, node, function_template_info, receiver,
+ holder, shared, target, argc, effect);
Node* fast_call_subgraph = a.ReduceFastApiCall();
ReplaceWithSubgraph(&a, fast_call_subgraph);
@@ -3934,7 +3978,7 @@ namespace {
bool ShouldUseCallICFeedback(Node* node) {
HeapObjectMatcher m(node);
- if (m.HasValue() || m.IsCheckClosure() || m.IsJSCreateClosure()) {
+ if (m.HasResolvedValue() || m.IsCheckClosure() || m.IsJSCreateClosure()) {
// Don't use CallIC feedback when we know the function
// being called, i.e. either know the closure itself or
// at least the SharedFunctionInfo.
@@ -3970,6 +4014,8 @@ bool JSCallReducer::IsBuiltinOrApiFunction(JSFunctionRef function) const {
}
Reduction JSCallReducer::ReduceJSCall(Node* node) {
+ if (broker()->StackHasOverflowed()) return NoChange();
+
JSCallNode n(node);
CallParameters const& p = n.Parameters();
Node* target = n.target();
@@ -3979,7 +4025,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Try to specialize JSCall {node}s with constant {target}s.
HeapObjectMatcher m(target);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
ObjectRef target_ref = m.Ref(broker());
if (target_ref.IsJSFunction()) {
JSFunctionRef function = target_ref.AsJSFunction();
@@ -4104,7 +4150,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
if (feedback_target.has_value() && feedback_target->map().is_callable()) {
Node* target_function = jsgraph()->Constant(*feedback_target);
- if (FLAG_turboprop) {
+ if (broker()->is_turboprop()) {
if (!feedback_target->IsJSFunction()) return NoChange();
if (!IsBuiltinOrApiFunction(feedback_target->AsJSFunction())) {
return NoChange();
@@ -4138,7 +4184,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return NoChange();
}
- if (FLAG_turboprop &&
+ if (broker()->is_turboprop() &&
!feedback_vector.shared_function_info().HasBuiltinId()) {
return NoChange();
}
@@ -4578,7 +4624,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
arity, feedback_target->AsAllocationSite().object()));
return Changed(node);
} else if (feedback_target.has_value() &&
- !HeapObjectMatcher(new_target).HasValue() &&
+ !HeapObjectMatcher(new_target).HasResolvedValue() &&
feedback_target->map().is_constructor()) {
Node* new_target_feedback = jsgraph()->Constant(*feedback_target);
@@ -4603,7 +4649,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Try to specialize JSConstruct {node}s with constant {target}s.
HeapObjectMatcher m(target);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
HeapObjectRef target_ref = m.Ref(broker());
// Raise a TypeError if the {target} is not a constructor.
@@ -4659,7 +4705,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// constructor), {value} will be ignored and therefore we can lower
// to {JSCreate}. See https://tc39.es/ecma262/#sec-object-value.
HeapObjectMatcher mnew_target(new_target);
- if (mnew_target.HasValue() &&
+ if (mnew_target.HasResolvedValue() &&
!mnew_target.Ref(broker()).equals(function)) {
// Drop the value inputs.
node->RemoveInput(n.FeedbackVectorIndex());
@@ -4965,7 +5011,7 @@ Reduction JSCallReducer::ReduceForInsufficientFeedback(
// TODO(mythria): May be add additional flags to specify if we need to deopt
// on calls / construct rather than checking for TurboProp here. We may need
// it for NativeContextIndependent code too.
- if (FLAG_turboprop) return NoChange();
+ if (broker()->is_turboprop()) return NoChange();
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -5999,7 +6045,7 @@ Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) {
Node* position = n.ArgumentOr(1, jsgraph()->ZeroConstant());
HeapObjectMatcher m(search_string);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
ObjectRef target_ref = m.Ref(broker());
if (target_ref.IsString()) {
StringRef str = target_ref.AsString();
@@ -7323,7 +7369,7 @@ Reduction JSCallReducer::ReduceDataViewAccess(Node* node, DataViewAccess access,
// Check that the {offset} is within range for the {receiver}.
HeapObjectMatcher m(receiver);
- if (m.HasValue()) {
+ if (m.HasResolvedValue() && m.Ref(broker()).IsJSDataView()) {
// We only deal with DataViews here whose [[ByteLength]] is at least
// {element_size}, as for all other DataViews it'll be out-of-bounds.
JSDataViewRef dataview = m.Ref(broker()).AsJSDataView();
@@ -7602,7 +7648,7 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
// Add proper dependencies on the {regexp}s [[Prototype]]s.
dependencies()->DependOnStablePrototypeChains(
- ai_exec.receiver_maps(), kStartAtPrototype,
+ ai_exec.lookup_start_object_maps(), kStartAtPrototype,
JSObjectRef(broker(), holder));
} else {
return inference.NoChange();
@@ -7688,7 +7734,7 @@ Reduction JSCallReducer::ReduceBigIntAsUintN(Node* node) {
NumberMatcher matcher(bits);
if (matcher.IsInteger() && matcher.IsInRange(0, 64)) {
- const int bits_value = static_cast<int>(matcher.Value());
+ const int bits_value = static_cast<int>(matcher.ResolvedValue());
value = effect = graph()->NewNode(simplified()->CheckBigInt(p.feedback()),
value, effect, control);
value = graph()->NewNode(simplified()->BigIntAsUintN(bits_value), value);
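
CanOptimizeFastCall above combines three gates: the --turbo-fast-api-calls flag, a non-null C function address, and signature restrictions on platforms without FP or 64-bit integer parameter support in C linkage. A self-contained sketch of the same decision, with the signature reduced to a plain type list (all names are illustrative, not V8 API):

#include <cstdint>
#include <vector>

enum class CType { kInt32, kUint32, kInt64, kUint64, kFloat32, kFloat64 };

struct FastCallCandidate {
  uintptr_t c_function = 0;      // 0 plays the role of kNullAddress
  std::vector<CType> arg_types;  // stands in for CFunctionInfo
};

bool CanOptimizeFastCallSketch(const FastCallCandidate& c,
                               bool flag_turbo_fast_api_calls,
                               bool fp_params_supported,
                               bool is_64_bit_target) {
  bool ok = flag_turbo_fast_api_calls && c.c_function != 0;
  for (CType t : c.arg_types) {
    if (!fp_params_supported &&
        (t == CType::kFloat32 || t == CType::kFloat64)) {
      ok = false;  // FP params need V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
    }
    if (!is_64_bit_target && (t == CType::kInt64 || t == CType::kUint64)) {
      ok = false;  // 64-bit integer params need a 64-bit target
    }
  }
  return ok;
}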
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
index f74bd5b6de..2da6d8d732 100644
--- a/deps/v8/src/compiler/js-context-specialization.h
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -44,6 +44,8 @@ class V8_EXPORT_PRIVATE JSContextSpecialization final : public AdvancedReducer {
outer_(outer),
closure_(closure),
broker_(broker) {}
+ JSContextSpecialization(const JSContextSpecialization&) = delete;
+ JSContextSpecialization& operator=(const JSContextSpecialization&) = delete;
const char* reducer_name() const override {
return "JSContextSpecialization";
@@ -72,8 +74,6 @@ class V8_EXPORT_PRIVATE JSContextSpecialization final : public AdvancedReducer {
Maybe<OuterContext> outer_;
MaybeHandle<JSFunction> closure_;
JSHeapBroker* const broker_;
-
- DISALLOW_COPY_AND_ASSIGN(JSContextSpecialization);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 1f3169fad3..619475ef7f 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -28,7 +28,6 @@
#include "src/objects/js-regexp-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/template-objects.h"
-#include "torque-generated/exported-class-definitions.h"
namespace v8 {
namespace internal {
@@ -1496,7 +1495,7 @@ Node* JSCreateLowering::AllocateAliasedArguments(
}
// Calculate number of argument values being aliased/mapped.
- int mapped_count = Min(argument_count, parameter_count);
+ int mapped_count = std::min(argument_count, parameter_count);
*has_aliased_arguments = true;
// Prepare an iterator over argument values recorded in the frame state.
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 81bafa6183..0b38bd538d 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -7,6 +7,7 @@
#include "src/ast/ast.h"
#include "src/builtins/builtins-constructor.h"
#include "src/codegen/code-factory.h"
+#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
@@ -15,6 +16,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/processed-feedback.h"
+#include "src/compiler/simplified-operator.h"
#include "src/objects/feedback-cell.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/scope-info.h"
@@ -316,8 +318,10 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
}
void JSGenericLowering::LowerJSLoadNamedFromSuper(Node* node) {
+ // TODO(marja, v8:9237): Call a builtin which collects feedback.
JSLoadNamedFromSuperNode n(node);
NamedAccess const& p = n.Parameters();
+ node->RemoveInput(2); // Feedback vector
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.name()));
ReplaceWithRuntimeCall(node, Runtime::kLoadFromSuper);
}
@@ -480,7 +484,21 @@ void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
}
void JSGenericLowering::LowerJSGetSuperConstructor(Node* node) {
- ReplaceWithBuiltinCall(node, Builtins::kGetSuperConstructor);
+ Node* active_function = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* function_map = effect = graph()->NewNode(
+ jsgraph()->simplified()->LoadField(AccessBuilder::ForMap()),
+ active_function, effect, control);
+
+ RelaxControls(node);
+ node->ReplaceInput(0, function_map);
+ node->ReplaceInput(1, effect);
+ node->ReplaceInput(2, control);
+ node->TrimInputCount(3);
+ NodeProperties::ChangeOp(node, jsgraph()->simplified()->LoadField(
+ AccessBuilder::ForMapPrototype()));
}
void JSGenericLowering::LowerJSHasInPrototypeChain(Node* node) {
@@ -828,9 +846,7 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
Node* slot = jsgraph()->Int32Constant(p.feedback().index());
Node* receiver = jsgraph()->UndefinedConstant();
-#ifdef V8_REVERSE_JSARGS
Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
-#endif
// Register argument inputs are followed by stack argument inputs (such as
// feedback_vector). Both are listed in ascending order. Note that
// the receiver is implicitly placed on the stack and is thus inserted
@@ -839,16 +855,10 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 3, stub_arity);
node->InsertInput(zone(), 4, slot);
-#ifdef V8_REVERSE_JSARGS
node->InsertInput(zone(), 5, feedback_vector);
node->InsertInput(zone(), 6, receiver);
// After: {code, target, new_target, arity, slot, vector, receiver,
// ...args}.
-#else
- node->InsertInput(zone(), 5, receiver);
- // After: {code, target, new_target, arity, slot, receiver, ...args,
- // vector}.
-#endif
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
@@ -897,9 +907,7 @@ void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* receiver = jsgraph()->UndefinedConstant();
Node* slot = jsgraph()->Int32Constant(p.feedback().index());
-#ifdef V8_REVERSE_JSARGS
Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
-#endif
// Register argument inputs are followed by stack argument inputs (such as
// feedback_vector). Both are listed in ascending order. Note that
// the receiver is implicitly placed on the stack and is thus inserted
@@ -907,16 +915,10 @@ void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
// TODO(jgruber): Implement a simpler way to specify these mutations.
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 4, slot);
-#ifdef V8_REVERSE_JSARGS
node->InsertInput(zone(), 5, feedback_vector);
node->InsertInput(zone(), 6, receiver);
// After: {code, target, new_target, arguments_list, slot, vector,
// receiver}.
-#else
- node->InsertInput(zone(), 5, receiver);
- // After: {code, target, new_target, arguments_list, slot, receiver,
- // vector}.
-#endif
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
@@ -972,10 +974,8 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
// on the stack here.
Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
Node* receiver = jsgraph()->UndefinedConstant();
-#ifdef V8_REVERSE_JSARGS
Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
Node* spread = node->RemoveInput(n.LastArgumentIndex());
-#endif
// Register argument inputs are followed by stack argument inputs (such as
// feedback_vector). Both are listed in ascending order. Note that
@@ -985,17 +985,11 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 3, stub_arity);
node->InsertInput(zone(), 4, slot);
-#ifdef V8_REVERSE_JSARGS
node->InsertInput(zone(), 5, spread);
node->InsertInput(zone(), 6, feedback_vector);
node->InsertInput(zone(), 7, receiver);
// After: {code, target, new_target, arity, slot, spread, vector, receiver,
// ...args}.
-#else
- node->InsertInput(zone(), 5, receiver);
- // After: {code, target, new_target, arity, slot, receiver, ...args, spread,
- // vector}.
-#endif
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
@@ -1179,20 +1173,14 @@ void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
// Shuffling inputs.
// Before: {target, receiver, ...args, spread, vector}.
-#ifdef V8_REVERSE_JSARGS
Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
-#endif
Node* spread = node->RemoveInput(n.LastArgumentIndex());
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, stub_arity);
node->InsertInput(zone(), 3, spread);
node->InsertInput(zone(), 4, slot);
-#ifdef V8_REVERSE_JSARGS
node->InsertInput(zone(), 5, feedback_vector);
// After: {code, target, arity, spread, slot, vector, receiver, ...args}.
-#else
- // After: {code, target, arity, spread, slot, receiver, ...args, vector}.
-#endif
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
@@ -1230,12 +1218,79 @@ void JSGenericLowering::LowerJSCallRuntime(Node* node) {
ReplaceWithRuntimeCall(node, p.id(), static_cast<int>(p.arity()));
}
-void JSGenericLowering::LowerJSForInNext(Node* node) {
- UNREACHABLE(); // Eliminated in typed lowering.
+void JSGenericLowering::LowerJSForInPrepare(Node* node) {
+ JSForInPrepareNode n(node);
+ Effect effect(node); // {node} is kept in the effect chain.
+ Control control = n.control(); // .. but not in the control chain.
+ Node* enumerator = n.enumerator();
+ Node* slot =
+ jsgraph()->UintPtrConstant(n.Parameters().feedback().slot.ToInt());
+
+ std::vector<Edge> use_edges;
+ for (Edge edge : node->use_edges()) use_edges.push_back(edge);
+
+ // {node} will be changed to a builtin call (see below). The returned value
+ // is a fixed array containing {cache_array} and {cache_length}.
+ // TODO(jgruber): This is awkward; what we really want is two return values,
+ // the {cache_array} and {cache_length}, or better yet three return values
+ // s.t. we can avoid the graph rewrites below. Builtin support for multiple
+ // return types is unclear though.
+
+ Node* result_fixed_array = node;
+ Node* cache_type = enumerator; // Just to clarify the rename.
+ Node* cache_array;
+ Node* cache_length;
+
+ cache_array = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), result_fixed_array,
+ jsgraph()->IntPtrConstant(FixedArray::OffsetOfElementAt(0) -
+ kHeapObjectTag),
+ effect, control);
+ cache_length = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), result_fixed_array,
+ jsgraph()->IntPtrConstant(FixedArray::OffsetOfElementAt(1) -
+ kHeapObjectTag),
+ effect, control);
+
+ // Update the uses of {node}.
+ for (Edge edge : use_edges) {
+ Node* const user = edge.from();
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ } else if (NodeProperties::IsControlEdge(edge)) {
+ edge.UpdateTo(control);
+ } else {
+ DCHECK(NodeProperties::IsValueEdge(edge));
+ switch (ProjectionIndexOf(user->op())) {
+ case 0:
+ Replace(user, cache_type);
+ break;
+ case 1:
+ Replace(user, cache_array);
+ break;
+ case 2:
+ Replace(user, cache_length);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+
+ // Finally, change the original node into a builtin call. This happens here,
+ // after graph rewrites, since the Call does not have a control output and
+ // thus must not have any control uses. Any previously existing control
+ // outputs have been replaced by the graph rewrite above.
+ node->InsertInput(zone(), n.FeedbackVectorIndex(), slot);
+ ReplaceWithBuiltinCall(node, Builtins::kForInPrepare);
}
-void JSGenericLowering::LowerJSForInPrepare(Node* node) {
- UNREACHABLE(); // Eliminated in typed lowering.
+void JSGenericLowering::LowerJSForInNext(Node* node) {
+ JSForInNextNode n(node);
+ node->InsertInput(
+ zone(), 0,
+ jsgraph()->UintPtrConstant(n.Parameters().feedback().slot.ToInt()));
+ ReplaceWithBuiltinCall(node, Builtins::kForInNext);
}
void JSGenericLowering::LowerJSLoadMessage(Node* node) {
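
The two raw loads in LowerJSForInPrepare above read elements 0 and 1 out of the FixedArray returned by the ForInPrepare builtin; the "- kHeapObjectTag" term untags the object pointer, since V8 heap references carry a low tag bit. A hedged arithmetic sketch of the offsets involved (the constants assume a 64-bit build without pointer compression and are illustrative, not authoritative):

#include <cstdint>

// Assumed layout constants for illustration only.
constexpr int kTaggedSize = 8;     // one tagged slot: 64-bit, no compression
constexpr int kHeapObjectTag = 1;  // low bit set on heap object pointers
constexpr int kFixedArrayHeader = 2 * kTaggedSize;  // map + length fields

constexpr int OffsetOfElementAt(int index) {
  return kFixedArrayHeader + index * kTaggedSize;
}

// A load of element i from tagged pointer p reads machine address
// p + OffsetOfElementAt(i) - kHeapObjectTag, exactly as in the hunk above.
static_assert(OffsetOfElementAt(0) - kHeapObjectTag == 15, "cache_array slot");
static_assert(OffsetOfElementAt(1) - kHeapObjectTag == 23, "cache_length slot");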
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index a17b615b3b..e86bb594ba 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -34,6 +34,9 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
simplified_(simplified) {
}
+ JSGraph(const JSGraph&) = delete;
+ JSGraph& operator=(const JSGraph&) = delete;
+
// CEntryStubs are cached depending on the result size and other flags.
Node* CEntryStubConstant(int result_size,
SaveFPRegsMode save_doubles = kDontSaveFPRegs,
@@ -132,8 +135,6 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
// Internal helper to canonicalize a number constant.
Node* NumberConstant(double value);
-
- DISALLOW_COPY_AND_ASSIGN(JSGraph);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index e3e009bc45..f7193ec944 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -50,7 +50,7 @@ HEAP_BROKER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
#undef FORWARD_DECL
-// There are three kinds of ObjectData values.
+// There are five kinds of ObjectData values.
//
// kSmi: The underlying V8 object is a Smi and the data is an instance of the
// base class (ObjectData), i.e. it's basically just the handle. Because the
@@ -65,6 +65,12 @@ HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
// data is an instance of the base class (ObjectData), i.e. it basically
// carries no information other than the handle.
//
+// kNeverSerializedHeapObject: The underlying V8 object is a (potentially
+// mutable) HeapObject and the data is an instance of ObjectData. Its handle
+// must be persistent so that the GC can update it at a safepoint. Via this
+// handle, the object can be accessed concurrently with the main thread. To
+// use this, the flag --turbo-direct-heap-access must be on.
+//
// kUnserializedReadOnlyHeapObject: The underlying V8 object is a read-only
// HeapObject and the data is an instance of ObjectData. For
// ReadOnlyHeapObjects, it is OK to access heap even from off-thread, so
@@ -297,7 +303,9 @@ CallHandlerInfoData::CallHandlerInfoData(JSHeapBroker* broker,
ObjectData** storage,
Handle<CallHandlerInfo> object)
: HeapObjectData(broker, storage, object),
- callback_(v8::ToCData<Address>(object->callback())) {}
+ callback_(v8::ToCData<Address>(object->callback())) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+}
// These definitions are here in order to please the linker, which in debug mode
// sometimes requires static constants to be defined in .cc files.
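
The updated comment above now enumerates five ObjectData kinds. As a sketch, the corresponding enumeration might be spelled as follows; the two kinds not named verbatim in this excerpt, kSerializedHeapObject and kUnserializedHeapObject, are inferred from the surrounding descriptions:

enum class ObjectDataKind {
  kSmi,                             // just the handle; the value is the Smi
  kSerializedHeapObject,            // data copied at serialization time
  kUnserializedHeapObject,          // handle only, no further information
  kNeverSerializedHeapObject,       // persistent handle, concurrent access;
                                    // requires --turbo-direct-heap-access
  kUnserializedReadOnlyHeapObject,  // read-only heap, safe off-thread
};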
@@ -327,8 +335,14 @@ void FunctionTemplateInfoData::SerializeCallCode(JSHeapBroker* broker) {
TraceScope tracer(broker, this,
"FunctionTemplateInfoData::SerializeCallCode");
auto function_template_info = Handle<FunctionTemplateInfo>::cast(object());
- call_code_ = broker->GetOrCreateData(function_template_info->call_code());
- if (!call_code_->should_access_heap()) {
+ call_code_ =
+ broker->GetOrCreateData(function_template_info->call_code(kAcquireLoad));
+ if (call_code_->should_access_heap()) {
+ // TODO(mvstanton): When ObjectRef is in the never serialized list, this
+ // code can be removed.
+ broker->GetOrCreateData(
+ Handle<CallHandlerInfo>::cast(call_code_->object())->data());
+ } else {
call_code_->AsCallHandlerInfo()->Serialize(broker);
}
}
@@ -614,7 +628,7 @@ class JSBoundFunctionData : public JSObjectData {
JSBoundFunctionData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSBoundFunction> object);
- void Serialize(JSHeapBroker* broker);
+ bool Serialize(JSHeapBroker* broker);
bool serialized() const { return serialized_; }
ObjectData* bound_target_function() const { return bound_target_function_; }
@@ -650,6 +664,7 @@ class JSFunctionData : public JSObjectData {
ObjectData* initial_map() const { return initial_map_; }
ObjectData* prototype() const { return prototype_; }
ObjectData* shared() const { return shared_; }
+ ObjectData* raw_feedback_cell() const { return feedback_cell_; }
ObjectData* feedback_vector() const { return feedback_vector_; }
ObjectData* code() const { return code_; }
int initial_map_instance_size_with_min_slack() const {
@@ -672,6 +687,7 @@ class JSFunctionData : public JSObjectData {
ObjectData* prototype_ = nullptr;
ObjectData* shared_ = nullptr;
ObjectData* feedback_vector_ = nullptr;
+ ObjectData* feedback_cell_ = nullptr;
ObjectData* code_ = nullptr;
int initial_map_instance_size_with_min_slack_;
};
@@ -831,32 +847,44 @@ class StringData : public NameData {
// element access (s[i]). The first pair component is always less than
// {length_}. The second component is never nullptr.
ZoneVector<std::pair<uint32_t, ObjectData*>> chars_as_strings_;
-
- static constexpr int kMaxLengthForDoubleConversion = 23;
};
class SymbolData : public NameData {
public:
SymbolData(JSHeapBroker* broker, ObjectData** storage, Handle<Symbol> object)
- : NameData(broker, storage, object) {}
+ : NameData(broker, storage, object) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+ }
};
+namespace {
+
+// String to double helper without heap allocation.
+base::Optional<double> StringToDouble(Handle<String> object) {
+ const int kMaxLengthForDoubleConversion = 23;
+ String string = *object;
+ int length = string.length();
+ if (length <= kMaxLengthForDoubleConversion) {
+ const int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
+ uc16 buffer[kMaxLengthForDoubleConversion];
+ String::WriteToFlat(*object, buffer, 0, length);
+ Vector<const uc16> v(buffer, length);
+ return StringToDouble(v, flags);
+ }
+ return base::nullopt;
+}
+
+} // namespace
+
StringData::StringData(JSHeapBroker* broker, ObjectData** storage,
Handle<String> object)
: NameData(broker, storage, object),
length_(object->length()),
first_char_(length_ > 0 ? object->Get(0) : 0),
+ to_number_(StringToDouble(object)),
is_external_string_(object->IsExternalString()),
is_seq_string_(object->IsSeqString()),
- chars_as_strings_(broker->zone()) {
- if (length_ <= kMaxLengthForDoubleConversion) {
- const int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
- uc16 buffer[kMaxLengthForDoubleConversion];
- String::WriteToFlat(*object, buffer, 0, length_);
- Vector<const uc16> v(buffer, length_);
- to_number_ = StringToDouble(v, flags);
- }
-}
+ chars_as_strings_(broker->zone()) {}
class InternalizedStringData : public StringData {
public:
@@ -941,8 +969,8 @@ bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
}
// Check the in-object properties.
- Handle<DescriptorArray> descriptors(boilerplate->map().instance_descriptors(),
- isolate);
+ Handle<DescriptorArray> descriptors(
+ boilerplate->map().instance_descriptors(kRelaxedLoad), isolate);
for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.location() != kField) continue;
@@ -1232,7 +1260,7 @@ namespace {
bool IsReadOnlyLengthDescriptor(Isolate* isolate, Handle<Map> jsarray_map) {
DCHECK(!jsarray_map->is_dictionary_map());
Handle<Name> length_string = isolate->factory()->length_string();
- DescriptorArray descriptors = jsarray_map->instance_descriptors();
+ DescriptorArray descriptors = jsarray_map->instance_descriptors(kRelaxedLoad);
// TODO(jkummerow): We could skip the search and hardcode number == 0.
InternalIndex number = descriptors.Search(*length_string, *jsarray_map);
DCHECK(number.is_found());
@@ -1305,12 +1333,14 @@ void JSFunctionData::Serialize(JSHeapBroker* broker) {
DCHECK_NULL(initial_map_);
DCHECK_NULL(prototype_);
DCHECK_NULL(shared_);
+ DCHECK_NULL(feedback_cell_);
DCHECK_NULL(feedback_vector_);
DCHECK_NULL(code_);
context_ = broker->GetOrCreateData(function->context());
native_context_ = broker->GetOrCreateData(function->native_context());
shared_ = broker->GetOrCreateData(function->shared());
+ feedback_cell_ = broker->GetOrCreateData(function->raw_feedback_cell());
feedback_vector_ = has_feedback_vector()
? broker->GetOrCreateData(function->feedback_vector())
: nullptr;
@@ -1488,23 +1518,37 @@ JSBoundFunctionData::JSBoundFunctionData(JSHeapBroker* broker,
Handle<JSBoundFunction> object)
: JSObjectData(broker, storage, object) {}
-void JSBoundFunctionData::Serialize(JSHeapBroker* broker) {
- if (serialized_) return;
- serialized_ = true;
+bool JSBoundFunctionData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return true;
+ if (broker->StackHasOverflowed()) return false;
TraceScope tracer(broker, this, "JSBoundFunctionData::Serialize");
Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(object());
+ // We don't immediately set {serialized_} in order to correctly handle the
+ // case where a recursive call to this method reaches the stack limit.
+
DCHECK_NULL(bound_target_function_);
bound_target_function_ =
broker->GetOrCreateData(function->bound_target_function());
+ bool serialized_nested = true;
if (!bound_target_function_->should_access_heap()) {
if (bound_target_function_->IsJSBoundFunction()) {
- bound_target_function_->AsJSBoundFunction()->Serialize(broker);
+ serialized_nested =
+ bound_target_function_->AsJSBoundFunction()->Serialize(broker);
} else if (bound_target_function_->IsJSFunction()) {
bound_target_function_->AsJSFunction()->Serialize(broker);
}
}
+ if (!serialized_nested) {
+ // We couldn't serialize all nested bound functions due to stack
+ // overflow. Give up.
+ DCHECK(!serialized_);
+ bound_target_function_ = nullptr; // Reset to sync with serialized_.
+ return false;
+ }
+
+ serialized_ = true;
DCHECK_NULL(bound_arguments_);
bound_arguments_ = broker->GetOrCreateData(function->bound_arguments());
@@ -1514,6 +1558,8 @@ void JSBoundFunctionData::Serialize(JSHeapBroker* broker) {
DCHECK_NULL(bound_this_);
bound_this_ = broker->GetOrCreateData(function->bound_this());
+
+ return true;
}
JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
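
The rewritten Serialize() above defers setting serialized_ until every nested bound function has serialized, so a stack-limit bailout inside a recursive call leaves the object cleanly unserialized. A minimal standalone sketch of that pattern (StackHasOverflowed is assumed to consult the platform stack limit; all types are simplified):

bool StackHasOverflowed();  // assumed: returns true near the stack limit

struct BoundFunctionSketch {
  BoundFunctionSketch* bound_target = nullptr;  // possibly another bound fn
  bool serialized = false;

  bool Serialize() {
    if (serialized) return true;
    if (StackHasOverflowed()) return false;  // bail before recursing deeper
    // Do not set `serialized` yet: a nested call may still hit the limit.
    if (bound_target != nullptr && !bound_target->Serialize()) {
      return false;  // give up; this object stays unserialized
    }
    serialized = true;  // all nested targets done; now commit
    return true;
  }
};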
@@ -1697,17 +1743,17 @@ class ScopeInfoData : public HeapObjectData {
ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
Handle<ScopeInfo> object);
- int context_length() const { return context_length_; }
- bool has_outer_scope_info() const { return has_outer_scope_info_; }
- int flags() const { return flags_; }
+ int ContextLength() const { return context_length_; }
+ bool HasContextExtensionSlot() const { return has_context_extension_slot_; }
+ bool HasOuterScopeInfo() const { return has_outer_scope_info_; }
- ObjectData* outer_scope_info() const { return outer_scope_info_; }
+ ObjectData* OuterScopeInfo() const { return outer_scope_info_; }
void SerializeScopeInfoChain(JSHeapBroker* broker);
private:
int const context_length_;
+ bool const has_context_extension_slot_;
bool const has_outer_scope_info_;
- int const flags_;
// Only serialized via SerializeScopeInfoChain.
ObjectData* outer_scope_info_;
@@ -1717,9 +1763,11 @@ ScopeInfoData::ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
Handle<ScopeInfo> object)
: HeapObjectData(broker, storage, object),
context_length_(object->ContextLength()),
+ has_context_extension_slot_(object->HasContextExtensionSlot()),
has_outer_scope_info_(object->HasOuterScopeInfo()),
- flags_(object->Flags()),
- outer_scope_info_(nullptr) {}
+ outer_scope_info_(nullptr) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+}
void ScopeInfoData::SerializeScopeInfoChain(JSHeapBroker* broker) {
if (outer_scope_info_) return;
@@ -1739,6 +1787,9 @@ class SharedFunctionInfoData : public HeapObjectData {
int builtin_id() const { return builtin_id_; }
int context_header_size() const { return context_header_size_; }
ObjectData* GetBytecodeArray() const { return GetBytecodeArray_; }
+ SharedFunctionInfo::Inlineability GetInlineability() const {
+ return inlineability_;
+ }
void SerializeFunctionTemplateInfo(JSHeapBroker* broker);
ObjectData* scope_info() const { return scope_info_; }
void SerializeScopeInfoChain(JSHeapBroker* broker);
@@ -1762,11 +1813,12 @@ class SharedFunctionInfoData : public HeapObjectData {
private:
int const builtin_id_;
- int context_header_size_;
+ int const context_header_size_;
ObjectData* const GetBytecodeArray_;
#define DECL_MEMBER(type, name) type const name##_;
BROKER_SFI_FIELDS(DECL_MEMBER)
#undef DECL_MEMBER
+ SharedFunctionInfo::Inlineability const inlineability_;
ObjectData* function_template_info_;
ZoneMap<int, ObjectData*> template_objects_;
ObjectData* scope_info_;
@@ -1787,6 +1839,7 @@ SharedFunctionInfoData::SharedFunctionInfoData(
BROKER_SFI_FIELDS(INIT_MEMBER)
#undef INIT_MEMBER
,
+ inlineability_(object->GetInlineability()),
function_template_info_(nullptr),
template_objects_(broker->zone()),
scope_info_(nullptr) {
@@ -1798,7 +1851,7 @@ void SharedFunctionInfoData::SerializeFunctionTemplateInfo(
JSHeapBroker* broker) {
if (function_template_info_) return;
function_template_info_ = broker->GetOrCreateData(
- Handle<SharedFunctionInfo>::cast(object())->function_data());
+ Handle<SharedFunctionInfo>::cast(object())->function_data(kAcquireLoad));
}
void SharedFunctionInfoData::SerializeScopeInfoChain(JSHeapBroker* broker) {
@@ -2126,8 +2179,9 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
Handle<Map> map = Handle<Map>::cast(object());
if (instance_descriptors_ == nullptr) {
- instance_descriptors_ = broker->GetOrCreateData(map->instance_descriptors())
- ->AsDescriptorArray();
+ instance_descriptors_ =
+ broker->GetOrCreateData(map->instance_descriptors(kRelaxedLoad))
+ ->AsDescriptorArray();
}
ZoneMap<int, PropertyDescriptor>& contents =
@@ -2138,7 +2192,7 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
Isolate* const isolate = broker->isolate();
auto descriptors =
Handle<DescriptorArray>::cast(instance_descriptors_->object());
- CHECK_EQ(*descriptors, map->instance_descriptors());
+ CHECK_EQ(*descriptors, map->instance_descriptors(kRelaxedLoad));
PropertyDescriptor d;
d.key = broker->GetOrCreateData(descriptors->GetKey(descriptor_index));
@@ -2252,8 +2306,8 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
CHECK_EQ(inobject_fields_.size(), 0u);
// Check the in-object properties.
- Handle<DescriptorArray> descriptors(boilerplate->map().instance_descriptors(),
- isolate);
+ Handle<DescriptorArray> descriptors(
+ boilerplate->map().instance_descriptors(kRelaxedLoad), isolate);
for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.location() != kField) continue;
@@ -2388,7 +2442,7 @@ SourceTextModuleRef ContextRef::GetModule(SerializationPolicy policy) const {
JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
bool tracing_enabled, bool is_concurrent_inlining,
- bool is_native_context_independent)
+ CodeKind code_kind)
: isolate_(isolate),
zone_(broker_zone),
refs_(zone()->New<RefsMap>(kMinimalRefsBucketCount, AddressMatcher(),
@@ -2397,8 +2451,7 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
array_and_object_prototypes_(zone()),
tracing_enabled_(tracing_enabled),
is_concurrent_inlining_(is_concurrent_inlining),
- is_native_context_independent_(is_native_context_independent),
- local_heap_(base::nullopt),
+ code_kind_(code_kind),
feedback_(zone()),
bytecode_analyses_(zone()),
property_access_infos_(zone()),
@@ -2413,7 +2466,7 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
TRACE(this, "Constructing heap broker");
}
-JSHeapBroker::~JSHeapBroker() { DCHECK(!local_heap_); }
+JSHeapBroker::~JSHeapBroker() { DCHECK_NULL(local_isolate_); }
void JSHeapBroker::SetPersistentAndCopyCanonicalHandlesForTesting(
std::unique_ptr<PersistentHandles> persistent_handles,
@@ -2432,7 +2485,7 @@ void JSHeapBroker::CopyCanonicalHandlesForTesting(
for (auto it = it_scope.begin(); it != it_scope.end(); ++it) {
Address* entry = *it.entry();
Object key = it.key();
- canonical_handles_->Set(key, entry);
+ canonical_handles_->Insert(key, entry);
}
}
@@ -2443,20 +2496,24 @@ std::string JSHeapBroker::Trace() const {
return oss.str();
}
-void JSHeapBroker::InitializeLocalHeap(OptimizedCompilationInfo* info) {
- set_persistent_handles(info->DetachPersistentHandles());
+void JSHeapBroker::AttachLocalIsolate(OptimizedCompilationInfo* info,
+ LocalIsolate* local_isolate) {
set_canonical_handles(info->DetachCanonicalHandles());
- DCHECK(!local_heap_);
- local_heap_.emplace(isolate_->heap(), std::move(ph_));
+ DCHECK_NULL(local_isolate_);
+ local_isolate_ = local_isolate;
+ DCHECK_NOT_NULL(local_isolate_);
+ local_isolate_->heap()->AttachPersistentHandles(
+ info->DetachPersistentHandles());
}
-void JSHeapBroker::TearDownLocalHeap(OptimizedCompilationInfo* info) {
+void JSHeapBroker::DetachLocalIsolate(OptimizedCompilationInfo* info) {
DCHECK_NULL(ph_);
- DCHECK(local_heap_);
- ph_ = local_heap_->DetachPersistentHandles();
- local_heap_.reset();
+ DCHECK_NOT_NULL(local_isolate_);
+ std::unique_ptr<PersistentHandles> ph =
+ local_isolate_->heap()->DetachPersistentHandles();
+ local_isolate_ = nullptr;
info->set_canonical_handles(DetachCanonicalHandles());
- info->set_persistent_handles(DetachPersistentHandles());
+ info->set_persistent_handles(std::move(ph));
}
void JSHeapBroker::StopSerializing() {
@@ -3069,7 +3126,9 @@ PropertyDetails MapRef::GetPropertyDetails(
if (data_->should_access_heap()) {
AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
broker()->mode());
- return object()->instance_descriptors().GetDetails(descriptor_index);
+ return object()
+ ->instance_descriptors(kRelaxedLoad)
+ .GetDetails(descriptor_index);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
return descriptors->contents().at(descriptor_index.as_int()).details;
@@ -3081,10 +3140,10 @@ NameRef MapRef::GetPropertyKey(InternalIndex descriptor_index) const {
broker()->mode());
AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
broker()->mode());
- return NameRef(
- broker(),
- broker()->CanonicalPersistentHandle(
- object()->instance_descriptors().GetKey(descriptor_index)));
+ return NameRef(broker(), broker()->CanonicalPersistentHandle(
+ object()
+ ->instance_descriptors(kRelaxedLoad)
+ .GetKey(descriptor_index)));
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
return NameRef(broker(),
@@ -3124,9 +3183,10 @@ ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
broker()->mode());
AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
broker()->mode());
- Handle<FieldType> field_type(
- object()->instance_descriptors().GetFieldType(descriptor_index),
- broker()->isolate());
+ Handle<FieldType> field_type(object()
+ ->instance_descriptors(kRelaxedLoad)
+ .GetFieldType(descriptor_index),
+ broker()->isolate());
return ObjectRef(broker(), field_type);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
@@ -3163,10 +3223,7 @@ base::Optional<double> StringRef::ToNumber() {
broker()->mode());
AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
broker()->mode());
- AllowHeapAllocationIfNeeded allow_heap_allocation(data()->kind(),
- broker()->mode());
- int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
- return StringToDouble(broker()->isolate(), object(), flags);
+ return StringToDouble(object());
}
return data()->AsString()->to_number();
}
@@ -3302,8 +3359,17 @@ int BytecodeArrayRef::handler_table_size() const {
return BitField::decode(ObjectRef::data()->As##holder()->field()); \
}
-// Like IF_ACCESS_FROM_HEAP_C but we also allow direct heap access for
+// Like IF_ACCESS_FROM_HEAP[_C] but we also allow direct heap access for
// kSerialized only for methods that we identified to be safe.
+#define IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name) \
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) { \
+ AllowHandleAllocationIfNeeded handle_allocation( \
+ data_->kind(), broker()->mode(), FLAG_turbo_direct_heap_access); \
+ AllowHandleDereferenceIfNeeded allow_handle_dereference( \
+ data_->kind(), broker()->mode(), FLAG_turbo_direct_heap_access); \
+ return result##Ref(broker(), \
+ broker()->CanonicalPersistentHandle(object()->name())); \
+ }
#define IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name) \
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) { \
AllowHandleAllocationIfNeeded handle_allocation( \
@@ -3313,10 +3379,15 @@ int BytecodeArrayRef::handler_table_size() const {
return object()->name(); \
}
-// Like BIMODAL_ACCESSOR_C except that we force a direct heap access if
+// Like BIMODAL_ACCESSOR[_C] except that we force a direct heap access if
// FLAG_turbo_direct_heap_access is true (even for kSerialized). This is because
// we identified the method to be safe to use direct heap access, but the
// holder##Data class still needs to be serialized.
+#define BIMODAL_ACCESSOR_WITH_FLAG(holder, result, name) \
+ result##Ref holder##Ref::name() const { \
+ IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name); \
+ return result##Ref(broker(), ObjectRef::data()->As##holder()->name()); \
+ }
#define BIMODAL_ACCESSOR_WITH_FLAG_C(holder, result, name) \
result holder##Ref::name() const { \
IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name); \
@@ -3360,6 +3431,7 @@ BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
BIMODAL_ACCESSOR(JSFunction, Map, initial_map)
BIMODAL_ACCESSOR(JSFunction, Object, prototype)
BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
+BIMODAL_ACCESSOR(JSFunction, FeedbackCell, raw_feedback_cell)
BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector)
BIMODAL_ACCESSOR(JSFunction, Code, code)
@@ -3411,8 +3483,8 @@ BIMODAL_ACCESSOR_C(PropertyCell, PropertyDetails, property_details)
base::Optional<CallHandlerInfoRef> FunctionTemplateInfoRef::call_code() const {
if (data_->should_access_heap()) {
- return CallHandlerInfoRef(
- broker(), broker()->CanonicalPersistentHandle(object()->call_code()));
+ return CallHandlerInfoRef(broker(), broker()->CanonicalPersistentHandle(
+ object()->call_code(kAcquireLoad)));
}
ObjectData* call_code = data()->AsFunctionTemplateInfo()->call_code();
if (!call_code) return base::nullopt;
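Note: call_code (and function_data further down) are now read through an explicit kAcquireLoad tag, since a background compiler thread may observe these fields while the main thread publishes them. A hedged sketch of the tagged-accessor convention this relies on; the declarations are illustrative, not copied from the headers:

    // The acquire load pairs with a release store on the writing thread.
    Object call_code() const;                // plain load, main thread only
    Object call_code(AcquireLoadTag) const;  // safe for concurrent readers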
@@ -3517,12 +3589,19 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
BIMODAL_ACCESSOR(CallHandlerInfo, Object, data)
+BIMODAL_ACCESSOR_C(ScopeInfo, int, ContextLength)
+BIMODAL_ACCESSOR_C(ScopeInfo, bool, HasContextExtensionSlot)
+BIMODAL_ACCESSOR_C(ScopeInfo, bool, HasOuterScopeInfo)
+BIMODAL_ACCESSOR(ScopeInfo, ScopeInfo, OuterScopeInfo)
+
BIMODAL_ACCESSOR_C(SharedFunctionInfo, int, builtin_id)
BIMODAL_ACCESSOR(SharedFunctionInfo, BytecodeArray, GetBytecodeArray)
#define DEF_SFI_ACCESSOR(type, name) \
- BIMODAL_ACCESSOR_C(SharedFunctionInfo, type, name)
+ BIMODAL_ACCESSOR_WITH_FLAG_C(SharedFunctionInfo, type, name)
BROKER_SFI_FIELDS(DEF_SFI_ACCESSOR)
#undef DEF_SFI_ACCESSOR
+BIMODAL_ACCESSOR_C(SharedFunctionInfo, SharedFunctionInfo::Inlineability,
+ GetInlineability)
BIMODAL_ACCESSOR_C(String, int, length)
@@ -3534,7 +3613,7 @@ base::Optional<ObjectRef> MapRef::GetStrongValue(
AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
broker()->mode());
MaybeObject value =
- object()->instance_descriptors().GetValue(descriptor_index);
+ object()->instance_descriptors(kRelaxedLoad).GetValue(descriptor_index);
HeapObject object;
if (value.GetHeapObjectIfStrong(&object)) {
return ObjectRef(broker(), broker()->CanonicalPersistentHandle((object)));
@@ -3616,37 +3695,6 @@ int MapRef::GetInObjectProperties() const {
return data()->AsMap()->in_object_properties();
}
-int ScopeInfoRef::ContextLength() const {
- IF_ACCESS_FROM_HEAP_C(ContextLength);
- return data()->AsScopeInfo()->context_length();
-}
-
-int ScopeInfoRef::Flags() const {
- IF_ACCESS_FROM_HEAP_C(Flags);
- return data()->AsScopeInfo()->flags();
-}
-
-bool ScopeInfoRef::HasContextExtension() const {
- return ScopeInfo::HasContextExtensionSlotBit::decode(Flags());
-}
-
-bool ScopeInfoRef::HasOuterScopeInfo() const {
- IF_ACCESS_FROM_HEAP_C(HasOuterScopeInfo);
- return data()->AsScopeInfo()->has_outer_scope_info();
-}
-
-ScopeInfoRef ScopeInfoRef::OuterScopeInfo() const {
- if (data_->should_access_heap()) {
- AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
- broker()->mode());
- return ScopeInfoRef(broker(), broker()->CanonicalPersistentHandle(
- object()->OuterScopeInfo()));
- }
- return ScopeInfoRef(broker(), data()->AsScopeInfo()->outer_scope_info());
-}
-
void ScopeInfoRef::SerializeScopeInfoChain() {
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
@@ -3810,19 +3858,10 @@ base::Optional<ObjectRef> ObjectRef::GetOwnConstantElement(
uint32_t index, SerializationPolicy policy) const {
if (!(IsJSObject() || IsString())) return base::nullopt;
if (data_->should_access_heap()) {
- // TODO(neis): Once the CHECK_NE below is eliminated, i.e. once we can
- // safely read from the background thread, the special branch for read-only
- // objects can be removed as well.
- if (data_->kind() == ObjectDataKind::kUnserializedReadOnlyHeapObject) {
- DCHECK(IsString());
- // TODO(mythria): For ReadOnly strings, currently we cannot access data
- // from heap without creating handles since we use LookupIterator. We
- // should have a custom implementation for read only strings that doesn't
- // create handles. Till then it is OK to disable this optimization since
- // this only impacts keyed accesses on read only strings.
- return base::nullopt;
- }
- CHECK_NE(data_->kind(), ObjectDataKind::kNeverSerializedHeapObject);
+ // TODO(solanes, neis, v8:7790, v8:11012): Re-enable this optimization for
+ // concurrent inlining when we have the infrastructure to safely do so.
+ if (broker()->is_concurrent_inlining() && IsString()) return base::nullopt;
+ CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject);
return GetOwnElementFromHeap(broker(), object(), index, true);
}
ObjectData* element = nullptr;
@@ -4264,8 +4303,8 @@ SharedFunctionInfoRef::function_template_info() const {
if (data_->should_access_heap()) {
if (object()->IsApiFunction()) {
return FunctionTemplateInfoRef(
- broker(),
- broker()->CanonicalPersistentHandle(object()->function_data()));
+ broker(), broker()->CanonicalPersistentHandle(
+ object()->function_data(kAcquireLoad)));
}
return base::nullopt;
}
@@ -4361,10 +4400,10 @@ bool JSTypedArrayRef::serialized() const {
return data()->AsJSTypedArray()->serialized();
}
-void JSBoundFunctionRef::Serialize() {
- if (data_->should_access_heap()) return;
+bool JSBoundFunctionRef::Serialize() {
+ if (data_->should_access_heap()) return true;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSBoundFunction()->Serialize(broker());
+ return data()->AsJSBoundFunction()->Serialize(broker());
}
void PropertyCellRef::Serialize() {
@@ -4466,7 +4505,6 @@ GlobalAccessFeedback::GlobalAccessFeedback(PropertyCellRef cell,
GlobalAccessFeedback::GlobalAccessFeedback(FeedbackSlotKind slot_kind)
: ProcessedFeedback(kGlobalAccess, slot_kind),
- cell_or_context_(base::nullopt),
index_and_immutable_(0 /* doesn't matter */) {
DCHECK(IsGlobalICKind(slot_kind));
}
@@ -4599,11 +4637,11 @@ bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const {
MinimorphicLoadPropertyAccessFeedback::MinimorphicLoadPropertyAccessFeedback(
NameRef const& name, FeedbackSlotKind slot_kind, Handle<Object> handler,
- MaybeHandle<Map> maybe_map, bool has_migration_target_maps)
+ ZoneVector<Handle<Map>> const& maps, bool has_migration_target_maps)
: ProcessedFeedback(kMinimorphicPropertyAccess, slot_kind),
name_(name),
handler_(handler),
- maybe_map_(maybe_map),
+ maps_(maps),
has_migration_target_maps_(has_migration_target_maps) {
DCHECK(IsLoadICKind(slot_kind));
}
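Note: minimorphic load feedback previously recorded at most one map (a MaybeHandle<Map>, filled only in the monomorphic case); it now snapshots the full map list into broker zone memory so the dynamic map check can also cover polymorphic sites. Sketch of the construction site as changed further down in ReadFeedbackForPropertyAccess:

    // Sketch: every feedback map is copied into a broker-zone vector.
    return *zone()->New<MinimorphicLoadPropertyAccessFeedback>(
        *name, kind, handler.object(),
        ZoneVector<Handle<Map>>(maps.begin(), maps.end(), zone()),
        HasMigrationTargets(maps));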
@@ -4645,14 +4683,15 @@ FeedbackSlotKind JSHeapBroker::GetFeedbackSlotKind(
ProcessedFeedback const& processed = GetFeedback(source);
return processed.slot_kind();
}
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
return nexus.kind();
}
bool JSHeapBroker::FeedbackIsInsufficient(FeedbackSource const& source) const {
- return is_concurrent_inlining_
- ? GetFeedback(source).IsInsufficient()
- : FeedbackNexus(source.vector, source.slot).IsUninitialized();
+ return is_concurrent_inlining_ ? GetFeedback(source).IsInsufficient()
+ : FeedbackNexus(source.vector, source.slot,
+ feedback_nexus_config())
+ .IsUninitialized();
}
namespace {
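Note: every FeedbackNexus the broker constructs now takes an explicit NexusConfig (see feedback_nexus_config() in the header change below). Today this is always the main-thread configuration; the TODO there anticipates a background-thread variant tied to the LocalHeap. The recurring pattern in the hunks that follow:

    // Sketch: the nexus is built with an explicit thread configuration
    // instead of implicitly assuming the main thread.
    FeedbackNexus nexus(source.vector, source.slot,
                        NexusConfig::FromMainThread(isolate()));
    if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());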
@@ -4678,9 +4717,11 @@ void FilterRelevantReceiverMaps(Isolate* isolate, MapHandles* maps) {
MaybeObjectHandle TryGetMinimorphicHandler(
std::vector<MapAndHandler> const& maps_and_handlers, FeedbackSlotKind kind,
- Handle<NativeContext> native_context) {
- if (!FLAG_dynamic_map_checks || !IsLoadICKind(kind))
+ Handle<NativeContext> native_context, bool is_turboprop) {
+ if (!is_turboprop || !FLAG_turboprop_dynamic_map_checks ||
+ !IsLoadICKind(kind)) {
return MaybeObjectHandle();
+ }
// Don't use dynamic map checks when loading properties from Array.prototype.
// Using dynamic map checks prevents constant folding and hence does not
@@ -4737,7 +4778,7 @@ const ProcessedFeedback& JSHeapBroker::NewInsufficientFeedback(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
FeedbackSource const& source, AccessMode mode,
base::Optional<NameRef> static_name) {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
FeedbackSlotKind kind = nexus.kind();
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(kind);
@@ -4751,15 +4792,13 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
base::Optional<NameRef> name =
static_name.has_value() ? static_name : GetNameFeedback(nexus);
MaybeObjectHandle handler = TryGetMinimorphicHandler(
- maps_and_handlers, kind, target_native_context().object());
+ maps_and_handlers, kind, target_native_context().object(),
+ is_turboprop());
if (!handler.is_null()) {
- MaybeHandle<Map> maybe_map;
- if (nexus.ic_state() == MONOMORPHIC) {
- DCHECK_EQ(maps.size(), 1);
- maybe_map = maps[0];
- }
return *zone()->New<MinimorphicLoadPropertyAccessFeedback>(
- *name, kind, handler.object(), maybe_map, HasMigrationTargets(maps));
+ *name, kind, handler.object(),
+ ZoneVector<Handle<Map>>(maps.begin(), maps.end(), zone()),
+ HasMigrationTargets(maps));
}
FilterRelevantReceiverMaps(isolate(), &maps);
@@ -4839,7 +4878,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForGlobalAccess(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForBinaryOperation(
FeedbackSource const& source) const {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
BinaryOperationHint hint = nexus.GetBinaryOperationFeedback();
DCHECK_NE(hint, BinaryOperationHint::kNone); // Not uninitialized.
@@ -4848,7 +4887,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForBinaryOperation(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCompareOperation(
FeedbackSource const& source) const {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
CompareOperationHint hint = nexus.GetCompareOperationFeedback();
DCHECK_NE(hint, CompareOperationHint::kNone); // Not uninitialized.
@@ -4857,7 +4896,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCompareOperation(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForForIn(
FeedbackSource const& source) const {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
ForInHint hint = nexus.GetForInFeedback();
DCHECK_NE(hint, ForInHint::kNone); // Not uninitialized.
@@ -4866,7 +4905,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForForIn(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForInstanceOf(
FeedbackSource const& source) {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
base::Optional<JSObjectRef> optional_constructor;
@@ -4882,7 +4921,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForInstanceOf(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForArrayOrObjectLiteral(
FeedbackSource const& source) {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
HeapObject object;
@@ -4900,7 +4939,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForArrayOrObjectLiteral(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForRegExpLiteral(
FeedbackSource const& source) {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
HeapObject object;
@@ -4915,7 +4954,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForRegExpLiteral(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForTemplateObject(
FeedbackSource const& source) {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
HeapObject object;
@@ -4929,7 +4968,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForTemplateObject(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall(
FeedbackSource const& source) {
- FeedbackNexus nexus(source.vector, source.slot);
+ FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(nexus.kind());
base::Optional<HeapObjectRef> target_ref;
@@ -5336,6 +5375,14 @@ BytecodeAnalysis const& JSHeapBroker::GetBytecodeAnalysis(
return *analysis;
}
+bool JSHeapBroker::StackHasOverflowed() const {
+ DCHECK_IMPLIES(local_isolate_ == nullptr,
+ ThreadId::Current() == isolate_->thread_id());
+ return (local_isolate_ != nullptr)
+ ? StackLimitCheck::HasOverflowed(local_isolate_)
+ : StackLimitCheck(isolate_).HasOverflowed();
+}
+
OffHeapBytecodeArray::OffHeapBytecodeArray(BytecodeArrayRef bytecode_array)
: array_(bytecode_array) {}
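Note: StackHasOverflowed gives compiler phases one entry point that checks the stack limit of whichever thread is actually running: the LocalIsolate limit when a background isolate is attached, otherwise the main isolate's. A hedged usage sketch; the bail-out site is illustrative:

    // Sketch: deep recursion in a phase polls the broker rather than
    // picking a StackLimitCheck variant itself.
    if (broker->StackHasOverflowed()) {
      return NoChange();  // hypothetical: abandon the reduction gracefully
    }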
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index d2bfbace26..a9be949566 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -14,10 +14,12 @@
#include "src/compiler/processed-feedback.h"
#include "src/compiler/refs-map.h"
#include "src/compiler/serializer-hints.h"
+#include "src/execution/local-isolate.h"
#include "src/handles/handles.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/local-heap.h"
#include "src/interpreter/bytecode-array-accessor.h"
+#include "src/objects/code-kind.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/function-kind.h"
#include "src/objects/objects.h"
@@ -32,6 +34,7 @@ namespace compiler {
class BytecodeAnalysis;
class ObjectRef;
+
std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
#define TRACE_BROKER(broker, x) \
@@ -78,13 +81,13 @@ struct PropertyAccessTarget {
class V8_EXPORT_PRIVATE JSHeapBroker {
public:
JSHeapBroker(Isolate* isolate, Zone* broker_zone, bool tracing_enabled,
- bool is_concurrent_inlining, bool is_native_context_independent);
+ bool is_concurrent_inlining, CodeKind code_kind);
// For use only in tests, sets default values for some arguments. Avoids
// churn when new flags are added.
JSHeapBroker(Isolate* isolate, Zone* broker_zone)
: JSHeapBroker(isolate, broker_zone, FLAG_trace_heap_broker, false,
- false) {}
+ CodeKind::TURBOFAN) {}
~JSHeapBroker();
@@ -102,7 +105,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
bool tracing_enabled() const { return tracing_enabled_; }
bool is_concurrent_inlining() const { return is_concurrent_inlining_; }
bool is_native_context_independent() const {
- return is_native_context_independent_;
+ return code_kind_ == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
}
bool generate_full_feedback_collection() const {
// NCI code currently collects full feedback.
@@ -110,19 +113,33 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
CollectFeedbackInGenericLowering());
return is_native_context_independent();
}
+ bool is_turboprop() const { return code_kind_ == CodeKind::TURBOPROP; }
+
+ NexusConfig feedback_nexus_config() const {
+ // TODO(mvstanton): when the broker gathers feedback on the background
+ // thread, this should return a local NexusConfig object which points
+ // to the associated LocalHeap.
+ return NexusConfig::FromMainThread(isolate());
+ }
enum BrokerMode { kDisabled, kSerializing, kSerialized, kRetired };
BrokerMode mode() const { return mode_; }
- // Initialize the local heap with the persistent and canonical handles
- // provided by {info}.
- void InitializeLocalHeap(OptimizedCompilationInfo* info);
- // Tear down the local heap and pass the persistent and canonical handles
- // provided back to {info}. {info} is responsible for disposing of them.
- void TearDownLocalHeap(OptimizedCompilationInfo* info);
+
void StopSerializing();
void Retire();
bool SerializingAllowed() const;
+ // Remember the local isolate and initialize its local heap with the
+ // persistent and canonical handles provided by {info}.
+ void AttachLocalIsolate(OptimizedCompilationInfo* info,
+ LocalIsolate* local_isolate);
+ // Forget about the local isolate and pass the persistent and canonical
+ // handles provided back to {info}. {info} is responsible for disposing of
+ // them.
+ void DetachLocalIsolate(OptimizedCompilationInfo* info);
+
+ bool StackHasOverflowed() const;
+
#ifdef DEBUG
void PrintRefsAnalysis() const;
#endif // DEBUG
@@ -225,9 +242,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
bool IsSerializedForCompilation(const SharedFunctionInfoRef& shared,
const FeedbackVectorRef& feedback) const;
- LocalHeap* local_heap() {
- return local_heap_.has_value() ? &(*local_heap_) : nullptr;
- }
+ LocalIsolate* local_isolate() const { return local_isolate_; }
// Return the corresponding canonical persistent handle for {object}. Create
// one if it does not exist.
@@ -248,13 +263,14 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
}
Object obj(address);
- Address** entry = canonical_handles_->Get(obj);
- if (*entry == nullptr) {
+ auto find_result = canonical_handles_->FindOrInsert(obj);
+ if (!find_result.already_exists) {
// Allocate new PersistentHandle if one wasn't created before.
- DCHECK(local_heap_);
- *entry = local_heap_->NewPersistentHandle(obj).location();
+ DCHECK_NOT_NULL(local_isolate());
+ *find_result.entry =
+ local_isolate()->heap()->NewPersistentHandle(obj).location();
}
- return Handle<T>(*entry);
+ return Handle<T>(*find_result.entry);
} else {
return Handle<T>(object, isolate());
}
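Note: CanonicalHandlesMap's Get/Set pair is replaced by FindOrInsert, which probes and reserves the slot in a single step, so the backing persistent handle is allocated exactly once per object. The idiom, using only the result fields the patch shows (already_exists, entry):

    // Sketch of the find-or-insert idiom:
    auto find_result = canonical_handles_->FindOrInsert(obj);
    if (!find_result.already_exists) {
      // First sighting of {obj}: allocate its persistent handle once.
      *find_result.entry =
          local_isolate()->heap()->NewPersistentHandle(obj).location();
    }
    return Handle<T>(*find_result.entry);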
@@ -357,9 +373,9 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
BrokerMode mode_ = kDisabled;
bool const tracing_enabled_;
bool const is_concurrent_inlining_;
- bool const is_native_context_independent_;
+ CodeKind const code_kind_;
std::unique_ptr<PersistentHandles> ph_;
- base::Optional<LocalHeap> local_heap_;
+ LocalIsolate* local_isolate_ = nullptr;
std::unique_ptr<CanonicalHandlesMap> canonical_handles_;
unsigned trace_indentation_ = 0;
PerIsolateCompilerCache* compiler_cache_ = nullptr;
@@ -451,17 +467,19 @@ class OffHeapBytecodeArray final : public interpreter::AbstractBytecodeArray {
// Scope that unparks the LocalHeap, if:
// a) We have a JSHeapBroker,
-// b) Said JSHeapBroker has a LocalHeap, and
-// c) Said LocalHeap has been parked.
+// b) Said JSHeapBroker has a LocalIsolate and thus a LocalHeap,
+// c) Said LocalHeap has been parked and
+// d) The given condition evaluates to true.
// Used, for example, when printing the graph with --trace-turbo with a
// previously parked LocalHeap.
class UnparkedScopeIfNeeded {
public:
- explicit UnparkedScopeIfNeeded(JSHeapBroker* broker) {
- if (broker != nullptr) {
- LocalHeap* local_heap = broker->local_heap();
- if (local_heap != nullptr && local_heap->IsParked()) {
- unparked_scope.emplace(local_heap);
+ explicit UnparkedScopeIfNeeded(JSHeapBroker* broker,
+ bool extra_condition = true) {
+ if (broker != nullptr && extra_condition) {
+ LocalIsolate* local_isolate = broker->local_isolate();
+ if (local_isolate != nullptr && local_isolate->heap()->IsParked()) {
+ unparked_scope.emplace(local_isolate->heap());
}
}
}
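Note: the new extra_condition parameter lets callers construct the scope unconditionally and pay the unparking cost only when the condition holds. A hedged usage sketch; the flag is an assumption chosen to match the --trace-turbo example in the comment above:

    // Sketch: unpark the background LocalHeap only if we will print.
    UnparkedScopeIfNeeded scope(broker, FLAG_trace_turbo_graph);
    // ... printing code that may dereference handles ...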
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
index 837369ec55..4ad4181b59 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -172,10 +172,12 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
break;
}
case IrOpcode::kJSLoadNamedFromSuper: {
- // TODO(marja, v8:9237): Process feedback once it's added to the byte
- // code.
NamedAccess const& p = NamedAccessOf(node->op());
NameRef name(broker(), p.name());
+ if (p.feedback().IsValid()) {
+ broker()->ProcessFeedbackForPropertyAccess(p.feedback(),
+ AccessMode::kLoad, name);
+ }
break;
}
case IrOpcode::kJSStoreNamed: {
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 31f8298757..33846d1ac3 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -77,7 +77,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
out.node = node;
HeapObjectMatcher m(callee);
- if (m.HasValue() && m.Ref(broker()).IsJSFunction()) {
+ if (m.HasResolvedValue() && m.Ref(broker()).IsJSFunction()) {
out.functions[0] = m.Ref(broker()).AsJSFunction();
JSFunctionRef function = out.functions[0].value();
if (CanConsiderForInlining(broker(), function)) {
@@ -94,7 +94,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
}
for (int n = 0; n < value_input_count; ++n) {
HeapObjectMatcher m(callee->InputAt(n));
- if (!m.HasValue() || !m.Ref(broker()).IsJSFunction()) {
+ if (!m.HasResolvedValue() || !m.Ref(broker()).IsJSFunction()) {
out.num_functions = 0;
return out;
}
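Note: this file and several below pick up a mechanical matcher rename: HasValue() becomes HasResolvedValue() and Value() becomes ResolvedValue(), making explicit that the matcher resolved a compile-time constant. The resulting pattern:

    // Sketch: constant-callee test with the renamed matcher API.
    HeapObjectMatcher m(callee);
    if (m.HasResolvedValue() && m.Ref(broker()).IsJSFunction()) {
      JSFunctionRef function = m.Ref(broker()).AsJSFunction();
      // ... {function} is a compile-time constant from here on ...
    }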
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 74e9d2c012..30f0a01d52 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -287,7 +287,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
// calls whenever the target is a constant function object, as follows:
// - JSCall(target:constant, receiver, args..., vector)
// - JSConstruct(target:constant, new.target, args..., vector)
- if (match.HasValue() && match.Ref(broker()).IsJSFunction()) {
+ if (match.HasResolvedValue() && match.Ref(broker()).IsJSFunction()) {
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// The function might have not been called yet.
@@ -332,20 +332,20 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
// following static information is provided:
// - context : The context (as SSA value) bound by the call target.
// - feedback_vector : The target is guaranteed to use this feedback vector.
-FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
- Node** context_out) {
+FeedbackCellRef JSInliner::DetermineCallContext(Node* node,
+ Node** context_out) {
DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
Node* target = node->InputAt(JSCallOrConstructNode::TargetIndex());
HeapObjectMatcher match(target);
- if (match.HasValue() && match.Ref(broker()).IsJSFunction()) {
+ if (match.HasResolvedValue() && match.Ref(broker()).IsJSFunction()) {
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// This was already ensured by DetermineCallTarget
CHECK(function.has_feedback_vector());
// The inlinee specializes to the context from the JSFunction object.
*context_out = jsgraph()->Constant(function.context());
- return function.feedback_vector();
+ return function.raw_feedback_cell();
}
if (match.IsJSCreateClosure()) {
@@ -356,7 +356,7 @@ FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
// The inlinee uses the locally provided context at instantiation.
*context_out = NodeProperties::GetContextInput(match.node());
- return cell.value().AsFeedbackVector();
+ return cell;
} else if (match.IsCheckClosure()) {
FeedbackCellRef cell(broker(), FeedbackCellOf(match.op()));
@@ -367,7 +367,7 @@ FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
match.node(), effect, control);
NodeProperties::ReplaceEffectInput(node, effect);
- return cell.value().AsFeedbackVector();
+ return cell;
}
// Must succeed.
@@ -438,8 +438,9 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
: ""));
// Determine the target's feedback vector and its context.
Node* context;
- FeedbackVectorRef feedback_vector = DetermineCallContext(node, &context);
- CHECK(broker()->IsSerializedForCompilation(*shared_info, feedback_vector));
+ FeedbackCellRef feedback_cell = DetermineCallContext(node, &context);
+ CHECK(broker()->IsSerializedForCompilation(
+ *shared_info, feedback_cell.value().AsFeedbackVector()));
// ----------------------------------------------------------------
// After this point, we've made a decision to inline this function.
@@ -468,7 +469,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
}
{
CallFrequency frequency = call.frequency();
- BuildGraphFromBytecode(broker(), zone(), *shared_info, feedback_vector,
+ BuildGraphFromBytecode(broker(), zone(), *shared_info, feedback_cell,
BailoutId::None(), jsgraph(), frequency,
source_positions_, inlining_id, info_->code_kind(),
flags, &info_->tick_counter());
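Note: DetermineCallContext now hands back the FeedbackCellRef itself instead of the FeedbackVectorRef inside it, and BuildGraphFromBytecode accepts the cell. Where a vector is still needed (the serialization CHECK above), it is recovered through the cell:

    // Sketch: one level of indirection added; the vector is derived.
    FeedbackCellRef feedback_cell = DetermineCallContext(node, &context);
    FeedbackVectorRef vector = feedback_cell.value().AsFeedbackVector();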
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index f60d53dbc9..0648c86f62 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -59,7 +59,7 @@ class JSInliner final : public AdvancedReducer {
SourcePositionTable* const source_positions_;
base::Optional<SharedFunctionInfoRef> DetermineCallTarget(Node* node);
- FeedbackVectorRef DetermineCallContext(Node* node, Node** context_out);
+ FeedbackCellRef DetermineCallContext(Node* node, Node** context_out);
Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
int parameter_count, BailoutId bailout_id,
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 03ac064c4e..653f8ff114 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -323,7 +323,7 @@ Reduction JSIntrinsicLowering::ReduceToObject(Node* node) {
Reduction JSIntrinsicLowering::ReduceToString(Node* node) {
// ToString is unnecessary if the input is a string.
HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
- if (m.HasValue() && m.Ref(broker()).IsString()) {
+ if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
ReplaceWithValue(node, m.node());
return Replace(m.node());
}
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 2a4524f386..a0115df0db 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -102,6 +102,8 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
return ReduceJSStoreGlobal(node);
case IrOpcode::kJSLoadNamed:
return ReduceJSLoadNamed(node);
+ case IrOpcode::kJSLoadNamedFromSuper:
+ return ReduceJSLoadNamedFromSuper(node);
case IrOpcode::kJSStoreNamed:
return ReduceJSStoreNamed(node);
case IrOpcode::kJSHasProperty:
@@ -136,13 +138,13 @@ base::Optional<size_t> JSNativeContextSpecialization::GetMaxStringLength(
}
HeapObjectMatcher matcher(node);
- if (matcher.HasValue() && matcher.Ref(broker).IsString()) {
+ if (matcher.HasResolvedValue() && matcher.Ref(broker).IsString()) {
StringRef input = matcher.Ref(broker).AsString();
return input.length();
}
NumberMatcher number_matcher(node);
- if (number_matcher.HasValue()) {
+ if (number_matcher.HasResolvedValue()) {
return kBase10MaximalLength + 1;
}
@@ -157,7 +159,7 @@ Reduction JSNativeContextSpecialization::ReduceJSToString(Node* node) {
Reduction reduction;
HeapObjectMatcher matcher(input);
- if (matcher.HasValue() && matcher.Ref(broker()).IsString()) {
+ if (matcher.HasResolvedValue() && matcher.Ref(broker()).IsString()) {
reduction = Changed(input); // JSToString(x:string) => x
ReplaceWithValue(node, reduction.replacement());
return reduction;
@@ -168,9 +170,9 @@ Reduction JSNativeContextSpecialization::ReduceJSToString(Node* node) {
// so alternative approach should be designed if this causes performance
// regressions and the stronger optimization should be re-implemented.
NumberMatcher number_matcher(input);
- if (number_matcher.HasValue()) {
- const StringConstantBase* base =
- shared_zone()->New<NumberToStringConstant>(number_matcher.Value());
+ if (number_matcher.HasResolvedValue()) {
+ const StringConstantBase* base = shared_zone()->New<NumberToStringConstant>(
+ number_matcher.ResolvedValue());
reduction =
Replace(graph()->NewNode(common()->DelayedStringConstant(base)));
ReplaceWithValue(node, reduction.replacement());
@@ -186,11 +188,12 @@ JSNativeContextSpecialization::CreateDelayedStringConstant(Node* node) {
return StringConstantBaseOf(node->op());
} else {
NumberMatcher number_matcher(node);
- if (number_matcher.HasValue()) {
- return shared_zone()->New<NumberToStringConstant>(number_matcher.Value());
+ if (number_matcher.HasResolvedValue()) {
+ return shared_zone()->New<NumberToStringConstant>(
+ number_matcher.ResolvedValue());
} else {
HeapObjectMatcher matcher(node);
- if (matcher.HasValue() && matcher.Ref(broker()).IsString()) {
+ if (matcher.HasResolvedValue() && matcher.Ref(broker()).IsString()) {
StringRef s = matcher.Ref(broker()).AsString();
return shared_zone()->New<StringLiteral>(
s.object(), static_cast<size_t>(s.length()));
@@ -208,7 +211,7 @@ bool IsStringConstant(JSHeapBroker* broker, Node* node) {
}
HeapObjectMatcher matcher(node);
- return matcher.HasValue() && matcher.Ref(broker).IsString();
+ return matcher.HasResolvedValue() && matcher.Ref(broker).IsString();
}
} // namespace
@@ -352,20 +355,21 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
// Check if the input is a known JSFunction.
HeapObjectMatcher m(constructor);
- if (!m.HasValue()) return NoChange();
+ if (!m.HasResolvedValue() || !m.Ref(broker()).IsJSFunction()) {
+ return NoChange();
+ }
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
MapRef function_map = function.map();
if (should_disallow_heap_access() && !function_map.serialized_prototype()) {
TRACE_BROKER_MISSING(broker(), "data for map " << function_map);
return NoChange();
}
- ObjectRef function_prototype = function_map.prototype();
+ HeapObjectRef function_prototype = function_map.prototype();
// We can constant-fold the super constructor access if the
// {function}s map is stable, i.e. we can use a code dependency
// to guard against [[Prototype]] changes of {function}.
- if (function_map.is_stable() && function_prototype.IsHeapObject() &&
- function_prototype.AsHeapObject().map().is_constructor()) {
+ if (function_map.is_stable()) {
dependencies()->DependOnStableMap(function_map);
Node* value = jsgraph()->Constant(function_prototype);
ReplaceWithValue(node, value);
@@ -389,7 +393,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// we have feedback from the InstanceOfIC.
Handle<JSObject> receiver;
HeapObjectMatcher m(constructor);
- if (m.HasValue() && m.Ref(broker()).IsJSObject()) {
+ if (m.HasResolvedValue() && m.Ref(broker()).IsJSObject()) {
receiver = m.Ref(broker()).AsJSObject().object();
} else if (p.feedback().IsValid()) {
ProcessedFeedback const& feedback =
@@ -430,12 +434,12 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// takes over, but that requires the constructor to be callable.
if (!receiver_map.is_callable()) return NoChange();
- dependencies()->DependOnStablePrototypeChains(access_info.receiver_maps(),
- kStartAtPrototype);
+ dependencies()->DependOnStablePrototypeChains(
+ access_info.lookup_start_object_maps(), kStartAtPrototype);
// Monomorphic property access.
access_builder.BuildCheckMaps(constructor, &effect, control,
- access_info.receiver_maps());
+ access_info.lookup_start_object_maps());
// Lower to OrdinaryHasInstance(C, O).
NodeProperties::ReplaceValueInput(node, constructor, 0);
@@ -460,7 +464,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
if (found_on_proto) {
dependencies()->DependOnStablePrototypeChains(
- access_info.receiver_maps(), kStartAtPrototype,
+ access_info.lookup_start_object_maps(), kStartAtPrototype,
JSObjectRef(broker(), holder));
}
@@ -470,7 +474,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// Monomorphic property access.
access_builder.BuildCheckMaps(constructor, &effect, control,
- access_info.receiver_maps());
+ access_info.lookup_start_object_maps());
// Create a nested frame state inside the current method's most-recent frame
// state that will ensure that deopts that happen after this point will not
@@ -519,10 +523,9 @@ JSNativeContextSpecialization::InferHasInPrototypeChainResult
JSNativeContextSpecialization::InferHasInPrototypeChain(
Node* receiver, Node* effect, HeapObjectRef const& prototype) {
ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return kMayBeInPrototypeChain;
+ NodeProperties::InferMapsResult result = NodeProperties::InferMapsUnsafe(
+ broker(), receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoMaps) return kMayBeInPrototypeChain;
// Try to determine either that all of the {receiver_maps} have the given
// {prototype} in their chain, or that none do. If we can't tell, return
@@ -531,7 +534,7 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
bool none = true;
for (size_t i = 0; i < receiver_maps.size(); ++i) {
MapRef map(broker(), receiver_maps[i]);
- if (result == NodeProperties::kUnreliableReceiverMaps && !map.is_stable()) {
+ if (result == NodeProperties::kUnreliableMaps && !map.is_stable()) {
return kMayBeInPrototypeChain;
}
while (true) {
@@ -573,7 +576,7 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
if (!prototype.map().is_stable()) return kMayBeInPrototypeChain;
last_prototype = prototype.AsJSObject();
}
- WhereToStart start = result == NodeProperties::kUnreliableReceiverMaps
+ WhereToStart start = result == NodeProperties::kUnreliableMaps
? kStartAtReceiver
: kStartAtPrototype;
dependencies()->DependOnStablePrototypeChains(receiver_maps, start,
@@ -594,7 +597,7 @@ Reduction JSNativeContextSpecialization::ReduceJSHasInPrototypeChain(
// Check if we can constant-fold the prototype chain walk
// for the given {value} and the {prototype}.
HeapObjectMatcher m(prototype);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
InferHasInPrototypeChainResult result =
InferHasInPrototypeChain(value, effect, m.Ref(broker()));
if (result != kMayBeInPrototypeChain) {
@@ -615,7 +618,7 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// Check if the {constructor} is known at compile time.
HeapObjectMatcher m(constructor);
- if (!m.HasValue()) return NoChange();
+ if (!m.HasResolvedValue()) return NoChange();
if (m.Ref(broker()).IsJSBoundFunction()) {
// OrdinaryHasInstance on bound functions turns into a recursive invocation
@@ -681,7 +684,7 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
// Check if the {constructor} is the %Promise% function.
HeapObjectMatcher m(constructor);
- if (!m.HasValue() ||
+ if (!m.HasResolvedValue() ||
!m.Ref(broker()).equals(native_context().promise_function())) {
return NoChange();
}
@@ -747,8 +750,8 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
return inference.NoChange();
}
- dependencies()->DependOnStablePrototypeChains(access_info.receiver_maps(),
- kStartAtPrototype);
+ dependencies()->DependOnStablePrototypeChains(
+ access_info.lookup_start_object_maps(), kStartAtPrototype);
// Simply fulfill the {promise} with the {resolution}.
Node* value = effect =
@@ -779,23 +782,30 @@ FieldAccess ForPropertyCellValue(MachineRepresentation representation,
} // namespace
Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
- Node* node, Node* receiver, Node* value, NameRef const& name,
- AccessMode access_mode, Node* key) {
+ Node* node, Node* lookup_start_object, Node* receiver, Node* value,
+ NameRef const& name, AccessMode access_mode, Node* key, Node* effect) {
base::Optional<PropertyCellRef> cell =
native_context().global_object().GetPropertyCell(name);
- return cell.has_value() ? ReduceGlobalAccess(node, receiver, value, name,
- access_mode, key, *cell)
- : NoChange();
+ return cell.has_value()
+ ? ReduceGlobalAccess(node, lookup_start_object, receiver, value,
+ name, access_mode, key, *cell, effect)
+ : NoChange();
}
// TODO(neis): Try to merge this with ReduceNamedAccess by introducing a new
// PropertyAccessInfo kind for global accesses and using the existing mechanism
// for building loads/stores.
+// Note: The "receiver" parameter is only used for DCHECKS, but that's on
+// purpose. This way we can assert the super property access cases won't hit the
+// code which hasn't been modified to support super property access.
Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
- Node* node, Node* receiver, Node* value, NameRef const& name,
- AccessMode access_mode, Node* key, PropertyCellRef const& property_cell) {
- Node* effect = NodeProperties::GetEffectInput(node);
+ Node* node, Node* lookup_start_object, Node* receiver, Node* value,
+ NameRef const& name, AccessMode access_mode, Node* key,
+ PropertyCellRef const& property_cell, Node* effect) {
Node* control = NodeProperties::GetControlInput(node);
+ if (effect == nullptr) {
+ effect = NodeProperties::GetEffectInput(node);
+ }
ObjectRef property_cell_value = property_cell.value();
if (property_cell_value.IsHeapObject() &&
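Note: this overload change is the core of the super-property support threaded through the rest of the file: named accesses now distinguish the receiver (the "this" value any accessor will see) from the lookup_start_object (where the property walk begins). For ordinary loads the two coincide; for JSLoadNamedFromSuper the walk starts at the home object's prototype. Distilled from the ReduceNamedAccess changes below:

    // Sketch: how the two roles split for a super load.
    Node* receiver = NodeProperties::GetValueInput(node, 0);
    Node* lookup_start_object;
    if (node->opcode() == IrOpcode::kJSLoadNamedFromSuper) {
      JSLoadNamedFromSuperNode n(node);
      // The lookup starts at home_object.__proto__, not at the receiver.
      lookup_start_object = effect =
          BuildLoadPrototypeFromObject(n.home_object(), effect, control);
    } else {
      lookup_start_object = receiver;
    }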
@@ -811,6 +821,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
// We have additional constraints for stores.
if (access_mode == AccessMode::kStore) {
+ DCHECK_EQ(receiver, lookup_start_object);
if (property_details.IsReadOnly()) {
// Don't even bother trying to lower stores to read-only data properties.
return NoChange();
@@ -826,6 +837,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
}
}
} else if (access_mode == AccessMode::kHas) {
+ DCHECK_EQ(receiver, lookup_start_object);
// has checks cannot follow the fast-path used by loads when these
// conditions hold.
if ((property_details.IsConfigurable() || !property_details.IsReadOnly()) &&
@@ -839,16 +851,16 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
effect = BuildCheckEqualsName(name, key, effect, control);
}
- // If we have a {receiver} to validate, we do so by checking that its map is
- // the (target) global proxy's map. This guarantees that in fact the receiver
- // is the global proxy.
- if (receiver != nullptr) {
+ // If we have a {lookup_start_object} to validate, we do so by checking that
+ // its map is the (target) global proxy's map. This guarantees that in fact
+ // the lookup start object is the global proxy.
+ if (lookup_start_object != nullptr) {
effect = graph()->NewNode(
simplified()->CheckMaps(
CheckMapsFlag::kNone,
ZoneHandleSet<Map>(
HeapObjectRef(broker(), global_proxy()).map().object())),
- receiver, effect, control);
+ lookup_start_object, effect, control);
}
if (access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) {
@@ -914,6 +926,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
}
} else {
DCHECK_EQ(AccessMode::kStore, access_mode);
+ DCHECK_EQ(receiver, lookup_start_object);
DCHECK(!property_details.IsReadOnly());
switch (property_details.cell_type()) {
case PropertyCellType::kUndefined: {
@@ -1010,7 +1023,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
ReplaceWithValue(node, value, effect);
return Replace(value);
} else if (feedback.IsPropertyCell()) {
- return ReduceGlobalAccess(node, nullptr, nullptr,
+ return ReduceGlobalAccess(node, nullptr, nullptr, nullptr,
NameRef(broker(), p.name()), AccessMode::kLoad,
nullptr, feedback.property_cell());
} else {
@@ -1041,9 +1054,9 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
ReplaceWithValue(node, value, effect, control);
return Replace(value);
} else if (feedback.IsPropertyCell()) {
- return ReduceGlobalAccess(node, nullptr, value, NameRef(broker(), p.name()),
- AccessMode::kStore, nullptr,
- feedback.property_cell());
+ return ReduceGlobalAccess(node, nullptr, nullptr, value,
+ NameRef(broker(), p.name()), AccessMode::kStore,
+ nullptr, feedback.property_cell());
} else {
DCHECK(feedback.IsMegamorphic());
return NoChange();
@@ -1054,10 +1067,26 @@ Reduction JSNativeContextSpecialization::ReduceMinimorphicPropertyAccess(
Node* node, Node* value,
MinimorphicLoadPropertyAccessFeedback const& feedback,
FeedbackSource const& source) {
- Node* receiver = NodeProperties::GetValueInput(node, 0);
+ DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
+ node->opcode() == IrOpcode::kJSLoadProperty ||
+ node->opcode() == IrOpcode::kJSLoadNamedFromSuper);
+ STATIC_ASSERT(JSLoadNamedNode::ObjectIndex() == 0 &&
+ JSLoadPropertyNode::ObjectIndex() == 0);
+
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ Node* lookup_start_object;
+ if (node->opcode() == IrOpcode::kJSLoadNamedFromSuper) {
+ DCHECK(FLAG_super_ic);
+ JSLoadNamedFromSuperNode n(node);
+ // Lookup start object is the __proto__ of the home object.
+ lookup_start_object = effect =
+ BuildLoadPrototypeFromObject(n.home_object(), effect, control);
+ } else {
+ lookup_start_object = NodeProperties::GetValueInput(node, 0);
+ }
+
MinimorphicLoadPropertyAccessInfo access_info =
broker()->GetPropertyAccessInfo(
feedback, source,
@@ -1066,17 +1095,32 @@ Reduction JSNativeContextSpecialization::ReduceMinimorphicPropertyAccess(
: SerializationPolicy::kSerializeIfNeeded);
if (access_info.IsInvalid()) return NoChange();
+ // The dynamic map check operator loads the feedback vector from the
+ // function's frame, so we can only use this for non-inlined functions.
+ // TODO(rmcilroy): Add support for using a trampoline like LoadICTrampoline
+ // and otherwise pass feedback vector explicitly if we need support for
+ // inlined functions.
+ // TODO(rmcilroy): Ideally we would check whether we have an inlined frame
+ // state here, but there isn't a good way to distinguish inlined from OSR
+ // framestates.
+ DCHECK(broker()->is_turboprop());
+
PropertyAccessBuilder access_builder(jsgraph(), broker(), nullptr);
CheckMapsFlags flags = CheckMapsFlag::kNone;
if (feedback.has_migration_target_maps()) {
flags |= CheckMapsFlag::kTryMigrateInstance;
}
- effect =
- graph()->NewNode(simplified()->DynamicCheckMaps(flags, feedback.handler(),
- feedback.map(), source),
- receiver, effect, control);
+
+ ZoneHandleSet<Map> maps;
+ for (Handle<Map> map : feedback.maps()) {
+ maps.insert(map, graph()->zone());
+ }
+
+ effect = graph()->NewNode(
+ simplified()->DynamicCheckMaps(flags, feedback.handler(), maps, source),
+ lookup_start_object, effect, control);
value = access_builder.BuildMinimorphicLoadDataField(
- feedback.name(), access_info, receiver, &effect, &control);
+ feedback.name(), access_info, lookup_start_object, &effect, &control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
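Note: DynamicCheckMaps now consumes the whole feedback map set rather than a single monomorphic map, matching the feedback-side change above, and it is anchored on lookup_start_object so super loads are checked at the right object. The folding step, restated:

    // Sketch: the polymorphic map list becomes one operator input.
    ZoneHandleSet<Map> map_set;
    for (Handle<Map> map : feedback.maps()) {
      map_set.insert(map, graph()->zone());
    }
    effect = graph()->NewNode(
        simplified()->DynamicCheckMaps(flags, feedback.handler(), map_set,
                                       source),
        lookup_start_object, effect, control);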
@@ -1091,7 +1135,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
node->opcode() == IrOpcode::kJSStoreProperty ||
node->opcode() == IrOpcode::kJSStoreNamedOwn ||
node->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral ||
- node->opcode() == IrOpcode::kJSHasProperty);
+ node->opcode() == IrOpcode::kJSHasProperty ||
+ node->opcode() == IrOpcode::kJSLoadNamedFromSuper);
STATIC_ASSERT(JSLoadNamedNode::ObjectIndex() == 0 &&
JSStoreNamedNode::ObjectIndex() == 0 &&
JSLoadPropertyNode::ObjectIndex() == 0 &&
@@ -1100,36 +1145,51 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
JSStoreNamedNode::ObjectIndex() == 0 &&
JSStoreDataPropertyInLiteralNode::ObjectIndex() == 0 &&
JSHasPropertyNode::ObjectIndex() == 0);
- Node* receiver = NodeProperties::GetValueInput(node, 0);
+ STATIC_ASSERT(JSLoadNamedFromSuperNode::ReceiverIndex() == 0);
+
Node* context = NodeProperties::GetContextInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ // receiver = the object we pass to the accessor (if any) as the "this" value.
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ // lookup_start_object = the object where we start looking for the property.
+ Node* lookup_start_object;
+ if (node->opcode() == IrOpcode::kJSLoadNamedFromSuper) {
+ DCHECK(FLAG_super_ic);
+ JSLoadNamedFromSuperNode n(node);
+ // Lookup start object is the __proto__ of the home object.
+ lookup_start_object = effect =
+ BuildLoadPrototypeFromObject(n.home_object(), effect, control);
+ } else {
+ lookup_start_object = receiver;
+ }
+
// Either infer maps from the graph or use the feedback.
- ZoneVector<Handle<Map>> receiver_maps(zone());
- if (!InferReceiverMaps(receiver, effect, &receiver_maps)) {
- receiver_maps = feedback.maps();
+ ZoneVector<Handle<Map>> lookup_start_object_maps(zone());
+ if (!InferMaps(lookup_start_object, effect, &lookup_start_object_maps)) {
+ lookup_start_object_maps = feedback.maps();
}
- RemoveImpossibleReceiverMaps(receiver, &receiver_maps);
+ RemoveImpossibleMaps(lookup_start_object, &lookup_start_object_maps);
// Check if we have an access o.x or o.x=v where o is the target native
// contexts' global proxy, and turn that into a direct access to the
// corresponding global object instead.
- if (receiver_maps.size() == 1) {
- MapRef receiver_map(broker(), receiver_maps[0]);
- if (receiver_map.equals(
+ if (lookup_start_object_maps.size() == 1) {
+ MapRef lookup_start_object_map(broker(), lookup_start_object_maps[0]);
+ if (lookup_start_object_map.equals(
broker()->target_native_context().global_proxy_object().map()) &&
!broker()->target_native_context().global_object().IsDetached()) {
- return ReduceGlobalAccess(node, receiver, value, feedback.name(),
- access_mode, key);
+ return ReduceGlobalAccess(node, lookup_start_object, receiver, value,
+ feedback.name(), access_mode, key, effect);
}
}
ZoneVector<PropertyAccessInfo> access_infos(zone());
{
ZoneVector<PropertyAccessInfo> access_infos_for_feedback(zone());
- for (Handle<Map> map_handle : receiver_maps) {
+ for (Handle<Map> map_handle : lookup_start_object_maps) {
MapRef map(broker(), map_handle);
if (map.is_deprecated()) continue;
PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
@@ -1166,15 +1226,26 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Check for the monomorphic cases.
if (access_infos.size() == 1) {
PropertyAccessInfo access_info = access_infos.front();
- // Try to build string check or number check if possible.
- // Otherwise build a map check.
- if (!access_builder.TryBuildStringCheck(broker(),
- access_info.receiver_maps(),
- &receiver, &effect, control) &&
- !access_builder.TryBuildNumberCheck(broker(),
- access_info.receiver_maps(),
- &receiver, &effect, control)) {
- if (HasNumberMaps(broker(), access_info.receiver_maps())) {
+ if (receiver != lookup_start_object) {
+ // Super property access. lookup_start_object is a JSReceiver or
+ // null. It can't be a number, a string etc. So trying to build the
+ // checks in the "else if" branch doesn't make sense.
+ access_builder.BuildCheckMaps(lookup_start_object, &effect, control,
+ access_info.lookup_start_object_maps());
+
+ } else if (!access_builder.TryBuildStringCheck(
+ broker(), access_info.lookup_start_object_maps(), &receiver,
+ &effect, control) &&
+ !access_builder.TryBuildNumberCheck(
+ broker(), access_info.lookup_start_object_maps(), &receiver,
+ &effect, control)) {
+ // Try to build string check or number check if possible. Otherwise build
+ // a map check.
+
+ // TryBuildStringCheck and TryBuildNumberCheck don't update the receiver
+ // if they fail.
+ DCHECK_EQ(receiver, lookup_start_object);
+ if (HasNumberMaps(broker(), access_info.lookup_start_object_maps())) {
// We need to also let Smi {receiver}s through in this case, so
// we construct a diamond, guarded by the Sminess of the {receiver}
// and if {receiver} is not a Smi just emit a sequence of map checks.
@@ -1188,7 +1259,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* efalse = effect;
{
access_builder.BuildCheckMaps(receiver, &efalse, if_false,
- access_info.receiver_maps());
+ access_info.lookup_start_object_maps());
}
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
@@ -1196,14 +1267,19 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
} else {
access_builder.BuildCheckMaps(receiver, &effect, control,
- access_info.receiver_maps());
+ access_info.lookup_start_object_maps());
}
+ } else {
+ // At least one of TryBuildStringCheck & TryBuildNumberCheck succeeded
+ // and updated the receiver. Update lookup_start_object to match (they
+ // should be the same).
+ lookup_start_object = receiver;
}
// Generate the actual property access.
ValueEffectControl continuation = BuildPropertyAccess(
- receiver, value, context, frame_state, effect, control, feedback.name(),
- if_exceptions, access_info, access_mode);
+ lookup_start_object, receiver, value, context, frame_state, effect,
+ control, feedback.name(), if_exceptions, access_info, access_mode);
value = continuation.value();
effect = continuation.effect();
control = continuation.control();
@@ -1214,24 +1290,27 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
ZoneVector<Node*> effects(zone());
ZoneVector<Node*> controls(zone());
- // Check if {receiver} may be a number.
- bool receiverissmi_possible = false;
- for (PropertyAccessInfo const& access_info : access_infos) {
- if (HasNumberMaps(broker(), access_info.receiver_maps())) {
- receiverissmi_possible = true;
- break;
- }
- }
-
- // Handle the case that {receiver} may be a number.
Node* receiverissmi_control = nullptr;
Node* receiverissmi_effect = effect;
- if (receiverissmi_possible) {
- Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
- Node* branch = graph()->NewNode(common()->Branch(), check, control);
- control = graph()->NewNode(common()->IfFalse(), branch);
- receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
- receiverissmi_effect = effect;
+
+ if (receiver == lookup_start_object) {
+ // Check if {receiver} may be a number.
+ bool receiverissmi_possible = false;
+ for (PropertyAccessInfo const& access_info : access_infos) {
+ if (HasNumberMaps(broker(), access_info.lookup_start_object_maps())) {
+ receiverissmi_possible = true;
+ break;
+ }
+ }
+
+ // Handle the case that {receiver} may be a number.
+ if (receiverissmi_possible) {
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+ control = graph()->NewNode(common()->IfFalse(), branch);
+ receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
+ receiverissmi_effect = effect;
+ }
}
// Generate code for the various different property access patterns.
@@ -1239,24 +1318,25 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
for (size_t j = 0; j < access_infos.size(); ++j) {
PropertyAccessInfo const& access_info = access_infos[j];
Node* this_value = value;
+ Node* this_lookup_start_object = lookup_start_object;
Node* this_receiver = receiver;
Node* this_effect = effect;
Node* this_control = fallthrough_control;
- // Perform map check on {receiver}.
- ZoneVector<Handle<Map>> const& receiver_maps =
- access_info.receiver_maps();
+ // Perform map check on {lookup_start_object}.
+ ZoneVector<Handle<Map>> const& lookup_start_object_maps =
+ access_info.lookup_start_object_maps();
{
// Whether to insert a dedicated MapGuard node into the
// effect to be able to learn from the control flow.
bool insert_map_guard = true;
- // Check maps for the {receiver}s.
+ // Check maps for the {lookup_start_object}s.
if (j == access_infos.size() - 1) {
// Last map check on the fallthrough control path, do a
// conditional eager deoptimization exit here.
- access_builder.BuildCheckMaps(receiver, &this_effect, this_control,
- receiver_maps);
+ access_builder.BuildCheckMaps(lookup_start_object, &this_effect,
+ this_control, lookup_start_object_maps);
fallthrough_control = nullptr;
// Don't insert a MapGuard in this case, as the CheckMaps
@@ -1264,14 +1344,14 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// along the effect chain.
insert_map_guard = false;
} else {
- // Explicitly branch on the {receiver_maps}.
+ // Explicitly branch on the {lookup_start_object_maps}.
ZoneHandleSet<Map> maps;
- for (Handle<Map> map : receiver_maps) {
+ for (Handle<Map> map : lookup_start_object_maps) {
maps.insert(map, graph()->zone());
}
Node* check = this_effect =
- graph()->NewNode(simplified()->CompareMaps(maps), receiver,
- this_effect, this_control);
+ graph()->NewNode(simplified()->CompareMaps(maps),
+ lookup_start_object, this_effect, this_control);
Node* branch =
graph()->NewNode(common()->Branch(), check, this_control);
fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
@@ -1279,8 +1359,9 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
// The Number case requires special treatment to also deal with Smis.
- if (HasNumberMaps(broker(), receiver_maps)) {
+ if (HasNumberMaps(broker(), lookup_start_object_maps)) {
// Join this check with the "receiver is smi" check above.
+ DCHECK_EQ(receiver, lookup_start_object);
DCHECK_NOT_NULL(receiverissmi_effect);
DCHECK_NOT_NULL(receiverissmi_control);
this_control = graph()->NewNode(common()->Merge(2), this_control,
@@ -1289,7 +1370,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
receiverissmi_effect, this_control);
receiverissmi_effect = receiverissmi_control = nullptr;
- // The {receiver} can also be a Smi in this case, so
+ // The {lookup_start_object} can also be a Smi in this case, so
// a MapGuard doesn't make sense for this at all.
insert_map_guard = false;
}
@@ -1297,29 +1378,32 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Introduce a MapGuard to learn from this on the effect chain.
if (insert_map_guard) {
ZoneHandleSet<Map> maps;
- for (auto receiver_map : receiver_maps) {
- maps.insert(receiver_map, graph()->zone());
+ for (auto lookup_start_object_map : lookup_start_object_maps) {
+ maps.insert(lookup_start_object_map, graph()->zone());
}
- this_effect = graph()->NewNode(simplified()->MapGuard(maps), receiver,
- this_effect, this_control);
+ this_effect =
+ graph()->NewNode(simplified()->MapGuard(maps),
+ lookup_start_object, this_effect, this_control);
}
- // If all {receiver_maps} are Strings we also need to rename the
- // {receiver} here to make sure that TurboFan knows that along this
- // path the {this_receiver} is a String. This is because we want
- // strict checking of types, for example for StringLength operators.
- if (HasOnlyStringMaps(broker(), receiver_maps)) {
- this_receiver = this_effect =
- graph()->NewNode(common()->TypeGuard(Type::String()), receiver,
- this_effect, this_control);
+ // If all {lookup_start_object_maps} are Strings we also need to rename
+ // the {lookup_start_object} here to make sure that TurboFan knows that
+ // along this path the {this_lookup_start_object} is a String. This is
+ // because we want strict checking of types, for example for
+ // StringLength operators.
+ if (HasOnlyStringMaps(broker(), lookup_start_object_maps)) {
+ DCHECK_EQ(receiver, lookup_start_object);
+ this_lookup_start_object = this_receiver = this_effect =
+ graph()->NewNode(common()->TypeGuard(Type::String()),
+ lookup_start_object, this_effect, this_control);
}
}
// Generate the actual property access.
- ValueEffectControl continuation =
- BuildPropertyAccess(this_receiver, this_value, context, frame_state,
- this_effect, this_control, feedback.name(),
- if_exceptions, access_info, access_mode);
+ ValueEffectControl continuation = BuildPropertyAccess(
+ this_lookup_start_object, this_receiver, this_value, context,
+ frame_state, this_effect, this_control, feedback.name(),
+ if_exceptions, access_info, access_mode);
values.push_back(continuation.value());
effects.push_back(continuation.effect());
controls.push_back(continuation.control());
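The loop above lowers a polymorphic named access into one map-checked branch per PropertyAccessInfo, turning only the final check into a conditional deoptimization exit. A standalone sketch of that dispatch shape (all types here are illustrative stand-ins, not the TurboFan graph API):

#include <cstdio>
#include <optional>
#include <vector>

using Map = int;

struct AccessInfo {
  std::vector<Map> maps;  // stands in for lookup_start_object_maps()
  double constant;        // stands in for the specialized access result
};

// Mirrors the control shape built above: CompareMaps + Branch for every
// case but the last, which becomes a CheckMaps (deopt on mismatch).
std::optional<double> DispatchNamedAccess(
    Map actual_map, const std::vector<AccessInfo>& infos) {
  for (size_t j = 0; j < infos.size(); ++j) {
    bool matches = false;
    for (Map m : infos[j].maps) matches |= (m == actual_map);
    if (j == infos.size() - 1) {
      // Last map check: no fallthrough control, failure means deopt.
      if (!matches) return std::nullopt;
      return infos[j].constant;
    }
    if (matches) return infos[j].constant;
    // Otherwise fall through to the next map check.
  }
  return std::nullopt;
}

int main() {
  std::vector<AccessInfo> infos = {{{1, 2}, 10.0}, {{3}, 20.0}};
  std::printf("%.1f\n", DispatchNamedAccess(3, infos).value_or(-1.0));  // 20.0
  std::printf("%.1f\n", DispatchNamedAccess(9, infos).value_or(-1.0));  // -1.0 ("deopt")
}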
@@ -1377,7 +1461,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
// Check if we have a constant receiver.
HeapObjectMatcher m(receiver);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
ObjectRef object = m.Ref(broker());
if (object.IsJSFunction() &&
name.equals(ObjectRef(broker(), factory()->prototype_string()))) {
@@ -1411,6 +1495,17 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
FeedbackSource(p.feedback()), AccessMode::kLoad);
}
+Reduction JSNativeContextSpecialization::ReduceJSLoadNamedFromSuper(
+ Node* node) {
+ JSLoadNamedFromSuperNode n(node);
+ NamedAccess const& p = n.Parameters();
+ NameRef name(broker(), p.name());
+
+ if (!p.feedback().IsValid()) return NoChange();
+ return ReducePropertyAccess(node, nullptr, name, jsgraph()->Dead(),
+ FeedbackSource(p.feedback()), AccessMode::kLoad);
+}
+
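The reduction above routes super.name loads into the shared machinery, where the defining property of a super load is that map checks run against the lookup start object (the home object's prototype) while accessors still see the original receiver. A hedged sketch of that split, with every name here hypothetical:

#include <cstdio>
#include <map>
#include <string>

struct Object {
  std::map<std::string, double (*)(const Object& receiver)> getters;
  const Object* prototype = nullptr;
};

double LoadNamed(const Object& lookup_start_object, const Object& receiver,
                 const std::string& name) {
  // Walk the chain starting at lookup_start_object...
  for (const Object* o = &lookup_start_object; o != nullptr; o = o->prototype) {
    auto it = o->getters.find(name);
    // ...but invoke the accessor on the original receiver.
    if (it != o->getters.end()) return it->second(receiver);
  }
  return 0.0;  // "undefined"
}

int main() {
  Object parent, child;
  parent.getters["x"] = [](const Object&) { return 42.0; };
  child.prototype = &parent;
  // For super.x in a method installed on child, the lookup starts at
  // child's prototype but the receiver is still the child instance.
  std::printf("%.1f\n", LoadNamed(*child.prototype, child, "x"));  // 42.0
}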
Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) {
JSGetIteratorNode n(node);
GetIteratorParameters const& p = n.Parameters();
@@ -1545,7 +1640,7 @@ namespace {
base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
Node* receiver) {
HeapObjectMatcher m(receiver);
- if (!m.HasValue()) return base::nullopt;
+ if (!m.HasResolvedValue()) return base::nullopt;
ObjectRef object = m.Ref(broker);
if (!object.IsJSTypedArray()) return base::nullopt;
JSTypedArrayRef typed_array = object.AsJSTypedArray();
@@ -1554,20 +1649,20 @@ base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
}
} // namespace
-void JSNativeContextSpecialization::RemoveImpossibleReceiverMaps(
- Node* receiver, ZoneVector<Handle<Map>>* receiver_maps) const {
- base::Optional<MapRef> root_map = InferReceiverRootMap(receiver);
+void JSNativeContextSpecialization::RemoveImpossibleMaps(
+ Node* object, ZoneVector<Handle<Map>>* maps) const {
+ base::Optional<MapRef> root_map = InferRootMap(object);
if (root_map.has_value()) {
DCHECK(!root_map->is_abandoned_prototype_map());
- receiver_maps->erase(
- std::remove_if(receiver_maps->begin(), receiver_maps->end(),
+ maps->erase(
+ std::remove_if(maps->begin(), maps->end(),
[root_map, this](Handle<Map> map) {
MapRef map_ref(broker(), map);
return map_ref.is_abandoned_prototype_map() ||
(map_ref.FindRootMap().has_value() &&
!map_ref.FindRootMap()->equals(*root_map));
}),
- receiver_maps->end());
+ maps->end());
}
}
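RemoveImpossibleMaps above is the classic erase-remove idiom; a minimal self-contained version, with the root-map compatibility test reduced to a trivial integer predicate:

#include <algorithm>
#include <cstdio>
#include <vector>

// Drop every map whose "root" (here: map / 100) differs from the inferred one.
void RemoveImpossible(std::vector<int>* maps, int root) {
  maps->erase(std::remove_if(maps->begin(), maps->end(),
                             [root](int map) { return map / 100 != root; }),
              maps->end());
}

int main() {
  std::vector<int> maps = {101, 102, 205, 103};
  RemoveImpossible(&maps, 1);
  for (int m : maps) std::printf("%d ", m);  // 101 102 103
}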
@@ -1581,9 +1676,9 @@ JSNativeContextSpecialization::TryRefineElementAccessFeedback(
if (!use_inference) return feedback;
ZoneVector<Handle<Map>> inferred_maps(zone());
- if (!InferReceiverMaps(receiver, effect, &inferred_maps)) return feedback;
+ if (!InferMaps(receiver, effect, &inferred_maps)) return feedback;
- RemoveImpossibleReceiverMaps(receiver, &inferred_maps);
+ RemoveImpossibleMaps(receiver, &inferred_maps);
// TODO(neis): After Refine, the resulting feedback can still contain
// impossible maps when a target is kept only because more than one of its
// sources was inferred. Think of a way to completely rule out impossible
@@ -1650,7 +1745,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// the zone allocation of this vector.
ZoneVector<MapRef> prototype_maps(zone());
for (ElementAccessInfo const& access_info : access_infos) {
- for (Handle<Map> map : access_info.receiver_maps()) {
+ for (Handle<Map> map : access_info.lookup_start_object_maps()) {
MapRef receiver_map(broker(), map);
// If the {receiver_map} has a prototype and its elements backing
// store is either holey, or we have a potentially growing store,
@@ -1697,9 +1792,10 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
ElementAccessInfo access_info = access_infos.front();
// Perform possible elements kind transitions.
- MapRef transition_target(broker(), access_info.receiver_maps().front());
+ MapRef transition_target(broker(),
+ access_info.lookup_start_object_maps().front());
for (auto source : access_info.transition_sources()) {
- DCHECK_EQ(access_info.receiver_maps().size(), 1);
+ DCHECK_EQ(access_info.lookup_start_object_maps().size(), 1);
MapRef transition_source(broker(), source);
effect = graph()->NewNode(
simplified()->TransitionElementsKind(ElementsTransition(
@@ -1721,7 +1817,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// Perform map check on the {receiver}.
access_builder.BuildCheckMaps(receiver, &effect, control,
- access_info.receiver_maps());
+ access_info.lookup_start_object_maps());
// Access the actual element.
ValueEffectControl continuation =
@@ -1748,10 +1844,11 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* this_control = fallthrough_control;
// Perform possible elements kind transitions.
- MapRef transition_target(broker(), access_info.receiver_maps().front());
+ MapRef transition_target(broker(),
+ access_info.lookup_start_object_maps().front());
for (auto source : access_info.transition_sources()) {
MapRef transition_source(broker(), source);
- DCHECK_EQ(access_info.receiver_maps().size(), 1);
+ DCHECK_EQ(access_info.lookup_start_object_maps().size(), 1);
this_effect = graph()->NewNode(
simplified()->TransitionElementsKind(ElementsTransition(
IsSimpleMapChangeTransition(transition_source.elements_kind(),
@@ -1764,7 +1861,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// Perform map check(s) on {receiver}.
ZoneVector<Handle<Map>> const& receiver_maps =
- access_info.receiver_maps();
+ access_info.lookup_start_object_maps();
if (j == access_infos.size() - 1) {
// Last map check on the fallthrough control path, do a
// conditional eager deoptimization exit here.
@@ -1849,7 +1946,7 @@ Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
// constant-fold the load.
NumberMatcher mkey(key);
if (mkey.IsInteger() && mkey.IsInRange(0.0, kMaxUInt32 - 1.0)) {
- uint32_t index = static_cast<uint32_t>(mkey.Value());
+ uint32_t index = static_cast<uint32_t>(mkey.ResolvedValue());
base::Optional<ObjectRef> element =
receiver_ref.GetOwnConstantElement(index);
if (!element.has_value() && receiver_ref.IsJSArray()) {
@@ -1911,7 +2008,8 @@ Reduction JSNativeContextSpecialization::ReducePropertyAccess(
node->opcode() == IrOpcode::kJSHasProperty ||
node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSStoreNamed ||
- node->opcode() == IrOpcode::kJSStoreNamedOwn);
+ node->opcode() == IrOpcode::kJSStoreNamedOwn ||
+ node->opcode() == IrOpcode::kJSLoadNamedFromSuper);
DCHECK_GE(node->op()->ControlOutputCount(), 1);
ProcessedFeedback const& feedback =
@@ -1932,6 +2030,7 @@ Reduction JSNativeContextSpecialization::ReducePropertyAccess(
case ProcessedFeedback::kElementAccess:
DCHECK_EQ(feedback.AsElementAccess().keyed_mode().access_mode(),
access_mode);
+ DCHECK_NE(node->opcode(), IrOpcode::kJSLoadNamedFromSuper);
return ReduceElementAccess(node, key, value, feedback.AsElementAccess());
default:
UNREACHABLE();
@@ -2008,18 +2107,17 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey(
DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* name = NodeProperties::GetValueInput(node, 1);
- DCHECK_EQ(IrOpcode::kJSForInNext, name->opcode());
+ JSForInNextNode name(NodeProperties::GetValueInput(node, 1));
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (ForInModeOf(name->op()) != ForInMode::kUseEnumCacheKeysAndIndices) {
+ if (name.Parameters().mode() != ForInMode::kUseEnumCacheKeysAndIndices) {
return NoChange();
}
- Node* object = NodeProperties::GetValueInput(name, 0);
- Node* enumerator = NodeProperties::GetValueInput(name, 2);
- Node* key = NodeProperties::GetValueInput(name, 3);
+ Node* object = name.receiver();
+ Node* cache_type = name.cache_type();
+ Node* index = name.index();
if (object->opcode() == IrOpcode::kJSToObject) {
object = NodeProperties::GetValueInput(object, 0);
}
@@ -2033,7 +2131,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey(
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
receiver, effect, control);
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), receiver_map,
- enumerator);
+ cache_type);
effect =
graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongMap),
check, effect, control);
@@ -2041,7 +2139,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey(
// Load the enum cache indices from the {cache_type}.
Node* descriptor_array = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapDescriptors()), enumerator,
+ simplified()->LoadField(AccessBuilder::ForMapDescriptors()), cache_type,
effect, control);
Node* enum_cache = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForDescriptorArrayEnumCache()),
@@ -2060,10 +2158,10 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey(
control);
// Determine the key from the {enum_indices}.
- key = effect = graph()->NewNode(
+ Node* key = effect = graph()->NewNode(
simplified()->LoadElement(
AccessBuilder::ForFixedArrayElement(PACKED_SMI_ELEMENTS)),
- enum_indices, key, effect, control);
+ enum_indices, index, effect, control);
// Load the actual field value.
Node* value = effect = graph()->NewNode(simplified()->LoadFieldByIndex(),
@@ -2229,14 +2327,14 @@ Node* JSNativeContextSpecialization::InlineApiCall(
JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::BuildPropertyLoad(
- Node* receiver, Node* context, Node* frame_state, Node* effect,
- Node* control, NameRef const& name, ZoneVector<Node*>* if_exceptions,
- PropertyAccessInfo const& access_info) {
+ Node* lookup_start_object, Node* receiver, Node* context, Node* frame_state,
+ Node* effect, Node* control, NameRef const& name,
+ ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
dependencies()->DependOnStablePrototypeChains(
- access_info.receiver_maps(), kStartAtPrototype,
+ access_info.lookup_start_object_maps(), kStartAtPrototype,
JSObjectRef(broker(), holder));
}
@@ -2254,12 +2352,13 @@ JSNativeContextSpecialization::BuildPropertyLoad(
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForCellValue()),
cell, effect, control);
} else if (access_info.IsStringLength()) {
+ DCHECK_EQ(receiver, lookup_start_object);
value = graph()->NewNode(simplified()->StringLength(), receiver);
} else {
DCHECK(access_info.IsDataField() || access_info.IsDataConstant());
PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
- value = access_builder.BuildLoadDataField(name, access_info, receiver,
- &effect, &control);
+ value = access_builder.BuildLoadDataField(
+ name, access_info, lookup_start_object, &effect, &control);
}
return ValueEffectControl(value, effect, control);
@@ -2272,7 +2371,7 @@ JSNativeContextSpecialization::BuildPropertyTest(
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
dependencies()->DependOnStablePrototypeChains(
- access_info.receiver_maps(), kStartAtPrototype,
+ access_info.lookup_start_object_maps(), kStartAtPrototype,
JSObjectRef(broker(), holder));
}
@@ -2283,19 +2382,23 @@ JSNativeContextSpecialization::BuildPropertyTest(
JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::BuildPropertyAccess(
- Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
- Node* control, NameRef const& name, ZoneVector<Node*>* if_exceptions,
- PropertyAccessInfo const& access_info, AccessMode access_mode) {
+ Node* lookup_start_object, Node* receiver, Node* value, Node* context,
+ Node* frame_state, Node* effect, Node* control, NameRef const& name,
+ ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info,
+ AccessMode access_mode) {
switch (access_mode) {
case AccessMode::kLoad:
- return BuildPropertyLoad(receiver, context, frame_state, effect, control,
- name, if_exceptions, access_info);
+ return BuildPropertyLoad(lookup_start_object, receiver, context,
+ frame_state, effect, control, name,
+ if_exceptions, access_info);
case AccessMode::kStore:
case AccessMode::kStoreInLiteral:
+ DCHECK_EQ(receiver, lookup_start_object);
return BuildPropertyStore(receiver, value, context, frame_state, effect,
control, name, if_exceptions, access_info,
access_mode);
case AccessMode::kHas:
+ DCHECK_EQ(receiver, lookup_start_object);
return BuildPropertyTest(effect, control, access_info);
}
UNREACHABLE();
@@ -2312,7 +2415,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
if (access_info.holder().ToHandle(&holder)) {
DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
dependencies()->DependOnStablePrototypeChains(
- access_info.receiver_maps(), kStartAtPrototype,
+ access_info.lookup_start_object_maps(), kStartAtPrototype,
JSObjectRef(broker(), holder));
}
@@ -2517,8 +2620,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
if (!p.feedback().IsValid()) return NoChange();
NumberMatcher mflags(n.flags());
- CHECK(mflags.HasValue());
- DataPropertyInLiteralFlags cflags(mflags.Value());
+ CHECK(mflags.HasResolvedValue());
+ DataPropertyInLiteralFlags cflags(mflags.ResolvedValue());
DCHECK(!(cflags & DataPropertyInLiteralFlag::kDontEnum));
if (cflags & DataPropertyInLiteralFlag::kSetFunctionName) return NoChange();
@@ -2575,7 +2678,8 @@ JSNativeContextSpecialization::BuildElementAccess(
// TODO(bmeurer): We currently specialize based on elements kind. We should
// also be able to properly support strings and other JSObjects here.
ElementsKind elements_kind = access_info.elements_kind();
- ZoneVector<Handle<Map>> const& receiver_maps = access_info.receiver_maps();
+ ZoneVector<Handle<Map>> const& receiver_maps =
+ access_info.lookup_start_object_maps();
if (IsTypedArrayElementsKind(elements_kind)) {
Node* buffer_or_receiver = receiver;
@@ -3334,42 +3438,40 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
return dependencies()->DependOnNoElementsProtector();
}
-bool JSNativeContextSpecialization::InferReceiverMaps(
- Node* receiver, Node* effect,
- ZoneVector<Handle<Map>>* receiver_maps) const {
- ZoneHandleSet<Map> maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect,
- &maps);
- if (result == NodeProperties::kReliableReceiverMaps) {
- for (size_t i = 0; i < maps.size(); ++i) {
- receiver_maps->push_back(maps[i]);
+bool JSNativeContextSpecialization::InferMaps(
+ Node* object, Node* effect, ZoneVector<Handle<Map>>* maps) const {
+ ZoneHandleSet<Map> map_set;
+ NodeProperties::InferMapsResult result =
+ NodeProperties::InferMapsUnsafe(broker(), object, effect, &map_set);
+ if (result == NodeProperties::kReliableMaps) {
+ for (size_t i = 0; i < map_set.size(); ++i) {
+ maps->push_back(map_set[i]);
}
return true;
- } else if (result == NodeProperties::kUnreliableReceiverMaps) {
- // For untrusted receiver maps, we can still use the information
+ } else if (result == NodeProperties::kUnreliableMaps) {
+ // For untrusted maps, we can still use the information
// if the maps are stable.
- for (size_t i = 0; i < maps.size(); ++i) {
- MapRef map(broker(), maps[i]);
+ for (size_t i = 0; i < map_set.size(); ++i) {
+ MapRef map(broker(), map_set[i]);
if (!map.is_stable()) return false;
}
- for (size_t i = 0; i < maps.size(); ++i) {
- receiver_maps->push_back(maps[i]);
+ for (size_t i = 0; i < map_set.size(); ++i) {
+ maps->push_back(map_set[i]);
}
return true;
}
return false;
}
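The renamed InferMaps keeps unreliable map information only when every inferred map is stable, mirroring the comment above. A compact sketch of that rule, assuming a toy Map type:

#include <vector>

struct Map { bool is_stable; };
enum class Result { kReliableMaps, kUnreliableMaps, kNoMaps };

bool InferMapsSketch(Result result, const std::vector<Map>& inferred,
                     std::vector<Map>* out) {
  if (result == Result::kReliableMaps) {
    *out = inferred;
    return true;
  }
  if (result == Result::kUnreliableMaps) {
    // Unreliable maps are usable only if none of them can change silently.
    for (const Map& m : inferred)
      if (!m.is_stable) return false;
    *out = inferred;
    return true;
  }
  return false;
}

int main() {
  std::vector<Map> in = {{true}, {true}}, out;
  return InferMapsSketch(Result::kUnreliableMaps, in, &out) ? 0 : 1;
}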
-base::Optional<MapRef> JSNativeContextSpecialization::InferReceiverRootMap(
- Node* receiver) const {
- HeapObjectMatcher m(receiver);
- if (m.HasValue()) {
+base::Optional<MapRef> JSNativeContextSpecialization::InferRootMap(
+ Node* object) const {
+ HeapObjectMatcher m(object);
+ if (m.HasResolvedValue()) {
MapRef map = m.Ref(broker()).map();
return map.FindRootMap();
} else if (m.IsJSCreate()) {
base::Optional<MapRef> initial_map =
- NodeProperties::GetJSCreateMap(broker(), receiver);
+ NodeProperties::GetJSCreateMap(broker(), object);
if (initial_map.has_value()) {
if (!initial_map->FindRootMap().has_value()) {
return base::nullopt;
@@ -3381,6 +3483,16 @@ base::Optional<MapRef> JSNativeContextSpecialization::InferReceiverRootMap(
return base::nullopt;
}
+Node* JSNativeContextSpecialization::BuildLoadPrototypeFromObject(
+ Node* object, Node* effect, Node* control) {
+ Node* map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), object,
+ effect, control);
+ return graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapPrototype()), map, effect,
+ control);
+}
+
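The new helper is just two dependent field loads, object to map to prototype; the same chain expressed over plain data (illustrative types only):

#include <cstdio>

struct Map;
struct Object { const Map* map; };
struct Map { const Object* prototype; };

const Object* LoadPrototypeFromObject(const Object& object) {
  const Map* map = object.map;  // LoadField(AccessBuilder::ForMap())
  return map->prototype;        // LoadField(AccessBuilder::ForMapPrototype())
}

int main() {
  Object proto{nullptr};
  Map m{&proto};
  Object o{&m};
  std::printf("%d\n", LoadPrototypeFromObject(o) == &proto);  // 1
}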
Graph* JSNativeContextSpecialization::graph() const {
return jsgraph()->graph();
}
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 81587870da..3d0c347261 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -16,7 +16,6 @@ namespace internal {
// Forward declarations.
class Factory;
-class FeedbackNexus;
class JSGlobalObject;
class JSGlobalProxy;
class StringConstantBase;
@@ -54,6 +53,9 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
JSHeapBroker* broker, Flags flags,
CompilationDependencies* dependencies,
Zone* zone, Zone* shared_zone);
+ JSNativeContextSpecialization(const JSNativeContextSpecialization&) = delete;
+ JSNativeContextSpecialization& operator=(
+ const JSNativeContextSpecialization&) = delete;
const char* reducer_name() const override {
return "JSNativeContextSpecialization";
@@ -81,6 +83,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Reduction ReduceJSLoadGlobal(Node* node);
Reduction ReduceJSStoreGlobal(Node* node);
Reduction ReduceJSLoadNamed(Node* node);
+ Reduction ReduceJSLoadNamedFromSuper(Node* node);
Reduction ReduceJSGetIterator(Node* node);
Reduction ReduceJSStoreNamed(Node* node);
Reduction ReduceJSHasProperty(Node* node);
@@ -92,7 +95,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Reduction ReduceJSToObject(Node* node);
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
- ElementAccessFeedback const& processed);
+ ElementAccessFeedback const& feedback);
// In the case of non-keyed (named) accesses, pass the name as {static_name}
// and use {nullptr} for {key} (load/store modes are irrelevant).
Reduction ReducePropertyAccess(Node* node, Node* key,
@@ -100,18 +103,21 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Node* value, FeedbackSource const& source,
AccessMode access_mode);
Reduction ReduceNamedAccess(Node* node, Node* value,
- NamedAccessFeedback const& processed,
+ NamedAccessFeedback const& feedback,
AccessMode access_mode, Node* key = nullptr);
Reduction ReduceMinimorphicPropertyAccess(
Node* node, Node* value,
MinimorphicLoadPropertyAccessFeedback const& feedback,
FeedbackSource const& source);
- Reduction ReduceGlobalAccess(Node* node, Node* receiver, Node* value,
- NameRef const& name, AccessMode access_mode,
- Node* key = nullptr);
- Reduction ReduceGlobalAccess(Node* node, Node* receiver, Node* value,
- NameRef const& name, AccessMode access_mode,
- Node* key, PropertyCellRef const& property_cell);
+ Reduction ReduceGlobalAccess(Node* node, Node* lookup_start_object,
+ Node* receiver, Node* value, NameRef const& name,
+ AccessMode access_mode, Node* key = nullptr,
+ Node* effect = nullptr);
+ Reduction ReduceGlobalAccess(Node* node, Node* lookup_start_object,
+ Node* receiver, Node* value, NameRef const& name,
+ AccessMode access_mode, Node* key,
+ PropertyCellRef const& property_cell,
+ Node* effect = nullptr);
Reduction ReduceElementLoadFromHeapConstant(Node* node, Node* key,
AccessMode access_mode,
KeyedAccessLoadMode load_mode);
@@ -144,14 +150,13 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
};
// Construct the appropriate subgraph for property access.
- ValueEffectControl BuildPropertyAccess(Node* receiver, Node* value,
- Node* context, Node* frame_state,
- Node* effect, Node* control,
- NameRef const& name,
- ZoneVector<Node*>* if_exceptions,
- PropertyAccessInfo const& access_info,
- AccessMode access_mode);
- ValueEffectControl BuildPropertyLoad(Node* receiver, Node* context,
+ ValueEffectControl BuildPropertyAccess(
+ Node* lookup_start_object, Node* receiver, Node* value, Node* context,
+ Node* frame_state, Node* effect, Node* control, NameRef const& name,
+ ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info,
+ AccessMode access_mode);
+ ValueEffectControl BuildPropertyLoad(Node* lookup_start_object,
+ Node* receiver, Node* context,
Node* frame_state, Node* effect,
Node* control, NameRef const& name,
ZoneVector<Node*>* if_exceptions,
@@ -210,20 +215,19 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
// code dependencies and might use the array protector cell.
bool CanTreatHoleAsUndefined(ZoneVector<Handle<Map>> const& receiver_maps);
- void RemoveImpossibleReceiverMaps(
- Node* receiver, ZoneVector<Handle<Map>>* receiver_maps) const;
+ void RemoveImpossibleMaps(Node* object, ZoneVector<Handle<Map>>* maps) const;
ElementAccessFeedback const& TryRefineElementAccessFeedback(
ElementAccessFeedback const& feedback, Node* receiver,
Node* effect) const;
- // Try to infer maps for the given {receiver} at the current {effect}.
- bool InferReceiverMaps(Node* receiver, Node* effect,
- ZoneVector<Handle<Map>>* receiver_maps) const;
+ // Try to infer maps for the given {object} at the current {effect}.
+ bool InferMaps(Node* object, Node* effect,
+ ZoneVector<Handle<Map>>* maps) const;
- // Try to infer a root map for the {receiver} independent of the current
- // program location.
- base::Optional<MapRef> InferReceiverRootMap(Node* receiver) const;
+ // Try to infer a root map for the {object} independent of the current program
+ // location.
+ base::Optional<MapRef> InferRootMap(Node* object) const;
// Checks if we know at compile time that the {receiver} either definitely
  // has the {prototype} in its prototype chain, or the {receiver} definitely
@@ -236,6 +240,8 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
InferHasInPrototypeChainResult InferHasInPrototypeChain(
Node* receiver, Node* effect, HeapObjectRef const& prototype);
+ Node* BuildLoadPrototypeFromObject(Node* object, Node* effect, Node* control);
+
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
@@ -265,8 +271,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Zone* const zone_;
Zone* const shared_zone_;
TypeCache const* type_cache_;
-
- DISALLOW_COPY_AND_ASSIGN(JSNativeContextSpecialization);
};
DEFINE_OPERATORS_FOR_FLAGS(JSNativeContextSpecialization::Flags)
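Throughout this diff, DISALLOW_COPY_AND_ASSIGN is replaced by explicitly deleted copy operations, the plain C++11 idiom the macro wrapped; a minimal illustration:

class Reducer {
 public:
  Reducer() = default;
  Reducer(const Reducer&) = delete;
  Reducer& operator=(const Reducer&) = delete;
};

int main() {
  Reducer a;
  // Reducer b = a;    // would not compile: the copy constructor is deleted
  // Reducer c; c = a; // would not compile: copy assignment is deleted
  (void)a;
}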
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index dccc9558b5..da3af62bf2 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -40,8 +40,8 @@ TNode<Oddball> UndefinedConstant(JSGraph* jsgraph) {
FeedbackCellRef JSCreateClosureNode::GetFeedbackCellRefChecked(
JSHeapBroker* broker) const {
HeapObjectMatcher m(feedback_cell());
- CHECK(m.HasValue());
- return FeedbackCellRef(broker, m.Value());
+ CHECK(m.HasResolvedValue());
+ return FeedbackCellRef(broker, m.ResolvedValue());
}
std::ostream& operator<<(std::ostream& os, CallFrequency const& f) {
@@ -640,9 +640,9 @@ size_t hash_value(GetIteratorParameters const& p) {
FeedbackSource::Hash()(p.callFeedback()));
}
-size_t hash_value(ForInMode mode) { return static_cast<uint8_t>(mode); }
+size_t hash_value(ForInMode const& mode) { return static_cast<uint8_t>(mode); }
-std::ostream& operator<<(std::ostream& os, ForInMode mode) {
+std::ostream& operator<<(std::ostream& os, ForInMode const& mode) {
switch (mode) {
case ForInMode::kUseEnumCacheKeysAndIndices:
return os << "UseEnumCacheKeysAndIndices";
@@ -654,10 +654,26 @@ std::ostream& operator<<(std::ostream& os, ForInMode mode) {
UNREACHABLE();
}
-ForInMode ForInModeOf(Operator const* op) {
+bool operator==(ForInParameters const& lhs, ForInParameters const& rhs) {
+ return lhs.feedback() == rhs.feedback() && lhs.mode() == rhs.mode();
+}
+
+bool operator!=(ForInParameters const& lhs, ForInParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(ForInParameters const& p) {
+ return base::hash_combine(FeedbackSource::Hash()(p.feedback()), p.mode());
+}
+
+std::ostream& operator<<(std::ostream& os, ForInParameters const& p) {
+ return os << p.feedback() << ", " << p.mode();
+}
+
+ForInParameters const& ForInParametersOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kJSForInNext ||
op->opcode() == IrOpcode::kJSForInPrepare);
- return OpParameter<ForInMode>(op);
+ return OpParameter<ForInParameters>(op);
}
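Parameter classes wrapped in Operator1<T>, like ForInParameters above, supply the equality/hash/print trio so operators can be compared and dumped. A self-contained sketch of the trio (hash_value here is a simplified stand-in for base::hash_combine):

#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>

enum class Mode : uint8_t { kFast, kGeneric };

class Params {
 public:
  Params(int feedback_slot, Mode mode)
      : feedback_slot_(feedback_slot), mode_(mode) {}
  int feedback_slot() const { return feedback_slot_; }
  Mode mode() const { return mode_; }

 private:
  const int feedback_slot_;
  const Mode mode_;
};

bool operator==(const Params& lhs, const Params& rhs) {
  return lhs.feedback_slot() == rhs.feedback_slot() && lhs.mode() == rhs.mode();
}
bool operator!=(const Params& lhs, const Params& rhs) { return !(lhs == rhs); }

size_t hash_value(const Params& p) {
  size_t h = std::hash<int>()(p.feedback_slot());
  return h ^ (static_cast<size_t>(p.mode()) + 0x9e3779b9 + (h << 6) + (h >> 2));
}

std::ostream& operator<<(std::ostream& os, const Params& p) {
  return os << p.feedback_slot() << ", " << static_cast<int>(p.mode());
}

int main() {
  Params a(1, Mode::kFast), b(1, Mode::kFast);
  std::cout << (a == b) << " " << hash_value(a) << " [" << a << "]\n";
}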
#define CACHED_OP_LIST(V) \
@@ -693,7 +709,7 @@ ForInMode ForInModeOf(Operator const* op) {
V(PromiseResolve, Operator::kNoProperties, 2, 1) \
V(RejectPromise, Operator::kNoDeopt | Operator::kNoThrow, 3, 1) \
V(ResolvePromise, Operator::kNoDeopt | Operator::kNoThrow, 2, 1) \
- V(GetSuperConstructor, Operator::kNoWrite, 1, 1) \
+ V(GetSuperConstructor, Operator::kNoWrite | Operator::kNoThrow, 1, 1) \
V(ParseInt, Operator::kNoProperties, 2, 1) \
V(RegExpTest, Operator::kNoProperties, 2, 1)
@@ -919,12 +935,13 @@ const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name,
access); // parameter
}
-const Operator* JSOperatorBuilder::LoadNamedFromSuper(Handle<Name> name) {
+const Operator* JSOperatorBuilder::LoadNamedFromSuper(
+ Handle<Name> name, const FeedbackSource& feedback) {
static constexpr int kReceiver = 1;
static constexpr int kHomeObject = 1;
- static constexpr int kArity = kReceiver + kHomeObject;
- // TODO(marja, v8:9237): Use real feedback.
- NamedAccess access(LanguageMode::kSloppy, name, FeedbackSource());
+ static constexpr int kFeedbackVector = 1;
+ static constexpr int kArity = kReceiver + kHomeObject + kFeedbackVector;
+ NamedAccess access(LanguageMode::kSloppy, name, feedback);
return zone()->New<Operator1<NamedAccess>>( // --
IrOpcode::kJSLoadNamedFromSuper, Operator::kNoProperties, // opcode
"JSLoadNamedFromSuper", // name
@@ -961,21 +978,23 @@ const Operator* JSOperatorBuilder::HasProperty(FeedbackSource const& feedback) {
access); // parameter
}
-const Operator* JSOperatorBuilder::ForInNext(ForInMode mode) {
- return zone()->New<Operator1<ForInMode>>( // --
+const Operator* JSOperatorBuilder::ForInNext(ForInMode mode,
+ const FeedbackSource& feedback) {
+ return zone()->New<Operator1<ForInParameters>>( // --
IrOpcode::kJSForInNext, Operator::kNoProperties, // opcode
"JSForInNext", // name
- 4, 1, 1, 1, 1, 2, // counts
- mode); // parameter
-}
-
-const Operator* JSOperatorBuilder::ForInPrepare(ForInMode mode) {
- return zone()->New<Operator1<ForInMode>>( // --
- IrOpcode::kJSForInPrepare, // opcode
- Operator::kNoWrite | Operator::kNoThrow, // flags
- "JSForInPrepare", // name
- 1, 1, 1, 3, 1, 1, // counts
- mode); // parameter
+ 5, 1, 1, 1, 1, 2, // counts
+ ForInParameters{feedback, mode}); // parameter
+}
+
+const Operator* JSOperatorBuilder::ForInPrepare(
+ ForInMode mode, const FeedbackSource& feedback) {
+ return zone()->New<Operator1<ForInParameters>>( // --
+ IrOpcode::kJSForInPrepare, // opcode
+ Operator::kNoWrite | Operator::kNoThrow, // flags
+ "JSForInPrepare", // name
+ 2, 1, 1, 3, 1, 1, // counts
+ ForInParameters{feedback, mode}); // parameter
}
const Operator* JSOperatorBuilder::GeneratorStore(int register_count) {
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 4043969000..7e61bf3760 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -789,18 +789,32 @@ std::ostream& operator<<(std::ostream&, GetIteratorParameters const&);
const GetIteratorParameters& GetIteratorParametersOf(const Operator* op);
-// Descriptor used by the JSForInPrepare and JSForInNext opcodes.
enum class ForInMode : uint8_t {
kUseEnumCacheKeysAndIndices,
kUseEnumCacheKeys,
kGeneric
};
+size_t hash_value(ForInMode const&);
+std::ostream& operator<<(std::ostream&, ForInMode const&);
-size_t hash_value(ForInMode);
+class ForInParameters final {
+ public:
+ ForInParameters(const FeedbackSource& feedback, ForInMode mode)
+ : feedback_(feedback), mode_(mode) {}
+
+ const FeedbackSource& feedback() const { return feedback_; }
+ ForInMode mode() const { return mode_; }
-std::ostream& operator<<(std::ostream&, ForInMode);
+ private:
+ const FeedbackSource feedback_;
+ const ForInMode mode_;
+};
-ForInMode ForInModeOf(Operator const* op) V8_WARN_UNUSED_RESULT;
+bool operator==(ForInParameters const&, ForInParameters const&);
+bool operator!=(ForInParameters const&, ForInParameters const&);
+size_t hash_value(ForInParameters const&);
+std::ostream& operator<<(std::ostream&, ForInParameters const&);
+const ForInParameters& ForInParametersOf(const Operator* op);
int RegisterCountOf(Operator const* op) V8_WARN_UNUSED_RESULT;
@@ -816,6 +830,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
: public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit JSOperatorBuilder(Zone* zone);
+ JSOperatorBuilder(const JSOperatorBuilder&) = delete;
+ JSOperatorBuilder& operator=(const JSOperatorBuilder&) = delete;
const Operator* Equal(FeedbackSource const& feedback);
const Operator* StrictEqual(FeedbackSource const& feedback);
@@ -921,7 +937,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* LoadProperty(FeedbackSource const& feedback);
const Operator* LoadNamed(Handle<Name> name, FeedbackSource const& feedback);
- const Operator* LoadNamedFromSuper(Handle<Name> name);
+ const Operator* LoadNamedFromSuper(Handle<Name> name,
+ FeedbackSource const& feedback);
const Operator* StoreProperty(LanguageMode language_mode,
FeedbackSource const& feedback);
@@ -966,8 +983,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* AsyncFunctionResolve();
const Operator* ForInEnumerate();
- const Operator* ForInNext(ForInMode);
- const Operator* ForInPrepare(ForInMode);
+ const Operator* ForInNext(ForInMode mode, const FeedbackSource& feedback);
+ const Operator* ForInPrepare(ForInMode mode, const FeedbackSource& feedback);
const Operator* LoadMessage();
const Operator* StoreMessage();
@@ -1010,8 +1027,6 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const JSOperatorGlobalCache& cache_;
Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(JSOperatorBuilder);
};
// Node wrappers.
@@ -1399,9 +1414,13 @@ class JSLoadNamedFromSuperNode final : public JSNodeWrapperBase {
const NamedAccess& Parameters() const { return NamedAccessOf(node()->op()); }
-#define INPUTS(V) \
- V(Receiver, receiver, 0, Object) \
- V(Object, home_object, 1, Object)
+ // TODO(marja, v8:9237): A more intuitive order would be (home_object,
+ // receiver, feedback_vector). The order can be changed once we no longer
+ // delegate to Runtime_LoadFromSuper.
+#define INPUTS(V) \
+ V(Receiver, receiver, 0, Object) \
+ V(HomeObject, home_object, 1, Object) \
+ V(FeedbackVector, feedback_vector, 2, HeapObject)
INPUTS(DEFINE_INPUT_ACCESSORS)
#undef INPUTS
};
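The INPUTS(V) lists in these node wrappers are X-macros: each wrapper declares its inputs once, and expanding the list through DEFINE_INPUT_ACCESSORS generates one accessor per input. A toy version (it drops the static type argument the real macro carries):

#include <cstdio>

struct Node { int inputs[3]; };

#define DEFINE_INPUT_ACCESSORS(Name, name, index) \
  int name() const { return node_->inputs[index]; }

class Wrapper {
 public:
  explicit Wrapper(const Node* node) : node_(node) {}

#define INPUTS(V)          \
  V(Receiver, receiver, 0) \
  V(HomeObject, home_object, 1)
  INPUTS(DEFINE_INPUT_ACCESSORS)
#undef INPUTS

 private:
  const Node* node_;
};

int main() {
  Node n{{7, 8, 9}};
  Wrapper w(&n);
  std::printf("%d %d\n", w.receiver(), w.home_object());  // 7 8
}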
@@ -1546,6 +1565,43 @@ class JSCreateClosureNode final : public JSNodeWrapperBase {
FeedbackCellRef GetFeedbackCellRefChecked(JSHeapBroker* broker) const;
};
+class JSForInPrepareNode final : public JSNodeWrapperBase {
+ public:
+ explicit constexpr JSForInPrepareNode(Node* node) : JSNodeWrapperBase(node) {
+ CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSForInPrepare);
+ }
+
+ const ForInParameters& Parameters() const {
+ return ForInParametersOf(node()->op());
+ }
+
+#define INPUTS(V) \
+ V(Enumerator, enumerator, 0, Object) \
+ V(FeedbackVector, feedback_vector, 1, HeapObject)
+ INPUTS(DEFINE_INPUT_ACCESSORS)
+#undef INPUTS
+};
+
+class JSForInNextNode final : public JSNodeWrapperBase {
+ public:
+ explicit constexpr JSForInNextNode(Node* node) : JSNodeWrapperBase(node) {
+ CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSForInNext);
+ }
+
+ const ForInParameters& Parameters() const {
+ return ForInParametersOf(node()->op());
+ }
+
+#define INPUTS(V) \
+ V(Receiver, receiver, 0, Object) \
+ V(CacheArray, cache_array, 1, Object) \
+ V(CacheType, cache_type, 2, Object) \
+ V(Index, index, 3, Smi) \
+ V(FeedbackVector, feedback_vector, 4, HeapObject)
+ INPUTS(DEFINE_INPUT_ACCESSORS)
+#undef INPUTS
+};
+
#undef DEFINE_INPUT_ACCESSORS
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index 808c59a65e..046ed47577 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -513,9 +513,9 @@ JSTypeHintLowering::ReduceGetIteratorOperation(const Operator* op,
}
JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceLoadNamedOperation(
- const Operator* op, Node* receiver, Node* effect, Node* control,
- FeedbackSlot slot) const {
- DCHECK_EQ(IrOpcode::kJSLoadNamed, op->opcode());
+ const Operator* op, Node* effect, Node* control, FeedbackSlot slot) const {
+ DCHECK(op->opcode() == IrOpcode::kJSLoadNamed ||
+ op->opcode() == IrOpcode::kJSLoadNamedFromSuper);
if (Node* node = TryBuildSoftDeopt(
slot, effect, control,
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) {
@@ -574,8 +574,8 @@ Node* JSTypeHintLowering::TryBuildSoftDeopt(FeedbackSlot slot, Node* effect,
FeedbackSource source(feedback_vector(), slot);
// TODO(mythria): Think of adding flags to specify if we need a soft deopt for
- // calls instead of using FLAG_turboprop here.
- if (FLAG_turboprop &&
+ // calls instead of using broker()->is_turboprop() here.
+ if (broker()->is_turboprop() &&
broker()->GetFeedbackSlotKind(source) == FeedbackSlotKind::kCall) {
return nullptr;
}
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h
index 256858c1c6..c89acd12ff 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.h
+++ b/deps/v8/src/compiler/js-type-hint-lowering.h
@@ -14,7 +14,6 @@ namespace v8 {
namespace internal {
// Forward declarations.
-class FeedbackNexus;
class FeedbackSlot;
namespace compiler {
@@ -43,6 +42,8 @@ class JSTypeHintLowering {
JSTypeHintLowering(JSHeapBroker* broker, JSGraph* jsgraph,
FeedbackVectorRef feedback_vector, Flags flags);
+ JSTypeHintLowering(const JSTypeHintLowering&) = delete;
+ JSTypeHintLowering& operator=(const JSTypeHintLowering&) = delete;
// {LoweringResult} describes the result of lowering. The following outcomes
// are possible:
@@ -143,8 +144,8 @@ class JSTypeHintLowering {
FeedbackSlot call_slot) const;
// Potential reduction of property access operations.
- LoweringResult ReduceLoadNamedOperation(const Operator* op, Node* obj,
- Node* effect, Node* control,
+ LoweringResult ReduceLoadNamedOperation(const Operator* op, Node* effect,
+ Node* control,
FeedbackSlot slot) const;
LoweringResult ReduceLoadKeyedOperation(const Operator* op, Node* obj,
Node* key, Node* effect,
@@ -177,8 +178,6 @@ class JSTypeHintLowering {
JSGraph* const jsgraph_;
Flags const flags_;
FeedbackVectorRef const feedback_vector_;
-
- DISALLOW_COPY_AND_ASSIGN(JSTypeHintLowering);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 46018225a3..9927cc0b70 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -107,11 +107,11 @@ class JSBinopReduction final {
GetBinaryOperationHint(node_) == BinaryOperationHint::kString) {
HeapObjectBinopMatcher m(node_);
JSHeapBroker* broker = lowering_->broker();
- if (m.right().HasValue() && m.right().Ref(broker).IsString()) {
+ if (m.right().HasResolvedValue() && m.right().Ref(broker).IsString()) {
StringRef right_string = m.right().Ref(broker).AsString();
if (right_string.length() >= ConsString::kMinLength) return true;
}
- if (m.left().HasValue() && m.left().Ref(broker).IsString()) {
+ if (m.left().HasResolvedValue() && m.left().Ref(broker).IsString()) {
StringRef left_string = m.left().Ref(broker).AsString();
if (left_string.length() >= ConsString::kMinLength) {
// The invariant for ConsString requires the left hand side to be
@@ -989,7 +989,7 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
if (input_type.Is(Type::String())) {
HeapObjectMatcher m(input);
- if (m.HasValue() && m.Ref(broker()).IsString()) {
+ if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
StringRef input_value = m.Ref(broker()).AsString();
double number;
ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());
@@ -1492,8 +1492,6 @@ namespace {
void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
CallDescriptor::Flags flags) {
// Patch {node} to a direct CEntry call.
- //
- // When V8_REVERSE_JSARGS is set:
// ----------- A r g u m e n t s -----------
// -- 0: CEntry
// --- Stack args ---
@@ -1507,21 +1505,6 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
// -- 6 + n: the C entry point
// -- 6 + n + 1: argc (Int32)
// -----------------------------------
- //
- // Otherwise:
- // ----------- A r g u m e n t s -----------
- // -- 0: CEntry
- // --- Stack args ---
- // -- 1: receiver
- // -- [2, 2 + n[: the n actual arguments passed to the builtin
- // -- 2 + n: padding
- // -- 2 + n + 1: argc, including the receiver and implicit args (Smi)
- // -- 2 + n + 2: target
- // -- 2 + n + 3: new_target
- // --- Register args ---
- // -- 2 + n + 4: the C entry point
- // -- 2 + n + 5: argc (Int32)
- // -----------------------------------
// The logic contained here is mirrored in Builtins::Generate_Adaptor.
// Keep these in sync.
@@ -1558,19 +1541,11 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
Node* argc_node = jsgraph->Constant(argc);
static const int kStubAndReceiver = 2;
-#ifdef V8_REVERSE_JSARGS
node->InsertInput(zone, 1, new_target);
node->InsertInput(zone, 2, target);
node->InsertInput(zone, 3, argc_node);
node->InsertInput(zone, 4, jsgraph->PaddingConstant());
int cursor = arity + kStubAndReceiver + BuiltinArguments::kNumExtraArgs;
-#else
- int cursor = arity + kStubAndReceiver;
- node->InsertInput(zone, cursor++, jsgraph->PaddingConstant());
- node->InsertInput(zone, cursor++, argc_node);
- node->InsertInput(zone, cursor++, target);
- node->InsertInput(zone, cursor++, new_target);
-#endif
Address entry = Builtins::CppEntryOf(builtin_index);
ExternalReference entry_ref = ExternalReference::Create(entry);
@@ -1803,51 +1778,18 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
#else
if (NeedsArgumentAdaptorFrame(*shared, arity)) {
node->RemoveInput(n.FeedbackVectorIndex());
-
- // Check if it's safe to skip the arguments adaptor for {shared},
- // that is whether the target function anyways cannot observe the
- // actual arguments. Details can be found in this document at
- // https://bit.ly/v8-faster-calls-with-arguments-mismatch and
- // on the tracking bug at https://crbug.com/v8/8895
- if (shared->is_safe_to_skip_arguments_adaptor()) {
- // Currently we only support skipping arguments adaptor frames
- // for strict mode functions, since there's Function.arguments
- // legacy accessor, which is still available in sloppy mode.
- DCHECK_EQ(LanguageMode::kStrict, shared->language_mode());
-
- // Massage the arguments to match the expected number of arguments.
- int expected_argument_count = shared->internal_formal_parameter_count();
- for (; arity > expected_argument_count; --arity) {
- node->RemoveInput(arity + 1);
- }
- for (; arity < expected_argument_count; ++arity) {
- node->InsertInput(graph()->zone(), arity + 2,
- jsgraph()->UndefinedConstant());
- }
-
- // Patch {node} to a direct call.
- node->InsertInput(graph()->zone(), arity + 2, new_target);
- node->InsertInput(graph()->zone(), arity + 3,
- jsgraph()->Constant(arity));
- NodeProperties::ChangeOp(node,
- common()->Call(Linkage::GetJSCallDescriptor(
- graph()->zone(), false, 1 + arity,
- flags | CallDescriptor::kCanUseRoots)));
- } else {
- // Patch {node} to an indirect call via the ArgumentsAdaptorTrampoline.
- Callable callable = CodeFactory::ArgumentAdaptor(isolate());
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, new_target);
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
- node->InsertInput(
- graph()->zone(), 4,
- jsgraph()->Constant(shared->internal_formal_parameter_count()));
- NodeProperties::ChangeOp(
- node,
- common()->Call(Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), 1 + arity, flags)));
- }
+ // Patch {node} to an indirect call via the ArgumentsAdaptorTrampoline.
+ Callable callable = CodeFactory::ArgumentAdaptor(isolate());
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, new_target);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(
+ graph()->zone(), 4,
+ jsgraph()->Constant(shared->internal_formal_parameter_count()));
+ NodeProperties::ChangeOp(
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(), 1 + arity, flags)));
#endif
} else if (shared->HasBuiltinId() &&
Builtins::IsCpp(shared->builtin_id())) {
@@ -1912,23 +1854,22 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
}
Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
- DCHECK_EQ(IrOpcode::kJSForInNext, node->opcode());
- ForInMode const mode = ForInModeOf(node->op());
- Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* cache_array = NodeProperties::GetValueInput(node, 1);
- Node* cache_type = NodeProperties::GetValueInput(node, 2);
- Node* index = NodeProperties::GetValueInput(node, 3);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
+ JSForInNextNode n(node);
+ Node* receiver = n.receiver();
+ Node* cache_array = n.cache_array();
+ Node* cache_type = n.cache_type();
+ Node* index = n.index();
+ Node* context = n.context();
+ FrameState frame_state = n.frame_state();
+ Effect effect = n.effect();
+ Control control = n.control();
// Load the map of the {receiver}.
Node* receiver_map = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
receiver, effect, control);
- switch (mode) {
+ switch (n.Parameters().mode()) {
case ForInMode::kUseEnumCacheKeys:
case ForInMode::kUseEnumCacheKeysAndIndices: {
// Ensure that the expected map still matches that of the {receiver}.
@@ -2025,16 +1966,15 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
}
Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
- DCHECK_EQ(IrOpcode::kJSForInPrepare, node->opcode());
- ForInMode const mode = ForInModeOf(node->op());
- Node* enumerator = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
+ JSForInPrepareNode n(node);
+ Node* enumerator = n.enumerator();
+ Effect effect = n.effect();
+ Control control = n.control();
Node* cache_type = enumerator;
Node* cache_array = nullptr;
Node* cache_length = nullptr;
- switch (mode) {
+ switch (n.Parameters().mode()) {
case ForInMode::kUseEnumCacheKeys:
case ForInMode::kUseEnumCacheKeysAndIndices: {
// Check that the {enumerator} is a Map.
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index cde4b96c87..ee025896c0 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -339,11 +339,7 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
// All parameters to JS calls go on the stack.
for (int i = 0; i < js_parameter_count; i++) {
-#ifdef V8_REVERSE_JSARGS
int spill_slot_index = -i - 1;
-#else
- int spill_slot_index = i - js_parameter_count;
-#endif
locations.AddParam(LinkageLocation::ForCallerFrameSlot(
spill_slot_index, MachineType::AnyTagged()));
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index d96b049d92..ad68a57957 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -276,6 +276,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final
stack_order_(stack_order),
debug_name_(debug_name) {}
+ CallDescriptor(const CallDescriptor&) = delete;
+ CallDescriptor& operator=(const CallDescriptor&) = delete;
+
// Returns the kind of this call.
Kind kind() const { return kind_; }
@@ -317,16 +320,12 @@ class V8_EXPORT_PRIVATE CallDescriptor final
}
int GetStackIndexFromSlot(int slot_index) const {
-#ifdef V8_REVERSE_JSARGS
switch (GetStackArgumentOrder()) {
case StackArgumentOrder::kDefault:
return -slot_index - 1;
case StackArgumentOrder::kJS:
return slot_index + static_cast<int>(StackParameterCount());
}
-#else
- return -slot_index - 1;
-#endif
}
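GetStackIndexFromSlot now dispatches on the stack argument order at runtime instead of on a compile-time #ifdef; a sketch of the two numbering schemes it distinguishes:

#include <cstdio>

enum class StackArgumentOrder { kDefault, kJS };

int GetStackIndexFromSlot(StackArgumentOrder order, int slot_index,
                          int stack_parameter_count) {
  switch (order) {
    case StackArgumentOrder::kDefault:
      return -slot_index - 1;  // slots count down from -1
    case StackArgumentOrder::kJS:
      return slot_index + stack_parameter_count;  // reversed JS arguments
  }
  return 0;  // unreachable
}

int main() {
  std::printf("%d\n", GetStackIndexFromSlot(StackArgumentOrder::kDefault, 0, 3));  // -1
  std::printf("%d\n", GetStackIndexFromSlot(StackArgumentOrder::kJS, 0, 3));       // 3
}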
// The total number of inputs to this call, which includes the target,
@@ -433,8 +432,6 @@ class V8_EXPORT_PRIVATE CallDescriptor final
const StackArgumentOrder stack_order_;
const char* const debug_name_;
const CFunctionInfo* c_function_info_ = nullptr;
-
- DISALLOW_COPY_AND_ASSIGN(CallDescriptor);
};
DEFINE_OPERATORS_FOR_FLAGS(CallDescriptor::Flags)
@@ -460,6 +457,8 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit Linkage(CallDescriptor* incoming) : incoming_(incoming) {}
+ Linkage(const Linkage&) = delete;
+ Linkage& operator=(const Linkage&) = delete;
static CallDescriptor* ComputeIncoming(Zone* zone,
OptimizedCompilationInfo* info);
@@ -558,8 +557,6 @@ class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
private:
CallDescriptor* const incoming_;
-
- DISALLOW_COPY_AND_ASSIGN(Linkage);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 6b7cb86cdd..e0f4eb7fe2 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -33,6 +33,8 @@ class V8_EXPORT_PRIVATE LoadElimination final
LoadElimination(Editor* editor, JSGraph* jsgraph, Zone* zone)
: AdvancedReducer(editor), node_states_(zone), jsgraph_(jsgraph) {}
~LoadElimination() final = default;
+ LoadElimination(const LoadElimination&) = delete;
+ LoadElimination& operator=(const LoadElimination&) = delete;
const char* reducer_name() const override { return "LoadElimination"; }
@@ -335,8 +337,6 @@ class V8_EXPORT_PRIVATE LoadElimination final
AbstractStateForEffectNodes node_states_;
JSGraph* const jsgraph_;
-
- DISALLOW_COPY_AND_ASSIGN(LoadElimination);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/machine-graph.h b/deps/v8/src/compiler/machine-graph.h
index 87175847f5..a110a4b7e8 100644
--- a/deps/v8/src/compiler/machine-graph.h
+++ b/deps/v8/src/compiler/machine-graph.h
@@ -25,6 +25,8 @@ class V8_EXPORT_PRIVATE MachineGraph : public NON_EXPORTED_BASE(ZoneObject) {
MachineGraph(Graph* graph, CommonOperatorBuilder* common,
MachineOperatorBuilder* machine)
: graph_(graph), common_(common), machine_(machine), cache_(zone()) {}
+ MachineGraph(const MachineGraph&) = delete;
+ MachineGraph& operator=(const MachineGraph&) = delete;
// Creates a Int32Constant node, usually canonicalized.
Node* Int32Constant(int32_t value);
@@ -84,8 +86,6 @@ class V8_EXPORT_PRIVATE MachineGraph : public NON_EXPORTED_BASE(ZoneObject) {
MachineOperatorBuilder* machine_;
CommonNodeCache cache_;
Node* Dead_ = nullptr;
-
- DISALLOW_COPY_AND_ASSIGN(MachineGraph);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 55f39d76e8..918caaf8fd 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -305,8 +305,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x ror 0 => x
if (m.IsFoldable()) { // K ror K => K (K stands for arbitrary constants)
- return ReplaceInt32(base::bits::RotateRight32(m.left().Value(),
- m.right().Value() & 31));
+ return ReplaceInt32(base::bits::RotateRight32(
+ m.left().ResolvedValue(), m.right().ResolvedValue() & 31));
}
break;
}
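The HasValue/Value to HasResolvedValue/ResolvedValue rename runs through every matcher-based fold in this reducer. A stripped-down model of the pattern, mirroring the Word32Ror case above (the Matcher type is invented for the sketch):

#include <cstdint>
#include <cstdio>
#include <optional>

struct Matcher {
  std::optional<uint32_t> value;
  bool HasResolvedValue() const { return value.has_value(); }
  uint32_t ResolvedValue() const { return *value; }
};

uint32_t RotateRight32(uint32_t v, uint32_t shift) {
  if (shift == 0) return v;
  return (v >> shift) | (v << (32 - shift));
}

// K ror K => K; otherwise the node is left untouched.
std::optional<uint32_t> TryFoldRor(const Matcher& left, const Matcher& right) {
  if (left.HasResolvedValue() && right.HasResolvedValue()) {
    return RotateRight32(left.ResolvedValue(), right.ResolvedValue() & 31);
  }
  return std::nullopt;
}

int main() {
  std::printf("%#x\n", TryFoldRor({0x80000001u}, {1u}).value_or(0));  // 0xc0000000
  std::printf("%#x\n", TryFoldRor({0x80000001u}, {}).value_or(0));    // 0 (not foldable)
}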
@@ -316,7 +316,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kWord64Equal: {
Int64BinopMatcher m(node);
if (m.IsFoldable()) { // K == K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() == m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() ==
+ m.right().ResolvedValue());
}
if (m.left().IsInt64Sub() && m.right().Is(0)) { // x - y == 0 => x == y
Int64BinopMatcher msub(m.left().node());
@@ -341,8 +342,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.right().Is(0)) return Replace(m.right().node()); // x * 0 => 0
if (m.right().Is(1)) return Replace(m.left().node()); // x * 1 => x
if (m.IsFoldable()) { // K * K => K (K stands for arbitrary constants)
- return ReplaceInt32(
- base::MulWithWraparound(m.left().Value(), m.right().Value()));
+ return ReplaceInt32(base::MulWithWraparound(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.right().Is(-1)) { // x * -1 => 0 - x
node->ReplaceInput(0, Int32Constant(0));
@@ -351,17 +352,18 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return Changed(node);
}
if (m.right().IsPowerOf2()) { // x * 2^n => x << n
- node->ReplaceInput(
- 1, Int32Constant(base::bits::WhichPowerOfTwo(m.right().Value())));
+ node->ReplaceInput(1, Int32Constant(base::bits::WhichPowerOfTwo(
+ m.right().ResolvedValue())));
NodeProperties::ChangeOp(node, machine()->Word32Shl());
return Changed(node).FollowedBy(ReduceWord32Shl(node));
}
// (x * Int32Constant(a)) * Int32Constant(b)) => x * Int32Constant(a * b)
- if (m.right().HasValue() && m.left().IsInt32Mul()) {
+ if (m.right().HasResolvedValue() && m.left().IsInt32Mul()) {
Int32BinopMatcher n(m.left().node());
- if (n.right().HasValue() && m.OwnsInput(m.left().node())) {
- node->ReplaceInput(1, Int32Constant(base::MulWithWraparound(
- m.right().Value(), n.right().Value())));
+ if (n.right().HasResolvedValue() && m.OwnsInput(m.left().node())) {
+ node->ReplaceInput(
+ 1, Int32Constant(base::MulWithWraparound(
+ m.right().ResolvedValue(), n.right().ResolvedValue())));
node->ReplaceInput(0, n.left().node());
return Changed(node);
}
@@ -396,7 +398,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kInt32LessThan: {
Int32BinopMatcher m(node);
if (m.IsFoldable()) { // K < K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() < m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <
+ m.right().ResolvedValue());
}
if (m.LeftEqualsRight()) return ReplaceBool(false); // x < x => false
if (m.left().IsWord32Or() && m.right().Is(0)) {
@@ -412,7 +415,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kInt32LessThanOrEqual: {
Int32BinopMatcher m(node);
if (m.IsFoldable()) { // K <= K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() <= m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <=
+ m.right().ResolvedValue());
}
if (m.LeftEqualsRight()) return ReplaceBool(true); // x <= x => true
return ReduceWord32Comparisons(node);
@@ -422,16 +426,17 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.left().Is(kMaxUInt32)) return ReplaceBool(false); // M < x => false
if (m.right().Is(0)) return ReplaceBool(false); // x < 0 => false
if (m.IsFoldable()) { // K < K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() < m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <
+ m.right().ResolvedValue());
}
if (m.LeftEqualsRight()) return ReplaceBool(false); // x < x => false
- if (m.left().IsWord32Sar() && m.right().HasValue()) {
+ if (m.left().IsWord32Sar() && m.right().HasResolvedValue()) {
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasResolvedValue()) {
// (x >> K) < C => x < (C << K)
// when C < (M >> K)
- const uint32_t c = m.right().Value();
- const uint32_t k = mleft.right().Value() & 0x1F;
+ const uint32_t c = m.right().ResolvedValue();
+ const uint32_t k = mleft.right().ResolvedValue() & 0x1F;
if (c < static_cast<uint32_t>(kMaxInt >> k)) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1, Uint32Constant(c << k));
@@ -447,7 +452,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.left().Is(0)) return ReplaceBool(true); // 0 <= x => true
if (m.right().Is(kMaxUInt32)) return ReplaceBool(true); // x <= M => true
if (m.IsFoldable()) { // K <= K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() <= m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <=
+ m.right().ResolvedValue());
}
if (m.LeftEqualsRight()) return ReplaceBool(true); // x <= x => true
return ReduceWord32Comparisons(node);
@@ -455,19 +461,22 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat32Sub: {
Float32BinopMatcher m(node);
if (allow_signalling_nan_ && m.right().Is(0) &&
- (std::copysign(1.0, m.right().Value()) > 0)) {
+ (std::copysign(1.0, m.right().ResolvedValue()) > 0)) {
return Replace(m.left().node()); // x - 0 => x
}
if (m.right().IsNaN()) { // x - NaN => NaN
// Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat32(m.right().Value() - m.right().Value());
+ return ReplaceFloat32(m.right().ResolvedValue() -
+ m.right().ResolvedValue());
}
if (m.left().IsNaN()) { // NaN - x => NaN
// Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat32(m.left().Value() - m.left().Value());
+ return ReplaceFloat32(m.left().ResolvedValue() -
+ m.left().ResolvedValue());
}
if (m.IsFoldable()) { // L - R => (L - R)
- return ReplaceFloat32(m.left().Value() - m.right().Value());
+ return ReplaceFloat32(m.left().ResolvedValue() -
+ m.right().ResolvedValue());
}
if (allow_signalling_nan_ && m.left().IsMinusZero()) {
// -0.0 - round_down(-0.0 - R) => round_up(R)
@@ -491,26 +500,30 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat64Add: {
Float64BinopMatcher m(node);
if (m.IsFoldable()) { // K + K => K (K stands for arbitrary constants)
- return ReplaceFloat64(m.left().Value() + m.right().Value());
+ return ReplaceFloat64(m.left().ResolvedValue() +
+ m.right().ResolvedValue());
}
break;
}
case IrOpcode::kFloat64Sub: {
Float64BinopMatcher m(node);
if (allow_signalling_nan_ && m.right().Is(0) &&
- (Double(m.right().Value()).Sign() > 0)) {
+ (Double(m.right().ResolvedValue()).Sign() > 0)) {
return Replace(m.left().node()); // x - 0 => x
}
if (m.right().IsNaN()) { // x - NaN => NaN
// Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.right().Value() - m.right().Value());
+ return ReplaceFloat64(m.right().ResolvedValue() -
+ m.right().ResolvedValue());
}
if (m.left().IsNaN()) { // NaN - x => NaN
// Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.left().Value() - m.left().Value());
+ return ReplaceFloat64(m.left().ResolvedValue() -
+ m.left().ResolvedValue());
}
if (m.IsFoldable()) { // L - R => (L - R)
- return ReplaceFloat64(m.left().Value() - m.right().Value());
+ return ReplaceFloat64(m.left().ResolvedValue() -
+ m.right().ResolvedValue());
}
if (allow_signalling_nan_ && m.left().IsMinusZero()) {
// -0.0 - round_down(-0.0 - R) => round_up(R)
@@ -543,10 +556,12 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
if (m.right().IsNaN()) { // x * NaN => NaN
// Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.right().Value() - m.right().Value());
+ return ReplaceFloat64(m.right().ResolvedValue() -
+ m.right().ResolvedValue());
}
if (m.IsFoldable()) { // K * K => K (K stands for arbitrary constants)
- return ReplaceFloat64(m.left().Value() * m.right().Value());
+ return ReplaceFloat64(m.left().ResolvedValue() *
+ m.right().ResolvedValue());
}
if (m.right().Is(2)) { // x * 2.0 => x + x
node->ReplaceInput(1, m.left().node());
@@ -562,15 +577,17 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
// TODO(ahaas): We could do x / 1.0 = x if we knew that x is not an sNaN.
if (m.right().IsNaN()) { // x / NaN => NaN
// Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.right().Value() - m.right().Value());
+ return ReplaceFloat64(m.right().ResolvedValue() -
+ m.right().ResolvedValue());
}
if (m.left().IsNaN()) { // NaN / x => NaN
// Do some calculation to make a signalling NaN quiet.
- return ReplaceFloat64(m.left().Value() - m.left().Value());
+ return ReplaceFloat64(m.left().ResolvedValue() -
+ m.left().ResolvedValue());
}
if (m.IsFoldable()) { // K / K => K (K stands for arbitrary constants)
return ReplaceFloat64(
- base::Divide(m.left().Value(), m.right().Value()));
+ base::Divide(m.left().ResolvedValue(), m.right().ResolvedValue()));
}
if (allow_signalling_nan_ && m.right().Is(-1)) { // x / -1.0 => -x
node->RemoveInput(1);
@@ -581,7 +598,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
// All reciprocals of non-denormal powers of two can be represented
        // exactly, so division by a power of two can be reduced to
        // multiplication by the reciprocal, with the same result.
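        // For example, x / 8.0 becomes x * 0.125: 1/8 is exactly
        // representable, so the product rounds identically to the quotient.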
- node->ReplaceInput(1, Float64Constant(1.0 / m.right().Value()));
+ node->ReplaceInput(1, Float64Constant(1.0 / m.right().ResolvedValue()));
NodeProperties::ChangeOp(node, machine()->Float64Mul());
return Changed(node);
}
@@ -599,38 +616,45 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return Replace(m.left().node());
}
if (m.IsFoldable()) { // K % K => K (K stands for arbitrary constants)
- return ReplaceFloat64(Modulo(m.left().Value(), m.right().Value()));
+ return ReplaceFloat64(
+ Modulo(m.left().ResolvedValue(), m.right().ResolvedValue()));
}
break;
}
case IrOpcode::kFloat64Acos: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::acos(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::acos(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Acosh: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::acosh(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::acosh(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Asin: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::asin(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::asin(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Asinh: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::asinh(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::asinh(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Atan: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::atan(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::atan(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Atanh: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::atanh(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::atanh(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Atan2: {
@@ -642,61 +666,70 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return Replace(m.left().node());
}
if (m.IsFoldable()) {
- return ReplaceFloat64(
- base::ieee754::atan2(m.left().Value(), m.right().Value()));
+ return ReplaceFloat64(base::ieee754::atan2(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
break;
}
case IrOpcode::kFloat64Cbrt: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::cbrt(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::cbrt(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Cos: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::cos(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::cos(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Cosh: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::cosh(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::cosh(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Exp: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::exp(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::exp(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Expm1: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::expm1(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::expm1(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Log: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::log(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::log(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Log1p: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::log1p(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::log1p(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Log10: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::log10(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::log10(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Log2: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::log2(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::log2(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Pow: {
Float64BinopMatcher m(node);
if (m.IsFoldable()) {
- return ReplaceFloat64(
- base::ieee754::pow(m.left().Value(), m.right().Value()));
+ return ReplaceFloat64(base::ieee754::pow(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
} else if (m.right().Is(0.0)) { // x ** +-0.0 => 1.0
return ReplaceFloat64(1.0);
} else if (m.right().Is(-2.0)) { // x ** -2.0 => 1 / (x * x)
@@ -722,87 +755,99 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Sin: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::sin(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::sin(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Sinh: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::sinh(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::sinh(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Tan: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::tan(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::tan(m.ResolvedValue()));
break;
}
case IrOpcode::kFloat64Tanh: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(base::ieee754::tanh(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(base::ieee754::tanh(m.ResolvedValue()));
break;
}
case IrOpcode::kChangeFloat32ToFloat64: {
Float32Matcher m(node->InputAt(0));
- if (m.HasValue()) {
- if (!allow_signalling_nan_ && std::isnan(m.Value())) {
+ if (m.HasResolvedValue()) {
+ if (!allow_signalling_nan_ && std::isnan(m.ResolvedValue())) {
          // Do some calculation to guarantee the value is a quiet NaN.
- return ReplaceFloat64(m.Value() + m.Value());
+ return ReplaceFloat64(m.ResolvedValue() + m.ResolvedValue());
}
- return ReplaceFloat64(m.Value());
+ return ReplaceFloat64(m.ResolvedValue());
}
break;
}
case IrOpcode::kChangeFloat64ToInt32: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt32(FastD2IChecked(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceInt32(FastD2IChecked(m.ResolvedValue()));
if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
case IrOpcode::kChangeFloat64ToInt64: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt64(static_cast<int64_t>(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceInt64(static_cast<int64_t>(m.ResolvedValue()));
if (m.IsChangeInt64ToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
case IrOpcode::kChangeFloat64ToUint32: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt32(FastD2UI(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceInt32(FastD2UI(m.ResolvedValue()));
if (m.IsChangeUint32ToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
case IrOpcode::kChangeInt32ToFloat64: {
Int32Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(FastI2D(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(FastI2D(m.ResolvedValue()));
break;
}
case IrOpcode::kBitcastWord32ToWord64: {
Int32Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt64(m.Value());
+ if (m.HasResolvedValue()) return ReplaceInt64(m.ResolvedValue());
break;
}
case IrOpcode::kChangeInt32ToInt64: {
Int32Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt64(m.Value());
+ if (m.HasResolvedValue()) return ReplaceInt64(m.ResolvedValue());
break;
}
case IrOpcode::kChangeInt64ToFloat64: {
Int64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(static_cast<double>(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(static_cast<double>(m.ResolvedValue()));
if (m.IsChangeFloat64ToInt64()) return Replace(m.node()->InputAt(0));
break;
}
case IrOpcode::kChangeUint32ToFloat64: {
Uint32Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(FastUI2D(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceFloat64(FastUI2D(m.ResolvedValue()));
break;
}
case IrOpcode::kChangeUint32ToUint64: {
Uint32Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt64(static_cast<uint64_t>(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceInt64(static_cast<uint64_t>(m.ResolvedValue()));
break;
}
case IrOpcode::kTruncateFloat64ToWord32: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceInt32(DoubleToInt32(m.ResolvedValue()));
if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
return NoChange();
}
@@ -810,12 +855,13 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReduceTruncateInt64ToInt32(node);
case IrOpcode::kTruncateFloat64ToFloat32: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) {
- if (!allow_signalling_nan_ && std::isnan(m.Value())) {
+ if (m.HasResolvedValue()) {
+ if (!allow_signalling_nan_ && std::isnan(m.ResolvedValue())) {
          // Do some calculation to guarantee the value is a quiet NaN.
- return ReplaceFloat32(DoubleToFloat32(m.Value() + m.Value()));
+ return ReplaceFloat32(
+ DoubleToFloat32(m.ResolvedValue() + m.ResolvedValue()));
}
- return ReplaceFloat32(DoubleToFloat32(m.Value()));
+ return ReplaceFloat32(DoubleToFloat32(m.ResolvedValue()));
}
if (allow_signalling_nan_ && m.IsChangeFloat32ToFloat64())
return Replace(m.node()->InputAt(0));
@@ -823,8 +869,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kRoundFloat64ToInt32: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) {
- return ReplaceInt32(DoubleToInt32(m.Value()));
+ if (m.HasResolvedValue()) {
+ return ReplaceInt32(DoubleToInt32(m.ResolvedValue()));
}
if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
break;
@@ -860,28 +906,32 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kInt64LessThan: {
Int64BinopMatcher m(node);
if (m.IsFoldable()) { // K < K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() < m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <
+ m.right().ResolvedValue());
}
return ReduceWord64Comparisons(node);
}
case IrOpcode::kInt64LessThanOrEqual: {
Int64BinopMatcher m(node);
if (m.IsFoldable()) { // K <= K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() <= m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <=
+ m.right().ResolvedValue());
}
return ReduceWord64Comparisons(node);
}
case IrOpcode::kUint64LessThan: {
Uint64BinopMatcher m(node);
if (m.IsFoldable()) { // K < K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() < m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <
+ m.right().ResolvedValue());
}
return ReduceWord64Comparisons(node);
}
case IrOpcode::kUint64LessThanOrEqual: {
Uint64BinopMatcher m(node);
if (m.IsFoldable()) { // K <= K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() <= m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <=
+ m.right().ResolvedValue());
}
return ReduceWord64Comparisons(node);
}
@@ -893,7 +943,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
Reduction MachineOperatorReducer::ReduceTruncateInt64ToInt32(Node* node) {
Int64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceInt32(static_cast<int32_t>(m.ResolvedValue()));
if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
return NoChange();
}
@@ -903,8 +954,8 @@ Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x + 0 => x
if (m.IsFoldable()) { // K + K => K (K stands for arbitrary constants)
- return ReplaceInt32(
- base::AddWithWraparound(m.left().Value(), m.right().Value()));
+ return ReplaceInt32(base::AddWithWraparound(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.left().IsInt32Sub()) {
Int32BinopMatcher mleft(m.left().node());
@@ -924,11 +975,12 @@ Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
}
}
  // (x + Int32Constant(a)) + Int32Constant(b) => x + Int32Constant(a + b)
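  // For example, (x + 1) + 2 folds to x + 3 (with wraparound on overflow).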
- if (m.right().HasValue() && m.left().IsInt32Add()) {
+ if (m.right().HasResolvedValue() && m.left().IsInt32Add()) {
Int32BinopMatcher n(m.left().node());
- if (n.right().HasValue() && m.OwnsInput(m.left().node())) {
- node->ReplaceInput(1, Int32Constant(base::AddWithWraparound(
- m.right().Value(), n.right().Value())));
+ if (n.right().HasResolvedValue() && m.OwnsInput(m.left().node())) {
+ node->ReplaceInput(
+ 1, Int32Constant(base::AddWithWraparound(m.right().ResolvedValue(),
+ n.right().ResolvedValue())));
node->ReplaceInput(0, n.left().node());
return Changed(node);
}
@@ -942,15 +994,16 @@ Reduction MachineOperatorReducer::ReduceInt64Add(Node* node) {
Int64BinopMatcher m(node);
  if (m.right().Is(0)) return Replace(m.left().node()); // x + 0 => x
if (m.IsFoldable()) {
- return ReplaceInt64(
- base::AddWithWraparound(m.left().Value(), m.right().Value()));
+ return ReplaceInt64(base::AddWithWraparound(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
  // (x + Int64Constant(a)) + Int64Constant(b) => x + Int64Constant(a + b)
- if (m.right().HasValue() && m.left().IsInt64Add()) {
+ if (m.right().HasResolvedValue() && m.left().IsInt64Add()) {
Int64BinopMatcher n(m.left().node());
- if (n.right().HasValue() && m.OwnsInput(m.left().node())) {
- node->ReplaceInput(1, Int64Constant(base::AddWithWraparound(
- m.right().Value(), n.right().Value())));
+ if (n.right().HasResolvedValue() && m.OwnsInput(m.left().node())) {
+ node->ReplaceInput(
+ 1, Int64Constant(base::AddWithWraparound(m.right().ResolvedValue(),
+ n.right().ResolvedValue())));
node->ReplaceInput(0, n.left().node());
return Changed(node);
}
@@ -963,13 +1016,14 @@ Reduction MachineOperatorReducer::ReduceInt32Sub(Node* node) {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x - 0 => x
if (m.IsFoldable()) { // K - K => K (K stands for arbitrary constants)
- return ReplaceInt32(
- base::SubWithWraparound(m.left().Value(), m.right().Value()));
+ return ReplaceInt32(base::SubWithWraparound(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.LeftEqualsRight()) return ReplaceInt32(0); // x - x => 0
- if (m.right().HasValue()) { // x - K => x + -K
+ if (m.right().HasResolvedValue()) { // x - K => x + -K
node->ReplaceInput(
- 1, Int32Constant(base::NegateWithWraparound(m.right().Value())));
+ 1,
+ Int32Constant(base::NegateWithWraparound(m.right().ResolvedValue())));
NodeProperties::ChangeOp(node, machine()->Int32Add());
return Changed(node).FollowedBy(ReduceInt32Add(node));
}
@@ -981,13 +1035,14 @@ Reduction MachineOperatorReducer::ReduceInt64Sub(Node* node) {
Int64BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x - 0 => x
if (m.IsFoldable()) { // K - K => K (K stands for arbitrary constants)
- return ReplaceInt64(
- base::SubWithWraparound(m.left().Value(), m.right().Value()));
+ return ReplaceInt64(base::SubWithWraparound(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.LeftEqualsRight()) return Replace(Int64Constant(0)); // x - x => 0
- if (m.right().HasValue()) { // x - K => x + -K
+ if (m.right().HasResolvedValue()) { // x - K => x + -K
node->ReplaceInput(
- 1, Int64Constant(base::NegateWithWraparound(m.right().Value())));
+ 1,
+ Int64Constant(base::NegateWithWraparound(m.right().ResolvedValue())));
NodeProperties::ChangeOp(node, machine()->Int64Add());
return Changed(node).FollowedBy(ReduceInt64Add(node));
}
@@ -1000,8 +1055,8 @@ Reduction MachineOperatorReducer::ReduceInt64Mul(Node* node) {
if (m.right().Is(0)) return Replace(m.right().node()); // x * 0 => 0
if (m.right().Is(1)) return Replace(m.left().node()); // x * 1 => x
if (m.IsFoldable()) { // K * K => K (K stands for arbitrary constants)
- return ReplaceInt64(
- base::MulWithWraparound(m.left().Value(), m.right().Value()));
+ return ReplaceInt64(base::MulWithWraparound(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.right().Is(-1)) { // x * -1 => 0 - x
node->ReplaceInput(0, Int64Constant(0));
@@ -1011,16 +1066,18 @@ Reduction MachineOperatorReducer::ReduceInt64Mul(Node* node) {
}
if (m.right().IsPowerOf2()) { // x * 2^n => x << n
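    // e.g. x * 8 => x << 3.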
node->ReplaceInput(
- 1, Int64Constant(base::bits::WhichPowerOfTwo(m.right().Value())));
+ 1,
+ Int64Constant(base::bits::WhichPowerOfTwo(m.right().ResolvedValue())));
NodeProperties::ChangeOp(node, machine()->Word64Shl());
return Changed(node).FollowedBy(ReduceWord64Shl(node));
}
  // (x * Int64Constant(a)) * Int64Constant(b) => x * Int64Constant(a * b)
- if (m.right().HasValue() && m.left().IsInt64Mul()) {
+ if (m.right().HasResolvedValue() && m.left().IsInt64Mul()) {
Int64BinopMatcher n(m.left().node());
- if (n.right().HasValue() && m.OwnsInput(m.left().node())) {
- node->ReplaceInput(1, Int64Constant(base::MulWithWraparound(
- m.right().Value(), n.right().Value())));
+ if (n.right().HasResolvedValue() && m.OwnsInput(m.left().node())) {
+ node->ReplaceInput(
+ 1, Int64Constant(base::MulWithWraparound(m.right().ResolvedValue(),
+ n.right().ResolvedValue())));
node->ReplaceInput(0, n.left().node());
return Changed(node);
}
@@ -1034,8 +1091,8 @@ Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
if (m.right().Is(0)) return Replace(m.right().node()); // x / 0 => 0
if (m.right().Is(1)) return Replace(m.left().node()); // x / 1 => x
if (m.IsFoldable()) { // K / K => K (K stands for arbitrary constants)
- return ReplaceInt32(
- base::bits::SignedDiv32(m.left().Value(), m.right().Value()));
+ return ReplaceInt32(base::bits::SignedDiv32(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.LeftEqualsRight()) { // x / x => x != 0
Node* const zero = Int32Constant(0);
@@ -1048,8 +1105,8 @@ Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
NodeProperties::ChangeOp(node, machine()->Int32Sub());
return Changed(node);
}
- if (m.right().HasValue()) {
- int32_t const divisor = m.right().Value();
+ if (m.right().HasResolvedValue()) {
+ int32_t const divisor = m.right().ResolvedValue();
Node* const dividend = m.left().node();
Node* quotient = dividend;
if (base::bits::IsPowerOfTwo(Abs(divisor))) {
@@ -1081,19 +1138,19 @@ Reduction MachineOperatorReducer::ReduceUint32Div(Node* node) {
if (m.right().Is(0)) return Replace(m.right().node()); // x / 0 => 0
if (m.right().Is(1)) return Replace(m.left().node()); // x / 1 => x
if (m.IsFoldable()) { // K / K => K (K stands for arbitrary constants)
- return ReplaceUint32(
- base::bits::UnsignedDiv32(m.left().Value(), m.right().Value()));
+ return ReplaceUint32(base::bits::UnsignedDiv32(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.LeftEqualsRight()) { // x / x => x != 0
Node* const zero = Int32Constant(0);
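    // Equal(Equal(x, 0), 0) encodes x != 0 as 0 or 1, which matches x / x
    // under the x / 0 => 0 rule above.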
return Replace(Word32Equal(Word32Equal(m.left().node(), zero), zero));
}
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
Node* const dividend = m.left().node();
- uint32_t const divisor = m.right().Value();
+ uint32_t const divisor = m.right().ResolvedValue();
if (base::bits::IsPowerOfTwo(divisor)) { // x / 2^n => x >> n
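      // e.g. for unsigned x, x / 16 => x >>> 4.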
- node->ReplaceInput(
- 1, Uint32Constant(base::bits::WhichPowerOfTwo(m.right().Value())));
+ node->ReplaceInput(1, Uint32Constant(base::bits::WhichPowerOfTwo(
+ m.right().ResolvedValue())));
node->TrimInputCount(2);
NodeProperties::ChangeOp(node, machine()->Word32Shr());
return Changed(node);
@@ -1112,12 +1169,12 @@ Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) {
if (m.right().Is(-1)) return ReplaceInt32(0); // x % -1 => 0
if (m.LeftEqualsRight()) return ReplaceInt32(0); // x % x => 0
if (m.IsFoldable()) { // K % K => K (K stands for arbitrary constants)
- return ReplaceInt32(
- base::bits::SignedMod32(m.left().Value(), m.right().Value()));
+ return ReplaceInt32(base::bits::SignedMod32(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
Node* const dividend = m.left().node();
- uint32_t const divisor = Abs(m.right().Value());
+ uint32_t const divisor = Abs(m.right().ResolvedValue());
if (base::bits::IsPowerOfTwo(divisor)) {
uint32_t const mask = divisor - 1;
Node* const zero = Int32Constant(0);
@@ -1147,14 +1204,14 @@ Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) {
if (m.right().Is(1)) return ReplaceUint32(0); // x % 1 => 0
if (m.LeftEqualsRight()) return ReplaceInt32(0); // x % x => 0
if (m.IsFoldable()) { // K % K => K (K stands for arbitrary constants)
- return ReplaceUint32(
- base::bits::UnsignedMod32(m.left().Value(), m.right().Value()));
+ return ReplaceUint32(base::bits::UnsignedMod32(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
Node* const dividend = m.left().node();
- uint32_t const divisor = m.right().Value();
+ uint32_t const divisor = m.right().ResolvedValue();
if (base::bits::IsPowerOfTwo(divisor)) { // x % 2^n => x & 2^n-1
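      // e.g. x % 32 => x & 31.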
- node->ReplaceInput(1, Uint32Constant(m.right().Value() - 1));
+ node->ReplaceInput(1, Uint32Constant(m.right().ResolvedValue() - 1));
node->TrimInputCount(2);
NodeProperties::ChangeOp(node, machine()->Word32And());
} else {
@@ -1187,10 +1244,11 @@ Reduction MachineOperatorReducer::ReduceStore(Node* node) {
switch (value->opcode()) {
case IrOpcode::kWord32And: {
Uint32BinopMatcher m(value);
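      // A mask that keeps every bit the narrow store writes is redundant:
      // e.g. storing (x & 0xFF) as kWord8 stores the same byte as storing x.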
- if (m.right().HasValue() && ((rep == MachineRepresentation::kWord8 &&
- (m.right().Value() & 0xFF) == 0xFF) ||
- (rep == MachineRepresentation::kWord16 &&
- (m.right().Value() & 0xFFFF) == 0xFFFF))) {
+ if (m.right().HasResolvedValue() &&
+ ((rep == MachineRepresentation::kWord8 &&
+ (m.right().ResolvedValue() & 0xFF) == 0xFF) ||
+ (rep == MachineRepresentation::kWord16 &&
+ (m.right().ResolvedValue() & 0xFFFF) == 0xFFFF))) {
node->ReplaceInput(value_input, m.left().node());
return Changed(node);
}
@@ -1203,7 +1261,7 @@ Reduction MachineOperatorReducer::ReduceStore(Node* node) {
(rep == MachineRepresentation::kWord16 &&
m.right().IsInRange(1, 16)))) {
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().Is(m.right().Value())) {
+ if (mleft.right().Is(m.right().ResolvedValue())) {
node->ReplaceInput(value_input, mleft.left().node());
return Changed(node);
}
@@ -1223,8 +1281,8 @@ Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
Int32BinopMatcher m(node);
if (m.IsFoldable()) {
int32_t val;
- bool ovf = base::bits::SignedAddOverflow32(m.left().Value(),
- m.right().Value(), &val);
+ bool ovf = base::bits::SignedAddOverflow32(
+ m.left().ResolvedValue(), m.right().ResolvedValue(), &val);
return ReplaceInt32(index == 0 ? val : ovf);
}
if (m.right().Is(0)) {
@@ -1237,8 +1295,8 @@ Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
Int32BinopMatcher m(node);
if (m.IsFoldable()) {
int32_t val;
- bool ovf = base::bits::SignedSubOverflow32(m.left().Value(),
- m.right().Value(), &val);
+ bool ovf = base::bits::SignedSubOverflow32(
+ m.left().ResolvedValue(), m.right().ResolvedValue(), &val);
return ReplaceInt32(index == 0 ? val : ovf);
}
if (m.right().Is(0)) {
@@ -1251,8 +1309,8 @@ Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
Int32BinopMatcher m(node);
if (m.IsFoldable()) {
int32_t val;
- bool ovf = base::bits::SignedMulOverflow32(m.left().Value(),
- m.right().Value(), &val);
+ bool ovf = base::bits::SignedMulOverflow32(
+ m.left().ResolvedValue(), m.right().ResolvedValue(), &val);
return ReplaceInt32(index == 0 ? val : ovf);
}
if (m.right().Is(0)) {
@@ -1280,7 +1338,8 @@ Reduction MachineOperatorReducer::ReduceWord32Comparisons(Node* node) {
m.right().op() == machine()->Word32SarShiftOutZeros()) {
Int32BinopMatcher mleft(m.left().node());
Int32BinopMatcher mright(m.right().node());
- if (mleft.right().HasValue() && mright.right().Is(mleft.right().Value())) {
+ if (mleft.right().HasResolvedValue() &&
+ mright.right().Is(mleft.right().ResolvedValue())) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1, mright.left().node());
return Changed(node);
@@ -1331,7 +1390,8 @@ Reduction MachineOperatorReducer::ReduceWord64Comparisons(Node* node) {
m.right().op() == machine()->Word64SarShiftOutZeros()) {
Int64BinopMatcher mleft(m.left().node());
Int64BinopMatcher mright(m.right().node());
- if (mleft.right().HasValue() && mright.right().Is(mleft.right().Value())) {
+ if (mleft.right().HasResolvedValue() &&
+ mright.right().Is(mleft.right().ResolvedValue())) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1, mright.left().node());
return Changed(node);
@@ -1365,8 +1425,8 @@ Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x << 0 => x
if (m.IsFoldable()) { // K << K => K (K stands for arbitrary constants)
- return ReplaceInt32(
- base::ShlWithWraparound(m.left().Value(), m.right().Value()));
+ return ReplaceInt32(base::ShlWithWraparound(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.right().IsInRange(1, 31)) {
if (m.left().IsWord32Sar() || m.left().IsWord32Shr()) {
@@ -1381,8 +1441,8 @@ Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
if (mleft.op() == machine()->Word32SarShiftOutZeros() &&
mleft.right().IsInRange(1, 31)) {
Node* x = mleft.left().node();
- int k = mleft.right().Value();
- int l = m.right().Value();
+ int k = mleft.right().ResolvedValue();
+ int l = m.right().ResolvedValue();
if (k == l) {
return Replace(x);
} else if (k > l) {
@@ -1400,11 +1460,11 @@ Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
// (x >>> K) << K => x & ~(2^K - 1)
// (x >> K) << K => x & ~(2^K - 1)
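      // e.g. (x >>> 4) << 4 => x & 0xFFFFFFF0.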
- if (mleft.right().Is(m.right().Value())) {
+ if (mleft.right().Is(m.right().ResolvedValue())) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1,
Uint32Constant(std::numeric_limits<uint32_t>::max()
- << m.right().Value()));
+ << m.right().ResolvedValue()));
NodeProperties::ChangeOp(node, machine()->Word32And());
return Changed(node).FollowedBy(ReduceWord32And(node));
}
@@ -1418,8 +1478,8 @@ Reduction MachineOperatorReducer::ReduceWord64Shl(Node* node) {
Int64BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x << 0 => x
if (m.IsFoldable()) { // K << K => K (K stands for arbitrary constants)
- return ReplaceInt64(
- base::ShlWithWraparound(m.left().Value(), m.right().Value()));
+ return ReplaceInt64(base::ShlWithWraparound(m.left().ResolvedValue(),
+ m.right().ResolvedValue()));
}
if (m.right().IsInRange(1, 63) &&
(m.left().IsWord64Sar() || m.left().IsWord64Shr())) {
@@ -1434,8 +1494,8 @@ Reduction MachineOperatorReducer::ReduceWord64Shl(Node* node) {
if (mleft.op() == machine()->Word64SarShiftOutZeros() &&
mleft.right().IsInRange(1, 63)) {
Node* x = mleft.left().node();
- int64_t k = mleft.right().Value();
- int64_t l = m.right().Value();
+ int64_t k = mleft.right().ResolvedValue();
+ int64_t l = m.right().ResolvedValue();
if (k == l) {
return Replace(x);
} else if (k > l) {
@@ -1453,10 +1513,10 @@ Reduction MachineOperatorReducer::ReduceWord64Shl(Node* node) {
// (x >>> K) << K => x & ~(2^K - 1)
// (x >> K) << K => x & ~(2^K - 1)
- if (mleft.right().Is(m.right().Value())) {
+ if (mleft.right().Is(m.right().ResolvedValue())) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1, Uint64Constant(std::numeric_limits<uint64_t>::max()
- << m.right().Value()));
+ << m.right().ResolvedValue()));
NodeProperties::ChangeOp(node, machine()->Word64And());
return Changed(node).FollowedBy(ReduceWord64And(node));
}
@@ -1468,13 +1528,14 @@ Reduction MachineOperatorReducer::ReduceWord32Shr(Node* node) {
Uint32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x >>> 0 => x
if (m.IsFoldable()) { // K >>> K => K (K stands for arbitrary constants)
- return ReplaceInt32(m.left().Value() >> (m.right().Value() & 31));
+ return ReplaceInt32(m.left().ResolvedValue() >>
+ (m.right().ResolvedValue() & 31));
}
- if (m.left().IsWord32And() && m.right().HasValue()) {
+ if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
Uint32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
- uint32_t shift = m.right().Value() & 31;
- uint32_t mask = mleft.right().Value();
+ if (mleft.right().HasResolvedValue()) {
+ uint32_t shift = m.right().ResolvedValue() & 31;
+ uint32_t mask = mleft.right().ResolvedValue();
if ((mask >> shift) == 0) {
// (m >>> s) == 0 implies ((x & m) >>> s) == 0
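        // e.g. ((x & 0xFF) >>> 8) is always 0.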
return ReplaceInt32(0);
@@ -1489,7 +1550,8 @@ Reduction MachineOperatorReducer::ReduceWord64Shr(Node* node) {
Uint64BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x >>> 0 => x
  if (m.IsFoldable()) {  // K >>> K => K (K stands for arbitrary constants)
- return ReplaceInt64(m.left().Value() >> (m.right().Value() & 63));
+ return ReplaceInt64(m.left().ResolvedValue() >>
+ (m.right().ResolvedValue() & 63));
}
return NoChange();
}
@@ -1498,7 +1560,8 @@ Reduction MachineOperatorReducer::ReduceWord32Sar(Node* node) {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x >> 0 => x
if (m.IsFoldable()) { // K >> K => K (K stands for arbitrary constants)
- return ReplaceInt32(m.left().Value() >> (m.right().Value() & 31));
+ return ReplaceInt32(m.left().ResolvedValue() >>
+ (m.right().ResolvedValue() & 31));
}
if (m.left().IsWord32Shl()) {
Int32BinopMatcher mleft(m.left().node());
@@ -1532,7 +1595,8 @@ Reduction MachineOperatorReducer::ReduceWord64Sar(Node* node) {
Int64BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x >> 0 => x
if (m.IsFoldable()) {
- return ReplaceInt64(m.left().Value() >> (m.right().Value() & 63));
+ return ReplaceInt64(m.left().ResolvedValue() >>
+ (m.right().ResolvedValue() & 63));
}
return NoChange();
}
@@ -1549,33 +1613,34 @@ Reduction MachineOperatorReducer::ReduceWordNAnd(Node* node) {
return Replace(m.left().node());
}
if (m.IsFoldable()) { // K & K => K (K stands for arbitrary constants)
- return a.ReplaceIntN(m.left().Value() & m.right().Value());
+ return a.ReplaceIntN(m.left().ResolvedValue() & m.right().ResolvedValue());
}
if (m.LeftEqualsRight()) return Replace(m.left().node()); // x & x => x
- if (A::IsWordNAnd(m.left()) && m.right().HasValue()) {
+ if (A::IsWordNAnd(m.left()) && m.right().HasResolvedValue()) {
typename A::IntNBinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) { // (x & K) & K => x & K
+ if (mleft.right().HasResolvedValue()) { // (x & K) & K => x & K
node->ReplaceInput(0, mleft.left().node());
- node->ReplaceInput(
- 1, a.IntNConstant(m.right().Value() & mleft.right().Value()));
+ node->ReplaceInput(1, a.IntNConstant(m.right().ResolvedValue() &
+ mleft.right().ResolvedValue()));
return Changed(node).FollowedBy(a.ReduceWordNAnd(node));
}
}
if (m.right().IsNegativePowerOf2()) {
- typename A::intN_t const mask = m.right().Value();
+ typename A::intN_t const mask = m.right().ResolvedValue();
typename A::intN_t const neg_mask = base::NegateWithWraparound(mask);
if (A::IsWordNShl(m.left())) {
typename A::UintNBinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() &&
- (mleft.right().Value() & (A::WORD_SIZE - 1)) >=
+ if (mleft.right().HasResolvedValue() &&
+ (mleft.right().ResolvedValue() & (A::WORD_SIZE - 1)) >=
base::bits::CountTrailingZeros(mask)) {
// (x << L) & (-1 << K) => x << L iff L >= K
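        // e.g. (x << 8) & (-1 << 4) => x << 8: the low 8 zero bits already
        // cover the 4 that the mask clears.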
return Replace(mleft.node());
}
} else if (A::IsIntNAdd(m.left())) {
typename A::IntNBinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() &&
- (mleft.right().Value() & mask) == mleft.right().Value()) {
+ if (mleft.right().HasResolvedValue() &&
+ (mleft.right().ResolvedValue() & mask) ==
+ mleft.right().ResolvedValue()) {
// (x + (K << L)) & (-1 << L) => (x & (-1 << L)) + (K << L)
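        // e.g. (x + 16) & -8 => (x & -8) + 16, since 16 is a multiple of 8.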
node->ReplaceInput(0,
a.WordNAnd(mleft.left().node(), m.right().node()));
@@ -1659,9 +1724,9 @@ struct BitfieldCheck {
Uint32BinopMatcher eq(node);
if (eq.left().IsWord32And()) {
Uint32BinopMatcher mand(eq.left().node());
- if (mand.right().HasValue() && eq.right().HasValue()) {
- BitfieldCheck result{mand.left().node(), mand.right().Value(),
- eq.right().Value(), false};
+ if (mand.right().HasResolvedValue() && eq.right().HasResolvedValue()) {
+ BitfieldCheck result{mand.left().node(), mand.right().ResolvedValue(),
+ eq.right().ResolvedValue(), false};
if (mand.left().IsTruncateInt64ToInt32()) {
result.truncate_from_64_bit = true;
result.source =
@@ -1703,12 +1768,14 @@ struct BitfieldCheck {
// Look for the pattern `(val >> shift) & 1`. The shift may be omitted.
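    // e.g. (x >>> 3) & 1 turns into the check (x & 8) == 8.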
if (WordNAdapter::IsWordNAnd(NodeMatcher(node))) {
typename WordNAdapter::IntNBinopMatcher mand(node);
- if (mand.right().HasValue() && mand.right().Value() == 1) {
+ if (mand.right().HasResolvedValue() &&
+ mand.right().ResolvedValue() == 1) {
if (WordNAdapter::IsWordNShr(mand.left()) ||
WordNAdapter::IsWordNSar(mand.left())) {
typename WordNAdapter::UintNBinopMatcher shift(mand.left().node());
- if (shift.right().HasValue() && shift.right().Value() < 32u) {
- uint32_t mask = 1 << shift.right().Value();
+ if (shift.right().HasResolvedValue() &&
+ shift.right().ResolvedValue() < 32u) {
+ uint32_t mask = 1 << shift.right().ResolvedValue();
return BitfieldCheck{shift.left().node(), mask, mask,
WordNAdapter::WORD_SIZE == 64};
}
@@ -1782,9 +1849,10 @@ Reduction MachineOperatorReducer::TryMatchWord32Ror(Node* node) {
Int32BinopMatcher mshr(shr);
if (mshl.left().node() != mshr.left().node()) return NoChange();
- if (mshl.right().HasValue() && mshr.right().HasValue()) {
+ if (mshl.right().HasResolvedValue() && mshr.right().HasResolvedValue()) {
// Case where y is a constant.
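      // e.g. (x << 7) | (x >>> 25) qualifies, since 7 + 25 == 32, and can
      // become a rotate by 7.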
- if (mshl.right().Value() + mshr.right().Value() != 32) return NoChange();
+ if (mshl.right().ResolvedValue() + mshr.right().ResolvedValue() != 32)
+ return NoChange();
} else {
Node* sub = nullptr;
Node* y = nullptr;
@@ -1817,17 +1885,17 @@ Reduction MachineOperatorReducer::ReduceWordNOr(Node* node) {
if (m.right().Is(0)) return Replace(m.left().node()); // x | 0 => x
if (m.right().Is(-1)) return Replace(m.right().node()); // x | -1 => -1
if (m.IsFoldable()) { // K | K => K (K stands for arbitrary constants)
- return a.ReplaceIntN(m.left().Value() | m.right().Value());
+ return a.ReplaceIntN(m.left().ResolvedValue() | m.right().ResolvedValue());
}
if (m.LeftEqualsRight()) return Replace(m.left().node()); // x | x => x
// (x & K1) | K2 => x | K2 if K2 has ones for every zero bit in K1.
// This case can be constructed by UpdateWord and UpdateWord32 in CSA.
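  // e.g. (x & 0xFFFFFF00) | 0xFF => x | 0xFF: 0xFF has ones for every zero
  // bit of 0xFFFFFF00.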
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
if (A::IsWordNAnd(m.left())) {
typename A::IntNBinopMatcher mand(m.left().node());
- if (mand.right().HasValue()) {
- if ((m.right().Value() | mand.right().Value()) == -1) {
+ if (mand.right().HasResolvedValue()) {
+ if ((m.right().ResolvedValue() | mand.right().ResolvedValue()) == -1) {
node->ReplaceInput(0, mand.left().node());
return Changed(node);
}
@@ -1856,7 +1924,7 @@ Reduction MachineOperatorReducer::ReduceWordNXor(Node* node) {
typename A::IntNBinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x ^ 0 => x
if (m.IsFoldable()) { // K ^ K => K (K stands for arbitrary constants)
- return a.ReplaceIntN(m.left().Value() ^ m.right().Value());
+ return a.ReplaceIntN(m.left().ResolvedValue() ^ m.right().ResolvedValue());
}
if (m.LeftEqualsRight()) return ReplaceInt32(0); // x ^ x => 0
if (A::IsWordNXor(m.left()) && m.right().Is(-1)) {
@@ -1882,7 +1950,7 @@ Reduction MachineOperatorReducer::ReduceWord64Xor(Node* node) {
Reduction MachineOperatorReducer::ReduceWord32Equal(Node* node) {
Int32BinopMatcher m(node);
if (m.IsFoldable()) { // K == K => K (K stands for arbitrary constants)
- return ReplaceBool(m.left().Value() == m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() == m.right().ResolvedValue());
}
if (m.left().IsInt32Sub() && m.right().Is(0)) { // x - y == 0 => x == y
Int32BinopMatcher msub(m.left().node());
@@ -1892,15 +1960,15 @@ Reduction MachineOperatorReducer::ReduceWord32Equal(Node* node) {
}
// TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
if (m.LeftEqualsRight()) return ReplaceBool(true); // x == x => true
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
base::Optional<std::pair<Node*, uint32_t>> replacements;
if (m.left().IsTruncateInt64ToInt32()) {
replacements = ReduceWord32EqualForConstantRhs<Word64Adapter>(
NodeProperties::GetValueInput(m.left().node(), 0),
- static_cast<uint32_t>(m.right().Value()));
+ static_cast<uint32_t>(m.right().ResolvedValue()));
} else {
replacements = ReduceWord32EqualForConstantRhs<Word32Adapter>(
- m.left().node(), static_cast<uint32_t>(m.right().Value()));
+ m.left().node(), static_cast<uint32_t>(m.right().ResolvedValue()));
}
if (replacements) {
node->ReplaceInput(0, replacements->first);
@@ -1916,10 +1984,11 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertLowWord32(Node* node) {
DCHECK_EQ(IrOpcode::kFloat64InsertLowWord32, node->opcode());
Float64Matcher mlhs(node->InputAt(0));
Uint32Matcher mrhs(node->InputAt(1));
- if (mlhs.HasValue() && mrhs.HasValue()) {
- return ReplaceFloat64(bit_cast<double>(
- (bit_cast<uint64_t>(mlhs.Value()) & uint64_t{0xFFFFFFFF00000000}) |
- mrhs.Value()));
+ if (mlhs.HasResolvedValue() && mrhs.HasResolvedValue()) {
+ return ReplaceFloat64(
+ bit_cast<double>((bit_cast<uint64_t>(mlhs.ResolvedValue()) &
+ uint64_t{0xFFFFFFFF00000000}) |
+ mrhs.ResolvedValue()));
}
return NoChange();
}
@@ -1928,10 +1997,10 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertHighWord32(Node* node) {
DCHECK_EQ(IrOpcode::kFloat64InsertHighWord32, node->opcode());
Float64Matcher mlhs(node->InputAt(0));
Uint32Matcher mrhs(node->InputAt(1));
- if (mlhs.HasValue() && mrhs.HasValue()) {
+ if (mlhs.HasResolvedValue() && mrhs.HasResolvedValue()) {
return ReplaceFloat64(bit_cast<double>(
- (bit_cast<uint64_t>(mlhs.Value()) & uint64_t{0xFFFFFFFF}) |
- (static_cast<uint64_t>(mrhs.Value()) << 32)));
+ (bit_cast<uint64_t>(mlhs.ResolvedValue()) & uint64_t{0xFFFFFFFF}) |
+ (static_cast<uint64_t>(mrhs.ResolvedValue()) << 32)));
}
return NoChange();
}
@@ -1939,8 +2008,8 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertHighWord32(Node* node) {
namespace {
bool IsFloat64RepresentableAsFloat32(const Float64Matcher& m) {
- if (m.HasValue()) {
- double v = m.Value();
+ if (m.HasResolvedValue()) {
+ double v = m.ResolvedValue();
return DoubleToFloat32(v) == v;
}
return false;
@@ -1957,11 +2026,14 @@ Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
if (m.IsFoldable()) {
switch (node->opcode()) {
case IrOpcode::kFloat64Equal:
- return ReplaceBool(m.left().Value() == m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() ==
+ m.right().ResolvedValue());
case IrOpcode::kFloat64LessThan:
- return ReplaceBool(m.left().Value() < m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <
+ m.right().ResolvedValue());
case IrOpcode::kFloat64LessThanOrEqual:
- return ReplaceBool(m.left().Value() <= m.right().Value());
+ return ReplaceBool(m.left().ResolvedValue() <=
+ m.right().ResolvedValue());
default:
UNREACHABLE();
}
@@ -1990,12 +2062,12 @@ Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
UNREACHABLE();
}
node->ReplaceInput(
- 0, m.left().HasValue()
- ? Float32Constant(static_cast<float>(m.left().Value()))
+ 0, m.left().HasResolvedValue()
+ ? Float32Constant(static_cast<float>(m.left().ResolvedValue()))
: m.left().InputAt(0));
node->ReplaceInput(
- 1, m.right().HasValue()
- ? Float32Constant(static_cast<float>(m.right().Value()))
+ 1, m.right().HasResolvedValue()
+ ? Float32Constant(static_cast<float>(m.right().ResolvedValue()))
: m.right().InputAt(0));
return Changed(node);
}
@@ -2005,8 +2077,8 @@ Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
Reduction MachineOperatorReducer::ReduceFloat64RoundDown(Node* node) {
DCHECK_EQ(IrOpcode::kFloat64RoundDown, node->opcode());
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) {
- return ReplaceFloat64(std::floor(m.Value()));
+ if (m.HasResolvedValue()) {
+ return ReplaceFloat64(std::floor(m.ResolvedValue()));
}
return NoChange();
}
@@ -2055,12 +2127,12 @@ MachineOperatorReducer::ReduceWord32EqualForConstantRhs(Node* lhs,
typename WordNAdapter::UintNBinopMatcher mand(lhs);
if ((WordNAdapter::IsWordNShr(mand.left()) ||
WordNAdapter::IsWordNSar(mand.left())) &&
- mand.right().HasValue()) {
+ mand.right().HasResolvedValue()) {
typename WordNAdapter::UintNBinopMatcher mshift(mand.left().node());
// ((x >> K1) & K2) == K3 => (x & (K2 << K1)) == (K3 << K1)
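      // e.g. ((x >> 8) & 0xFF) == 0x12 becomes (x & 0xFF00) == 0x1200.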
- if (mshift.right().HasValue()) {
- auto shift_bits = mshift.right().Value();
- auto mask = mand.right().Value();
+ if (mshift.right().HasResolvedValue()) {
+ auto shift_bits = mshift.right().ResolvedValue();
+ auto mask = mand.right().ResolvedValue();
// Make sure that we won't shift data off the end, and that all of the
// data ends up in the lower 32 bits for 64-bit mode.
if (shift_bits <= base::bits::CountLeadingZeros(mask) &&
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 98befab060..e3d16d7e60 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -32,15 +32,15 @@ std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {
return os << rep.representation() << ", " << rep.write_barrier_kind();
}
-size_t hash_value(LoadKind kind) { return static_cast<size_t>(kind); }
+size_t hash_value(MemoryAccessKind kind) { return static_cast<size_t>(kind); }
-std::ostream& operator<<(std::ostream& os, LoadKind kind) {
+std::ostream& operator<<(std::ostream& os, MemoryAccessKind kind) {
switch (kind) {
- case LoadKind::kNormal:
+ case MemoryAccessKind::kNormal:
return os << "kNormal";
- case LoadKind::kUnaligned:
+ case MemoryAccessKind::kUnaligned:
return os << "kUnaligned";
- case LoadKind::kProtected:
+ case MemoryAccessKind::kProtected:
return os << "kProtected";
}
UNREACHABLE();
@@ -50,30 +50,30 @@ size_t hash_value(LoadTransformation rep) { return static_cast<size_t>(rep); }
std::ostream& operator<<(std::ostream& os, LoadTransformation rep) {
switch (rep) {
- case LoadTransformation::kS8x16LoadSplat:
- return os << "kS8x16LoadSplat";
- case LoadTransformation::kS16x8LoadSplat:
- return os << "kS16x8LoadSplat";
- case LoadTransformation::kS32x4LoadSplat:
- return os << "kS32x4LoadSplat";
- case LoadTransformation::kS64x2LoadSplat:
- return os << "kS64x2LoadSplat";
- case LoadTransformation::kI16x8Load8x8S:
- return os << "kI16x8Load8x8S";
- case LoadTransformation::kI16x8Load8x8U:
- return os << "kI16x8Load8x8U";
- case LoadTransformation::kI32x4Load16x4S:
- return os << "kI32x4Load16x4S";
- case LoadTransformation::kI32x4Load16x4U:
- return os << "kI32x4Load16x4U";
- case LoadTransformation::kI64x2Load32x2S:
- return os << "kI64x2Load32x2S";
- case LoadTransformation::kI64x2Load32x2U:
- return os << "kI64x2Load32x2U";
- case LoadTransformation::kS128LoadMem32Zero:
- return os << "kS128LoadMem32Zero";
- case LoadTransformation::kS128LoadMem64Zero:
- return os << "kS128LoadMem64Zero";
+ case LoadTransformation::kS128Load8Splat:
+ return os << "kS128Load8Splat";
+ case LoadTransformation::kS128Load16Splat:
+ return os << "kS128Load16Splat";
+ case LoadTransformation::kS128Load32Splat:
+ return os << "kS128Load32Splat";
+ case LoadTransformation::kS128Load64Splat:
+ return os << "kS128Load64Splat";
+ case LoadTransformation::kS128Load8x8S:
+ return os << "kS128Load8x8S";
+ case LoadTransformation::kS128Load8x8U:
+ return os << "kS128Load8x8U";
+ case LoadTransformation::kS128Load16x4S:
+ return os << "kS128Load16x4S";
+ case LoadTransformation::kS128Load16x4U:
+ return os << "kS128Load16x4U";
+ case LoadTransformation::kS128Load32x2S:
+ return os << "kS128Load32x2S";
+ case LoadTransformation::kS128Load32x2U:
+ return os << "kS128Load32x2U";
+ case LoadTransformation::kS128Load32Zero:
+ return os << "kS128Load32Zero";
+ case LoadTransformation::kS128Load64Zero:
+ return os << "kS128Load64Zero";
}
UNREACHABLE();
}
@@ -99,6 +99,25 @@ bool operator!=(LoadTransformParameters lhs, LoadTransformParameters rhs) {
return !(lhs == rhs);
}
+size_t hash_value(LoadLaneParameters params) {
+ return base::hash_combine(params.kind, params.rep, params.laneidx);
+}
+
+std::ostream& operator<<(std::ostream& os, LoadLaneParameters params) {
+ return os << "(" << params.kind << " " << params.rep << " " << params.laneidx
+ << ")";
+}
+
+LoadLaneParameters const& LoadLaneParametersOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kLoadLane, op->opcode());
+ return OpParameter<LoadLaneParameters>(op);
+}
+
+bool operator==(LoadLaneParameters lhs, LoadLaneParameters rhs) {
+ return lhs.kind == rhs.kind && lhs.rep == rhs.rep &&
+ lhs.laneidx == rhs.laneidx;
+}
+
LoadRepresentation LoadRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kLoad == op->opcode() ||
IrOpcode::kProtectedLoad == op->opcode() ||
@@ -122,6 +141,25 @@ UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
return OpParameter<UnalignedStoreRepresentation>(op);
}
+size_t hash_value(StoreLaneParameters params) {
+ return base::hash_combine(params.kind, params.rep, params.laneidx);
+}
+
+std::ostream& operator<<(std::ostream& os, StoreLaneParameters params) {
+ return os << "(" << params.kind << " " << params.rep << " " << params.laneidx
+ << ")";
+}
+
+StoreLaneParameters const& StoreLaneParametersOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kStoreLane, op->opcode());
+ return OpParameter<StoreLaneParameters>(op);
+}
+
+bool operator==(StoreLaneParameters lhs, StoreLaneParameters rhs) {
+ return lhs.kind == rhs.kind && lhs.rep == rhs.rep &&
+ lhs.laneidx == rhs.laneidx;
+}
+
bool operator==(StackSlotRepresentation lhs, StackSlotRepresentation rhs) {
return lhs.size() == rhs.size() && lhs.alignment() == rhs.alignment();
}
@@ -375,22 +413,23 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I64x2Splat, Operator::kNoProperties, 1, 0, 1) \
V(I64x2SplatI32Pair, Operator::kNoProperties, 2, 0, 1) \
V(I64x2Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(I64x2SConvertI32x4Low, Operator::kNoProperties, 1, 0, 1) \
+ V(I64x2SConvertI32x4High, Operator::kNoProperties, 1, 0, 1) \
+ V(I64x2UConvertI32x4Low, Operator::kNoProperties, 1, 0, 1) \
+ V(I64x2UConvertI32x4High, Operator::kNoProperties, 1, 0, 1) \
+ V(I64x2BitMask, Operator::kNoProperties, 1, 0, 1) \
V(I64x2Shl, Operator::kNoProperties, 2, 0, 1) \
V(I64x2ShrS, Operator::kNoProperties, 2, 0, 1) \
V(I64x2Add, Operator::kCommutative, 2, 0, 1) \
V(I64x2Sub, Operator::kNoProperties, 2, 0, 1) \
V(I64x2Mul, Operator::kCommutative, 2, 0, 1) \
- V(I64x2MinS, Operator::kCommutative, 2, 0, 1) \
- V(I64x2MaxS, Operator::kCommutative, 2, 0, 1) \
V(I64x2Eq, Operator::kCommutative, 2, 0, 1) \
- V(I64x2Ne, Operator::kCommutative, 2, 0, 1) \
- V(I64x2GtS, Operator::kNoProperties, 2, 0, 1) \
- V(I64x2GeS, Operator::kNoProperties, 2, 0, 1) \
V(I64x2ShrU, Operator::kNoProperties, 2, 0, 1) \
- V(I64x2MinU, Operator::kCommutative, 2, 0, 1) \
- V(I64x2MaxU, Operator::kCommutative, 2, 0, 1) \
- V(I64x2GtU, Operator::kNoProperties, 2, 0, 1) \
- V(I64x2GeU, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2ExtMulLowI32x4S, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2ExtMulHighI32x4S, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2ExtMulLowI32x4U, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2ExtMulHighI32x4U, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2SignSelect, Operator::kNoProperties, 3, 0, 1) \
V(I32x4Splat, Operator::kNoProperties, 1, 0, 1) \
V(I32x4SConvertF32x4, Operator::kNoProperties, 1, 0, 1) \
V(I32x4SConvertI16x8Low, Operator::kNoProperties, 1, 0, 1) \
@@ -419,6 +458,13 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I32x4Abs, Operator::kNoProperties, 1, 0, 1) \
V(I32x4BitMask, Operator::kNoProperties, 1, 0, 1) \
V(I32x4DotI16x8S, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4ExtMulLowI16x8S, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4ExtMulHighI16x8S, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4ExtMulLowI16x8U, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4ExtMulHighI16x8U, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4SignSelect, Operator::kNoProperties, 3, 0, 1) \
+ V(I32x4ExtAddPairwiseI16x8S, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4ExtAddPairwiseI16x8U, Operator::kNoProperties, 1, 0, 1) \
V(I16x8Splat, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16Low, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16High, Operator::kNoProperties, 1, 0, 1) \
@@ -427,10 +473,10 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I16x8ShrS, Operator::kNoProperties, 2, 0, 1) \
V(I16x8SConvertI32x4, Operator::kNoProperties, 2, 0, 1) \
V(I16x8Add, Operator::kCommutative, 2, 0, 1) \
- V(I16x8AddSaturateS, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8AddSatS, Operator::kCommutative, 2, 0, 1) \
V(I16x8AddHoriz, Operator::kNoProperties, 2, 0, 1) \
V(I16x8Sub, Operator::kNoProperties, 2, 0, 1) \
- V(I16x8SubSaturateS, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8SubSatS, Operator::kNoProperties, 2, 0, 1) \
V(I16x8Mul, Operator::kCommutative, 2, 0, 1) \
V(I16x8MinS, Operator::kCommutative, 2, 0, 1) \
V(I16x8MaxS, Operator::kCommutative, 2, 0, 1) \
@@ -442,24 +488,32 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I16x8UConvertI8x16High, Operator::kNoProperties, 1, 0, 1) \
V(I16x8ShrU, Operator::kNoProperties, 2, 0, 1) \
V(I16x8UConvertI32x4, Operator::kNoProperties, 2, 0, 1) \
- V(I16x8AddSaturateU, Operator::kCommutative, 2, 0, 1) \
- V(I16x8SubSaturateU, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8AddSatU, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8SubSatU, Operator::kNoProperties, 2, 0, 1) \
V(I16x8MinU, Operator::kCommutative, 2, 0, 1) \
V(I16x8MaxU, Operator::kCommutative, 2, 0, 1) \
V(I16x8GtU, Operator::kNoProperties, 2, 0, 1) \
V(I16x8GeU, Operator::kNoProperties, 2, 0, 1) \
V(I16x8RoundingAverageU, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8Q15MulRSatS, Operator::kCommutative, 2, 0, 1) \
V(I16x8Abs, Operator::kNoProperties, 1, 0, 1) \
V(I16x8BitMask, Operator::kNoProperties, 1, 0, 1) \
+ V(I16x8ExtMulLowI8x16S, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8ExtMulHighI8x16S, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8ExtMulLowI8x16U, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8ExtMulHighI8x16U, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8SignSelect, Operator::kNoProperties, 3, 0, 1) \
+ V(I16x8ExtAddPairwiseI8x16S, Operator::kNoProperties, 1, 0, 1) \
+ V(I16x8ExtAddPairwiseI8x16U, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Splat, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Neg, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Shl, Operator::kNoProperties, 2, 0, 1) \
V(I8x16ShrS, Operator::kNoProperties, 2, 0, 1) \
V(I8x16SConvertI16x8, Operator::kNoProperties, 2, 0, 1) \
V(I8x16Add, Operator::kCommutative, 2, 0, 1) \
- V(I8x16AddSaturateS, Operator::kCommutative, 2, 0, 1) \
+ V(I8x16AddSatS, Operator::kCommutative, 2, 0, 1) \
V(I8x16Sub, Operator::kNoProperties, 2, 0, 1) \
- V(I8x16SubSaturateS, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16SubSatS, Operator::kNoProperties, 2, 0, 1) \
V(I8x16Mul, Operator::kCommutative, 2, 0, 1) \
V(I8x16MinS, Operator::kCommutative, 2, 0, 1) \
V(I8x16MaxS, Operator::kCommutative, 2, 0, 1) \
@@ -469,15 +523,17 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I8x16GeS, Operator::kNoProperties, 2, 0, 1) \
V(I8x16ShrU, Operator::kNoProperties, 2, 0, 1) \
V(I8x16UConvertI16x8, Operator::kNoProperties, 2, 0, 1) \
- V(I8x16AddSaturateU, Operator::kCommutative, 2, 0, 1) \
- V(I8x16SubSaturateU, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16AddSatU, Operator::kCommutative, 2, 0, 1) \
+ V(I8x16SubSatU, Operator::kNoProperties, 2, 0, 1) \
V(I8x16MinU, Operator::kCommutative, 2, 0, 1) \
V(I8x16MaxU, Operator::kCommutative, 2, 0, 1) \
V(I8x16GtU, Operator::kNoProperties, 2, 0, 1) \
V(I8x16GeU, Operator::kNoProperties, 2, 0, 1) \
V(I8x16RoundingAverageU, Operator::kCommutative, 2, 0, 1) \
+ V(I8x16Popcnt, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Abs, Operator::kNoProperties, 1, 0, 1) \
V(I8x16BitMask, Operator::kNoProperties, 1, 0, 1) \
+ V(I8x16SignSelect, Operator::kNoProperties, 3, 0, 1) \
V(S128Load, Operator::kNoProperties, 2, 0, 1) \
V(S128Store, Operator::kNoProperties, 3, 0, 1) \
V(S128Zero, Operator::kNoProperties, 0, 0, 1) \
@@ -487,8 +543,6 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(S128Not, Operator::kNoProperties, 1, 0, 1) \
V(S128Select, Operator::kNoProperties, 3, 0, 1) \
V(S128AndNot, Operator::kNoProperties, 2, 0, 1) \
- V(V64x2AnyTrue, Operator::kNoProperties, 1, 0, 1) \
- V(V64x2AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(V32x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(V32x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(V16x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
@@ -563,18 +617,18 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(kCompressed)
#define LOAD_TRANSFORM_LIST(V) \
- V(S8x16LoadSplat) \
- V(S16x8LoadSplat) \
- V(S32x4LoadSplat) \
- V(S64x2LoadSplat) \
- V(I16x8Load8x8S) \
- V(I16x8Load8x8U) \
- V(I32x4Load16x4S) \
- V(I32x4Load16x4U) \
- V(I64x2Load32x2S) \
- V(I64x2Load32x2U) \
- V(S128LoadMem32Zero) \
- V(S128LoadMem64Zero)
+ V(S128Load8Splat) \
+ V(S128Load16Splat) \
+ V(S128Load32Splat) \
+ V(S128Load64Splat) \
+ V(S128Load8x8S) \
+ V(S128Load8x8U) \
+ V(S128Load16x4S) \
+ V(S128Load16x4U) \
+ V(S128Load32x2S) \
+ V(S128Load32x2U) \
+ V(S128Load32Zero) \
+ V(S128Load64Zero)
#define ATOMIC_U32_TYPE_LIST(V) \
V(Uint8) \
@@ -608,6 +662,15 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I16x8, 8) \
V(I8x16, 16)
+#define SIMD_I64x2_LANES(V) V(0) V(1)
+
+#define SIMD_I32x4_LANES(V) SIMD_I64x2_LANES(V) V(2) V(3)
+
+#define SIMD_I16x8_LANES(V) SIMD_I32x4_LANES(V) V(4) V(5) V(6) V(7)
+
+#define SIMD_I8x16_LANES(V) \
+ SIMD_I16x8_LANES(V) V(8) V(9) V(10) V(11) V(12) V(13) V(14) V(15)
+
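For illustration, the lane-list macros above nest so each wider list reuses the narrower one; applying a hypothetical FOO macro (not part of this patch) expands as:

  #define FOO(LANE) UseLane(LANE);
  SIMD_I16x8_LANES(FOO)  // expands to UseLane(0); UseLane(1); ... UseLane(7);
  #undef FOO
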
#define STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(V) \
V(4, 0) V(8, 0) V(16, 0) V(4, 4) V(8, 8) V(16, 16)
@@ -747,14 +810,30 @@ struct ProtectedLoadOperator : public Operator1<LoadRepresentation> {
1, 1, 1, 1, 0, LoadRepresentation(rep, sem)) {}
};
-template <LoadKind kind, LoadTransformation type>
+template <MemoryAccessKind kind, LoadTransformation type>
struct LoadTransformOperator : public Operator1<LoadTransformParameters> {
LoadTransformOperator()
- : Operator1(IrOpcode::kLoadTransform, Operator::kEliminatable,
+ : Operator1(IrOpcode::kLoadTransform,
+ kind == MemoryAccessKind::kProtected
+ ? Operator::kNoDeopt | Operator::kNoThrow
+ : Operator::kEliminatable,
"LoadTransform", 2, 1, 1, 1, 1, 0,
LoadTransformParameters{kind, type}) {}
};
+template <MemoryAccessKind kind, MachineRepresentation rep, MachineSemantic sem,
+ uint8_t laneidx>
+struct LoadLaneOperator : public Operator1<LoadLaneParameters> {
+ LoadLaneOperator()
+ : Operator1(
+ IrOpcode::kLoadLane,
+ kind == MemoryAccessKind::kProtected
+ ? Operator::kNoDeopt | Operator::kNoThrow
+ : Operator::kEliminatable,
+ "LoadLane", 3, 1, 1, 1, 1, 0,
+ LoadLaneParameters{kind, LoadRepresentation(rep, sem), laneidx}) {}
+};
+
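Each (kind, rep, sem, laneidx) combination instantiates a distinct operator type, so the builder can hand out one shared instance per combination. A minimal sketch of the caching pattern assumed here (the real GetCachedOperator is defined earlier in this file):

  template <typename OperatorT>
  const Operator* GetCachedOperator() {
    static const OperatorT kCached;  // one singleton per template instantiation
    return &kCached;
  }
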
template <MachineRepresentation rep, WriteBarrierKind write_barrier_kind>
struct StoreOperator : public Operator1<StoreRepresentation> {
StoreOperator()
@@ -781,6 +860,15 @@ struct ProtectedStoreOperator : public Operator1<StoreRepresentation> {
StoreRepresentation(rep, kNoWriteBarrier)) {}
};
+template <MemoryAccessKind kind, MachineRepresentation rep, uint8_t laneidx>
+struct StoreLaneOperator : public Operator1<StoreLaneParameters> {
+ StoreLaneOperator()
+ : Operator1(IrOpcode::kStoreLane,
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
+ "StoreLane", 3, 1, 1, 0, 1, 0,
+ StoreLaneParameters{kind, rep, laneidx}) {}
+};
+
template <MachineRepresentation rep, MachineSemantic sem>
struct Word32AtomicLoadOperator : public Operator1<LoadRepresentation> {
Word32AtomicLoadOperator()
@@ -1109,11 +1197,12 @@ const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
}
const Operator* MachineOperatorBuilder::LoadTransform(
- LoadKind kind, LoadTransformation transform) {
-#define LOAD_TRANSFORM_KIND(TYPE, KIND) \
- if (kind == LoadKind::k##KIND && transform == LoadTransformation::k##TYPE) { \
- return GetCachedOperator<LoadTransformOperator< \
- LoadKind::k##KIND, LoadTransformation::k##TYPE>>(); \
+ MemoryAccessKind kind, LoadTransformation transform) {
+#define LOAD_TRANSFORM_KIND(TYPE, KIND) \
+ if (kind == MemoryAccessKind::k##KIND && \
+ transform == LoadTransformation::k##TYPE) { \
+ return GetCachedOperator<LoadTransformOperator< \
+ MemoryAccessKind::k##KIND, LoadTransformation::k##TYPE>>(); \
}
#define LOAD_TRANSFORM(TYPE) \
LOAD_TRANSFORM_KIND(TYPE, Normal) \
@@ -1126,6 +1215,73 @@ const Operator* MachineOperatorBuilder::LoadTransform(
UNREACHABLE();
}
+const Operator* MachineOperatorBuilder::LoadLane(MemoryAccessKind kind,
+ LoadRepresentation rep,
+ uint8_t laneidx) {
+#define LOAD_LANE_KIND(TYPE, KIND, LANEIDX) \
+ if (kind == MemoryAccessKind::k##KIND && rep == MachineType::TYPE() && \
+ laneidx == LANEIDX) { \
+ return GetCachedOperator<LoadLaneOperator< \
+ MemoryAccessKind::k##KIND, MachineType::TYPE().representation(), \
+ MachineType::TYPE().semantic(), LANEIDX>>(); \
+ }
+
+#define LOAD_LANE_T(T, LANE) \
+ LOAD_LANE_KIND(T, Normal, LANE) \
+ LOAD_LANE_KIND(T, Unaligned, LANE) \
+ LOAD_LANE_KIND(T, Protected, LANE)
+
+#define LOAD_LANE_INT8(LANE) LOAD_LANE_T(Int8, LANE)
+#define LOAD_LANE_INT16(LANE) LOAD_LANE_T(Int16, LANE)
+#define LOAD_LANE_INT32(LANE) LOAD_LANE_T(Int32, LANE)
+#define LOAD_LANE_INT64(LANE) LOAD_LANE_T(Int64, LANE)
+
+  // Semicolons are unnecessary, but they help formatting.
+ SIMD_I8x16_LANES(LOAD_LANE_INT8);
+ SIMD_I16x8_LANES(LOAD_LANE_INT16);
+ SIMD_I32x4_LANES(LOAD_LANE_INT32);
+ SIMD_I64x2_LANES(LOAD_LANE_INT64);
+#undef LOAD_LANE_INT8
+#undef LOAD_LANE_INT16
+#undef LOAD_LANE_INT32
+#undef LOAD_LANE_INT64
+#undef LOAD_LANE_KIND
+ UNREACHABLE();
+}
+
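A hedged usage sketch for the new builder method (graph-construction context assumed; the lane index must be in range for the representation):

  // Replace lane 2 of a 128-bit value with the 32-bit word at [base + index],
  // using a trap-handler-protected access.
  const Operator* op = machine()->LoadLane(
      MemoryAccessKind::kProtected, MachineType::Int32(), 2);
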
+const Operator* MachineOperatorBuilder::StoreLane(MemoryAccessKind kind,
+ MachineRepresentation rep,
+ uint8_t laneidx) {
+#define STORE_LANE_KIND(REP, KIND, LANEIDX) \
+ if (kind == MemoryAccessKind::k##KIND && \
+ rep == MachineRepresentation::REP && laneidx == LANEIDX) { \
+ return GetCachedOperator<StoreLaneOperator< \
+ MemoryAccessKind::k##KIND, MachineRepresentation::REP, LANEIDX>>(); \
+ }
+
+#define STORE_LANE_T(T, LANE) \
+ STORE_LANE_KIND(T, Normal, LANE) \
+ STORE_LANE_KIND(T, Unaligned, LANE) \
+ STORE_LANE_KIND(T, Protected, LANE)
+
+#define STORE_LANE_WORD8(LANE) STORE_LANE_T(kWord8, LANE)
+#define STORE_LANE_WORD16(LANE) STORE_LANE_T(kWord16, LANE)
+#define STORE_LANE_WORD32(LANE) STORE_LANE_T(kWord32, LANE)
+#define STORE_LANE_WORD64(LANE) STORE_LANE_T(kWord64, LANE)
+
+  // Semicolons are unnecessary, but they help formatting.
+ SIMD_I8x16_LANES(STORE_LANE_WORD8);
+ SIMD_I16x8_LANES(STORE_LANE_WORD16);
+ SIMD_I32x4_LANES(STORE_LANE_WORD32);
+ SIMD_I64x2_LANES(STORE_LANE_WORD64);
+#undef STORE_LANE_WORD8
+#undef STORE_LANE_WORD16
+#undef STORE_LANE_WORD32
+#undef STORE_LANE_WORD64
+#undef STORE_LANE_KIND
+ UNREACHABLE();
+}
+
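And the matching store side, as a sketch under the same assumptions:

  // Store lane 0 of a 128-bit value as a 64-bit word to [base + index].
  const Operator* op = machine()->StoreLane(
      MemoryAccessKind::kNormal, MachineRepresentation::kWord64, 0);
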
const Operator* MachineOperatorBuilder::StackSlot(int size, int alignment) {
DCHECK_LE(0, size);
DCHECK(alignment == 0 || alignment == 4 || alignment == 8 || alignment == 16);
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 8c373fd6ca..7912c55de5 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -49,29 +49,29 @@ using LoadRepresentation = MachineType;
V8_EXPORT_PRIVATE LoadRepresentation LoadRepresentationOf(Operator const*)
V8_WARN_UNUSED_RESULT;
-enum class LoadKind {
+enum class MemoryAccessKind {
kNormal,
kUnaligned,
kProtected,
};
-size_t hash_value(LoadKind);
+size_t hash_value(MemoryAccessKind);
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, LoadKind);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, MemoryAccessKind);
enum class LoadTransformation {
- kS8x16LoadSplat,
- kS16x8LoadSplat,
- kS32x4LoadSplat,
- kS64x2LoadSplat,
- kI16x8Load8x8S,
- kI16x8Load8x8U,
- kI32x4Load16x4S,
- kI32x4Load16x4U,
- kI64x2Load32x2S,
- kI64x2Load32x2U,
- kS128LoadMem32Zero,
- kS128LoadMem64Zero,
+ kS128Load8Splat,
+ kS128Load16Splat,
+ kS128Load32Splat,
+ kS128Load64Splat,
+ kS128Load8x8S,
+ kS128Load8x8U,
+ kS128Load16x4S,
+ kS128Load16x4U,
+ kS128Load32x2S,
+ kS128Load32x2U,
+ kS128Load32Zero,
+ kS128Load64Zero,
};
size_t hash_value(LoadTransformation);
@@ -79,7 +79,7 @@ size_t hash_value(LoadTransformation);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, LoadTransformation);
struct LoadTransformParameters {
- LoadKind kind;
+ MemoryAccessKind kind;
LoadTransformation transformation;
};
@@ -91,6 +91,17 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
V8_EXPORT_PRIVATE LoadTransformParameters const& LoadTransformParametersOf(
Operator const*) V8_WARN_UNUSED_RESULT;
+struct LoadLaneParameters {
+ MemoryAccessKind kind;
+ LoadRepresentation rep;
+ uint8_t laneidx;
+};
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, LoadLaneParameters);
+
+V8_EXPORT_PRIVATE LoadLaneParameters const& LoadLaneParametersOf(
+ Operator const*) V8_WARN_UNUSED_RESULT;
+
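The accessor presumably follows the conventional OpParameter pattern used for LoadTransformParametersOf (a sketch of the assumed .cc-side definition, which is not shown in this hunk):

  LoadLaneParameters const& LoadLaneParametersOf(Operator const* op) {
    DCHECK_EQ(IrOpcode::kLoadLane, op->opcode());
    return OpParameter<LoadLaneParameters>(op);
  }
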
// A Store needs a MachineType and a WriteBarrierKind in order to emit the
// correct write barrier.
class StoreRepresentation final {
@@ -124,6 +135,17 @@ using UnalignedStoreRepresentation = MachineRepresentation;
UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
Operator const*) V8_WARN_UNUSED_RESULT;
+struct StoreLaneParameters {
+ MemoryAccessKind kind;
+ MachineRepresentation rep;
+ uint8_t laneidx;
+};
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, StoreLaneParameters);
+
+V8_EXPORT_PRIVATE StoreLaneParameters const& StoreLaneParametersOf(
+ Operator const*) V8_WARN_UNUSED_RESULT;
+
class StackSlotRepresentation final {
public:
StackSlotRepresentation(int size, int alignment)
@@ -306,6 +328,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
AlignmentRequirements alignmentRequirements =
AlignmentRequirements::FullUnalignedAccessSupport());
+ MachineOperatorBuilder(const MachineOperatorBuilder&) = delete;
+ MachineOperatorBuilder& operator=(const MachineOperatorBuilder&) = delete;
+
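This patch systematically replaces the DISALLOW_COPY_AND_ASSIGN macro with explicitly deleted members; the two spellings are equivalent, for example:

  class MachineOperatorBuilder {
   public:
    MachineOperatorBuilder(const MachineOperatorBuilder&) = delete;
    MachineOperatorBuilder& operator=(const MachineOperatorBuilder&) = delete;
    // Old style: a private DISALLOW_COPY_AND_ASSIGN(MachineOperatorBuilder);
  };
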
const Operator* Comment(const char* msg);
const Operator* AbortCSAAssert();
const Operator* DebugBreak();
@@ -637,22 +662,23 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I64x2ReplaceLane(int32_t);
const Operator* I64x2ReplaceLaneI32Pair(int32_t);
const Operator* I64x2Neg();
+ const Operator* I64x2SConvertI32x4Low();
+ const Operator* I64x2SConvertI32x4High();
+ const Operator* I64x2UConvertI32x4Low();
+ const Operator* I64x2UConvertI32x4High();
+ const Operator* I64x2BitMask();
const Operator* I64x2Shl();
const Operator* I64x2ShrS();
const Operator* I64x2Add();
const Operator* I64x2Sub();
const Operator* I64x2Mul();
- const Operator* I64x2MinS();
- const Operator* I64x2MaxS();
const Operator* I64x2Eq();
- const Operator* I64x2Ne();
- const Operator* I64x2GtS();
- const Operator* I64x2GeS();
const Operator* I64x2ShrU();
- const Operator* I64x2MinU();
- const Operator* I64x2MaxU();
- const Operator* I64x2GtU();
- const Operator* I64x2GeU();
+ const Operator* I64x2ExtMulLowI32x4S();
+ const Operator* I64x2ExtMulHighI32x4S();
+ const Operator* I64x2ExtMulLowI32x4U();
+ const Operator* I64x2ExtMulHighI32x4U();
+ const Operator* I64x2SignSelect();
const Operator* I32x4Splat();
const Operator* I32x4ExtractLane(int32_t);
@@ -685,6 +711,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I32x4Abs();
const Operator* I32x4BitMask();
const Operator* I32x4DotI16x8S();
+ const Operator* I32x4ExtMulLowI16x8S();
+ const Operator* I32x4ExtMulHighI16x8S();
+ const Operator* I32x4ExtMulLowI16x8U();
+ const Operator* I32x4ExtMulHighI16x8U();
+ const Operator* I32x4SignSelect();
+ const Operator* I32x4ExtAddPairwiseI16x8S();
+ const Operator* I32x4ExtAddPairwiseI16x8U();
const Operator* I16x8Splat();
const Operator* I16x8ExtractLaneU(int32_t);
@@ -697,10 +730,10 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I16x8ShrS();
const Operator* I16x8SConvertI32x4();
const Operator* I16x8Add();
- const Operator* I16x8AddSaturateS();
+ const Operator* I16x8AddSatS();
const Operator* I16x8AddHoriz();
const Operator* I16x8Sub();
- const Operator* I16x8SubSaturateS();
+ const Operator* I16x8SubSatS();
const Operator* I16x8Mul();
const Operator* I16x8MinS();
const Operator* I16x8MaxS();
@@ -713,15 +746,23 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I16x8UConvertI8x16High();
const Operator* I16x8ShrU();
const Operator* I16x8UConvertI32x4();
- const Operator* I16x8AddSaturateU();
- const Operator* I16x8SubSaturateU();
+ const Operator* I16x8AddSatU();
+ const Operator* I16x8SubSatU();
const Operator* I16x8MinU();
const Operator* I16x8MaxU();
const Operator* I16x8GtU();
const Operator* I16x8GeU();
const Operator* I16x8RoundingAverageU();
+ const Operator* I16x8Q15MulRSatS();
const Operator* I16x8Abs();
const Operator* I16x8BitMask();
+ const Operator* I16x8ExtMulLowI8x16S();
+ const Operator* I16x8ExtMulHighI8x16S();
+ const Operator* I16x8ExtMulLowI8x16U();
+ const Operator* I16x8ExtMulHighI8x16U();
+ const Operator* I16x8SignSelect();
+ const Operator* I16x8ExtAddPairwiseI8x16S();
+ const Operator* I16x8ExtAddPairwiseI8x16U();
const Operator* I8x16Splat();
const Operator* I8x16ExtractLaneU(int32_t);
@@ -732,9 +773,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I8x16ShrS();
const Operator* I8x16SConvertI16x8();
const Operator* I8x16Add();
- const Operator* I8x16AddSaturateS();
+ const Operator* I8x16AddSatS();
const Operator* I8x16Sub();
- const Operator* I8x16SubSaturateS();
+ const Operator* I8x16SubSatS();
const Operator* I8x16Mul();
const Operator* I8x16MinS();
const Operator* I8x16MaxS();
@@ -745,15 +786,17 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I8x16ShrU();
const Operator* I8x16UConvertI16x8();
- const Operator* I8x16AddSaturateU();
- const Operator* I8x16SubSaturateU();
+ const Operator* I8x16AddSatU();
+ const Operator* I8x16SubSatU();
const Operator* I8x16MinU();
const Operator* I8x16MaxU();
const Operator* I8x16GtU();
const Operator* I8x16GeU();
const Operator* I8x16RoundingAverageU();
+ const Operator* I8x16Popcnt();
const Operator* I8x16Abs();
const Operator* I8x16BitMask();
+ const Operator* I8x16SignSelect();
const Operator* S128Load();
const Operator* S128Store();
@@ -770,8 +813,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I8x16Swizzle();
const Operator* I8x16Shuffle(const uint8_t shuffle[16]);
- const Operator* V64x2AnyTrue();
- const Operator* V64x2AllTrue();
const Operator* V32x4AnyTrue();
const Operator* V32x4AllTrue();
const Operator* V16x8AnyTrue();
@@ -784,12 +825,21 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* PoisonedLoad(LoadRepresentation rep);
const Operator* ProtectedLoad(LoadRepresentation rep);
- const Operator* LoadTransform(LoadKind kind, LoadTransformation transform);
+ const Operator* LoadTransform(MemoryAccessKind kind,
+ LoadTransformation transform);
+
+ // SIMD load: replace a specified lane with [base + index].
+ const Operator* LoadLane(MemoryAccessKind kind, LoadRepresentation rep,
+ uint8_t laneidx);
// store [base + index], value
const Operator* Store(StoreRepresentation rep);
const Operator* ProtectedStore(MachineRepresentation rep);
+ // SIMD store: store a specified lane of value into [base + index].
+ const Operator* StoreLane(MemoryAccessKind kind, MachineRepresentation rep,
+ uint8_t laneidx);
+
// unaligned load [base + index]
const Operator* UnalignedLoad(LoadRepresentation rep);
@@ -931,8 +981,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
MachineRepresentation const word_;
Flags const flags_;
AlignmentRequirements const alignment_requirements_;
-
- DISALLOW_COPY_AND_ASSIGN(MachineOperatorBuilder);
};
diff --git a/deps/v8/src/compiler/map-inference.cc b/deps/v8/src/compiler/map-inference.cc
index 3e48a95405..1e1a59d784 100644
--- a/deps/v8/src/compiler/map-inference.cc
+++ b/deps/v8/src/compiler/map-inference.cc
@@ -19,12 +19,12 @@ MapInference::MapInference(JSHeapBroker* broker, Node* object, Node* effect)
: broker_(broker), object_(object) {
ZoneHandleSet<Map> maps;
auto result =
- NodeProperties::InferReceiverMapsUnsafe(broker_, object_, effect, &maps);
+ NodeProperties::InferMapsUnsafe(broker_, object_, effect, &maps);
maps_.insert(maps_.end(), maps.begin(), maps.end());
- maps_state_ = (result == NodeProperties::kUnreliableReceiverMaps)
+ maps_state_ = (result == NodeProperties::kUnreliableMaps)
? kUnreliableDontNeedGuard
: kReliableOrGuarded;
- DCHECK_EQ(maps_.empty(), result == NodeProperties::kNoReceiverMaps);
+ DCHECK_EQ(maps_.empty(), result == NodeProperties::kNoMaps);
}
MapInference::~MapInference() { CHECK(Safe()); }
diff --git a/deps/v8/src/compiler/memory-lowering.cc b/deps/v8/src/compiler/memory-lowering.cc
index a1b68d48e3..21a0169f2e 100644
--- a/deps/v8/src/compiler/memory-lowering.cc
+++ b/deps/v8/src/compiler/memory-lowering.cc
@@ -98,6 +98,10 @@ Reduction MemoryLowering::ReduceAllocateRaw(
DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
DCHECK_IMPLIES(allocation_folding_ == AllocationFolding::kDoAllocationFolding,
state_ptr != nullptr);
+ // Code objects may have a maximum size smaller than kMaxHeapObjectSize due to
+ // guard pages. If we need to support allocating code here we would need to
+ // call MemoryChunkLayout::MaxRegularCodeObjectSize() at runtime.
+ DCHECK_NE(allocation_type, AllocationType::kCode);
Node* value;
Node* size = node->InputAt(0);
Node* effect = node->InputAt(1);
@@ -135,7 +139,7 @@ Reduction MemoryLowering::ReduceAllocateRaw(
IntPtrMatcher m(size);
if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new &&
allocation_folding_ == AllocationFolding::kDoAllocationFolding) {
- intptr_t const object_size = m.Value();
+ intptr_t const object_size = m.ResolvedValue();
AllocationState const* state = *state_ptr;
if (state->size() <= kMaxRegularHeapObjectSize - object_size &&
state->group()->allocation() == allocation_type) {
@@ -306,7 +310,9 @@ Reduction MemoryLowering::ReduceLoadElement(Node* node) {
return Changed(node);
}
-Node* MemoryLowering::DecodeExternalPointer(Node* node) {
+Node* MemoryLowering::DecodeExternalPointer(
+ Node* node, ExternalPointerTag external_pointer_tag) {
+#ifdef V8_HEAP_SANDBOX
DCHECK(V8_HEAP_SANDBOX_BOOL);
DCHECK(node->opcode() == IrOpcode::kLoad ||
node->opcode() == IrOpcode::kPoisonedLoad);
@@ -317,16 +323,29 @@ Node* MemoryLowering::DecodeExternalPointer(Node* node) {
// Clone the load node and put it here.
// TODO(turbofan): consider adding GraphAssembler::Clone() suitable for
  // cloning nodes from arbitrary locations in effect/control chains.
- Node* node_copy = __ AddNode(graph()->CloneNode(node));
+ Node* index = __ AddNode(graph()->CloneNode(node));
// Uncomment this to generate a breakpoint for debugging purposes.
// __ DebugBreak();
- // Decode loaded enternal pointer.
+ // Decode loaded external pointer.
STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
- Node* salt = __ IntPtrConstant(kExternalPointerSalt);
- Node* decoded_ptr = __ WordXor(node_copy, salt);
+ Node* external_pointer_table_address = __ ExternalConstant(
+ ExternalReference::external_pointer_table_address(isolate()));
+ Node* table = __ Load(MachineType::Pointer(), external_pointer_table_address,
+ Internals::kExternalPointerTableBufferOffset);
+ // TODO(v8:10391, saelo): bounds check if table is not caged
+ Node* offset = __ Int32Mul(index, __ Int32Constant(8));
+ Node* decoded_ptr =
+ __ Load(MachineType::Pointer(), table, __ ChangeUint32ToUint64(offset));
+ if (external_pointer_tag != 0) {
+ Node* tag = __ IntPtrConstant(external_pointer_tag);
+ decoded_ptr = __ WordXor(decoded_ptr, tag);
+ }
return decoded_ptr;
+#else
+ return node;
+#endif // V8_HEAP_SANDBOX
}
Reduction MemoryLowering::ReduceLoadField(Node* node) {
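Under V8_HEAP_SANDBOX the generated graph is conceptually equivalent to the following (illustration only; 8-byte table entries, tag XORed in only when non-zero):

  Address entry = external_pointer_table[index];  // table load at index * 8
  Address ptr = (tag != 0) ? (entry ^ tag) : entry;
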
@@ -335,6 +354,11 @@ Reduction MemoryLowering::ReduceLoadField(Node* node) {
Node* offset = __ IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph_zone(), 1, offset);
MachineType type = access.machine_type;
+ if (V8_HEAP_SANDBOX_BOOL &&
+ access.type.Is(Type::SandboxedExternalPointer())) {
+    // External pointer table indices are 32-bit numbers.
+ type = MachineType::Uint32();
+ }
if (NeedsPoisoning(access.load_sensitivity)) {
NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
@@ -342,7 +366,12 @@ Reduction MemoryLowering::ReduceLoadField(Node* node) {
}
if (V8_HEAP_SANDBOX_BOOL &&
access.type.Is(Type::SandboxedExternalPointer())) {
- node = DecodeExternalPointer(node);
+#ifdef V8_HEAP_SANDBOX
+ ExternalPointerTag tag = access.external_pointer_tag;
+#else
+ ExternalPointerTag tag = kExternalPointerNullTag;
+#endif
+ node = DecodeExternalPointer(node, tag);
return Replace(node);
} else {
DCHECK(!access.type.Is(Type::SandboxedExternalPointer()));
diff --git a/deps/v8/src/compiler/memory-lowering.h b/deps/v8/src/compiler/memory-lowering.h
index 7990b1715c..7ad02b95af 100644
--- a/deps/v8/src/compiler/memory-lowering.h
+++ b/deps/v8/src/compiler/memory-lowering.h
@@ -32,6 +32,9 @@ class MemoryLowering final : public Reducer {
// An allocation state is propagated on the effect paths through the graph.
class AllocationState final : public ZoneObject {
public:
+ AllocationState(const AllocationState&) = delete;
+ AllocationState& operator=(const AllocationState&) = delete;
+
static AllocationState const* Empty(Zone* zone) {
return zone->New<AllocationState>();
}
@@ -65,8 +68,6 @@ class MemoryLowering final : public Reducer {
intptr_t const size_;
Node* const top_;
Node* const effect_;
-
- DISALLOW_COPY_AND_ASSIGN(AllocationState);
};
using WriteBarrierAssertFailedCallback = std::function<void(
@@ -108,7 +109,7 @@ class MemoryLowering final : public Reducer {
Node* value,
AllocationState const* state,
WriteBarrierKind);
- Node* DecodeExternalPointer(Node* encoded_pointer);
+ Node* DecodeExternalPointer(Node* encoded_pointer, ExternalPointerTag tag);
Node* ComputeIndex(ElementAccess const& access, Node* node);
bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
diff --git a/deps/v8/src/compiler/node-cache.h b/deps/v8/src/compiler/node-cache.h
index 935e5778e3..8e1d3d4eae 100644
--- a/deps/v8/src/compiler/node-cache.h
+++ b/deps/v8/src/compiler/node-cache.h
@@ -33,6 +33,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) NodeCache final {
public:
explicit NodeCache(Zone* zone) : map_(zone) {}
~NodeCache() = default;
+ NodeCache(const NodeCache&) = delete;
+ NodeCache& operator=(const NodeCache&) = delete;
// Search for node associated with {key} and return a pointer to a memory
// location in this cache that stores an entry for the key. If the location
@@ -50,8 +52,6 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) NodeCache final {
private:
ZoneUnorderedMap<Key, Node*, Hash, Pred> map_;
-
- DISALLOW_COPY_AND_ASSIGN(NodeCache);
};
// Various default cache types.
diff --git a/deps/v8/src/compiler/node-marker.h b/deps/v8/src/compiler/node-marker.h
index e38105dd8b..df4eac97a9 100644
--- a/deps/v8/src/compiler/node-marker.h
+++ b/deps/v8/src/compiler/node-marker.h
@@ -19,6 +19,8 @@ class Graph;
class NodeMarkerBase {
public:
NodeMarkerBase(Graph* graph, uint32_t num_states);
+ NodeMarkerBase(const NodeMarkerBase&) = delete;
+ NodeMarkerBase& operator=(const NodeMarkerBase&) = delete;
V8_INLINE Mark Get(const Node* node) {
Mark mark = node->mark();
@@ -37,8 +39,6 @@ class NodeMarkerBase {
private:
Mark const mark_min_;
Mark const mark_max_;
-
- DISALLOW_COPY_AND_ASSIGN(NodeMarkerBase);
};
// A NodeMarker assigns a local "state" to every node of a graph in constant
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index bd93b545e1..c9736c1d48 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -11,6 +11,7 @@
#include "src/base/compiler-specific.h"
#include "src/codegen/external-reference.h"
#include "src/common/globals.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/numbers/double.h"
@@ -48,96 +49,89 @@ struct NodeMatcher {
Node* node_;
};
+inline Node* SkipValueIdentities(Node* node) {
+#ifdef DEBUG
+ bool seen_fold_constant = false;
+#endif
+ do {
+#ifdef DEBUG
+ if (node->opcode() == IrOpcode::kFoldConstant) {
+ DCHECK(!seen_fold_constant);
+ seen_fold_constant = true;
+ }
+#endif
+ } while (NodeProperties::IsValueIdentity(node, &node));
+ DCHECK_NOT_NULL(node);
+ return node;
+}
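SkipValueIdentities unwraps value-preserving nodes (TypeGuard, FoldConstant) until it reaches the underlying producer; a hedged behavioral sketch:

  // Given a node {n} wrapped in TypeGuard/FoldConstant identities over an
  // Int32Constant(7), SkipValueIdentities(n) returns the Int32Constant node.
  Node* unwrapped = SkipValueIdentities(n);  // never null; may equal n itself
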
// A pattern matcher for arbitrary value constants.
+//
+// Note that value identities on the input node are skipped when matching. The
+// resolved value may not be a parameter of the input node. The node() method
+// returns the unmodified input node. This is by design, as reducers may wish to
+// match value constants but delay reducing the node until a later phase. For
+// example, binary operator reducers may opt to keep FoldConstant operands while
+// applying a reduction that matches on the constant value of the FoldConstant.
template <typename T, IrOpcode::Value kOpcode>
struct ValueMatcher : public NodeMatcher {
using ValueType = T;
- explicit ValueMatcher(Node* node) : NodeMatcher(node) {
- static_assert(kOpcode != IrOpcode::kFoldConstant, "unsupported opcode");
- if (node->opcode() == IrOpcode::kFoldConstant) {
- node = node->InputAt(1);
- }
- DCHECK_NE(node->opcode(), IrOpcode::kFoldConstant);
- has_value_ = opcode() == kOpcode;
- if (has_value_) {
- value_ = OpParameter<T>(node->op());
+ explicit ValueMatcher(Node* node)
+ : NodeMatcher(node), resolved_value_(), has_resolved_value_(false) {
+ node = SkipValueIdentities(node);
+ has_resolved_value_ = node->opcode() == kOpcode;
+ if (has_resolved_value_) {
+ resolved_value_ = OpParameter<T>(node->op());
}
}
- bool HasValue() const { return has_value_; }
- const T& Value() const {
- DCHECK(HasValue());
- return value_;
+ bool HasResolvedValue() const { return has_resolved_value_; }
+ const T& ResolvedValue() const {
+ CHECK(HasResolvedValue());
+ return resolved_value_;
}
private:
- T value_;
- bool has_value_;
+ T resolved_value_;
+ bool has_resolved_value_;
};
-
template <>
inline ValueMatcher<uint32_t, IrOpcode::kInt32Constant>::ValueMatcher(
Node* node)
- : NodeMatcher(node),
- value_(),
- has_value_(opcode() == IrOpcode::kInt32Constant) {
- if (has_value_) {
- value_ = static_cast<uint32_t>(OpParameter<int32_t>(node->op()));
+ : NodeMatcher(node), resolved_value_(), has_resolved_value_(false) {
+ node = SkipValueIdentities(node);
+ has_resolved_value_ = node->opcode() == IrOpcode::kInt32Constant;
+ if (has_resolved_value_) {
+ resolved_value_ = static_cast<uint32_t>(OpParameter<int32_t>(node->op()));
}
}
-
template <>
inline ValueMatcher<int64_t, IrOpcode::kInt64Constant>::ValueMatcher(Node* node)
- : NodeMatcher(node), value_(), has_value_(false) {
- if (opcode() == IrOpcode::kInt32Constant) {
- value_ = OpParameter<int32_t>(node->op());
- has_value_ = true;
- } else if (opcode() == IrOpcode::kInt64Constant) {
- value_ = OpParameter<int64_t>(node->op());
- has_value_ = true;
+ : NodeMatcher(node), resolved_value_(), has_resolved_value_(false) {
+ node = SkipValueIdentities(node);
+ if (node->opcode() == IrOpcode::kInt32Constant) {
+ resolved_value_ = OpParameter<int32_t>(node->op());
+ has_resolved_value_ = true;
+ } else if (node->opcode() == IrOpcode::kInt64Constant) {
+ resolved_value_ = OpParameter<int64_t>(node->op());
+ has_resolved_value_ = true;
}
}
-
template <>
inline ValueMatcher<uint64_t, IrOpcode::kInt64Constant>::ValueMatcher(
Node* node)
- : NodeMatcher(node), value_(), has_value_(false) {
- if (opcode() == IrOpcode::kInt32Constant) {
- value_ = static_cast<uint32_t>(OpParameter<int32_t>(node->op()));
- has_value_ = true;
- } else if (opcode() == IrOpcode::kInt64Constant) {
- value_ = static_cast<uint64_t>(OpParameter<int64_t>(node->op()));
- has_value_ = true;
- }
-}
-
-template <>
-inline ValueMatcher<double, IrOpcode::kNumberConstant>::ValueMatcher(Node* node)
- : NodeMatcher(node), value_(), has_value_(false) {
- if (node->opcode() == IrOpcode::kNumberConstant) {
- value_ = OpParameter<double>(node->op());
- has_value_ = true;
- } else if (node->opcode() == IrOpcode::kFoldConstant) {
- node = node->InputAt(1);
- DCHECK_NE(node->opcode(), IrOpcode::kFoldConstant);
- }
-}
-
-template <>
-inline ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant>::ValueMatcher(
- Node* node)
- : NodeMatcher(node), value_(), has_value_(false) {
- if (node->opcode() == IrOpcode::kHeapConstant) {
- value_ = OpParameter<Handle<HeapObject>>(node->op());
- has_value_ = true;
- } else if (node->opcode() == IrOpcode::kFoldConstant) {
- node = node->InputAt(1);
- DCHECK_NE(node->opcode(), IrOpcode::kFoldConstant);
+ : NodeMatcher(node), resolved_value_(), has_resolved_value_(false) {
+ node = SkipValueIdentities(node);
+ if (node->opcode() == IrOpcode::kInt32Constant) {
+ resolved_value_ = static_cast<uint32_t>(OpParameter<int32_t>(node->op()));
+ has_resolved_value_ = true;
+ } else if (node->opcode() == IrOpcode::kInt64Constant) {
+ resolved_value_ = static_cast<uint64_t>(OpParameter<int64_t>(node->op()));
+ has_resolved_value_ = true;
}
}
@@ -147,24 +141,27 @@ struct IntMatcher final : public ValueMatcher<T, kOpcode> {
explicit IntMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
bool Is(const T& value) const {
- return this->HasValue() && this->Value() == value;
+ return this->HasResolvedValue() && this->ResolvedValue() == value;
}
bool IsInRange(const T& low, const T& high) const {
- return this->HasValue() && low <= this->Value() && this->Value() <= high;
+ return this->HasResolvedValue() && low <= this->ResolvedValue() &&
+ this->ResolvedValue() <= high;
}
bool IsMultipleOf(T n) const {
- return this->HasValue() && (this->Value() % n) == 0;
+ return this->HasResolvedValue() && (this->ResolvedValue() % n) == 0;
}
bool IsPowerOf2() const {
- return this->HasValue() && this->Value() > 0 &&
- (this->Value() & (this->Value() - 1)) == 0;
+ return this->HasResolvedValue() && this->ResolvedValue() > 0 &&
+ (this->ResolvedValue() & (this->ResolvedValue() - 1)) == 0;
}
bool IsNegativePowerOf2() const {
- return this->HasValue() && this->Value() < 0 &&
- ((this->Value() == std::numeric_limits<T>::min()) ||
- (-this->Value() & (-this->Value() - 1)) == 0);
+ return this->HasResolvedValue() && this->ResolvedValue() < 0 &&
+ ((this->ResolvedValue() == std::numeric_limits<T>::min()) ||
+ (-this->ResolvedValue() & (-this->ResolvedValue() - 1)) == 0);
+ }
+ bool IsNegative() const {
+ return this->HasResolvedValue() && this->ResolvedValue() < 0;
}
- bool IsNegative() const { return this->HasValue() && this->Value() < 0; }
};
using Int32Matcher = IntMatcher<int32_t, IrOpcode::kInt32Constant>;
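A usage sketch of the renamed matcher interface (node and graph context assumed):

  Int32Matcher m(node);
  if (m.HasResolvedValue() && m.ResolvedValue() == 0) {
    // {node} resolves to the constant 0, possibly through a TypeGuard or
    // FoldConstant identity; m.node() still returns the original {node}.
  }
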
@@ -186,28 +183,36 @@ struct FloatMatcher final : public ValueMatcher<T, kOpcode> {
explicit FloatMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
bool Is(const T& value) const {
- return this->HasValue() && this->Value() == value;
+ return this->HasResolvedValue() && this->ResolvedValue() == value;
}
bool IsInRange(const T& low, const T& high) const {
- return this->HasValue() && low <= this->Value() && this->Value() <= high;
+ return this->HasResolvedValue() && low <= this->ResolvedValue() &&
+ this->ResolvedValue() <= high;
}
bool IsMinusZero() const {
- return this->Is(0.0) && std::signbit(this->Value());
+ return this->Is(0.0) && std::signbit(this->ResolvedValue());
+ }
+ bool IsNegative() const {
+ return this->HasResolvedValue() && this->ResolvedValue() < 0.0;
+ }
+ bool IsNaN() const {
+ return this->HasResolvedValue() && std::isnan(this->ResolvedValue());
+ }
+ bool IsZero() const {
+ return this->Is(0.0) && !std::signbit(this->ResolvedValue());
}
- bool IsNegative() const { return this->HasValue() && this->Value() < 0.0; }
- bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
- bool IsZero() const { return this->Is(0.0) && !std::signbit(this->Value()); }
bool IsNormal() const {
- return this->HasValue() && std::isnormal(this->Value());
+ return this->HasResolvedValue() && std::isnormal(this->ResolvedValue());
}
bool IsInteger() const {
- return this->HasValue() && std::nearbyint(this->Value()) == this->Value();
+ return this->HasResolvedValue() &&
+ std::nearbyint(this->ResolvedValue()) == this->ResolvedValue();
}
bool IsPositiveOrNegativePowerOf2() const {
- if (!this->HasValue() || (this->Value() == 0.0)) {
+ if (!this->HasResolvedValue() || (this->ResolvedValue() == 0.0)) {
return false;
}
- Double value = Double(this->Value());
+ Double value = Double(this->ResolvedValue());
return !value.IsInfinite() && base::bits::IsPowerOfTwo(value.Significand());
}
};
@@ -224,11 +229,12 @@ struct HeapObjectMatcherImpl final
: ValueMatcher<Handle<HeapObject>, kHeapConstantOpcode>(node) {}
bool Is(Handle<HeapObject> const& value) const {
- return this->HasValue() && this->Value().address() == value.address();
+ return this->HasResolvedValue() &&
+ this->ResolvedValue().address() == value.address();
}
HeapObjectRef Ref(JSHeapBroker* broker) const {
- return HeapObjectRef(broker, this->Value());
+ return HeapObjectRef(broker, this->ResolvedValue());
}
};
@@ -242,7 +248,7 @@ struct ExternalReferenceMatcher final
explicit ExternalReferenceMatcher(Node* node)
: ValueMatcher<ExternalReference, IrOpcode::kExternalConstant>(node) {}
bool Is(const ExternalReference& value) const {
- return this->HasValue() && this->Value() == value;
+ return this->HasResolvedValue() && this->ResolvedValue() == value;
}
};
@@ -285,7 +291,9 @@ struct BinopMatcher : public NodeMatcher {
const Left& left() const { return left_; }
const Right& right() const { return right_; }
- bool IsFoldable() const { return left().HasValue() && right().HasValue(); }
+ bool IsFoldable() const {
+ return left().HasResolvedValue() && right().HasResolvedValue();
+ }
bool LeftEqualsRight() const { return left().node() == right().node(); }
bool OwnsInput(Node* input) {
@@ -309,7 +317,7 @@ struct BinopMatcher : public NodeMatcher {
private:
void PutConstantOnRight() {
- if (left().HasValue() && !right().HasValue()) {
+ if (left().HasResolvedValue() && !right().HasResolvedValue()) {
SwapInputs();
}
}
@@ -340,17 +348,17 @@ struct ScaleMatcher {
if (node->InputCount() < 2) return;
BinopMatcher m(node);
if (node->opcode() == kShiftOpcode) {
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
typename BinopMatcher::RightMatcher::ValueType value =
- m.right().Value();
+ m.right().ResolvedValue();
if (value >= 0 && value <= 3) {
scale_ = static_cast<int>(value);
}
}
} else if (node->opcode() == kMulOpcode) {
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
typename BinopMatcher::RightMatcher::ValueType value =
- m.right().Value();
+ m.right().ResolvedValue();
if (value == 1) {
scale_ = 0;
} else if (value == 2) {
@@ -550,7 +558,7 @@ struct BaseWithIndexAndDisplacementMatcher {
if (right->opcode() == AddMatcher::kSubOpcode &&
OwnedByAddressingOperand(right)) {
AddMatcher right_matcher(right);
- if (right_matcher.right().HasValue()) {
+ if (right_matcher.right().HasResolvedValue()) {
// (S + (B - D))
base = right_matcher.left().node();
displacement = right_matcher.right().node();
@@ -562,7 +570,7 @@ struct BaseWithIndexAndDisplacementMatcher {
if (right->opcode() == AddMatcher::kAddOpcode &&
OwnedByAddressingOperand(right)) {
AddMatcher right_matcher(right);
- if (right_matcher.right().HasValue()) {
+ if (right_matcher.right().HasResolvedValue()) {
// (S + (B + D))
base = right_matcher.left().node();
displacement = right_matcher.right().node();
@@ -570,7 +578,7 @@ struct BaseWithIndexAndDisplacementMatcher {
// (S + (B + B))
base = right;
}
- } else if (m.right().HasValue()) {
+ } else if (m.right().HasResolvedValue()) {
// (S + D)
displacement = right;
} else {
@@ -585,7 +593,7 @@ struct BaseWithIndexAndDisplacementMatcher {
AddMatcher left_matcher(left);
Node* left_left = left_matcher.left().node();
Node* left_right = left_matcher.right().node();
- if (left_matcher.right().HasValue()) {
+ if (left_matcher.right().HasResolvedValue()) {
if (left_matcher.HasIndexInput() && left_left->OwnedBy(left)) {
// ((S - D) + B)
index = left_matcher.IndexInput();
@@ -612,7 +620,7 @@ struct BaseWithIndexAndDisplacementMatcher {
Node* left_left = left_matcher.left().node();
Node* left_right = left_matcher.right().node();
if (left_matcher.HasIndexInput() && left_left->OwnedBy(left)) {
- if (left_matcher.right().HasValue()) {
+ if (left_matcher.right().HasResolvedValue()) {
// ((S + D) + B)
index = left_matcher.IndexInput();
scale = left_matcher.scale();
@@ -620,7 +628,7 @@ struct BaseWithIndexAndDisplacementMatcher {
power_of_two_plus_one = left_matcher.power_of_two_plus_one();
displacement = left_right;
base = right;
- } else if (m.right().HasValue()) {
+ } else if (m.right().HasResolvedValue()) {
if (left->OwnedBy(node)) {
// ((S + B) + D)
index = left_matcher.IndexInput();
@@ -640,12 +648,12 @@ struct BaseWithIndexAndDisplacementMatcher {
base = right;
}
} else {
- if (left_matcher.right().HasValue()) {
+ if (left_matcher.right().HasResolvedValue()) {
// ((B + D) + B)
index = left_left;
displacement = left_right;
base = right;
- } else if (m.right().HasValue()) {
+ } else if (m.right().HasResolvedValue()) {
if (left->OwnedBy(node)) {
// ((B + B) + D)
index = left_left;
@@ -663,7 +671,7 @@ struct BaseWithIndexAndDisplacementMatcher {
}
}
} else {
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
// (B + D)
base = left;
displacement = right;
diff --git a/deps/v8/src/compiler/node-origin-table.h b/deps/v8/src/compiler/node-origin-table.h
index 4bb66a769d..c45e235a6e 100644
--- a/deps/v8/src/compiler/node-origin-table.h
+++ b/deps/v8/src/compiler/node-origin-table.h
@@ -82,10 +82,12 @@ class V8_EXPORT_PRIVATE NodeOriginTable final
if (origins_) origins_->current_origin_ = prev_origin_;
}
+ Scope(const Scope&) = delete;
+ Scope& operator=(const Scope&) = delete;
+
private:
NodeOriginTable* const origins_;
NodeOrigin prev_origin_;
- DISALLOW_COPY_AND_ASSIGN(Scope);
};
class PhaseScope final {
@@ -103,13 +105,17 @@ class V8_EXPORT_PRIVATE NodeOriginTable final
if (origins_) origins_->current_phase_name_ = prev_phase_name_;
}
+ PhaseScope(const PhaseScope&) = delete;
+ PhaseScope& operator=(const PhaseScope&) = delete;
+
private:
NodeOriginTable* const origins_;
const char* prev_phase_name_;
- DISALLOW_COPY_AND_ASSIGN(PhaseScope);
};
explicit NodeOriginTable(Graph* graph);
+ NodeOriginTable(const NodeOriginTable&) = delete;
+ NodeOriginTable& operator=(const NodeOriginTable&) = delete;
void AddDecorator();
void RemoveDecorator();
@@ -130,8 +136,6 @@ class V8_EXPORT_PRIVATE NodeOriginTable final
const char* current_phase_name_;
NodeAuxData<NodeOrigin, NodeOrigin::Unknown> table_;
-
- DISALLOW_COPY_AND_ASSIGN(NodeOriginTable);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 3b78872437..bc25b83d92 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -328,7 +328,7 @@ base::Optional<MapRef> NodeProperties::GetJSCreateMap(JSHeapBroker* broker,
receiver->opcode() == IrOpcode::kJSCreateArray);
HeapObjectMatcher mtarget(GetValueInput(receiver, 0));
HeapObjectMatcher mnewtarget(GetValueInput(receiver, 1));
- if (mtarget.HasValue() && mnewtarget.HasValue() &&
+ if (mtarget.HasResolvedValue() && mnewtarget.HasResolvedValue() &&
mnewtarget.Ref(broker).IsJSFunction()) {
ObjectRef target = mtarget.Ref(broker);
JSFunctionRef newtarget = mnewtarget.Ref(broker).AsJSFunction();
@@ -349,11 +349,11 @@ base::Optional<MapRef> NodeProperties::GetJSCreateMap(JSHeapBroker* broker,
}
// static
-NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
+NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
JSHeapBroker* broker, Node* receiver, Node* effect,
ZoneHandleSet<Map>* maps_return) {
HeapObjectMatcher m(receiver);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
HeapObjectRef receiver = m.Ref(broker);
// We don't use ICs for the Array.prototype and the Object.prototype
// because the runtime has to be able to intercept them properly, so
@@ -368,11 +368,11 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
// The {receiver_map} is only reliable when we install a stability
// code dependency.
*maps_return = ZoneHandleSet<Map>(receiver.map().object());
- return kUnreliableReceiverMaps;
+ return kUnreliableMaps;
}
}
}
- InferReceiverMapsResult result = kReliableReceiverMaps;
+ InferMapsResult result = kReliableMaps;
while (true) {
switch (effect->opcode()) {
case IrOpcode::kMapGuard: {
@@ -399,9 +399,9 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
return result;
}
// We reached the allocation of the {receiver}.
- return kNoReceiverMaps;
+ return kNoMaps;
}
- result = kUnreliableReceiverMaps; // JSCreate can have side-effect.
+ result = kUnreliableMaps; // JSCreate can have side-effect.
break;
}
case IrOpcode::kJSCreatePromise: {
@@ -423,14 +423,14 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
if (IsSame(receiver, object)) {
Node* const value = GetValueInput(effect, 1);
HeapObjectMatcher m(value);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
*maps_return = ZoneHandleSet<Map>(m.Ref(broker).AsMap().object());
return result;
}
}
// Without alias analysis we cannot tell whether this
// StoreField[map] affects {receiver} or not.
- result = kUnreliableReceiverMaps;
+ result = kUnreliableMaps;
}
break;
}
@@ -453,25 +453,25 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
if (control->opcode() != IrOpcode::kLoop) {
DCHECK(control->opcode() == IrOpcode::kDead ||
control->opcode() == IrOpcode::kMerge);
- return kNoReceiverMaps;
+ return kNoMaps;
}
// Continue search for receiver map outside the loop. Since operations
// inside the loop may change the map, the result is unreliable.
effect = GetEffectInput(effect, 0);
- result = kUnreliableReceiverMaps;
+ result = kUnreliableMaps;
continue;
}
default: {
DCHECK_EQ(1, effect->op()->EffectOutputCount());
if (effect->op()->EffectInputCount() != 1) {
// Didn't find any appropriate CheckMaps node.
- return kNoReceiverMaps;
+ return kNoMaps;
}
if (!effect->op()->HasProperty(Operator::kNoWrite)) {
// Without alias/escape analysis we cannot tell whether this
// {effect} affects {receiver} or not.
- result = kUnreliableReceiverMaps;
+ result = kUnreliableMaps;
}
break;
}
@@ -479,7 +479,7 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
// Stop walking the effect chain once we hit the definition of
// the {receiver} along the {effect}s.
- if (IsSame(receiver, effect)) return kNoReceiverMaps;
+ if (IsSame(receiver, effect)) return kNoMaps;
// Continue with the next {effect}.
DCHECK_EQ(1, effect->op()->EffectInputCount());
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 5b31f15d48..059db4f5cb 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -121,6 +121,21 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// the IfSuccess projection of {node} if present and {node} itself otherwise.
static Node* FindSuccessfulControlProjection(Node* node);
+ // Returns whether the node acts as the identity function on a value
+ // input. The input that is passed through is returned via {out_value}.
+ static bool IsValueIdentity(Node* node, Node** out_value) {
+ switch (node->opcode()) {
+ case IrOpcode::kTypeGuard:
+ *out_value = GetValueInput(node, 0);
+ return true;
+ case IrOpcode::kFoldConstant:
+ *out_value = GetValueInput(node, 1);
+ return true;
+ default:
+ return false;
+ }
+ }
+
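A short caller-side sketch (surrounding reducer context assumed):

  Node* value;
  if (NodeProperties::IsValueIdentity(node, &value)) {
    // {node} passes {value} through unchanged; match against {value} instead.
  }
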
// ---------------------------------------------------------------------------
// Miscellaneous mutators.
@@ -188,15 +203,15 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// Walks up the {effect} chain to find a witness that provides map
// information about the {receiver}. Can look through potentially
// side effecting nodes.
- enum InferReceiverMapsResult {
- kNoReceiverMaps, // No receiver maps inferred.
- kReliableReceiverMaps, // Receiver maps can be trusted.
- kUnreliableReceiverMaps // Receiver maps might have changed (side-effect).
+ enum InferMapsResult {
+ kNoMaps, // No maps inferred.
+ kReliableMaps, // Maps can be trusted.
+ kUnreliableMaps // Maps might have changed (side-effect).
};
- // DO NOT USE InferReceiverMapsUnsafe IN NEW CODE. Use MapInference instead.
- static InferReceiverMapsResult InferReceiverMapsUnsafe(
- JSHeapBroker* broker, Node* receiver, Node* effect,
- ZoneHandleSet<Map>* maps_return);
+ // DO NOT USE InferMapsUnsafe IN NEW CODE. Use MapInference instead.
+ static InferMapsResult InferMapsUnsafe(JSHeapBroker* broker, Node* object,
+ Node* effect,
+ ZoneHandleSet<Map>* maps);
// Return the initial map of the new-target if the allocation can be inlined.
static base::Optional<MapRef> GetJSCreateMap(JSHeapBroker* broker,
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index b9f1c3c844..8525fa0b01 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -218,9 +218,9 @@ void Node::InsertInputs(Zone* zone, int index, int count) {
DCHECK_LT(0, count);
DCHECK_LT(index, InputCount());
for (int i = 0; i < count; i++) {
- AppendInput(zone, InputAt(Max(InputCount() - count, 0)));
+ AppendInput(zone, InputAt(std::max(InputCount() - count, 0)));
}
- for (int i = InputCount() - count - 1; i >= Max(index, count); --i) {
+ for (int i = InputCount() - count - 1; i >= std::max(index, count); --i) {
ReplaceInput(i, InputAt(i - count));
}
for (int i = 0; i < count; i++) {
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 1936f06457..823bee4597 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -236,6 +236,8 @@ class V8_EXPORT_PRIVATE Node final {
// a node exceeds the maximum inline capacity.
Node(NodeId id, const Operator* op, int inline_count, int inline_capacity);
+ Node(const Node&) = delete;
+ Node& operator=(const Node&) = delete;
inline Address inputs_location() const;
@@ -300,8 +302,6 @@ class V8_EXPORT_PRIVATE Node final {
friend class Edge;
friend class NodeMarkerBase;
friend class NodeProperties;
-
- DISALLOW_COPY_AND_ASSIGN(Node);
};
Address Node::inputs_location() const {
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index f1faeec936..5027f734d4 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -819,22 +819,23 @@
V(I64x2ReplaceLane) \
V(I64x2ReplaceLaneI32Pair) \
V(I64x2Neg) \
+ V(I64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High) \
+ V(I64x2BitMask) \
V(I64x2Shl) \
V(I64x2ShrS) \
V(I64x2Add) \
V(I64x2Sub) \
V(I64x2Mul) \
- V(I64x2MinS) \
- V(I64x2MaxS) \
V(I64x2Eq) \
- V(I64x2Ne) \
- V(I64x2GtS) \
- V(I64x2GeS) \
V(I64x2ShrU) \
- V(I64x2MinU) \
- V(I64x2MaxU) \
- V(I64x2GtU) \
- V(I64x2GeU) \
+ V(I64x2ExtMulLowI32x4S) \
+ V(I64x2ExtMulHighI32x4S) \
+ V(I64x2ExtMulLowI32x4U) \
+ V(I64x2ExtMulHighI32x4U) \
+ V(I64x2SignSelect) \
V(I32x4Splat) \
V(I32x4ExtractLane) \
V(I32x4ReplaceLane) \
@@ -869,6 +870,13 @@
V(I32x4Abs) \
V(I32x4BitMask) \
V(I32x4DotI16x8S) \
+ V(I32x4ExtMulLowI16x8S) \
+ V(I32x4ExtMulHighI16x8S) \
+ V(I32x4ExtMulLowI16x8U) \
+ V(I32x4ExtMulHighI16x8U) \
+ V(I32x4SignSelect) \
+ V(I32x4ExtAddPairwiseI16x8S) \
+ V(I32x4ExtAddPairwiseI16x8U) \
V(I16x8Splat) \
V(I16x8ExtractLaneU) \
V(I16x8ExtractLaneS) \
@@ -880,10 +888,10 @@
V(I16x8ShrS) \
V(I16x8SConvertI32x4) \
V(I16x8Add) \
- V(I16x8AddSaturateS) \
+ V(I16x8AddSatS) \
V(I16x8AddHoriz) \
V(I16x8Sub) \
- V(I16x8SubSaturateS) \
+ V(I16x8SubSatS) \
V(I16x8Mul) \
V(I16x8MinS) \
V(I16x8MaxS) \
@@ -897,8 +905,8 @@
V(I16x8UConvertI8x16High) \
V(I16x8ShrU) \
V(I16x8UConvertI32x4) \
- V(I16x8AddSaturateU) \
- V(I16x8SubSaturateU) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
V(I16x8MinU) \
V(I16x8MaxU) \
V(I16x8LtU) \
@@ -906,8 +914,16 @@
V(I16x8GtU) \
V(I16x8GeU) \
V(I16x8RoundingAverageU) \
+ V(I16x8Q15MulRSatS) \
V(I16x8Abs) \
V(I16x8BitMask) \
+ V(I16x8ExtMulLowI8x16S) \
+ V(I16x8ExtMulHighI8x16S) \
+ V(I16x8ExtMulLowI8x16U) \
+ V(I16x8ExtMulHighI8x16U) \
+ V(I16x8SignSelect) \
+ V(I16x8ExtAddPairwiseI8x16S) \
+ V(I16x8ExtAddPairwiseI8x16U) \
V(I8x16Splat) \
V(I8x16ExtractLaneU) \
V(I8x16ExtractLaneS) \
@@ -917,9 +933,9 @@
V(I8x16Shl) \
V(I8x16ShrS) \
V(I8x16Add) \
- V(I8x16AddSaturateS) \
+ V(I8x16AddSatS) \
V(I8x16Sub) \
- V(I8x16SubSaturateS) \
+ V(I8x16SubSatS) \
V(I8x16Mul) \
V(I8x16MinS) \
V(I8x16MaxS) \
@@ -930,8 +946,8 @@
V(I8x16GtS) \
V(I8x16GeS) \
V(I8x16UConvertI16x8) \
- V(I8x16AddSaturateU) \
- V(I8x16SubSaturateU) \
+ V(I8x16AddSatU) \
+ V(I8x16SubSatU) \
V(I8x16ShrU) \
V(I8x16MinU) \
V(I8x16MaxU) \
@@ -940,8 +956,10 @@
V(I8x16GtU) \
V(I8x16GeU) \
V(I8x16RoundingAverageU) \
+ V(I8x16Popcnt) \
V(I8x16Abs) \
V(I8x16BitMask) \
+ V(I8x16SignSelect) \
V(S128Load) \
V(S128Store) \
V(S128Zero) \
@@ -954,15 +972,15 @@
V(S128AndNot) \
V(I8x16Swizzle) \
V(I8x16Shuffle) \
- V(V64x2AnyTrue) \
- V(V64x2AllTrue) \
V(V32x4AnyTrue) \
V(V32x4AllTrue) \
V(V16x8AnyTrue) \
V(V16x8AllTrue) \
V(V8x16AnyTrue) \
V(V8x16AllTrue) \
- V(LoadTransform)
+ V(LoadTransform) \
+ V(LoadLane) \
+ V(StoreLane)
#define VALUE_OP_LIST(V) \
COMMON_OP_LIST(V) \
@@ -1094,12 +1112,15 @@ class V8_EXPORT_PRIVATE IrOpcode {
case kJSCreateLiteralArray:
case kJSCreateLiteralObject:
case kJSCreateLiteralRegExp:
+ case kJSForInNext:
+ case kJSForInPrepare:
case kJSGetIterator:
case kJSGetTemplateObject:
case kJSHasProperty:
case kJSInstanceOf:
case kJSLoadGlobal:
case kJSLoadNamed:
+ case kJSLoadNamedFromSuper:
case kJSLoadProperty:
case kJSStoreDataPropertyInLiteral:
case kJSStoreGlobal:
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index c77249f621..a8e29416b5 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -193,16 +193,17 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSCloneObject:
// Property access operations
+ case IrOpcode::kJSDeleteProperty:
+ case IrOpcode::kJSLoadGlobal:
case IrOpcode::kJSLoadNamed:
case IrOpcode::kJSLoadNamedFromSuper:
- case IrOpcode::kJSStoreNamed:
case IrOpcode::kJSLoadProperty:
- case IrOpcode::kJSStoreProperty:
- case IrOpcode::kJSLoadGlobal:
+ case IrOpcode::kJSStoreDataPropertyInLiteral:
+ case IrOpcode::kJSStoreInArrayLiteral:
case IrOpcode::kJSStoreGlobal:
+ case IrOpcode::kJSStoreNamed:
case IrOpcode::kJSStoreNamedOwn:
- case IrOpcode::kJSStoreDataPropertyInLiteral:
- case IrOpcode::kJSDeleteProperty:
+ case IrOpcode::kJSStoreProperty:
// Conversions
case IrOpcode::kJSToLength:
diff --git a/deps/v8/src/compiler/operator-properties.h b/deps/v8/src/compiler/operator-properties.h
index 47db81df98..e566a58a02 100644
--- a/deps/v8/src/compiler/operator-properties.h
+++ b/deps/v8/src/compiler/operator-properties.h
@@ -17,6 +17,9 @@ class Operator;
class V8_EXPORT_PRIVATE OperatorProperties final {
public:
+ OperatorProperties(const OperatorProperties&) = delete;
+ OperatorProperties& operator=(const OperatorProperties&) = delete;
+
static bool HasContextInput(const Operator* op);
static int GetContextInputCount(const Operator* op) {
return HasContextInput(op) ? 1 : 0;
@@ -32,9 +35,6 @@ class V8_EXPORT_PRIVATE OperatorProperties final {
static int GetTotalInputCount(const Operator* op);
static bool IsBasicBlockBegin(const Operator* op);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(OperatorProperties);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index 7227c92cd8..3239eb0269 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -65,6 +65,8 @@ class V8_EXPORT_PRIVATE Operator : public NON_EXPORTED_BASE(ZoneObject) {
Operator(Opcode opcode, Properties properties, const char* mnemonic,
size_t value_in, size_t effect_in, size_t control_in,
size_t value_out, size_t effect_out, size_t control_out);
+ Operator(const Operator&) = delete;
+ Operator& operator=(const Operator&) = delete;
// A small integer unique to all instances of a particular kind of operator,
// useful for quick matching for specific kinds of operators. For fast access
@@ -141,8 +143,6 @@ class V8_EXPORT_PRIVATE Operator : public NON_EXPORTED_BASE(ZoneObject) {
uint32_t value_out_;
uint8_t effect_out_;
uint32_t control_out_;
-
- DISALLOW_COPY_AND_ASSIGN(Operator);
};
DEFINE_OPERATORS_FOR_FLAGS(Operator::Properties)
diff --git a/deps/v8/src/compiler/pipeline-statistics.h b/deps/v8/src/compiler/pipeline-statistics.h
index c034183a75..330c523521 100644
--- a/deps/v8/src/compiler/pipeline-statistics.h
+++ b/deps/v8/src/compiler/pipeline-statistics.h
@@ -23,6 +23,8 @@ class PipelineStatistics : public Malloced {
PipelineStatistics(OptimizedCompilationInfo* info,
CompilationStatistics* turbo_stats, ZoneStats* zone_stats);
~PipelineStatistics();
+ PipelineStatistics(const PipelineStatistics&) = delete;
+ PipelineStatistics& operator=(const PipelineStatistics&) = delete;
void BeginPhaseKind(const char* phase_kind_name);
void EndPhaseKind();
@@ -35,6 +37,8 @@ class PipelineStatistics : public Malloced {
class CommonStats {
public:
CommonStats() : outer_zone_initial_size_(0) {}
+ CommonStats(const CommonStats&) = delete;
+ CommonStats& operator=(const CommonStats&) = delete;
void Begin(PipelineStatistics* pipeline_stats);
void End(PipelineStatistics* pipeline_stats,
@@ -44,9 +48,6 @@ class PipelineStatistics : public Malloced {
base::ElapsedTimer timer_;
size_t outer_zone_initial_size_;
size_t allocated_bytes_at_start_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CommonStats);
};
bool InPhaseKind() { return !!phase_kind_stats_.scope_; }
@@ -71,8 +72,6 @@ class PipelineStatistics : public Malloced {
// Stats for phase.
const char* phase_name_;
CommonStats phase_stats_;
-
- DISALLOW_COPY_AND_ASSIGN(PipelineStatistics);
};
@@ -85,11 +84,11 @@ class PhaseScope {
~PhaseScope() {
if (pipeline_stats_ != nullptr) pipeline_stats_->EndPhase();
}
+ PhaseScope(const PhaseScope&) = delete;
+ PhaseScope& operator=(const PhaseScope&) = delete;
private:
PipelineStatistics* const pipeline_stats_;
-
- DISALLOW_COPY_AND_ASSIGN(PhaseScope);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 7b99d07b6b..1023c6bb68 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -83,6 +83,7 @@
#include "src/diagnostics/code-tracer.h"
#include "src/diagnostics/disassembler.h"
#include "src/execution/isolate-inl.h"
+#include "src/heap/local-heap.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/shared-function-info.h"
@@ -151,9 +152,9 @@ class PipelineData {
instruction_zone_(instruction_zone_scope_.zone()),
codegen_zone_scope_(zone_stats_, kCodegenZoneName),
codegen_zone_(codegen_zone_scope_.zone()),
- broker_(new JSHeapBroker(
- isolate_, info_->zone(), info_->trace_heap_broker(),
- is_concurrent_inlining, info->IsNativeContextIndependent())),
+ broker_(new JSHeapBroker(isolate_, info_->zone(),
+ info_->trace_heap_broker(),
+ is_concurrent_inlining, info->code_kind())),
register_allocation_zone_scope_(zone_stats_,
kRegisterAllocationZoneName),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
@@ -288,6 +289,9 @@ class PipelineData {
DeleteGraphZone();
}
+ PipelineData(const PipelineData&) = delete;
+ PipelineData& operator=(const PipelineData&) = delete;
+
Isolate* isolate() const { return isolate_; }
AccountingAllocator* allocator() const { return allocator_; }
OptimizedCompilationInfo* info() const { return info_; }
@@ -486,6 +490,7 @@ class PipelineData {
call_descriptor->CalculateFixedFrameSize(info()->code_kind());
}
frame_ = codegen_zone()->New<Frame>(fixed_frame_size);
+ if (osr_helper_.has_value()) osr_helper()->SetupFrame(frame());
}
void InitializeTopTierRegisterAllocationData(
@@ -634,8 +639,6 @@ class PipelineData {
RuntimeCallStats* runtime_call_stats_ = nullptr;
const ProfileDataFromFile* profile_data_ = nullptr;
-
- DISALLOW_COPY_AND_ASSIGN(PipelineData);
};
class PipelineImpl final {
@@ -701,6 +704,8 @@ class SourcePositionWrapper final : public Reducer {
SourcePositionWrapper(Reducer* reducer, SourcePositionTable* table)
: reducer_(reducer), table_(table) {}
~SourcePositionWrapper() final = default;
+ SourcePositionWrapper(const SourcePositionWrapper&) = delete;
+ SourcePositionWrapper& operator=(const SourcePositionWrapper&) = delete;
const char* reducer_name() const override { return reducer_->reducer_name(); }
@@ -715,8 +720,6 @@ class SourcePositionWrapper final : public Reducer {
private:
Reducer* const reducer_;
SourcePositionTable* const table_;
-
- DISALLOW_COPY_AND_ASSIGN(SourcePositionWrapper);
};
class NodeOriginsWrapper final : public Reducer {
@@ -724,6 +727,8 @@ class NodeOriginsWrapper final : public Reducer {
NodeOriginsWrapper(Reducer* reducer, NodeOriginTable* table)
: reducer_(reducer), table_(table) {}
~NodeOriginsWrapper() final = default;
+ NodeOriginsWrapper(const NodeOriginsWrapper&) = delete;
+ NodeOriginsWrapper& operator=(const NodeOriginsWrapper&) = delete;
const char* reducer_name() const override { return reducer_->reducer_name(); }
@@ -737,8 +742,6 @@ class NodeOriginsWrapper final : public Reducer {
private:
Reducer* const reducer_;
NodeOriginTable* const table_;
-
- DISALLOW_COPY_AND_ASSIGN(NodeOriginsWrapper);
};
class PipelineRunScope {
@@ -764,18 +767,21 @@ class PipelineRunScope {
RuntimeCallTimerScope runtime_call_timer_scope;
};
-// LocalHeapScope encapsulates the liveness of the brokers's LocalHeap.
-class LocalHeapScope {
+// LocalIsolateScope encapsulates the phase where persistent handles are
+// attached to the LocalHeap inside {local_isolate}.
+class LocalIsolateScope {
public:
- explicit LocalHeapScope(JSHeapBroker* broker, OptimizedCompilationInfo* info)
+ explicit LocalIsolateScope(JSHeapBroker* broker,
+ OptimizedCompilationInfo* info,
+ LocalIsolate* local_isolate)
: broker_(broker), info_(info) {
- broker_->InitializeLocalHeap(info_);
- info_->tick_counter().AttachLocalHeap(broker_->local_heap());
+ broker_->AttachLocalIsolate(info_, local_isolate);
+ info_->tick_counter().AttachLocalHeap(local_isolate->heap());
}
- ~LocalHeapScope() {
+ ~LocalIsolateScope() {
info_->tick_counter().DetachLocalHeap();
- broker_->TearDownLocalHeap(info_);
+ broker_->DetachLocalIsolate(info_);
}
private:
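A minimal usage sketch of the new scope (assuming a broker, compilation info,
and a LocalIsolate owned by the current thread; it mirrors the ExecuteJobImpl
change below):

    LocalIsolateScope scope(data->broker(), data->info(), local_isolate);
    // While the scope is alive, persistent handles are attached to the
    // LocalHeap of {local_isolate}; the destructor detaches them again.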
@@ -1026,10 +1032,13 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
Handle<JSFunction> function, BailoutId osr_offset,
JavaScriptFrame* osr_frame, CodeKind code_kind);
~PipelineCompilationJob() final;
+ PipelineCompilationJob(const PipelineCompilationJob&) = delete;
+ PipelineCompilationJob& operator=(const PipelineCompilationJob&) = delete;
protected:
Status PrepareJobImpl(Isolate* isolate) final;
- Status ExecuteJobImpl(RuntimeCallStats* stats) final;
+ Status ExecuteJobImpl(RuntimeCallStats* stats,
+ LocalIsolate* local_isolate) final;
Status FinalizeJobImpl(Isolate* isolate) final;
// Registers weak object to optimized code dependencies.
@@ -1045,8 +1054,6 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
PipelineData data_;
PipelineImpl pipeline_;
Linkage* linkage_;
-
- DISALLOW_COPY_AND_ASSIGN(PipelineCompilationJob);
};
PipelineCompilationJob::PipelineCompilationJob(
@@ -1110,7 +1117,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (FLAG_turbo_loop_peeling) {
compilation_info()->set_loop_peeling();
}
- if (FLAG_turbo_inlining &&
+ if (FLAG_turbo_inlining && !compilation_info()->IsTurboprop() &&
!compilation_info()->IsNativeContextIndependent()) {
compilation_info()->set_inlining();
}
@@ -1139,7 +1146,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (compilation_info()->closure()->raw_feedback_cell().map() ==
ReadOnlyRoots(isolate).one_closure_cell_map() &&
!compilation_info()->is_osr() &&
- !compilation_info()->IsNativeContextIndependent()) {
+ !compilation_info()->IsNativeContextIndependent() &&
+ !compilation_info()->IsTurboprop()) {
compilation_info()->set_function_context_specializing();
data_.ChooseSpecializationContext();
}
@@ -1157,11 +1165,6 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (compilation_info()->is_osr()) data_.InitializeOsrHelper();
- // Make sure that we have generated the deopt entries code. This is in order
- // to avoid triggering the generation of deopt entries later during code
- // assembly.
- Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate);
-
pipeline_.Serialize();
if (!data_.broker()->is_concurrent_inlining()) {
@@ -1175,30 +1178,28 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
}
PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl(
- RuntimeCallStats* stats) {
+ RuntimeCallStats* stats, LocalIsolate* local_isolate) {
// Ensure that the RuntimeCallStats table is only available during execution
// and not during finalization as that might be on a different thread.
PipelineJobScope scope(&data_, stats);
- {
- LocalHeapScope local_heap_scope(data_.broker(), data_.info());
- if (data_.broker()->is_concurrent_inlining()) {
- if (!pipeline_.CreateGraph()) {
- return AbortOptimization(BailoutReason::kGraphBuildingFailed);
- }
- }
-
- // We selectively Unpark inside OptimizeGraph*.
- ParkedScope parked_scope(data_.broker()->local_heap());
+ LocalIsolateScope local_isolate_scope(data_.broker(), data_.info(),
+ local_isolate);
- bool success;
- if (FLAG_turboprop) {
- success = pipeline_.OptimizeGraphForMidTier(linkage_);
- } else {
- success = pipeline_.OptimizeGraph(linkage_);
+ if (data_.broker()->is_concurrent_inlining()) {
+ if (!pipeline_.CreateGraph()) {
+ return AbortOptimization(BailoutReason::kGraphBuildingFailed);
}
- if (!success) return FAILED;
}
+ // We selectively Unpark inside OptimizeGraph*.
+ bool success;
+ if (compilation_info_.code_kind() == CodeKind::TURBOPROP) {
+ success = pipeline_.OptimizeGraphForMidTier(linkage_);
+ } else {
+ success = pipeline_.OptimizeGraph(linkage_);
+ }
+ if (!success) return FAILED;
+
pipeline_.AssembleCode(linkage_);
return SUCCEEDED;
@@ -1279,9 +1280,14 @@ class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
pipeline_(&data_),
wasm_engine_(wasm_engine) {}
+ WasmHeapStubCompilationJob(const WasmHeapStubCompilationJob&) = delete;
+ WasmHeapStubCompilationJob& operator=(const WasmHeapStubCompilationJob&) =
+ delete;
+
protected:
Status PrepareJobImpl(Isolate* isolate) final;
- Status ExecuteJobImpl(RuntimeCallStats* stats) final;
+ Status ExecuteJobImpl(RuntimeCallStats* stats,
+ LocalIsolate* local_isolate) final;
Status FinalizeJobImpl(Isolate* isolate) final;
private:
@@ -1294,8 +1300,6 @@ class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
PipelineData data_;
PipelineImpl pipeline_;
wasm::WasmEngine* wasm_engine_;
-
- DISALLOW_COPY_AND_ASSIGN(WasmHeapStubCompilationJob);
};
// static
@@ -1316,7 +1320,7 @@ CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
}
CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl(
- RuntimeCallStats* stats) {
+ RuntimeCallStats* stats, LocalIsolate* local_isolate) {
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
pipeline_statistics.reset(new PipelineStatistics(
@@ -1405,10 +1409,11 @@ struct GraphBuilderPhase {
JSFunctionRef closure(data->broker(), data->info()->closure());
CallFrequency frequency(1.0f);
BuildGraphFromBytecode(
- data->broker(), temp_zone, closure.shared(), closure.feedback_vector(),
- data->info()->osr_offset(), data->jsgraph(), frequency,
- data->source_positions(), SourcePosition::kNotInlined,
- data->info()->code_kind(), flags, &data->info()->tick_counter());
+ data->broker(), temp_zone, closure.shared(),
+ closure.raw_feedback_cell(), data->info()->osr_offset(),
+ data->jsgraph(), frequency, data->source_positions(),
+ SourcePosition::kNotInlined, data->info()->code_kind(), flags,
+ &data->info()->tick_counter());
}
};
@@ -1667,11 +1672,11 @@ struct TypeAssertionsPhase {
struct SimplifiedLoweringPhase {
DECL_PIPELINE_PHASE_CONSTANTS(SimplifiedLowering)
- void Run(PipelineData* data, Zone* temp_zone) {
+ void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
data->source_positions(), data->node_origins(),
data->info()->GetPoisoningMitigationLevel(),
- &data->info()->tick_counter());
+ &data->info()->tick_counter(), linkage);
// RepresentationChanger accesses the heap.
UnparkedScopeIfNeeded scope(data->broker());
@@ -1691,6 +1696,9 @@ struct LoopPeelingPhase {
LoopTree* loop_tree = LoopFinder::BuildLoopTree(
data->jsgraph()->graph(), &data->info()->tick_counter(), temp_zone);
+    // We call the typer inside PeelInnerLoopsOfTree, which inspects heap
+    // objects, so we need to unpark the local heap.
+ UnparkedScopeIfNeeded scope(data->broker());
LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone,
data->source_positions(), data->node_origins())
.PeelInnerLoopsOfTree();
@@ -1792,7 +1800,8 @@ struct EffectControlLinearizationPhase {
// - introduce effect phis and rewire effects to get SSA again.
LinearizeEffectControl(data->jsgraph(), schedule, temp_zone,
data->source_positions(), data->node_origins(),
- mask_array_index, MaintainSchedule::kDiscard);
+ mask_array_index, MaintainSchedule::kDiscard,
+ data->broker());
}
{
// The {EffectControlLinearizer} might leave {Dead} nodes behind, so we
@@ -1967,7 +1976,8 @@ struct ScheduledEffectControlLinearizationPhase {
// - introduce effect phis and rewire effects to get SSA again.
LinearizeEffectControl(data->jsgraph(), data->schedule(), temp_zone,
data->source_positions(), data->node_origins(),
- mask_array_index, MaintainSchedule::kMaintain);
+ mask_array_index, MaintainSchedule::kMaintain,
+ data->broker());
// TODO(rmcilroy) Avoid having to rebuild rpo_order on schedule each time.
Scheduler::ComputeSpecialRPO(temp_zone, data->schedule());
@@ -2128,7 +2138,7 @@ struct InstructionSelectionPhase {
data->info()->switch_jump_table()
? InstructionSelector::kEnableSwitchJumpTable
: InstructionSelector::kDisableSwitchJumpTable,
- &data->info()->tick_counter(),
+ &data->info()->tick_counter(), data->broker(),
data->address_of_max_unoptimized_frame_height(),
data->address_of_max_pushed_argument_count(),
data->info()->source_positions()
@@ -2469,6 +2479,7 @@ void PipelineImpl::Serialize() {
bool PipelineImpl::CreateGraph() {
PipelineData* data = this->data_;
+ UnparkedScopeIfNeeded unparked_scope(data->broker());
data->BeginPhaseKind("V8.TFGraphCreation");
@@ -2557,7 +2568,7 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
// Perform simplified lowering. This has to run w/o the Typer decorator,
// because we cannot compute meaningful types anyways, and the computed types
// might even conflict with the representation/truncation logic.
- Run<SimplifiedLoweringPhase>();
+ Run<SimplifiedLoweringPhase>(linkage);
RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);
// From now on it is invalid to look at types on the nodes, because the types
@@ -2578,6 +2589,8 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
data->BeginPhaseKind("V8.TFBlockBuilding");
+ data->InitializeFrameData(linkage->GetIncomingDescriptor());
+
// Run early optimization pass.
Run<EarlyOptimizationPhase>();
RunPrintAndVerify(EarlyOptimizationPhase::phase_name(), true);
@@ -2650,7 +2663,7 @@ bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
// Perform simplified lowering. This has to run w/o the Typer decorator,
// because we cannot compute meaningful types anyways, and the computed types
// might even conflict with the representation/truncation logic.
- Run<SimplifiedLoweringPhase>();
+ Run<SimplifiedLoweringPhase>(linkage);
RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);
// From now on it is invalid to look at types on the nodes, because the types
@@ -2671,6 +2684,8 @@ bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
data->BeginPhaseKind("V8.TFBlockBuilding");
+ data->InitializeFrameData(linkage->GetIncomingDescriptor());
+
ComputeScheduledGraph();
Run<ScheduledEffectControlLinearizationPhase>();
@@ -3000,7 +3015,6 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
PipelineImpl pipeline(&data);
Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
- Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate);
{
CompilationHandleScope compilation_scope(isolate, info);
@@ -3008,7 +3022,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
info->ReopenHandlesInNewHandleScope(isolate);
pipeline.Serialize();
    // Emulating the proper pipeline, we call CreateGraph in different places
-    // (i.e before or after creating a LocalHeapScope) depending on
+    // (i.e. before or after creating a LocalIsolateScope) depending on
// is_concurrent_inlining.
if (!data.broker()->is_concurrent_inlining()) {
if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
@@ -3016,13 +3030,15 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
}
{
- LocalHeapScope local_heap_scope(data.broker(), info);
+ LocalIsolate local_isolate(isolate, ThreadKind::kMain);
+ LocalIsolateScope local_isolate_scope(data.broker(), info, &local_isolate);
if (data.broker()->is_concurrent_inlining()) {
if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
}
// We selectively Unpark inside OptimizeGraph.
- ParkedScope parked_scope(data.broker()->local_heap());
if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
+
+ pipeline.AssembleCode(&linkage);
}
const bool will_retire_broker = out_broker == nullptr;
@@ -3034,7 +3050,6 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
info->DetachPersistentHandles(), info->DetachCanonicalHandles());
}
- pipeline.AssembleCode(&linkage);
Handle<Code> code;
if (pipeline.FinalizeCode(will_retire_broker).ToHandle(&code) &&
pipeline.CommitDependencies(code)) {
@@ -3224,7 +3239,7 @@ bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
bool use_mid_tier_register_allocator,
bool run_verifier) {
OptimizedCompilationInfo info(ArrayVector("testing"), sequence->zone(),
- CodeKind::STUB);
+ CodeKind::FOR_TESTING);
ZoneStats zone_stats(sequence->isolate()->allocator());
PipelineData data(&zone_stats, &info, sequence->isolate(), sequence);
data.InitializeFrameData(nullptr);
@@ -3310,7 +3325,11 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
data->InitializeInstructionSequence(call_descriptor);
- data->InitializeFrameData(call_descriptor);
+ // Depending on which code path led us to this function, the frame may or
+ // may not have been initialized. If it hasn't yet, initialize it now.
+ if (!data->frame()) {
+ data->InitializeFrameData(call_descriptor);
+ }
// Select and schedule instructions covering the scheduled graph.
Run<InstructionSelectionPhase>(linkage);
if (data->compilation_failed()) {
@@ -3366,7 +3385,7 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
config = RegisterConfiguration::Default();
}
- if (FLAG_turboprop_mid_tier_reg_alloc) {
+ if (data->info()->IsTurboprop() && FLAG_turboprop_mid_tier_reg_alloc) {
AllocateRegistersForMidTier(config, call_descriptor, run_verifier);
} else {
AllocateRegistersForTopTier(config, call_descriptor, run_verifier);
@@ -3468,6 +3487,8 @@ void PipelineImpl::AssembleCode(Linkage* linkage,
data->BeginPhaseKind("V8.TFCodeGeneration");
data->InitializeCodeGenerator(linkage, std::move(buffer));
+ UnparkedScopeIfNeeded unparked_scope(data->broker(), FLAG_code_comments);
+
Run<AssembleCodePhase>();
if (data->info()->trace_turbo_json()) {
TurboJsonFile json_of(data->info(), std::ios_base::app);
@@ -3606,7 +3627,6 @@ void PipelineImpl::AllocateRegistersForTopTier(
flags |= RegisterAllocationFlag::kTraceAllocation;
}
data->InitializeTopTierRegisterAllocationData(config, call_descriptor, flags);
- if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame());
Run<MeetRegisterConstraintsPhase>();
Run<ResolvePhisPhase>();
@@ -3690,8 +3710,6 @@ void PipelineImpl::AllocateRegistersForMidTier(
data->sequence()->ValidateDeferredBlockEntryPaths();
data->sequence()->ValidateDeferredBlockExitPaths();
#endif
-
- if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame());
data->InitializeMidTierRegisterAllocationData(config, call_descriptor);
TraceSequence(info(), data, "before register allocation");
diff --git a/deps/v8/src/compiler/processed-feedback.h b/deps/v8/src/compiler/processed-feedback.h
index 282923e0c3..da3785f35e 100644
--- a/deps/v8/src/compiler/processed-feedback.h
+++ b/deps/v8/src/compiler/processed-feedback.h
@@ -178,19 +178,19 @@ class MinimorphicLoadPropertyAccessFeedback : public ProcessedFeedback {
MinimorphicLoadPropertyAccessFeedback(NameRef const& name,
FeedbackSlotKind slot_kind,
Handle<Object> handler,
- MaybeHandle<Map> maybe_map,
+ ZoneVector<Handle<Map>> const& maps,
bool has_migration_target_maps);
NameRef const& name() const { return name_; }
- bool is_monomorphic() const { return !maybe_map_.is_null(); }
+ bool is_monomorphic() const { return maps_.size() == 1; }
Handle<Object> handler() const { return handler_; }
- MaybeHandle<Map> map() const { return maybe_map_; }
+ ZoneVector<Handle<Map>> const& maps() const { return maps_; }
bool has_migration_target_maps() const { return has_migration_target_maps_; }
private:
NameRef const name_;
Handle<Object> const handler_;
- MaybeHandle<Map> const maybe_map_;
+ ZoneVector<Handle<Map>> const maps_;
bool const has_migration_target_maps_;
};
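Consumers that previously unpacked the single MaybeHandle<Map> can switch to
the vector-based API along these lines (a sketch, not an actual call site):

    if (feedback.is_monomorphic()) {
      Handle<Map> map = feedback.maps()[0];  // sole map in the feedback
      // ... monomorphic fast path ...
    } else {
      for (Handle<Map> map : feedback.maps()) {
        // ... polymorphic handling ...
      }
    }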
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index 4235160037..5214f7ad9b 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -82,30 +82,30 @@ bool PropertyAccessBuilder::TryBuildNumberCheck(
}
void PropertyAccessBuilder::BuildCheckMaps(
- Node* receiver, Node** effect, Node* control,
- ZoneVector<Handle<Map>> const& receiver_maps) {
- HeapObjectMatcher m(receiver);
- if (m.HasValue()) {
- MapRef receiver_map = m.Ref(broker()).map();
- if (receiver_map.is_stable()) {
- for (Handle<Map> map : receiver_maps) {
- if (MapRef(broker(), map).equals(receiver_map)) {
- dependencies()->DependOnStableMap(receiver_map);
+ Node* object, Node** effect, Node* control,
+ ZoneVector<Handle<Map>> const& maps) {
+ HeapObjectMatcher m(object);
+ if (m.HasResolvedValue()) {
+ MapRef object_map = m.Ref(broker()).map();
+ if (object_map.is_stable()) {
+ for (Handle<Map> map : maps) {
+ if (MapRef(broker(), map).equals(object_map)) {
+ dependencies()->DependOnStableMap(object_map);
return;
}
}
}
}
- ZoneHandleSet<Map> maps;
+ ZoneHandleSet<Map> map_set;
CheckMapsFlags flags = CheckMapsFlag::kNone;
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- maps.insert(receiver_map.object(), graph()->zone());
- if (receiver_map.is_migration_target()) {
+ for (Handle<Map> map : maps) {
+ MapRef object_map(broker(), map);
+ map_set.insert(object_map.object(), graph()->zone());
+ if (object_map.is_migration_target()) {
flags |= CheckMapsFlag::kTryMigrateInstance;
}
}
- *effect = graph()->NewNode(simplified()->CheckMaps(flags, maps), receiver,
+ *effect = graph()->NewNode(simplified()->CheckMaps(flags, map_set), object,
*effect, control);
}
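The fast path above means that for a heap-constant {object} whose stable map
already appears in {maps}, no CheckMaps node is emitted at all; a stable-map
dependency is recorded instead. A hypothetical caller, using the accessor
that appears later in this diff:

    access_builder.BuildCheckMaps(object, &effect, control,
                                  access_info.lookup_start_object_maps());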
@@ -124,12 +124,12 @@ Node* PropertyAccessBuilder::BuildCheckValue(Node* receiver, Effect* effect,
}
Node* PropertyAccessBuilder::ResolveHolder(
- PropertyAccessInfo const& access_info, Node* receiver) {
+ PropertyAccessInfo const& access_info, Node* lookup_start_object) {
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
return jsgraph()->Constant(ObjectRef(broker(), holder));
}
- return receiver;
+ return lookup_start_object;
}
MachineRepresentation PropertyAccessBuilder::ConvertRepresentation(
@@ -150,25 +150,27 @@ MachineRepresentation PropertyAccessBuilder::ConvertRepresentation(
Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
NameRef const& name, PropertyAccessInfo const& access_info,
- Node* receiver) {
+ Node* lookup_start_object) {
if (!access_info.IsDataConstant()) return nullptr;
// First, determine if we have a constant holder to load from.
Handle<JSObject> holder;
// If {access_info} has a holder, just use it.
if (!access_info.holder().ToHandle(&holder)) {
- // Otherwise, try to match the {receiver} as a constant.
- HeapObjectMatcher m(receiver);
- if (!m.HasValue() || !m.Ref(broker()).IsJSObject()) return nullptr;
+ // Otherwise, try to match the {lookup_start_object} as a constant.
+ HeapObjectMatcher m(lookup_start_object);
+ if (!m.HasResolvedValue() || !m.Ref(broker()).IsJSObject()) return nullptr;
- // Let us make sure the actual map of the constant receiver is among
- // the maps in {access_info}.
- MapRef receiver_map = m.Ref(broker()).map();
- if (std::find_if(access_info.receiver_maps().begin(),
- access_info.receiver_maps().end(), [&](Handle<Map> map) {
- return MapRef(broker(), map).equals(receiver_map);
- }) == access_info.receiver_maps().end()) {
- // The map of the receiver is not in the feedback, let us bail out.
+ // Let us make sure the actual map of the constant lookup_start_object is
+ // among the maps in {access_info}.
+ MapRef lookup_start_object_map = m.Ref(broker()).map();
+ if (std::find_if(
+ access_info.lookup_start_object_maps().begin(),
+ access_info.lookup_start_object_maps().end(), [&](Handle<Map> map) {
+ return MapRef(broker(), map).equals(lookup_start_object_map);
+ }) == access_info.lookup_start_object_maps().end()) {
+      // The map of the lookup_start_object is not in the feedback; bail out.
return nullptr;
}
holder = m.Ref(broker()).AsJSObject().object();
@@ -253,7 +255,7 @@ Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name,
Node* PropertyAccessBuilder::BuildMinimorphicLoadDataField(
NameRef const& name, MinimorphicLoadPropertyAccessInfo const& access_info,
- Node* receiver, Node** effect, Node** control) {
+ Node* lookup_start_object, Node** effect, Node** control) {
DCHECK_NULL(dependencies());
MachineRepresentation const field_representation =
ConvertRepresentation(access_info.field_representation());
@@ -268,22 +270,22 @@ Node* PropertyAccessBuilder::BuildMinimorphicLoadDataField(
kFullWriteBarrier,
LoadSensitivity::kCritical,
ConstFieldInfo::None()};
- return BuildLoadDataField(name, receiver, field_access,
+ return BuildLoadDataField(name, lookup_start_object, field_access,
access_info.is_inobject(), effect, control);
}
Node* PropertyAccessBuilder::BuildLoadDataField(
- NameRef const& name, PropertyAccessInfo const& access_info, Node* receiver,
- Node** effect, Node** control) {
+ NameRef const& name, PropertyAccessInfo const& access_info,
+ Node* lookup_start_object, Node** effect, Node** control) {
DCHECK(access_info.IsDataField() || access_info.IsDataConstant());
- if (Node* value =
- TryBuildLoadConstantDataField(name, access_info, receiver)) {
+ if (Node* value = TryBuildLoadConstantDataField(name, access_info,
+ lookup_start_object)) {
return value;
}
MachineRepresentation const field_representation =
ConvertRepresentation(access_info.field_representation());
- Node* storage = ResolveHolder(access_info, receiver);
+ Node* storage = ResolveHolder(access_info, lookup_start_object);
FieldAccess field_access = {
kTaggedBase,
diff --git a/deps/v8/src/compiler/property-access-builder.h b/deps/v8/src/compiler/property-access-builder.h
index 05436c2635..237f501dbb 100644
--- a/deps/v8/src/compiler/property-access-builder.h
+++ b/deps/v8/src/compiler/property-access-builder.h
@@ -45,13 +45,13 @@ class PropertyAccessBuilder {
// TODO(jgruber): Remove the untyped version once all uses are
// updated.
- void BuildCheckMaps(Node* receiver, Node** effect, Node* control,
- ZoneVector<Handle<Map>> const& receiver_maps);
- void BuildCheckMaps(Node* receiver, Effect* effect, Control control,
- ZoneVector<Handle<Map>> const& receiver_maps) {
+ void BuildCheckMaps(Node* object, Node** effect, Node* control,
+ ZoneVector<Handle<Map>> const& maps);
+ void BuildCheckMaps(Node* object, Effect* effect, Control control,
+ ZoneVector<Handle<Map>> const& maps) {
Node* e = *effect;
Node* c = control;
- BuildCheckMaps(receiver, &e, c, receiver_maps);
+ BuildCheckMaps(object, &e, c, maps);
*effect = e;
}
Node* BuildCheckValue(Node* receiver, Effect* effect, Control control,
@@ -61,13 +61,14 @@ class PropertyAccessBuilder {
// properties (without heap-object or map checks).
Node* BuildLoadDataField(NameRef const& name,
PropertyAccessInfo const& access_info,
- Node* receiver, Node** effect, Node** control);
+ Node* lookup_start_object, Node** effect,
+ Node** control);
// Builds the load for data-field access for minimorphic loads that use
// dynamic map checks. These cannot depend on any information from the maps.
Node* BuildMinimorphicLoadDataField(
NameRef const& name, MinimorphicLoadPropertyAccessInfo const& access_info,
- Node* receiver, Node** effect, Node** control);
+ Node* lookup_start_object, Node** effect, Node** control);
static MachineRepresentation ConvertRepresentation(
Representation representation);
@@ -83,10 +84,11 @@ class PropertyAccessBuilder {
Node* TryBuildLoadConstantDataField(NameRef const& name,
PropertyAccessInfo const& access_info,
- Node* receiver);
+ Node* lookup_start_object);
// Returns a node with the holder for the property access described by
// {access_info}.
- Node* ResolveHolder(PropertyAccessInfo const& access_info, Node* receiver);
+ Node* ResolveHolder(PropertyAccessInfo const& access_info,
+ Node* lookup_start_object);
Node* BuildLoadDataField(NameRef const& name, Node* holder,
FieldAccess& field_access, bool is_inobject,
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index fa8aaad5c5..dce807b7ab 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -632,6 +632,21 @@ void RawMachineAssembler::Return(int count, Node* vs[]) {
}
void RawMachineAssembler::PopAndReturn(Node* pop, Node* value) {
+  // PopAndReturn is supposed to be used ONLY in CSA/Torque builtins for
+  // dropping ALL JS arguments that are currently located on the stack.
+  // The check below ensures that there are no directly accessible stack
+  // parameters from the current builtin, which implies that the builtin
+  // with JS calling convention (TFJ) was created with
+  // kDontAdaptArgumentsSentinel. This simplifies the semantics of this
+  // instruction, because in the presence of directly accessible stack
+  // parameters it would be impossible to distinguish the following cases:
+  // 1) stack parameter is included in JS arguments (and therefore it will be
+  //    dropped as a part of 'pop' number of arguments),
+  // 2) stack parameter is NOT included in JS arguments (and therefore it
+  //    should be dropped IN ADDITION to the 'pop' number of arguments).
+  // Additionally, in order to simplify assembly code, PopAndReturn is also
+  // not allowed in builtins with stub linkage and parameters on stack.
+ CHECK_EQ(call_descriptor()->StackParameterCount(), 0);
Node* values[] = {pop, value};
Node* ret = MakeNode(common()->Return(1), 2, values);
schedule()->AddReturn(CurrentBlock(), ret);
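A sketch of the intended call pattern (names hypothetical; {argc} stands for
the dynamic JS argument count seen by a TFJ builtin created with
kDontAdaptArgumentsSentinel):

    // OK: the descriptor declares no stack parameters, so 'pop'
    // unambiguously covers all JS arguments on the stack.
    assembler.PopAndReturn(argc, result);

A builtin whose CallDescriptor declares stack parameters would instead trip
the CHECK_EQ above.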
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 3fa57839a0..8509568bf5 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -56,6 +56,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
PoisoningMitigationLevel::kPoisonCriticalOnly);
~RawMachineAssembler() = default;
+ RawMachineAssembler(const RawMachineAssembler&) = delete;
+ RawMachineAssembler& operator=(const RawMachineAssembler&) = delete;
+
Isolate* isolate() const { return isolate_; }
Graph* graph() const { return graph_; }
Zone* zone() const { return graph()->zone(); }
@@ -835,6 +838,15 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
return AddNode(machine()->Float64SilenceNaN(), a);
}
+ // SIMD operations.
+ Node* I64x2Splat(Node* a) { return AddNode(machine()->I64x2Splat(), a); }
+ Node* I64x2SplatI32Pair(Node* a, Node* b) {
+ return AddNode(machine()->I64x2SplatI32Pair(), a, b);
+ }
+ Node* I32x4Splat(Node* a) { return AddNode(machine()->I32x4Splat(), a); }
+ Node* I16x8Splat(Node* a) { return AddNode(machine()->I16x8Splat(), a); }
+ Node* I8x16Splat(Node* a) { return AddNode(machine()->I8x16Splat(), a); }
+
// Stack operations.
Node* LoadFramePointer() { return AddNode(machine()->LoadFramePointer()); }
Node* LoadParentFramePointer() {
@@ -1051,8 +1063,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
NodeVector parameters_;
BasicBlock* current_block_;
PoisoningMitigationLevel poisoning_level_;
-
- DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);
};
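A usage sketch for the SIMD splat helpers added above (assuming an assembler
m built for a SIMD-capable configuration):

    Node* lanes = m.I32x4Splat(m.Int32Constant(42));  // four lanes of 42
    Node* pair = m.I64x2SplatI32Pair(m.Int32Constant(1), m.Int32Constant(2));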
class V8_EXPORT_PRIVATE RawMachineLabel final {
@@ -1062,6 +1072,8 @@ class V8_EXPORT_PRIVATE RawMachineLabel final {
explicit RawMachineLabel(Type type = kNonDeferred)
: deferred_(type == kDeferred) {}
~RawMachineLabel();
+ RawMachineLabel(const RawMachineLabel&) = delete;
+ RawMachineLabel& operator=(const RawMachineLabel&) = delete;
BasicBlock* block() const { return block_; }
@@ -1071,7 +1083,6 @@ class V8_EXPORT_PRIVATE RawMachineLabel final {
bool bound_ = false;
bool deferred_;
friend class RawMachineAssembler;
- DISALLOW_COPY_AND_ASSIGN(RawMachineLabel);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/redundancy-elimination.h b/deps/v8/src/compiler/redundancy-elimination.h
index 5cf9f7ce61..cabdb1b41c 100644
--- a/deps/v8/src/compiler/redundancy-elimination.h
+++ b/deps/v8/src/compiler/redundancy-elimination.h
@@ -15,6 +15,8 @@ class V8_EXPORT_PRIVATE RedundancyElimination final : public AdvancedReducer {
public:
RedundancyElimination(Editor* editor, Zone* zone);
~RedundancyElimination() final;
+ RedundancyElimination(const RedundancyElimination&) = delete;
+ RedundancyElimination& operator=(const RedundancyElimination&) = delete;
const char* reducer_name() const override { return "RedundancyElimination"; }
@@ -73,8 +75,6 @@ class V8_EXPORT_PRIVATE RedundancyElimination final : public AdvancedReducer {
PathChecksForEffectNodes node_checks_;
Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(RedundancyElimination);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 46207a8b4e..2455ea3115 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -674,7 +674,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
Node* node, MachineRepresentation output_rep, Type output_type,
Node* use_node, UseInfo use_info) {
NumberMatcher m(node);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
// BigInts are not used as number constants.
DCHECK(use_info.type_check() != TypeCheckKind::kBigInt);
switch (use_info.type_check()) {
@@ -682,7 +682,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
case TypeCheckKind::kNumber:
case TypeCheckKind::kNumberOrBoolean:
case TypeCheckKind::kNumberOrOddball:
- return jsgraph()->Float64Constant(m.Value());
+ return jsgraph()->Float64Constant(m.ResolvedValue());
case TypeCheckKind::kBigInt:
case TypeCheckKind::kHeapObject:
case TypeCheckKind::kSigned32:
@@ -1089,7 +1089,7 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
}
case IrOpcode::kHeapConstant: {
HeapObjectMatcher m(node);
- if (m.HasValue() && m.Ref(broker_).IsBigInt() &&
+ if (m.HasResolvedValue() && m.Ref(broker_).IsBigInt() &&
use_info.truncation().IsUsedAsWord64()) {
auto bigint = m.Ref(broker_).AsBigInt();
return jsgraph()->Int64Constant(
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
index 499a326f20..900516c05d 100644
--- a/deps/v8/src/compiler/schedule.h
+++ b/deps/v8/src/compiler/schedule.h
@@ -56,6 +56,8 @@ class V8_EXPORT_PRIVATE BasicBlock final
};
BasicBlock(Zone* zone, Id id);
+ BasicBlock(const BasicBlock&) = delete;
+ BasicBlock& operator=(const BasicBlock&) = delete;
Id id() const { return id_; }
#if DEBUG
@@ -187,8 +189,6 @@ class V8_EXPORT_PRIVATE BasicBlock final
AssemblerDebugInfo debug_info_;
#endif
Id id_;
-
- DISALLOW_COPY_AND_ASSIGN(BasicBlock);
};
std::ostream& operator<<(std::ostream&, const BasicBlock&);
@@ -202,6 +202,8 @@ std::ostream& operator<<(std::ostream&, const BasicBlock::Id&);
class V8_EXPORT_PRIVATE Schedule final : public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit Schedule(Zone* zone, size_t node_count_hint = 0);
+ Schedule(const Schedule&) = delete;
+ Schedule& operator=(const Schedule&) = delete;
// Return the block which contains {node}, if any.
BasicBlock* block(Node* node) const;
@@ -307,8 +309,6 @@ class V8_EXPORT_PRIVATE Schedule final : public NON_EXPORTED_BASE(ZoneObject) {
BasicBlockVector rpo_order_; // Reverse-post-order block list.
BasicBlock* start_;
BasicBlock* end_;
-
- DISALLOW_COPY_AND_ASSIGN(Schedule);
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, const Schedule&);
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
index 83b88cd3bf..3e57da18a0 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.cc
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc
@@ -101,6 +101,7 @@ namespace compiler {
V(IncBlockCounter) \
V(ResumeGenerator) \
V(SuspendGenerator) \
+ V(ThrowIfNotSuperConstructor) \
V(ThrowSuperAlreadyCalledIfNotHole) \
V(ThrowSuperNotCalledIfHole) \
V(ToObject)
@@ -459,8 +460,14 @@ class SerializerForBackgroundCompilation {
bool honor_bailout_on_uninitialized);
void ProcessNamedPropertyAccess(Hints* receiver, NameRef const& name,
FeedbackSlot slot, AccessMode access_mode);
+ void ProcessNamedSuperPropertyAccess(Hints* receiver, NameRef const& name,
+ FeedbackSlot slot,
+ AccessMode access_mode);
void ProcessNamedAccess(Hints* receiver, NamedAccessFeedback const& feedback,
AccessMode access_mode, Hints* result_hints);
+ void ProcessNamedSuperAccess(Hints* receiver,
+ NamedAccessFeedback const& feedback,
+ AccessMode access_mode, Hints* result_hints);
void ProcessElementAccess(Hints const& receiver, Hints const& key,
ElementAccessFeedback const& feedback,
AccessMode access_mode);
@@ -494,7 +501,8 @@ class SerializerForBackgroundCompilation {
bool honor_bailout_on_uninitialized);
PropertyAccessInfo ProcessMapForNamedPropertyAccess(
- Hints* receiver, MapRef receiver_map, NameRef const& name,
+ Hints* receiver, base::Optional<MapRef> receiver_map,
+ MapRef lookup_start_object_map, NameRef const& name,
AccessMode access_mode, base::Optional<JSObjectRef> concrete_receiver,
Hints* result_hints);
@@ -1109,7 +1117,8 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized(
// OSR entry point. TODO(neis): Support OSR?
return false;
}
- if (FLAG_turboprop && feedback.slot_kind() == FeedbackSlotKind::kCall) {
+ if (broker()->is_turboprop() &&
+ feedback.slot_kind() == FeedbackSlotKind::kCall) {
return false;
}
if (feedback.IsInsufficient()) {
@@ -2083,7 +2092,7 @@ void SerializerForBackgroundCompilation::ProcessCalleeForCallOrConstruct(
if (callee->IsJSBoundFunction()) {
JSBoundFunctionRef bound_function(broker(),
Handle<JSBoundFunction>::cast(callee));
- bound_function.Serialize();
+ if (!bound_function.Serialize()) return;
callee = UnrollBoundFunction(bound_function, broker(), arguments,
&expanded_arguments, zone())
.object();
@@ -2153,7 +2162,7 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
arguments->insert(arguments->begin(), result_hints_from_new_target);
}
- // For JSNativeContextSpecialization::InferReceiverRootMap
+ // For JSNativeContextSpecialization::InferRootMap
Hints new_accumulator_hints = result_hints_from_new_target.Copy(zone());
ProcessCallOrConstructRecursive(callee, new_target, *arguments,
@@ -2245,7 +2254,8 @@ void SerializerForBackgroundCompilation::ProcessApiCall(
Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver));
FunctionTemplateInfoRef target_template_info(
- broker(), handle(target->function_data(), broker()->isolate()));
+ broker(),
+ handle(target->function_data(kAcquireLoad), broker()->isolate()));
if (!target_template_info.has_call_code()) return;
target_template_info.SerializeCallCode();
@@ -2680,8 +2690,8 @@ void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest(
namespace {
void ProcessMapForFunctionBind(MapRef map) {
map.SerializePrototype();
- int min_nof_descriptors = i::Max(JSFunction::kLengthDescriptorIndex,
- JSFunction::kNameDescriptorIndex) +
+ int min_nof_descriptors = std::max({JSFunction::kLengthDescriptorIndex,
+ JSFunction::kNameDescriptorIndex}) +
1;
if (map.NumberOfOwnDescriptors() >= min_nof_descriptors) {
map.SerializeOwnDescriptor(
@@ -2960,18 +2970,20 @@ void SerializerForBackgroundCompilation::ProcessUnaryOrBinaryOperation(
PropertyAccessInfo
SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
- Hints* receiver, MapRef receiver_map, NameRef const& name,
- AccessMode access_mode, base::Optional<JSObjectRef> concrete_receiver,
- Hints* result_hints) {
- // For JSNativeContextSpecialization::InferReceiverRootMap
- receiver_map.SerializeRootMap();
+ Hints* receiver, base::Optional<MapRef> receiver_map,
+ MapRef lookup_start_object_map, NameRef const& name, AccessMode access_mode,
+ base::Optional<JSObjectRef> concrete_receiver, Hints* result_hints) {
+ DCHECK_IMPLIES(concrete_receiver.has_value(), receiver_map.has_value());
+
+ // For JSNativeContextSpecialization::InferRootMap
+ lookup_start_object_map.SerializeRootMap();
// For JSNativeContextSpecialization::ReduceNamedAccess.
JSGlobalProxyRef global_proxy =
broker()->target_native_context().global_proxy_object();
JSGlobalObjectRef global_object =
broker()->target_native_context().global_object();
- if (receiver_map.equals(global_proxy.map())) {
+ if (lookup_start_object_map.equals(global_proxy.map())) {
base::Optional<PropertyCellRef> cell = global_object.GetPropertyCell(
name, SerializationPolicy::kSerializeIfNeeded);
if (access_mode == AccessMode::kLoad && cell.has_value()) {
@@ -2980,7 +2992,7 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
}
PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
- receiver_map, name, access_mode, dependencies(),
+ lookup_start_object_map, name, access_mode, dependencies(),
SerializationPolicy::kSerializeIfNeeded);
// For JSNativeContextSpecialization::InlinePropertySetterCall
@@ -2989,25 +3001,27 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
if (access_info.constant()->IsJSFunction()) {
JSFunctionRef function(broker(), access_info.constant());
- // For JSCallReducer and JSInlining(Heuristic).
- HintsVector arguments({Hints::SingleMap(receiver_map.object(), zone())},
- zone());
- // In the case of a setter any added result hints won't make sense, but
- // they will be ignored anyways by Process*PropertyAccess due to the
- // access mode not being kLoad.
- ProcessCalleeForCallOrConstruct(
- function.object(), base::nullopt, arguments,
- SpeculationMode::kDisallowSpeculation, kMissingArgumentsAreUndefined,
- result_hints);
-
- // For JSCallReducer::ReduceCallApiFunction.
- Handle<SharedFunctionInfo> sfi = function.shared().object();
- if (sfi->IsApiFunction()) {
- FunctionTemplateInfoRef fti_ref(
- broker(), handle(sfi->get_api_func_data(), broker()->isolate()));
- if (fti_ref.has_call_code()) {
- fti_ref.SerializeCallCode();
- ProcessReceiverMapForApiCall(fti_ref, receiver_map.object());
+ if (receiver_map.has_value()) {
+ // For JSCallReducer and JSInlining(Heuristic).
+ HintsVector arguments(
+ {Hints::SingleMap(receiver_map->object(), zone())}, zone());
+ // In the case of a setter any added result hints won't make sense, but
+ // they will be ignored anyways by Process*PropertyAccess due to the
+ // access mode not being kLoad.
+ ProcessCalleeForCallOrConstruct(
+ function.object(), base::nullopt, arguments,
+ SpeculationMode::kDisallowSpeculation,
+ kMissingArgumentsAreUndefined, result_hints);
+
+ // For JSCallReducer::ReduceCallApiFunction.
+ Handle<SharedFunctionInfo> sfi = function.shared().object();
+ if (sfi->IsApiFunction()) {
+ FunctionTemplateInfoRef fti_ref(
+ broker(), handle(sfi->get_api_func_data(), broker()->isolate()));
+ if (fti_ref.has_call_code()) {
+ fti_ref.SerializeCallCode();
+ ProcessReceiverMapForApiCall(fti_ref, receiver_map->object());
+ }
}
}
} else if (access_info.constant()->IsJSBoundFunction()) {
@@ -3035,7 +3049,7 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
holder = JSObjectRef(broker(), prototype);
} else {
CHECK_IMPLIES(concrete_receiver.has_value(),
- concrete_receiver->map().equals(receiver_map));
+ concrete_receiver->map().equals(*receiver_map));
holder = concrete_receiver;
}
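Roughly: {lookup_start_object_map} is where the property lookup begins (for
super accesses, a map from the feedback rather than the receiver's own map),
while the optional {receiver_map} only feeds the call and API-call hints
above, which is why it may now be absent.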
@@ -3149,6 +3163,38 @@ void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
}
}
+void SerializerForBackgroundCompilation::ProcessNamedSuperPropertyAccess(
+ Hints* receiver, NameRef const& name, FeedbackSlot slot,
+ AccessMode access_mode) {
+ if (slot.IsInvalid() || feedback_vector().is_null()) return;
+ FeedbackSource source(feedback_vector(), slot);
+ ProcessedFeedback const& feedback =
+ broker()->ProcessFeedbackForPropertyAccess(source, access_mode, name);
+ if (BailoutOnUninitialized(feedback)) return;
+
+ Hints new_accumulator_hints;
+ switch (feedback.kind()) {
+ case ProcessedFeedback::kNamedAccess:
+ DCHECK(name.equals(feedback.AsNamedAccess().name()));
+ ProcessNamedSuperAccess(receiver, feedback.AsNamedAccess(), access_mode,
+ &new_accumulator_hints);
+ break;
+ case ProcessedFeedback::kMinimorphicPropertyAccess:
+ DCHECK(name.equals(feedback.AsMinimorphicPropertyAccess().name()));
+ ProcessMinimorphicPropertyAccess(feedback.AsMinimorphicPropertyAccess(),
+ source);
+ break;
+ case ProcessedFeedback::kInsufficient:
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (access_mode == AccessMode::kLoad) {
+ environment()->accumulator_hints() = new_accumulator_hints;
+ }
+}
+
void SerializerForBackgroundCompilation::ProcessNamedAccess(
Hints* receiver, NamedAccessFeedback const& feedback,
AccessMode access_mode, Hints* result_hints) {
@@ -3162,17 +3208,18 @@ void SerializerForBackgroundCompilation::ProcessNamedAccess(
for (Handle<Map> map :
GetRelevantReceiverMaps(broker()->isolate(), receiver->maps())) {
MapRef map_ref(broker(), map);
- ProcessMapForNamedPropertyAccess(receiver, map_ref, feedback.name(),
- access_mode, base::nullopt, result_hints);
+ ProcessMapForNamedPropertyAccess(receiver, map_ref, map_ref,
+ feedback.name(), access_mode,
+ base::nullopt, result_hints);
}
for (Handle<Object> hint : receiver->constants()) {
ObjectRef object(broker(), hint);
if (access_mode == AccessMode::kLoad && object.IsJSObject()) {
MapRef map_ref = object.AsJSObject().map();
- ProcessMapForNamedPropertyAccess(receiver, map_ref, feedback.name(),
- access_mode, object.AsJSObject(),
- result_hints);
+ ProcessMapForNamedPropertyAccess(receiver, map_ref, map_ref,
+ feedback.name(), access_mode,
+ object.AsJSObject(), result_hints);
}
// For JSNativeContextSpecialization::ReduceJSLoadNamed.
if (access_mode == AccessMode::kLoad && object.IsJSFunction() &&
@@ -3190,6 +3237,30 @@ void SerializerForBackgroundCompilation::ProcessNamedAccess(
}
}
+void SerializerForBackgroundCompilation::ProcessNamedSuperAccess(
+ Hints* receiver, NamedAccessFeedback const& feedback,
+ AccessMode access_mode, Hints* result_hints) {
+ MapHandles receiver_maps =
+ GetRelevantReceiverMaps(broker()->isolate(), receiver->maps());
+ for (Handle<Map> receiver_map : receiver_maps) {
+ MapRef receiver_map_ref(broker(), receiver_map);
+ for (Handle<Map> feedback_map : feedback.maps()) {
+ MapRef feedback_map_ref(broker(), feedback_map);
+ ProcessMapForNamedPropertyAccess(
+ receiver, receiver_map_ref, feedback_map_ref, feedback.name(),
+ access_mode, base::nullopt, result_hints);
+ }
+ }
+ if (receiver_maps.empty()) {
+ for (Handle<Map> feedback_map : feedback.maps()) {
+ MapRef feedback_map_ref(broker(), feedback_map);
+ ProcessMapForNamedPropertyAccess(
+ receiver, base::nullopt, feedback_map_ref, feedback.name(),
+ access_mode, base::nullopt, result_hints);
+ }
+ }
+}
+
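For example, with two relevant receiver maps and three feedback maps this
serializes all 2 * 3 (receiver map, feedback map) pairs; with no receiver
maps it falls back to the three feedback maps alone, passing base::nullopt
as the receiver map.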
void SerializerForBackgroundCompilation::ProcessElementAccess(
Hints const& receiver, Hints const& key,
ElementAccessFeedback const& feedback, AccessMode access_mode) {
@@ -3214,7 +3285,7 @@ void SerializerForBackgroundCompilation::ProcessElementAccess(
for (Handle<Object> hint : receiver.constants()) {
ObjectRef receiver_ref(broker(), hint);
- // For JSNativeContextSpecialization::InferReceiverRootMap
+ // For JSNativeContextSpecialization::InferRootMap
if (receiver_ref.IsHeapObject()) {
receiver_ref.AsHeapObject().map().SerializeRootMap();
}
@@ -3245,7 +3316,7 @@ void SerializerForBackgroundCompilation::ProcessElementAccess(
}
}
- // For JSNativeContextSpecialization::InferReceiverRootMap
+ // For JSNativeContextSpecialization::InferRootMap
for (Handle<Map> map : receiver.maps()) {
MapRef map_ref(broker(), map);
map_ref.SerializeRootMap();
@@ -3263,9 +3334,11 @@ void SerializerForBackgroundCompilation::VisitLdaNamedProperty(
void SerializerForBackgroundCompilation::VisitLdaNamedPropertyFromSuper(
BytecodeArrayIterator* iterator) {
- NameRef(broker(),
- iterator->GetConstantForIndexOperand(1, broker()->isolate()));
- // TODO(marja, v8:9237): Process feedback once it's added to the byte code.
+ Hints* receiver = &register_hints(iterator->GetRegisterOperand(0));
+ NameRef name(broker(),
+ iterator->GetConstantForIndexOperand(1, broker()->isolate()));
+ FeedbackSlot slot = iterator->GetSlotOperand(2);
+ ProcessNamedSuperPropertyAccess(receiver, name, slot, AccessMode::kLoad);
}
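In bytecode terms: for LdaNamedPropertyFromSuper, the receiver hints come
from register operand 0, the property name from constant-pool operand 1, and
the feedback from slot operand 2, which is then processed as a kLoad super
access.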
// TODO(neis): Do feedback-independent serialization also for *NoFeedback
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index f832107939..36d590b1aa 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -141,6 +141,7 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4ShrU) \
V(I32x4MinU) \
V(I32x4MaxU) \
+ V(I32x4DotI16x8S) \
V(I32x4Eq) \
V(I32x4Ne) \
V(I32x4LtS) \
@@ -158,6 +159,8 @@ void SimdScalarLowering::LowerGraph() {
V(S128Or) \
V(S128Xor) \
V(S128Not) \
+ V(S128AndNot) \
+ V(S128Select) \
V(V32x4AnyTrue) \
V(V32x4AllTrue) \
V(V16x8AnyTrue) \
@@ -178,7 +181,13 @@ void SimdScalarLowering::LowerGraph() {
V(F64x2Mul) \
V(F64x2Div) \
V(F64x2Min) \
- V(F64x2Max)
+ V(F64x2Max) \
+ V(F64x2Pmin) \
+ V(F64x2Pmax) \
+ V(F64x2Ceil) \
+ V(F64x2Floor) \
+ V(F64x2Trunc) \
+ V(F64x2NearestInt)
#define FOREACH_FLOAT32X4_OPCODE(V) \
V(F32x4Splat) \
@@ -197,7 +206,13 @@ void SimdScalarLowering::LowerGraph() {
V(F32x4Mul) \
V(F32x4Div) \
V(F32x4Min) \
- V(F32x4Max)
+ V(F32x4Max) \
+ V(F32x4Pmin) \
+ V(F32x4Pmax) \
+ V(F32x4Ceil) \
+ V(F32x4Floor) \
+ V(F32x4Trunc) \
+ V(F32x4NearestInt)
#define FOREACH_FLOAT64x2_TO_INT64x2OPCODE(V) \
V(F64x2Eq) \
@@ -225,10 +240,10 @@ void SimdScalarLowering::LowerGraph() {
V(I16x8ShrS) \
V(I16x8SConvertI32x4) \
V(I16x8Add) \
- V(I16x8AddSaturateS) \
+ V(I16x8AddSatS) \
V(I16x8AddHoriz) \
V(I16x8Sub) \
- V(I16x8SubSaturateS) \
+ V(I16x8SubSatS) \
V(I16x8Mul) \
V(I16x8MinS) \
V(I16x8MaxS) \
@@ -236,8 +251,8 @@ void SimdScalarLowering::LowerGraph() {
V(I16x8UConvertI8x16High) \
V(I16x8ShrU) \
V(I16x8UConvertI32x4) \
- V(I16x8AddSaturateU) \
- V(I16x8SubSaturateU) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
V(I16x8MinU) \
V(I16x8MaxU) \
V(I16x8Eq) \
@@ -264,16 +279,16 @@ void SimdScalarLowering::LowerGraph() {
V(I8x16Shl) \
V(I8x16ShrS) \
V(I8x16Add) \
- V(I8x16AddSaturateS) \
+ V(I8x16AddSatS) \
V(I8x16Sub) \
- V(I8x16SubSaturateS) \
+ V(I8x16SubSatS) \
V(I8x16Mul) \
V(I8x16MinS) \
V(I8x16MaxS) \
V(I8x16ShrU) \
V(I8x16UConvertI16x8) \
- V(I8x16AddSaturateU) \
- V(I8x16SubSaturateU) \
+ V(I8x16AddSatU) \
+ V(I8x16SubSatU) \
V(I8x16MinU) \
V(I8x16MaxU) \
V(I8x16Eq) \
@@ -324,6 +339,7 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
FOREACH_INT32X4_OPCODE(CASE_STMT)
case IrOpcode::kReturn:
case IrOpcode::kParameter:
+ case IrOpcode::kPhi:
case IrOpcode::kCall: {
replacements_[node->id()].type = SimdType::kInt32x4;
break;
@@ -351,28 +367,24 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
case IrOpcode::kLoadTransform: {
LoadTransformParameters params = LoadTransformParametersOf(node->op());
switch (params.transformation) {
- case LoadTransformation::kS8x16LoadSplat:
+ case LoadTransformation::kS128Load8Splat:
replacements_[node->id()].type = SimdType::kInt8x16;
break;
- case LoadTransformation::kS16x8LoadSplat:
+ case LoadTransformation::kS128Load16Splat:
+ case LoadTransformation::kS128Load8x8S:
+ case LoadTransformation::kS128Load8x8U:
replacements_[node->id()].type = SimdType::kInt16x8;
break;
- case LoadTransformation::kS32x4LoadSplat:
+ case LoadTransformation::kS128Load32Splat:
+ case LoadTransformation::kS128Load16x4S:
+ case LoadTransformation::kS128Load16x4U:
+ case LoadTransformation::kS128Load32Zero:
replacements_[node->id()].type = SimdType::kInt32x4;
break;
- case LoadTransformation::kS64x2LoadSplat:
- replacements_[node->id()].type = SimdType::kInt64x2;
- break;
- case LoadTransformation::kI16x8Load8x8S:
- case LoadTransformation::kI16x8Load8x8U:
- replacements_[node->id()].type = SimdType::kInt16x8;
- break;
- case LoadTransformation::kI32x4Load16x4S:
- case LoadTransformation::kI32x4Load16x4U:
- replacements_[node->id()].type = SimdType::kInt32x4;
- break;
- case LoadTransformation::kI64x2Load32x2S:
- case LoadTransformation::kI64x2Load32x2U:
+ case LoadTransformation::kS128Load64Splat:
+ case LoadTransformation::kS128Load32x2S:
+ case LoadTransformation::kS128Load32x2U:
+ case LoadTransformation::kS128Load64Zero:
replacements_[node->id()].type = SimdType::kInt64x2;
break;
default:
@@ -499,11 +511,20 @@ void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices,
int num_lanes = NumLanes(type);
int lane_width = kSimd128Size / num_lanes;
int laneIndex = kLaneOffsets[0] / lane_width;
- new_indices[laneIndex] = index;
+
+ Node* rep = index;
+
+ if (HasReplacement(0, index)) {
+ // Index nodes are lowered to scalar nodes.
+ DCHECK_EQ(1, ReplacementCount(index));
+ rep = GetReplacements(index)[0];
+ }
+
+ new_indices[laneIndex] = rep;
for (int i = 1; i < num_lanes; ++i) {
laneIndex = kLaneOffsets[i * lane_width] / lane_width;
new_indices[laneIndex] = graph()->NewNode(
- machine()->Int32Add(), index,
+ machine()->Int32Add(), rep,
graph()->NewNode(
common()->Int32Constant(static_cast<int>(i) * lane_width)));
}
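Worked example: for an i32x4 load, num_lanes is 4 and lane_width is
16 / 4 = 4 bytes, so the lowered per-lane loads address {rep, rep + 4,
rep + 8, rep + 12}, with the lane order taken from kLaneOffsets.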
@@ -563,53 +584,53 @@ void SimdScalarLowering::LowerLoadTransformOp(Node* node, SimdType type) {
// Load extends have a different machine type for loading.
switch (params.transformation) {
- case LoadTransformation::kI16x8Load8x8S:
+ case LoadTransformation::kS128Load8x8S:
load_rep = MachineType::Int8();
load_type = SimdType::kInt8x16;
break;
- case LoadTransformation::kI16x8Load8x8U:
+ case LoadTransformation::kS128Load8x8U:
load_rep = MachineType::Uint8();
load_type = SimdType::kInt8x16;
break;
- case LoadTransformation::kI32x4Load16x4S:
+ case LoadTransformation::kS128Load16x4S:
load_rep = MachineType::Int16();
load_type = SimdType::kInt16x8;
break;
- case LoadTransformation::kI32x4Load16x4U:
+ case LoadTransformation::kS128Load16x4U:
load_rep = MachineType::Uint16();
load_type = SimdType::kInt16x8;
break;
- case LoadTransformation::kI64x2Load32x2S:
+ case LoadTransformation::kS128Load32x2S:
load_rep = MachineType::Int32();
load_type = SimdType::kInt32x4;
break;
- case LoadTransformation::kI64x2Load32x2U:
+ case LoadTransformation::kS128Load32x2U:
load_rep = MachineType::Uint32();
load_type = SimdType::kInt32x4;
break;
- case LoadTransformation::kS8x16LoadSplat:
- case LoadTransformation::kS16x8LoadSplat:
- case LoadTransformation::kS32x4LoadSplat:
- case LoadTransformation::kS64x2LoadSplat:
+ case LoadTransformation::kS128Load8Splat:
+ case LoadTransformation::kS128Load16Splat:
+ case LoadTransformation::kS128Load32Splat:
+ case LoadTransformation::kS128Load64Splat:
+ case LoadTransformation::kS128Load32Zero:
+ case LoadTransformation::kS128Load64Zero:
load_rep = MachineTypeFrom(type);
break;
default:
- // Lowering for s64x2 is not implemented since lowering for 64x2
- // operations doesn't work properly yet.
- UNIMPLEMENTED();
+ UNREACHABLE();
}
DCHECK_NE(load_rep, MachineType::None());
const Operator* load_op;
switch (params.kind) {
- case LoadKind::kNormal:
+ case MemoryAccessKind::kNormal:
load_op = machine()->Load(load_rep);
break;
- case LoadKind::kUnaligned:
+ case MemoryAccessKind::kUnaligned:
load_op = machine()->UnalignedLoad(load_rep);
break;
- case LoadKind::kProtected:
+ case MemoryAccessKind::kProtected:
load_op = machine()->ProtectedLoad(load_rep);
break;
}
@@ -617,39 +638,71 @@ void SimdScalarLowering::LowerLoadTransformOp(Node* node, SimdType type) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
int num_lanes = NumLanes(type);
- Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
+ Node** reps = zone()->NewArray<Node*>(num_lanes);
Node* effect_input = node->InputAt(2);
Node* control_input = node->InputAt(3);
+  // This node is also used as an effect input by other nodes, so we need to
+  // change it in place.
+ reps[0] = node;
+ NodeProperties::ChangeOp(reps[0], load_op);
+
if (type != load_type) {
// We load a smaller lane size, then extend to a larger lane size. So use
    // the smaller lane size to calculate the index nodes for loads, but only
// actually load half of those lanes.
Node** indices = zone()->NewArray<Node*>(num_lanes * 2);
GetIndexNodes(index, indices, load_type);
- for (int i = num_lanes - 1; i >= 0; --i) {
- rep_nodes[i] = graph()->NewNode(load_op, base, indices[i], effect_input,
- control_input);
- effect_input = rep_nodes[i];
- // Load operations are Word32 nodes, change them to Word64.
- if (params.transformation == LoadTransformation::kI64x2Load32x2S) {
- rep_nodes[i] =
- graph()->NewNode(machine()->ChangeInt32ToInt64(), rep_nodes[i]);
- } else if (params.transformation == LoadTransformation::kI64x2Load32x2U) {
- rep_nodes[i] =
- graph()->NewNode(machine()->ChangeUint32ToUint64(), rep_nodes[i]);
- }
+ reps[0]->ReplaceInput(1, indices[0]);
+
+ for (int i = num_lanes - 1; i > 0; --i) {
+ reps[i] = graph()->NewNode(load_op, base, indices[i], effect_input,
+ control_input);
+ effect_input = reps[i];
}
} else {
- // Load splat, load from the same index for every lane.
- for (int i = num_lanes - 1; i >= 0; --i) {
- rep_nodes[i] =
- graph()->NewNode(load_op, base, index, effect_input, control_input);
- effect_input = rep_nodes[i];
+ if (params.transformation == LoadTransformation::kS128Load32Zero) {
+ for (int i = num_lanes - 1; i > 0; --i) {
+ reps[i] = mcgraph_->Int32Constant(0);
+ }
+ } else if (params.transformation == LoadTransformation::kS128Load64Zero) {
+ for (int i = num_lanes - 1; i > 0; --i) {
+ reps[i] = mcgraph_->Int64Constant(0);
+ }
+ } else {
+ // Load splat, load from the same index for every lane.
+ Node* rep = HasReplacement(0, index) ? GetReplacements(index)[0] : index;
+
+      // Replace the first node's index input; we only called ChangeOp above.
+ reps[0]->ReplaceInput(1, rep);
+ for (int i = num_lanes - 1; i > 0; --i) {
+ reps[i] =
+ graph()->NewNode(load_op, base, rep, effect_input, control_input);
+ effect_input = reps[i];
+ }
+ }
+ }
+
+  // Update the effect input, completing the effect chain, but only if there is
+  // an effect output (the extra lanes of a LoadZero are constants and have no
+  // effect output).
+ if (reps[1]->op()->EffectOutputCount() > 0) {
+ reps[0]->ReplaceInput(2, reps[1]);
+ }
+
+  // Special case: the load nodes need to be sign-extended, and we do it here
+  // so the loop above can connect all the effect edges correctly.
+ if (params.transformation == LoadTransformation::kS128Load32x2S) {
+ for (int i = 0; i < num_lanes; ++i) {
+ reps[i] = graph()->NewNode(machine()->ChangeInt32ToInt64(), reps[i]);
+ }
+ } else if (params.transformation == LoadTransformation::kS128Load32x2U) {
+ for (int i = 0; i < num_lanes; ++i) {
+ reps[i] = graph()->NewNode(machine()->ChangeUint32ToUint64(), reps[i]);
}
}
- ReplaceNode(node, rep_nodes, num_lanes);
+
+ ReplaceNode(node, reps, num_lanes);
}
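Worked example: S128Load32x2S over base {b} and index {i} now lowers to two
Int32 loads at offsets {i, i + 4} (the original node is retargeted in place
to the first load, keeping its effect uses valid), chained through the
effect input, and each result is then widened with ChangeInt32ToInt64.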
void SimdScalarLowering::LowerStoreOp(Node* node) {
@@ -947,8 +1000,9 @@ void SimdScalarLowering::LowerConvertFromFloat(Node* node, bool is_signed) {
for (int i = 0; i < kNumLanes32; ++i) {
Node* double_rep =
graph()->NewNode(machine()->ChangeFloat32ToFloat64(), rep[i]);
- Diamond nan_d(graph(), common(), graph()->NewNode(machine()->Float64Equal(),
- double_rep, double_rep));
+ Diamond nan_d(
+ graph(), common(),
+ graph()->NewNode(machine()->Float64Equal(), double_rep, double_rep));
Node* temp =
nan_d.Phi(MachineRepresentation::kFloat64, double_rep, double_zero);
Diamond min_d(graph(), common(),
@@ -1003,25 +1057,28 @@ void SimdScalarLowering::LowerPack(Node* node, SimdType input_rep_type,
const Operator* less_op = machine()->Int32LessThan();
Node* min = nullptr;
Node* max = nullptr;
+ const Operator* sign_extend;
MachineRepresentation phi_rep;
if (output_rep_type == SimdType::kInt16x8) {
+ sign_extend = machine()->SignExtendWord16ToInt32();
DCHECK(input_rep_type == SimdType::kInt32x4);
if (is_signed) {
min = mcgraph_->Int32Constant(std::numeric_limits<int16_t>::min());
max = mcgraph_->Int32Constant(std::numeric_limits<int16_t>::max());
} else {
- min = mcgraph_->Int32Constant(std::numeric_limits<uint16_t>::min());
+ min = mcgraph_->Uint32Constant(std::numeric_limits<uint16_t>::min());
max = mcgraph_->Uint32Constant(std::numeric_limits<uint16_t>::max());
}
phi_rep = MachineRepresentation::kWord16;
} else {
+ sign_extend = machine()->SignExtendWord8ToInt32();
DCHECK(output_rep_type == SimdType::kInt8x16 &&
input_rep_type == SimdType::kInt16x8);
if (is_signed) {
min = mcgraph_->Int32Constant(std::numeric_limits<int8_t>::min());
max = mcgraph_->Int32Constant(std::numeric_limits<int8_t>::max());
} else {
- min = mcgraph_->Int32Constant(std::numeric_limits<uint8_t>::min());
+ min = mcgraph_->Uint32Constant(std::numeric_limits<uint8_t>::min());
max = mcgraph_->Uint32Constant(std::numeric_limits<uint8_t>::max());
}
phi_rep = MachineRepresentation::kWord8;
@@ -1037,14 +1094,24 @@ void SimdScalarLowering::LowerPack(Node* node, SimdType input_rep_type,
Diamond d_min(graph(), common(), graph()->NewNode(less_op, input, min));
input = d_min.Phi(phi_rep, min, input);
Diamond d_max(graph(), common(), graph()->NewNode(less_op, max, input));
- rep_node[i] = d_max.Phi(phi_rep, max, input);
+ // We keep nodes in sign-extended form. E.g. for uint8_t, we need to
+ // compare with 0x000000ff (saturated narrowing), but the result of
+ // conversion should be 0xffffffff to work well with the rest of lowering.
+ rep_node[i] = graph()->NewNode(sign_extend, d_max.Phi(phi_rep, max, input));
}
ReplaceNode(node, rep_node, num_lanes);
}
void SimdScalarLowering::LowerShiftOp(Node* node, SimdType type) {
DCHECK_EQ(2, node->InputCount());
- Node* shift_node = Mask(node->InputAt(1), GetMaskForShift(node));
+
+ // The shift node, if it has a replacement, should be a single scalar.
+ DCHECK_GE(1, ReplacementCount(node->InputAt(1)));
+ Node* val = (HasReplacement(0, node->InputAt(1)))
+ ? GetReplacements(node->InputAt(1))[0]
+ : node->InputAt(1);
+
+ Node* shift_node = Mask(val, GetMaskForShift(node));
Node** rep = GetReplacementsWithType(node->InputAt(0), type);
int num_lanes = NumLanes(type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
@@ -1191,8 +1258,22 @@ void SimdScalarLowering::LowerAllTrueOp(Node* node, SimdType rep_type) {
tmp_result = d.Phi(MachineRepresentation::kWord32, zero, tmp_result);
}
rep_node[0] = tmp_result;
- for (int i = 1; i < num_lanes; ++i) {
- rep_node[i] = nullptr;
+ ReplaceNode(node, rep_node, 1);
+}
+
+void SimdScalarLowering::LowerFloatPseudoMinMax(Node* node, const Operator* op,
+ bool is_max, SimdType type) {
+ DCHECK_EQ(2, node->InputCount());
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), type);
+ int num_lanes = NumLanes(type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ MachineRepresentation rep = MachineTypeFrom(type).representation();
+ for (int i = 0; i < num_lanes; ++i) {
+ Node* cmp = is_max ? graph()->NewNode(op, rep_left[i], rep_right[i])
+ : graph()->NewNode(op, rep_right[i], rep_left[i]);
+ Diamond d(graph(), common(), cmp);
+ rep_node[i] = d.Phi(rep, rep_right[i], rep_left[i]);
}
ReplaceNode(node, rep_node, num_lanes);
}
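Pmin and Pmax are Wasm's "pseudo-minimum/maximum": defined purely as a comparison-and-select, so they are not commutative in the presence of NaN or signed zeros. The diamond built above reduces each lane to the following scalar functions (illustrative):

```cpp
// Wasm pseudo-min/max lane semantics as built by the diamond above:
// pmin(a, b) = b < a ? b : a, pmax(a, b) = a < b ? b : a.
// Any NaN makes the comparison false, so the first operand is returned;
// -0.0 vs +0.0 is resolved purely by operand order, unlike IEEE min/max.
float PseudoMin(float a, float b) { return b < a ? b : a; }
float PseudoMax(float a, float b) { return a < b ? b : a; }
```

This is also why the lowering swaps the comparison operands for the min case rather than introducing a second comparison operator.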
@@ -1215,9 +1296,8 @@ void SimdScalarLowering::LowerNode(Node* node) {
case SimdType::kInt8x16: {
for (int i = 0; i < num_lanes; ++i) {
Address data_address = reinterpret_cast<Address>(params.data() + i);
- rep_node[i] = mcgraph_->Int32Constant(
- static_cast<int32_t>(static_cast<int8_t>(
- base::ReadLittleEndianValue<int8_t>(data_address))));
+ rep_node[i] = mcgraph_->Int32Constant(static_cast<int32_t>(
+ base::ReadLittleEndianValue<int8_t>(data_address)));
}
break;
}
@@ -1402,7 +1482,11 @@ void SimdScalarLowering::LowerNode(Node* node) {
// arguments need to be converted to i32x4 as well.
for (int i = NodeProperties::PastValueIndex(node) - 1; i >= 0; i--) {
Node* input = node->InputAt(i);
- if (HasReplacement(0, input)) {
+ if (ReplacementCount(input) == 1) {
+ // Special case for nodes lowered to a single scalar, e.g. extract lanes.
+ Node** reps = GetReplacements(input);
+ ReplaceNode(input, reps, 1);
+ } else if (HasReplacement(0, input)) {
Node** reps = GetReplacementsWithType(input, SimdType::kInt32x4);
ReplaceNode(input, reps, NumLanes(SimdType::kInt32x4));
}
@@ -1527,23 +1611,23 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Mul());
break;
}
- case IrOpcode::kI16x8AddSaturateS:
- case IrOpcode::kI8x16AddSaturateS: {
+ case IrOpcode::kI16x8AddSatS:
+ case IrOpcode::kI8x16AddSatS: {
LowerSaturateBinaryOp(node, rep_type, machine()->Int32Add(), true);
break;
}
- case IrOpcode::kI16x8SubSaturateS:
- case IrOpcode::kI8x16SubSaturateS: {
+ case IrOpcode::kI16x8SubSatS:
+ case IrOpcode::kI8x16SubSatS: {
LowerSaturateBinaryOp(node, rep_type, machine()->Int32Sub(), true);
break;
}
- case IrOpcode::kI16x8AddSaturateU:
- case IrOpcode::kI8x16AddSaturateU: {
+ case IrOpcode::kI16x8AddSatU:
+ case IrOpcode::kI8x16AddSatU: {
LowerSaturateBinaryOp(node, rep_type, machine()->Int32Add(), false);
break;
}
- case IrOpcode::kI16x8SubSaturateU:
- case IrOpcode::kI8x16SubSaturateU: {
+ case IrOpcode::kI16x8SubSatU:
+ case IrOpcode::kI8x16SubSatU: {
LowerSaturateBinaryOp(node, rep_type, machine()->Int32Sub(), false);
break;
}
@@ -1571,6 +1655,25 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerIntMinMax(node, machine()->Uint32LessThan(), false, rep_type);
break;
}
+ case IrOpcode::kI32x4DotI16x8S: {
+ // i32x4.dot_i16x8_s takes i16x8 inputs but produces an i32x4 result.
+ DCHECK_EQ(2, node->InputCount());
+ Node** rep_left =
+ GetReplacementsWithType(node->InputAt(0), SimdType::kInt16x8);
+ Node** rep_right =
+ GetReplacementsWithType(node->InputAt(1), SimdType::kInt16x8);
+ int num_lanes = NumLanes(rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
+ Node* lo = graph()->NewNode(machine()->Int32Mul(), rep_left[i * 2],
+ rep_right[i * 2]);
+ Node* hi = graph()->NewNode(machine()->Int32Mul(), rep_left[i * 2 + 1],
+ rep_right[i * 2 + 1]);
+ rep_node[i] = graph()->NewNode(machine()->Int32Add(), lo, hi);
+ }
+ ReplaceNode(node, rep_node, num_lanes);
+ break;
+ }
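i32x4.dot_i16x8_s multiplies adjacent pairs of signed 16-bit lanes and adds each pair into one 32-bit lane, which is exactly what the loop above builds with two Int32Mul nodes and one Int32Add per output lane. A standalone scalar reference (illustrative):

```cpp
#include <cstdint>

// Scalar reference for i32x4.dot_i16x8_s:
// out[i] = a[2i]*b[2i] + a[2i+1]*b[2i+1].
void DotI16x8S(const int16_t a[8], const int16_t b[8], int32_t out[4]) {
  for (int i = 0; i < 4; ++i) {
    // Each 16x16 product fits in int32 (|p| <= 2^30); the sum is done in
    // uint32_t to get Wasm's modulo-2^32 wrapping (and avoid C++ UB in the
    // single corner case (-32768)^2 + (-32768)^2).
    int32_t lo = static_cast<int32_t>(a[2 * i]) * b[2 * i];
    int32_t hi = static_cast<int32_t>(a[2 * i + 1]) * b[2 * i + 1];
    out[i] = static_cast<int32_t>(static_cast<uint32_t>(lo) +
                                  static_cast<uint32_t>(hi));
  }
}
```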
case IrOpcode::kI64x2Neg: {
DCHECK_EQ(1, node->InputCount());
Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
@@ -1756,6 +1859,16 @@ void SimdScalarLowering::LowerNode(Node* node) {
F32X4_BINOP_CASE(Div)
F32X4_BINOP_CASE(Min)
F32X4_BINOP_CASE(Max)
+ case IrOpcode::kF32x4Pmin: {
+ LowerFloatPseudoMinMax(node, machine()->Float32LessThan(), false,
+ rep_type);
+ break;
+ }
+ case IrOpcode::kF32x4Pmax: {
+ LowerFloatPseudoMinMax(node, machine()->Float32LessThan(), true,
+ rep_type);
+ break;
+ }
#undef F32X4_BINOP_CASE
#define F32X4_UNOP_CASE(name) \
case IrOpcode::kF32x4##name: { \
@@ -1766,6 +1879,22 @@ void SimdScalarLowering::LowerNode(Node* node) {
F32X4_UNOP_CASE(Neg)
F32X4_UNOP_CASE(Sqrt)
#undef F32X4_UNOP_CASE
+ case IrOpcode::kF32x4Ceil: {
+ LowerUnaryOp(node, rep_type, machine()->Float32RoundUp().op());
+ break;
+ }
+ case IrOpcode::kF32x4Floor: {
+ LowerUnaryOp(node, rep_type, machine()->Float32RoundDown().op());
+ break;
+ }
+ case IrOpcode::kF32x4Trunc: {
+ LowerUnaryOp(node, rep_type, machine()->Float32RoundTruncate().op());
+ break;
+ }
+ case IrOpcode::kF32x4NearestInt: {
+ LowerUnaryOp(node, rep_type, machine()->Float32RoundTiesEven().op());
+ break;
+ }
case IrOpcode::kF32x4RecipApprox:
case IrOpcode::kF32x4RecipSqrtApprox: {
DCHECK_EQ(1, node->InputCount());
@@ -1826,6 +1955,32 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerBinaryOp(node, rep_type, machine()->Float64Max());
break;
}
+ case IrOpcode::kF64x2Pmin: {
+ LowerFloatPseudoMinMax(node, machine()->Float64LessThan(), false,
+ rep_type);
+ break;
+ }
+ case IrOpcode::kF64x2Pmax: {
+ LowerFloatPseudoMinMax(node, machine()->Float64LessThan(), true,
+ rep_type);
+ break;
+ }
+ case IrOpcode::kF64x2Ceil: {
+ LowerUnaryOp(node, rep_type, machine()->Float64RoundUp().op());
+ break;
+ }
+ case IrOpcode::kF64x2Floor: {
+ LowerUnaryOp(node, rep_type, machine()->Float64RoundDown().op());
+ break;
+ }
+ case IrOpcode::kF64x2Trunc: {
+ LowerUnaryOp(node, rep_type, machine()->Float64RoundTruncate().op());
+ break;
+ }
+ case IrOpcode::kF64x2NearestInt: {
+ LowerUnaryOp(node, rep_type, machine()->Float64RoundTiesEven().op());
+ break;
+ }
case IrOpcode::kF64x2Splat:
case IrOpcode::kF32x4Splat:
case IrOpcode::kI64x2Splat:
@@ -1862,13 +2017,17 @@ void SimdScalarLowering::LowerNode(Node* node) {
case IrOpcode::kI8x16ExtractLaneU:
case IrOpcode::kI8x16ExtractLaneS: {
int32_t lane = OpParameter<int32_t>(node->op());
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ Node** rep_node = zone()->NewArray<Node*>(1);
rep_node[0] = GetReplacementsWithType(node->InputAt(0), rep_type)[lane];
- for (int i = 1; i < num_lanes; ++i) {
- rep_node[i] = nullptr;
+
+ // If unsigned, mask the top bits.
+ if (node->opcode() == IrOpcode::kI16x8ExtractLaneU) {
+ rep_node[0] = Mask(rep_node[0], kMask16);
+ } else if (node->opcode() == IrOpcode::kI8x16ExtractLaneU) {
+ rep_node[0] = Mask(rep_node[0], kMask8);
}
- ReplaceNode(node, rep_node, num_lanes);
+ ReplaceNode(node, rep_node, 1);
break;
}
case IrOpcode::kF64x2ReplaceLane:
@@ -1890,6 +2049,17 @@ void SimdScalarLowering::LowerNode(Node* node) {
} else {
rep_node[lane] = repNode;
}
+
+ // The replacement nodes for these opcodes are in Word32, and we always
+ // store nodes in sign-extended form (and mask to account for overflows).
+ if (node->opcode() == IrOpcode::kI16x8ReplaceLane) {
+ rep_node[lane] = graph()->NewNode(machine()->SignExtendWord16ToInt32(),
+ Mask(rep_node[lane], kMask16));
+ } else if (node->opcode() == IrOpcode::kI8x16ReplaceLane) {
+ rep_node[lane] = graph()->NewNode(machine()->SignExtendWord8ToInt32(),
+ Mask(rep_node[lane], kMask8));
+ }
+
ReplaceNode(node, rep_node, num_lanes);
break;
}
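These two hunks enforce the same canonical-form invariant from opposite directions: an unsigned extract must mask off the replicated sign bits before handing out the scalar, and a replace must mask and re-sign-extend the incoming scalar before storing it. In scalar form (illustrative):

```cpp
#include <cstdint>

// Lanes are kept sign-extended, so i8x16.extract_lane_u of a stored
// 0xffffffff (lane value 0xff) must be masked back down to 0x000000ff.
int32_t ExtractLaneU8(int32_t stored) { return stored & 0xff; }  // kMask8

// Conversely, i8x16.replace_lane masks overflow bits off the incoming
// scalar and sign-extends the result back to the canonical Word32 form.
int32_t ReplaceLane8(int32_t incoming) {
  return static_cast<int32_t>(static_cast<int8_t>(incoming & 0xff));
}
```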
@@ -2035,7 +2205,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
// but we still need GetReplacementsWithType if input is float.
DCHECK_EQ(ReplacementType(node), SimdType::kInt32x4);
Node** reps = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ Node** rep_node = zone()->NewArray<Node*>(1);
Node* true_node = mcgraph_->Int32Constant(1);
Node* zero = mcgraph_->Int32Constant(0);
Node* tmp_result = zero;
@@ -2046,10 +2216,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
d.Phi(MachineRepresentation::kWord32, tmp_result, true_node);
}
rep_node[0] = tmp_result;
- for (int i = 1; i < num_lanes; ++i) {
- rep_node[i] = nullptr;
- }
- ReplaceNode(node, rep_node, num_lanes);
+ ReplaceNode(node, rep_node, 1);
break;
}
case IrOpcode::kV32x4AllTrue: {
@@ -2103,7 +2270,9 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, num_lanes);
break;
}
- default: { DefaultLowering(node); }
+ default: {
+ DefaultLowering(node);
+ }
}
}
@@ -2237,7 +2406,7 @@ void SimdScalarLowering::Int32ToSmallerInt(Node** replacements, Node** result) {
for (int j = 0; j < num_ints; j++) {
result[num_ints * i + j] = graph()->NewNode(
sign_extend,
- graph()->NewNode(machine()->Word32Sar(), replacements[i],
+ graph()->NewNode(machine()->Word32Shr(), replacements[i],
mcgraph_->Int32Constant(j * bit_size)));
}
} else {
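This hunk switches the sub-word extraction from an arithmetic shift (Word32Sar) to a logical shift (Word32Shr); the explicit sign_extend node is what establishes the canonical sign-extended form, so the shift itself only needs to move the sub-word into the low bits. A scalar model of the resulting sequence for int16 sub-words (illustrative):

```cpp
#include <cstdint>

// Bring each sub-word into the low bits with a logical shift (Word32Shr),
// then sign-extend exactly those low bits (SignExtendWord16ToInt32).
void UnpackInt16(int32_t word, int32_t out[2]) {
  for (int j = 0; j < 2; ++j) {
    uint32_t shifted = static_cast<uint32_t>(word) >> (j * 16);    // Word32Shr
    out[j] = static_cast<int32_t>(static_cast<int16_t>(shifted));  // sign-extend
  }
}
```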
@@ -2284,89 +2453,239 @@ void SimdScalarLowering::Int32ToInt64(Node** replacements, Node** result) {
}
Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
+ // Operations like extract lane, bitmask, any_true, and all_true replace a
+ // SIMD node with a scalar. Those won't be handled correctly here; they
+ // should be special-cased and replaced with the appropriate scalar.
+ DCHECK_LT(1, ReplacementCount(node));
+
Node** replacements = GetReplacements(node);
- if (ReplacementType(node) == type) {
- return GetReplacements(node);
+ if (type == ReplacementType(node)) {
+ return replacements;
}
+
int num_lanes = NumLanes(type);
Node** result = zone()->NewArray<Node*>(num_lanes);
- if (type == SimdType::kInt64x2) {
- if (ReplacementType(node) == SimdType::kInt32x4) {
- Int32ToInt64(replacements, result);
- } else if (ReplacementType(node) == SimdType::kFloat64x2) {
- Float64ToInt64(replacements, result);
- } else {
- UNIMPLEMENTED();
+
+ switch (type) {
+ case SimdType::kInt64x2: {
+ switch (ReplacementType(node)) {
+ case SimdType::kInt64x2: {
+ UNREACHABLE();
+ }
+ case SimdType::kInt32x4: {
+ Int32ToInt64(replacements, result);
+ break;
+ }
+ case SimdType::kInt16x8: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ SmallerIntToInt32<int16_t>(replacements, to_int32);
+ Int32ToInt64(to_int32, result);
+ break;
+ }
+ case SimdType::kInt8x16: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ SmallerIntToInt32<int8_t>(replacements, to_int32);
+ Int32ToInt64(to_int32, result);
+ break;
+ }
+ case SimdType::kFloat64x2: {
+ Float64ToInt64(replacements, result);
+ break;
+ }
+ case SimdType::kFloat32x4: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Float32ToInt32(replacements, to_int32);
+ Int32ToInt64(to_int32, result);
+ break;
+ }
+ }
+ break;
}
- } else if (type == SimdType::kInt32x4) {
- if (ReplacementType(node) == SimdType::kInt64x2) {
- Int64ToInt32(replacements, result);
- } else if (ReplacementType(node) == SimdType::kFloat64x2) {
- Node** float64_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
- Float64ToInt64(replacements, float64_to_int64);
- Int64ToInt32(float64_to_int64, result);
- } else if (ReplacementType(node) == SimdType::kFloat32x4) {
- Float32ToInt32(replacements, result);
- } else if (ReplacementType(node) == SimdType::kInt16x8) {
- SmallerIntToInt32<int16_t>(replacements, result);
- } else if (ReplacementType(node) == SimdType::kInt8x16) {
- SmallerIntToInt32<int8_t>(replacements, result);
- } else {
- UNREACHABLE();
+ case SimdType::kInt32x4: {
+ switch (ReplacementType(node)) {
+ case SimdType::kInt64x2: {
+ Int64ToInt32(replacements, result);
+ break;
+ }
+ case SimdType::kInt32x4: {
+ UNREACHABLE();
+ }
+ case SimdType::kInt16x8: {
+ SmallerIntToInt32<int16_t>(replacements, result);
+ break;
+ }
+ case SimdType::kInt8x16: {
+ SmallerIntToInt32<int8_t>(replacements, result);
+ break;
+ }
+ case SimdType::kFloat64x2: {
+ Node** float64_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ Float64ToInt64(replacements, float64_to_int64);
+ Int64ToInt32(float64_to_int64, result);
+ break;
+ }
+ case SimdType::kFloat32x4: {
+ Float32ToInt32(replacements, result);
+ break;
+ }
+ }
+ break;
}
- } else if (type == SimdType::kFloat64x2) {
- if (ReplacementType(node) == SimdType::kInt64x2) {
- Int64ToFloat64(replacements, result);
- } else if (ReplacementType(node) == SimdType::kInt32x4) {
- Node** int32_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
- Int32ToInt64(replacements, int32_to_int64);
- Int64ToFloat64(int32_to_int64, result);
- } else {
- UNIMPLEMENTED();
+ case SimdType::kInt16x8: {
+ switch (ReplacementType(node)) {
+ case SimdType::kInt64x2: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Int64ToInt32(replacements, to_int32);
+ Int32ToSmallerInt<int16_t>(to_int32, result);
+ break;
+ }
+ case SimdType::kInt32x4: {
+ Int32ToSmallerInt<int16_t>(replacements, result);
+ break;
+ }
+ case SimdType::kInt16x8: {
+ UNREACHABLE();
+ }
+ case SimdType::kInt8x16: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ SmallerIntToInt32<int8_t>(replacements, to_int32);
+ Int32ToSmallerInt<int16_t>(to_int32, result);
+ break;
+ }
+ case SimdType::kFloat64x2: {
+ Node** to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Float64ToInt64(replacements, to_int64);
+ Int64ToInt32(to_int64, to_int32);
+ Int32ToSmallerInt<int16_t>(to_int32, result);
+ break;
+ }
+ case SimdType::kFloat32x4: {
+ Node** float32_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Float32ToInt32(replacements, float32_to_int32);
+ Int32ToSmallerInt<int16_t>(float32_to_int32, result);
+ break;
+ }
+ }
+ break;
}
- } else if (type == SimdType::kFloat32x4) {
- if (ReplacementType(node) == SimdType::kFloat64x2) {
- Node** float64_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
- Node** int64_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Float64ToInt64(replacements, float64_to_int64);
- Int64ToInt32(float64_to_int64, int64_to_int32);
- Int32ToFloat32(int64_to_int32, result);
- } else if (ReplacementType(node) == SimdType::kInt32x4) {
- Int32ToFloat32(replacements, result);
- } else if (ReplacementType(node) == SimdType::kInt16x8) {
- UNIMPLEMENTED();
- } else if (ReplacementType(node) == SimdType::kInt8x16) {
- SmallerIntToInt32<int8_t>(replacements, result);
- Int32ToFloat32(result, result);
- } else {
- UNREACHABLE();
+ case SimdType::kInt8x16: {
+ switch (ReplacementType(node)) {
+ case SimdType::kInt64x2: {
+ Node** int64_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Int64ToInt32(replacements, int64_to_int32);
+ Int32ToSmallerInt<int8_t>(int64_to_int32, result);
+ break;
+ }
+ case SimdType::kInt32x4: {
+ Int32ToSmallerInt<int8_t>(replacements, result);
+ break;
+ }
+ case SimdType::kInt16x8: {
+ Node** int16_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ SmallerIntToInt32<int16_t>(replacements, int16_to_int32);
+ Int32ToSmallerInt<int8_t>(int16_to_int32, result);
+ break;
+ }
+ case SimdType::kInt8x16: {
+ UNREACHABLE();
+ }
+ case SimdType::kFloat64x2: {
+ Node** to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Float64ToInt64(replacements, to_int64);
+ Int64ToInt32(to_int64, to_int32);
+ Int32ToSmallerInt<int8_t>(to_int32, result);
+ break;
+ }
+ case SimdType::kFloat32x4: {
+ Node** float32_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Float32ToInt32(replacements, float32_to_int32);
+ Int32ToSmallerInt<int8_t>(float32_to_int32, result);
+ break;
+ }
+ }
+ break;
}
- } else if (type == SimdType::kInt16x8) {
- if (ReplacementType(node) == SimdType::kInt32x4) {
- Int32ToSmallerInt<int16_t>(replacements, result);
- } else if (ReplacementType(node) == SimdType::kFloat32x4) {
- Node** float32_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Float32ToInt32(replacements, float32_to_int32);
- Int32ToSmallerInt<int16_t>(float32_to_int32, result);
- } else {
- UNREACHABLE();
+ case SimdType::kFloat64x2: {
+ switch (ReplacementType(node)) {
+ case SimdType::kInt64x2: {
+ Int64ToFloat64(replacements, result);
+ break;
+ }
+ case SimdType::kInt32x4: {
+ Node** int32_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ Int32ToInt64(replacements, int32_to_int64);
+ Int64ToFloat64(int32_to_int64, result);
+ break;
+ }
+ case SimdType::kInt16x8: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Node** to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ SmallerIntToInt32<int16_t>(replacements, to_int32);
+ Int32ToInt64(to_int32, to_int64);
+ Int64ToFloat64(to_int64, result);
+ break;
+ }
+ case SimdType::kInt8x16: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Node** to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ SmallerIntToInt32<int8_t>(replacements, to_int32);
+ Int32ToInt64(to_int32, to_int64);
+ Int64ToFloat64(to_int64, result);
+ break;
+ }
+ case SimdType::kFloat64x2: {
+ UNREACHABLE();
+ }
+ case SimdType::kFloat32x4: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Node** to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ Float32ToInt32(replacements, to_int32);
+ Int32ToInt64(to_int32, to_int64);
+ Int64ToFloat64(to_int64, result);
+ break;
+ }
+ }
+ break;
}
- } else if (type == SimdType::kInt8x16) {
- if (ReplacementType(node) == SimdType::kInt64x2) {
- Node** int64_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Int64ToInt32(replacements, int64_to_int32);
- Int32ToSmallerInt<int8_t>(int64_to_int32, result);
- } else if (ReplacementType(node) == SimdType::kInt32x4) {
- Int32ToSmallerInt<int8_t>(replacements, result);
- } else if (ReplacementType(node) == SimdType::kInt16x8) {
- Node** int16_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- SmallerIntToInt32<int16_t>(replacements, int16_to_int32);
- Int32ToSmallerInt<int8_t>(int16_to_int32, result);
- } else {
- UNIMPLEMENTED();
+ case SimdType::kFloat32x4: {
+ switch (ReplacementType(node)) {
+ case SimdType::kInt64x2: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Int64ToInt32(replacements, to_int32);
+ Int32ToFloat32(to_int32, result);
+ break;
+ }
+ case SimdType::kInt32x4: {
+ Int32ToFloat32(replacements, result);
+ break;
+ }
+ case SimdType::kInt16x8: {
+ Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ SmallerIntToInt32<int16_t>(replacements, to_int32);
+ Int32ToFloat32(to_int32, result);
+ break;
+ }
+ case SimdType::kInt8x16: {
+ SmallerIntToInt32<int8_t>(replacements, result);
+ Int32ToFloat32(result, result);
+ break;
+ }
+ case SimdType::kFloat64x2: {
+ Node** float64_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ Node** int64_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Float64ToInt64(replacements, float64_to_int64);
+ Int64ToInt32(float64_to_int64, int64_to_int32);
+ Int32ToFloat32(int64_to_int32, result);
+ break;
+ }
+ case SimdType::kFloat32x4: {
+ UNREACHABLE();
+ }
+ }
+ break;
}
- } else {
- UNREACHABLE();
}
return result;
}
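The rewritten GetReplacementsWithType enumerates every (current type, requested type) pair explicitly, but each arm is a chain of the same primitive helpers, with 32-bit integer lanes acting as the hub type. As one concrete instance, here is a scalar model of the Float64x2 -> Int8x16 arm, assuming (as the helper names suggest) that the 64-bit step reinterprets the lane bits rather than converting numerically:

```cpp
#include <cstdint>
#include <cstring>

// Float64ToInt64 (bit reinterpretation), Int64ToInt32 (split into two
// 32-bit words), Int32ToSmallerInt<int8_t> (split each word into four
// sign-extended bytes).
void Float64LaneToBytes(double lane, int32_t out[8]) {
  uint64_t bits;
  std::memcpy(&bits, &lane, sizeof bits);  // reinterpret, not a numeric convert
  for (int w = 0; w < 2; ++w) {
    uint32_t word = static_cast<uint32_t>(bits >> (32 * w));
    for (int j = 0; j < 4; ++j) {
      out[4 * w + j] =
          static_cast<int32_t>(static_cast<int8_t>(word >> (8 * j)));
    }
  }
}
```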
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index b86071f0ae..c4ba1e3019 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -119,6 +119,8 @@ class SimdScalarLowering {
MachineType MachineTypeFrom(SimdType simdType);
void LowerBitMaskOp(Node* node, SimdType rep_type, int msb_index);
void LowerAllTrueOp(Node* node, SimdType rep_type);
+ void LowerFloatPseudoMinMax(Node* node, const Operator* op, bool is_max,
+ SimdType type);
MachineGraph* const mcgraph_;
NodeMarker<State> state_;
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 2842259a2e..ef56d56e44 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -293,7 +293,7 @@ class RepresentationSelector {
RepresentationChanger* changer,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- TickCounter* tick_counter)
+ TickCounter* tick_counter, Linkage* linkage)
: jsgraph_(jsgraph),
zone_(zone),
might_need_revisit_(zone),
@@ -310,7 +310,8 @@ class RepresentationSelector {
node_origins_(node_origins),
type_cache_(TypeCache::Get()),
op_typer_(broker, graph_zone()),
- tick_counter_(tick_counter) {
+ tick_counter_(tick_counter),
+ linkage_(linkage) {
}
void ResetNodeInfoState() {
@@ -1362,8 +1363,8 @@ class RepresentationSelector {
return kPointerWriteBarrier;
}
NumberMatcher m(value);
- if (m.HasValue()) {
- if (IsSmiDouble(m.Value())) {
+ if (m.HasResolvedValue()) {
+ if (IsSmiDouble(m.ResolvedValue())) {
// Storing a smi doesn't need a write barrier.
return kNoWriteBarrier;
}
@@ -1408,7 +1409,6 @@ class RepresentationSelector {
IsSomePositiveOrderedNumber(input1_type)
? CheckForMinusZeroMode::kDontCheckForMinusZero
: CheckForMinusZeroMode::kCheckForMinusZero;
-
NodeProperties::ChangeOp(node, simplified()->CheckedInt32Mul(mz_mode));
}
@@ -1452,6 +1452,13 @@ class RepresentationSelector {
Type left_feedback_type = TypeOf(node->InputAt(0));
Type right_feedback_type = TypeOf(node->InputAt(1));
+
+ // Using Signed32 as restriction type amounts to promising there won't be
+ // signed overflow. This is incompatible with relying on a Word32
+ // truncation in order to skip the overflow check.
+ Type const restriction =
+ truncation.IsUsedAsWord32() ? Type::Any() : Type::Signed32();
+
// Handle the case when no int32 checks on inputs are necessary (but
// an overflow check is needed on the output). Note that we do not
// have to do any check if at most one side can be minus zero. For
@@ -1465,7 +1472,7 @@ class RepresentationSelector {
right_upper.Is(Type::Signed32OrMinusZero()) &&
(left_upper.Is(Type::Signed32()) || right_upper.Is(Type::Signed32()))) {
VisitBinop<T>(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32, Type::Signed32());
+ MachineRepresentation::kWord32, restriction);
} else {
// If the output's truncation is identify-zeros, we can pass it
// along. Moreover, if the operation is addition and we know the
@@ -1485,8 +1492,9 @@ class RepresentationSelector {
UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, FeedbackSource(),
kIdentifyZeros);
VisitBinop<T>(node, left_use, right_use, MachineRepresentation::kWord32,
- Type::Signed32());
+ restriction);
}
+
if (lower<T>()) {
if (truncation.IsUsedAsWord32() ||
!CanOverflowSigned32(node->op(), left_feedback_type,
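The new restriction type encodes a subtle rule: restricting the output to Signed32 is a promise that the add/sub never overflows, which later phases may use to drop the check. That promise must not be made when the check is only skippable because every use truncates to Word32; in that case overflow can really happen and is merely unobservable modulo 2^32. A simplified illustration of the two regimes (plain C++, not compiler code):

```cpp
#include <cstdint>

// All uses truncate to 32 bits (Word32 truncation): a wrapping add is
// observationally equivalent, so the overflow check can be skipped, but the
// result must be typed Any, since the value may really have overflowed.
uint32_t AddUsedAsWord32(int32_t a, int32_t b) {
  return static_cast<uint32_t>(a) + static_cast<uint32_t>(b);  // wraps, fine
}

// A use needs the exact number (e.g. as a double): the check must stay,
// and only then is a Signed32 restriction on the output sound.
double AddUsedAsNumber(int32_t a, int32_t b) {
  return static_cast<double>(a) + static_cast<double>(b);  // exact in double
}
```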
@@ -1745,15 +1753,15 @@ class RepresentationSelector {
return UseInfo::Bool();
case CTypeInfo::Type::kInt32:
case CTypeInfo::Type::kUint32:
- case CTypeInfo::Type::kFloat32:
return UseInfo::CheckedNumberAsWord32(feedback);
// TODO(mslekova): We deopt for unsafe integers, but ultimately we want
// to make this less restrictive in order to stay on the fast path.
case CTypeInfo::Type::kInt64:
case CTypeInfo::Type::kUint64:
return UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, feedback);
+ case CTypeInfo::Type::kFloat32:
case CTypeInfo::Type::kFloat64:
- return UseInfo::CheckedNumberAsFloat64(kIdentifyZeros, feedback);
+ return UseInfo::CheckedNumberAsFloat64(kDistinguishZeros, feedback);
case CTypeInfo::Type::kV8Value:
return UseInfo::AnyTagged();
}
@@ -1838,9 +1846,10 @@ class RepresentationSelector {
// here, otherwise the input conversion will fail.
return VisitLeaf<T>(node, MachineRepresentation::kTagged);
case IrOpcode::kParameter:
- // TODO(titzer): use representation from linkage.
return VisitUnop<T>(node, UseInfo::None(),
- MachineRepresentation::kTagged);
+ linkage()
+ ->GetParameterType(ParameterIndexOf(node->op()))
+ .representation());
case IrOpcode::kInt32Constant:
return VisitLeaf<T>(node, MachineRepresentation::kWord32);
case IrOpcode::kInt64Constant:
@@ -2828,7 +2837,16 @@ class RepresentationSelector {
return VisitUnop<T>(node, UseInfo::AnyTagged(),
MachineRepresentation::kTaggedPointer);
}
- case IrOpcode::kTierUpCheck:
+ case IrOpcode::kTierUpCheck: {
+ ProcessInput<T>(node, 0, UseInfo::AnyTagged());
+ ProcessInput<T>(node, 1, UseInfo::AnyTagged());
+ ProcessInput<T>(node, 2, UseInfo::AnyTagged());
+ ProcessInput<T>(node, 3, UseInfo::TruncatingWord32());
+ ProcessInput<T>(node, 4, UseInfo::AnyTagged());
+ ProcessRemainingInputs<T>(node, 5);
+ SetOutput<T>(node, MachineRepresentation::kNone);
+ return;
+ }
case IrOpcode::kUpdateInterruptBudget: {
ProcessInput<T>(node, 0, UseInfo::AnyTagged());
ProcessRemainingInputs<T>(node, 1);
@@ -3836,6 +3854,7 @@ class RepresentationSelector {
TypeCache const* type_cache_;
OperationTyper op_typer_; // helper for the feedback typer
TickCounter* const tick_counter_;
+ Linkage* const linkage_;
NodeInfo* GetInfo(Node* node) {
DCHECK(node->id() < count_);
@@ -3843,6 +3862,7 @@ class RepresentationSelector {
}
Zone* zone() { return zone_; }
Zone* graph_zone() { return jsgraph_->zone(); }
+ Linkage* linkage() { return linkage_; }
};
// Template specializations
@@ -4006,7 +4026,8 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
PoisoningMitigationLevel poisoning_level,
- TickCounter* tick_counter)
+ TickCounter* tick_counter,
+ Linkage* linkage)
: jsgraph_(jsgraph),
broker_(broker),
zone_(zone),
@@ -4014,13 +4035,14 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
source_positions_(source_positions),
node_origins_(node_origins),
poisoning_level_(poisoning_level),
- tick_counter_(tick_counter) {}
+ tick_counter_(tick_counter),
+ linkage_(linkage) {}
void SimplifiedLowering::LowerAllNodes() {
RepresentationChanger changer(jsgraph(), broker_);
RepresentationSelector selector(jsgraph(), broker_, zone_, &changer,
source_positions_, node_origins_,
- tick_counter_);
+ tick_counter_, linkage_);
selector.Run(this);
}
@@ -4279,7 +4301,7 @@ Node* SimplifiedLowering::Int32Div(Node* const node) {
return graph()->NewNode(machine()->Int32Sub(), zero, lhs);
} else if (m.right().Is(0)) {
return rhs;
- } else if (machine()->Int32DivIsSafe() || m.right().HasValue()) {
+ } else if (machine()->Int32DivIsSafe() || m.right().HasResolvedValue()) {
return graph()->NewNode(machine()->Int32Div(), lhs, rhs, graph()->start());
}
@@ -4350,7 +4372,7 @@ Node* SimplifiedLowering::Int32Mod(Node* const node) {
if (m.right().Is(-1) || m.right().Is(0)) {
return zero;
- } else if (m.right().HasValue()) {
+ } else if (m.right().HasResolvedValue()) {
return graph()->NewNode(machine()->Int32Mod(), lhs, rhs, graph()->start());
}
@@ -4463,7 +4485,7 @@ Node* SimplifiedLowering::Uint32Div(Node* const node) {
if (m.right().Is(0)) {
return zero;
- } else if (machine()->Uint32DivIsSafe() || m.right().HasValue()) {
+ } else if (machine()->Uint32DivIsSafe() || m.right().HasResolvedValue()) {
return graph()->NewNode(machine()->Uint32Div(), lhs, rhs, graph()->start());
}
@@ -4482,7 +4504,7 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
if (m.right().Is(0)) {
return zero;
- } else if (m.right().HasValue()) {
+ } else if (m.right().HasResolvedValue()) {
return graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, graph()->start());
}
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 414e3588d7..f38d3df132 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -30,7 +30,7 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
SourcePositionTable* source_position,
NodeOriginTable* node_origins,
PoisoningMitigationLevel poisoning_level,
- TickCounter* tick_counter);
+ TickCounter* tick_counter, Linkage* linkage);
~SimplifiedLowering() = default;
void LowerAllNodes();
@@ -72,6 +72,7 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
PoisoningMitigationLevel poisoning_level_;
TickCounter* const tick_counter_;
+ Linkage* const linkage_;
Node* Float64Round(Node* const node);
Node* Float64Sign(Node* const node);
@@ -98,6 +99,7 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
CommonOperatorBuilder* common() { return jsgraph()->common(); }
MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); }
+ Linkage* linkage() { return linkage_; }
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index 3a5b3c6ec6..d2591b5502 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -20,8 +20,8 @@ namespace {
Decision DecideObjectIsSmi(Node* const input) {
NumberMatcher m(input);
- if (m.HasValue()) {
- return IsSmiDouble(m.Value()) ? Decision::kTrue : Decision::kFalse;
+ if (m.HasResolvedValue()) {
+ return IsSmiDouble(m.ResolvedValue()) ? Decision::kTrue : Decision::kFalse;
}
if (m.IsAllocate()) return Decision::kFalse;
if (m.IsChangeBitToTagged()) return Decision::kFalse;
@@ -44,7 +44,6 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
DisallowHeapAccess no_heap_access;
switch (node->opcode()) {
case IrOpcode::kBooleanNot: {
- // TODO(neis): Provide HeapObjectRefMatcher?
HeapObjectMatcher m(node->InputAt(0));
if (m.Is(factory()->true_value())) return ReplaceBoolean(false);
if (m.Is(factory()->false_value())) return ReplaceBoolean(true);
@@ -60,7 +59,7 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kChangeTaggedToBit: {
HeapObjectMatcher m(node->InputAt(0));
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
return ReplaceInt32(m.Ref(broker()).BooleanValue());
}
if (m.IsChangeBitToTagged()) return Replace(m.InputAt(0));
@@ -68,14 +67,14 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kChangeFloat64ToTagged: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceNumber(m.Value());
+ if (m.HasResolvedValue()) return ReplaceNumber(m.ResolvedValue());
if (m.IsChangeTaggedToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
case IrOpcode::kChangeInt31ToTaggedSigned:
case IrOpcode::kChangeInt32ToTagged: {
Int32Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceNumber(m.Value());
+ if (m.HasResolvedValue()) return ReplaceNumber(m.ResolvedValue());
if (m.IsChangeTaggedToInt32() || m.IsChangeTaggedSignedToInt32()) {
return Replace(m.InputAt(0));
}
@@ -84,7 +83,7 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
case IrOpcode::kChangeTaggedToFloat64:
case IrOpcode::kTruncateTaggedToFloat64: {
NumberMatcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceFloat64(m.Value());
+ if (m.HasResolvedValue()) return ReplaceFloat64(m.ResolvedValue());
if (m.IsChangeFloat64ToTagged() || m.IsChangeFloat64ToTaggedPointer()) {
return Replace(m.node()->InputAt(0));
}
@@ -99,7 +98,8 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
case IrOpcode::kChangeTaggedSignedToInt32:
case IrOpcode::kChangeTaggedToInt32: {
NumberMatcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceInt32(DoubleToInt32(m.ResolvedValue()));
if (m.IsChangeFloat64ToTagged() || m.IsChangeFloat64ToTaggedPointer()) {
return Change(node, machine()->ChangeFloat64ToInt32(), m.InputAt(0));
}
@@ -110,7 +110,8 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kChangeTaggedToUint32: {
NumberMatcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceUint32(DoubleToUint32(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceUint32(DoubleToUint32(m.ResolvedValue()));
if (m.IsChangeFloat64ToTagged() || m.IsChangeFloat64ToTaggedPointer()) {
return Change(node, machine()->ChangeFloat64ToUint32(), m.InputAt(0));
}
@@ -119,12 +120,14 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kChangeUint32ToTagged: {
Uint32Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceNumber(FastUI2D(m.ResolvedValue()));
break;
}
case IrOpcode::kTruncateTaggedToWord32: {
NumberMatcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceInt32(DoubleToInt32(m.ResolvedValue()));
if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged() ||
m.IsChangeUint32ToTagged()) {
return Replace(m.InputAt(0));
@@ -136,8 +139,9 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kCheckedFloat64ToInt32: {
Float64Matcher m(node->InputAt(0));
- if (m.HasValue() && IsInt32Double(m.Value())) {
- Node* value = jsgraph()->Int32Constant(static_cast<int32_t>(m.Value()));
+ if (m.HasResolvedValue() && IsInt32Double(m.ResolvedValue())) {
+ Node* value =
+ jsgraph()->Int32Constant(static_cast<int32_t>(m.ResolvedValue()));
ReplaceWithValue(node, value);
return Replace(value);
}
@@ -212,7 +216,8 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kNumberAbs: {
NumberMatcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceNumber(std::fabs(m.Value()));
+ if (m.HasResolvedValue())
+ return ReplaceNumber(std::fabs(m.ResolvedValue()));
break;
}
case IrOpcode::kReferenceEqual: {
@@ -224,26 +229,25 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
// (x + a) + b => x + (a + b) where a and b are constants and have the
// same sign.
Int32BinopMatcher m(node);
- if (m.right().HasValue()) {
+ if (m.right().HasResolvedValue()) {
Node* checked_int32_add = m.left().node();
if (checked_int32_add->opcode() == IrOpcode::kCheckedInt32Add) {
Int32BinopMatcher n(checked_int32_add);
- if (n.right().HasValue() &&
- (n.right().Value() >= 0) == (m.right().Value() >= 0)) {
+ if (n.right().HasResolvedValue() &&
+ (n.right().ResolvedValue() >= 0) ==
+ (m.right().ResolvedValue() >= 0)) {
int32_t val;
bool overflow = base::bits::SignedAddOverflow32(
- n.right().Value(), m.right().Value(), &val);
+ n.right().ResolvedValue(), m.right().ResolvedValue(), &val);
if (!overflow) {
- bool has_no_other_value_uses = true;
+ bool has_no_other_uses = true;
for (Edge edge : checked_int32_add->use_edges()) {
- if (!edge.from()->IsDead() &&
- !NodeProperties::IsEffectEdge(edge) &&
- edge.from() != node) {
- has_no_other_value_uses = false;
+ if (!edge.from()->IsDead() && edge.from() != node) {
+ has_no_other_uses = false;
break;
}
}
- if (has_no_other_value_uses) {
+ if (has_no_other_uses) {
node->ReplaceInput(0, n.left().node());
node->ReplaceInput(1, jsgraph()->Int32Constant(val));
RelaxEffectsAndControls(checked_int32_add);
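The reduction above rewrites (x + a) + b into x + (a + b) when a and b are constants of the same sign whose sum does not overflow, and the inner checked add has no other uses (now including effect uses) so it can be relaxed away. The same-sign requirement ensures the intermediate x + a lies numerically between x and x + (a + b), so folding cannot hide an overflow the original sequence would have caught. A sketch of just the constant-folding condition (illustrative):

```cpp
#include <cstdint>
#include <limits>

// Folding condition for (x + a) + b => x + (a + b) on checked int32 adds.
bool CanFoldConstants(int32_t a, int32_t b, int32_t* sum) {
  if ((a >= 0) != (b >= 0)) return false;      // signs must match
  int64_t wide = static_cast<int64_t>(a) + b;  // overflow-safe add
  if (wide < std::numeric_limits<int32_t>::min() ||
      wide > std::numeric_limits<int32_t>::max()) {
    return false;                              // a + b itself overflows
  }
  *sum = static_cast<int32_t>(wide);
  return true;
}
```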
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index b7069b44f3..650de7fb55 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -29,6 +29,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
SimplifiedOperatorReducer(Editor* editor, JSGraph* jsgraph,
JSHeapBroker* broker);
~SimplifiedOperatorReducer() final;
+ SimplifiedOperatorReducer(const SimplifiedOperatorReducer&) = delete;
+ SimplifiedOperatorReducer& operator=(const SimplifiedOperatorReducer&) =
+ delete;
const char* reducer_name() const override {
return "SimplifiedOperatorReducer";
@@ -57,8 +60,6 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
JSGraph* const jsgraph_;
JSHeapBroker* const broker_;
-
- DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
};
} // namespace compiler
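A recurring mechanical change in this commit, here and in the headers below, is the migration from the DISALLOW_COPY_AND_ASSIGN macro to explicitly deleted special members. The C++11 idiom keeps the intent visible next to the constructors instead of hidden behind a macro in the private section. In isolation (SomeReducer is a stand-in name, not a V8 class):

```cpp
// Old idiom: a macro in the private section expanded to inaccessible copy
// operations:
//   private:
//    DISALLOW_COPY_AND_ASSIGN(SomeReducer);
//
// New idiom: deleted members declared alongside the other constructors.
class SomeReducer {
 public:
  SomeReducer() = default;
  SomeReducer(const SomeReducer&) = delete;
  SomeReducer& operator=(const SomeReducer&) = delete;
};
```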
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 33bd71d221..db6d8257b1 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -294,7 +294,7 @@ bool operator==(DynamicCheckMapsParameters const& lhs,
DCHECK_IMPLIES(lhs.feedback() == rhs.feedback(),
lhs.flags() == rhs.flags() && lhs.state() == rhs.state() &&
lhs.handler().address() == rhs.handler().address() &&
- lhs.map().address() == rhs.map().address());
+ lhs.maps() == rhs.maps());
return lhs.feedback() == rhs.feedback();
}
@@ -308,7 +308,7 @@ size_t hash_value(DynamicCheckMapsParameters const& p) {
std::ostream& operator<<(std::ostream& os,
DynamicCheckMapsParameters const& p) {
return os << p.handler() << ", " << p.feedback() << "," << p.state() << ","
- << p.flags() << "," << p.map().address();
+ << p.flags() << "," << p.maps();
}
DynamicCheckMapsParameters const& DynamicCheckMapsParametersOf(
@@ -1325,7 +1325,7 @@ const Operator* SimplifiedOperatorBuilder::UpdateInterruptBudget(int delta) {
const Operator* SimplifiedOperatorBuilder::TierUpCheck() {
return zone()->New<Operator>(IrOpcode::kTierUpCheck,
Operator::kNoThrow | Operator::kNoDeopt,
- "TierUpCheck", 1, 1, 1, 0, 1, 0);
+ "TierUpCheck", 5, 1, 1, 0, 1, 0);
}
const Operator* SimplifiedOperatorBuilder::AssertType(Type type) {
@@ -1487,10 +1487,9 @@ const Operator* SimplifiedOperatorBuilder::CheckMaps(
}
const Operator* SimplifiedOperatorBuilder::DynamicCheckMaps(
- CheckMapsFlags flags, Handle<Object> handler, MaybeHandle<Map> maybe_map,
- const FeedbackSource& feedback) {
- DynamicCheckMapsParameters const parameters(flags, handler, maybe_map,
- feedback);
+ CheckMapsFlags flags, Handle<Object> handler,
+ ZoneHandleSet<Map> const& maps, const FeedbackSource& feedback) {
+ DynamicCheckMapsParameters const parameters(flags, handler, maps, feedback);
return zone()->New<Operator1<DynamicCheckMapsParameters>>( // --
IrOpcode::kDynamicCheckMaps, // opcode
Operator::kNoThrow | Operator::kNoWrite, // flags
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index eab865fd59..e130674c91 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -85,6 +85,9 @@ struct FieldAccess {
ConstFieldInfo const_field_info; // the constness of this access, and the
// field owner map, if the access is const
bool is_store_in_literal; // originates from a kStoreInLiteral access
+#ifdef V8_HEAP_SANDBOX
+ ExternalPointerTag external_pointer_tag = kExternalPointerNullTag;
+#endif
FieldAccess()
: base_is_tagged(kTaggedBase),
@@ -101,7 +104,12 @@ struct FieldAccess {
WriteBarrierKind write_barrier_kind,
LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe,
ConstFieldInfo const_field_info = ConstFieldInfo::None(),
- bool is_store_in_literal = false)
+ bool is_store_in_literal = false
+#ifdef V8_HEAP_SANDBOX
+ ,
+ ExternalPointerTag external_pointer_tag = kExternalPointerNullTag
+#endif
+ )
: base_is_tagged(base_is_tagged),
offset(offset),
name(name),
@@ -111,7 +119,12 @@ struct FieldAccess {
write_barrier_kind(write_barrier_kind),
load_sensitivity(load_sensitivity),
const_field_info(const_field_info),
- is_store_in_literal(is_store_in_literal) {
+ is_store_in_literal(is_store_in_literal)
+#ifdef V8_HEAP_SANDBOX
+ ,
+ external_pointer_tag(external_pointer_tag)
+#endif
+ {
DCHECK_GE(offset, 0);
}
@@ -432,25 +445,22 @@ class DynamicCheckMapsParameters final {
enum ICState { kMonomorphic, kPolymorphic };
DynamicCheckMapsParameters(CheckMapsFlags flags, Handle<Object> handler,
- MaybeHandle<Map> maybe_map,
+ ZoneHandleSet<Map> const& maps,
const FeedbackSource& feedback)
- : flags_(flags),
- handler_(handler),
- maybe_map_(maybe_map),
- feedback_(feedback) {}
+ : flags_(flags), handler_(handler), maps_(maps), feedback_(feedback) {}
CheckMapsFlags flags() const { return flags_; }
Handle<Object> handler() const { return handler_; }
- MaybeHandle<Map> map() const { return maybe_map_; }
+ ZoneHandleSet<Map> const& maps() const { return maps_; }
FeedbackSource const& feedback() const { return feedback_; }
ICState state() const {
- return maybe_map_.is_null() ? ICState::kPolymorphic : ICState::kMonomorphic;
+ return maps_.size() == 1 ? ICState::kMonomorphic : ICState::kPolymorphic;
}
private:
CheckMapsFlags const flags_;
Handle<Object> const handler_;
- MaybeHandle<Map> const maybe_map_;
+ ZoneHandleSet<Map> const maps_;
FeedbackSource const feedback_;
};
@@ -708,6 +718,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
: public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit SimplifiedOperatorBuilder(Zone* zone);
+ SimplifiedOperatorBuilder(const SimplifiedOperatorBuilder&) = delete;
+ SimplifiedOperatorBuilder& operator=(const SimplifiedOperatorBuilder&) =
+ delete;
const Operator* BooleanNot();
@@ -888,7 +901,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>,
const FeedbackSource& = FeedbackSource());
const Operator* DynamicCheckMaps(CheckMapsFlags flags, Handle<Object> handler,
- MaybeHandle<Map> map,
+ ZoneHandleSet<Map> const& maps,
const FeedbackSource& feedback);
const Operator* CheckNotTaggedHole();
const Operator* CheckNumber(const FeedbackSource& feedback);
@@ -1054,8 +1067,6 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const SimplifiedOperatorGlobalCache& cache_;
Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorBuilder);
};
// Node wrappers.
@@ -1177,7 +1188,12 @@ class TierUpCheckNode final : public SimplifiedNodeWrapperBase {
CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kTierUpCheck);
}
-#define INPUTS(V) V(FeedbackVector, feedback_vector, 0, FeedbackVector)
+#define INPUTS(V) \
+ V(FeedbackVector, feedback_vector, 0, FeedbackVector) \
+ V(Target, target, 1, JSReceiver) \
+ V(NewTarget, new_target, 2, Object) \
+ V(InputCount, input_count, 3, UntaggedT) \
+ V(Context, context, 4, Context)
INPUTS(DEFINE_INPUT_ACCESSORS)
#undef INPUTS
};
diff --git a/deps/v8/src/compiler/type-narrowing-reducer.h b/deps/v8/src/compiler/type-narrowing-reducer.h
index 136f11626e..ab8c4a483c 100644
--- a/deps/v8/src/compiler/type-narrowing-reducer.h
+++ b/deps/v8/src/compiler/type-narrowing-reducer.h
@@ -21,6 +21,8 @@ class V8_EXPORT_PRIVATE TypeNarrowingReducer final
public:
TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker);
~TypeNarrowingReducer() final;
+ TypeNarrowingReducer(const TypeNarrowingReducer&) = delete;
+ TypeNarrowingReducer& operator=(const TypeNarrowingReducer&) = delete;
const char* reducer_name() const override { return "TypeNarrowingReducer"; }
@@ -33,8 +35,6 @@ class V8_EXPORT_PRIVATE TypeNarrowingReducer final
JSGraph* const jsgraph_;
OperationTyper op_typer_;
-
- DISALLOW_COPY_AND_ASSIGN(TypeNarrowingReducer);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index abc88c4b8e..6905f4e36d 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -813,7 +813,7 @@ Reduction TypedOptimization::ReduceJSToNumberInput(Node* input) {
if (input_type.Is(Type::String())) {
HeapObjectMatcher m(input);
- if (m.HasValue() && m.Ref(broker()).IsString()) {
+ if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
StringRef input_value = m.Ref(broker()).AsString();
double number;
ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index 336c29540d..be3f56f845 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -30,6 +30,8 @@ class V8_EXPORT_PRIVATE TypedOptimization final
TypedOptimization(Editor* editor, CompilationDependencies* dependencies,
JSGraph* jsgraph, JSHeapBroker* broker);
~TypedOptimization() override;
+ TypedOptimization(const TypedOptimization&) = delete;
+ TypedOptimization& operator=(const TypedOptimization&) = delete;
const char* reducer_name() const override { return "TypedOptimization"; }
@@ -89,8 +91,6 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Type const true_type_;
Type const false_type_;
TypeCache const* type_cache_;
-
- DISALLOW_COPY_AND_ASSIGN(TypedOptimization);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index a4996f3cc2..831263554a 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -576,6 +576,10 @@ Type Typer::Visitor::ObjectIsCallable(Type type, Typer* t) {
Type Typer::Visitor::ObjectIsConstructor(Type type, Typer* t) {
// TODO(turbofan): Introduce a Type::Constructor?
CHECK(!type.IsNone());
+ if (type.IsHeapConstant() &&
+ type.AsHeapConstant()->Ref().map().is_constructor()) {
+ return t->singleton_true_;
+ }
if (!type.Maybe(Type::Callable())) return t->singleton_false_;
return Type::Boolean();
}
@@ -1434,7 +1438,7 @@ Type Typer::Visitor::JSOrdinaryHasInstanceTyper(Type lhs, Type rhs, Typer* t) {
}
Type Typer::Visitor::TypeJSGetSuperConstructor(Node* node) {
- return Type::Callable();
+ return Type::NonInternal();
}
// JS context operators.
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index 305470d724..d1b6afeaf9 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -31,6 +31,8 @@ class V8_EXPORT_PRIVATE Typer {
Typer(JSHeapBroker* broker, Flags flags, Graph* graph,
TickCounter* tick_counter);
~Typer();
+ Typer(const Typer&) = delete;
+ Typer& operator=(const Typer&) = delete;
void Run();
// TODO(bmeurer,jarin): Remove this once we have a notion of "roots" on Graph.
@@ -57,8 +59,6 @@ class V8_EXPORT_PRIVATE Typer {
Type singleton_false_;
Type singleton_true_;
-
- DISALLOW_COPY_AND_ASSIGN(Typer);
};
DEFINE_OPERATORS_FOR_FLAGS(Typer::Flags)
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 0daf20d78a..3594dd9cad 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -294,7 +294,6 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case BYTECODE_ARRAY_TYPE:
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
case ARRAY_BOILERPLATE_DESCRIPTION_TYPE:
- case DESCRIPTOR_ARRAY_TYPE:
case TRANSITION_ARRAY_TYPE:
case FEEDBACK_CELL_TYPE:
case CLOSURE_FEEDBACK_CELL_ARRAY_TYPE:
@@ -310,6 +309,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case EVAL_CONTEXT_TYPE:
case FUNCTION_CONTEXT_TYPE:
case MODULE_CONTEXT_TYPE:
+ case MODULE_REQUEST_TYPE:
case NATIVE_CONTEXT_TYPE:
case SCRIPT_CONTEXT_TYPE:
case WITH_CONTEXT_TYPE:
@@ -329,50 +329,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
// Remaining instance types are unsupported for now. If any of them do
// require bit set types, they should get kOtherInternal.
- case FREE_SPACE_TYPE:
- case FILLER_TYPE:
- case ACCESS_CHECK_INFO_TYPE:
- case ASM_WASM_DATA_TYPE:
- case CALL_HANDLER_INFO_TYPE:
- case INTERCEPTOR_INFO_TYPE:
- case OBJECT_TEMPLATE_INFO_TYPE:
- case ALLOCATION_MEMENTO_TYPE:
- case ALIASED_ARGUMENTS_ENTRY_TYPE:
- case PROMISE_CAPABILITY_TYPE:
- case PROMISE_REACTION_TYPE:
- case CLASS_POSITIONS_TYPE:
- case DEBUG_INFO_TYPE:
- case STACK_FRAME_INFO_TYPE:
- case STACK_TRACE_FRAME_TYPE:
- case SMALL_ORDERED_HASH_MAP_TYPE:
- case SMALL_ORDERED_HASH_SET_TYPE:
- case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
- case PROTOTYPE_INFO_TYPE:
- case INTERPRETER_DATA_TYPE:
- case TEMPLATE_OBJECT_DESCRIPTION_TYPE:
- case TUPLE2_TYPE:
- case BREAK_POINT_TYPE:
- case BREAK_POINT_INFO_TYPE:
- case WASM_VALUE_TYPE:
- case CACHED_TEMPLATE_OBJECT_TYPE:
- case ENUM_CACHE_TYPE:
- case WASM_CAPI_FUNCTION_DATA_TYPE:
- case WASM_INDIRECT_FUNCTION_TABLE_TYPE:
- case WASM_EXCEPTION_TAG_TYPE:
- case WASM_EXPORTED_FUNCTION_DATA_TYPE:
- case WASM_JS_FUNCTION_DATA_TYPE:
- case LOAD_HANDLER_TYPE:
- case STORE_HANDLER_TYPE:
- case ASYNC_GENERATOR_REQUEST_TYPE:
- case CODE_DATA_CONTAINER_TYPE:
- case CALLBACK_TASK_TYPE:
- case CALLABLE_TASK_TYPE:
- case PROMISE_FULFILL_REACTION_JOB_TASK_TYPE:
- case PROMISE_REJECT_REACTION_JOB_TASK_TYPE:
- case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE:
-#define MAKE_TORQUE_CLASS_TYPE(INSTANCE_TYPE, Name, name) case INSTANCE_TYPE:
- TORQUE_DEFINED_INSTANCE_TYPE_LIST(MAKE_TORQUE_CLASS_TYPE)
-#undef MAKE_TORQUE_CLASS_TYPE
+ default:
UNREACHABLE();
}
UNREACHABLE();
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 302e1212ee..b137e6711d 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -772,7 +772,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// We don't check the input for Type::Function because this_function can
// be context-allocated.
CheckValueInputIs(node, 0, Type::Any());
- CheckTypeIs(node, Type::Callable());
+ CheckTypeIs(node, Type::NonInternal());
break;
case IrOpcode::kJSHasContextExtension:
diff --git a/deps/v8/src/compiler/verifier.h b/deps/v8/src/compiler/verifier.h
index 308b44060a..0be0102cc0 100644
--- a/deps/v8/src/compiler/verifier.h
+++ b/deps/v8/src/compiler/verifier.h
@@ -25,6 +25,9 @@ class Verifier {
enum CheckInputs { kValuesOnly, kAll };
enum CodeType { kDefault, kWasm };
+ Verifier(const Verifier&) = delete;
+ Verifier& operator=(const Verifier&) = delete;
+
static void Run(Graph* graph, Typing typing = TYPED,
CheckInputs check_inputs = kAll,
CodeType code_type = kDefault);
@@ -53,7 +56,6 @@ class Verifier {
private:
class Visitor;
- DISALLOW_COPY_AND_ASSIGN(Verifier);
};
// Verifies properties of a schedule, such as dominance, phi placement, etc.
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 91dde088f6..d6b7113b27 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -16,6 +16,7 @@
#include "src/codegen/code-factory.h"
#include "src/codegen/compiler.h"
#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/machine-type.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/instruction-selector.h"
@@ -297,9 +298,9 @@ Node* WasmGraphBuilder::RefFunc(uint32_t function_index) {
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmRefFunc, RelocInfo::WASM_STUB_CALL);
- return SetEffectControl(
- graph()->NewNode(mcgraph()->common()->Call(call_descriptor), call_target,
- Uint32Constant(function_index), effect(), control()));
+ return SetEffectControl(graph()->NewNode(
+ mcgraph()->common()->Call(call_descriptor), call_target,
+ mcgraph()->Uint32Constant(function_index), effect(), control()));
}
Node* WasmGraphBuilder::RefAsNonNull(Node* arg,
@@ -321,10 +322,6 @@ Node* WasmGraphBuilder::BuildLoadIsolateRoot() {
return LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
}
-Node* WasmGraphBuilder::Uint32Constant(uint32_t value) {
- return mcgraph()->Uint32Constant(value);
-}
-
Node* WasmGraphBuilder::Int32Constant(int32_t value) {
return mcgraph()->Int32Constant(value);
}
@@ -333,10 +330,6 @@ Node* WasmGraphBuilder::Int64Constant(int64_t value) {
return mcgraph()->Int64Constant(value);
}
-Node* WasmGraphBuilder::IntPtrConstant(intptr_t value) {
- return mcgraph()->IntPtrConstant(value);
-}
-
void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
DCHECK_NOT_NULL(env_); // Wrappers don't get stack checks.
if (!FLAG_wasm_stack_checks || !env_->runtime_exception_support) {
@@ -1062,7 +1055,7 @@ Node* WasmGraphBuilder::TrapIfEq32(wasm::TrapReason reason, Node* node,
int32_t val,
wasm::WasmCodePosition position) {
Int32Matcher m(node);
- if (m.HasValue() && !m.Is(val)) return graph()->start();
+ if (m.HasResolvedValue() && !m.Is(val)) return graph()->start();
if (val == 0) {
return TrapIfFalse(reason, node, position);
} else {
@@ -1084,7 +1077,7 @@ Node* WasmGraphBuilder::TrapIfEq64(wasm::TrapReason reason, Node* node,
int64_t val,
wasm::WasmCodePosition position) {
Int64Matcher m(node);
- if (m.HasValue() && !m.Is(val)) return graph()->start();
+ if (m.HasResolvedValue() && !m.Is(val)) return graph()->start();
return TrapIfTrue(reason,
graph()->NewNode(mcgraph()->machine()->Word64Equal(), node,
mcgraph()->Int64Constant(val)),
@@ -1144,9 +1137,10 @@ Node* WasmGraphBuilder::MaskShiftCount32(Node* node) {
if (!mcgraph()->machine()->Word32ShiftIsSafe()) {
// Shifts by constants are so common we pattern-match them here.
Int32Matcher match(node);
- if (match.HasValue()) {
- int32_t masked = (match.Value() & kMask32);
- if (match.Value() != masked) node = mcgraph()->Int32Constant(masked);
+ if (match.HasResolvedValue()) {
+ int32_t masked = (match.ResolvedValue() & kMask32);
+ if (match.ResolvedValue() != masked)
+ node = mcgraph()->Int32Constant(masked);
} else {
node = graph()->NewNode(mcgraph()->machine()->Word32And(), node,
mcgraph()->Int32Constant(kMask32));
@@ -1160,9 +1154,10 @@ Node* WasmGraphBuilder::MaskShiftCount64(Node* node) {
if (!mcgraph()->machine()->Word32ShiftIsSafe()) {
// Shifts by constants are so common we pattern-match them here.
Int64Matcher match(node);
- if (match.HasValue()) {
- int64_t masked = (match.Value() & kMask64);
- if (match.Value() != masked) node = mcgraph()->Int64Constant(masked);
+ if (match.HasResolvedValue()) {
+ int64_t masked = (match.ResolvedValue() & kMask64);
+ if (match.ResolvedValue() != masked)
+ node = mcgraph()->Int64Constant(masked);
} else {
node = graph()->NewNode(mcgraph()->machine()->Word64And(), node,
mcgraph()->Int64Constant(kMask64));
@@ -2084,7 +2079,7 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception);
Node* create_parameters[] = {
LoadExceptionTagFromTable(exception_index),
- BuildChangeUint31ToSmi(Uint32Constant(encoded_size))};
+ BuildChangeUint31ToSmi(mcgraph()->Uint32Constant(encoded_size))};
Node* except_obj =
BuildCallToRuntime(Runtime::kWasmThrowCreate, create_parameters,
arraysize(create_parameters));
@@ -2357,10 +2352,10 @@ Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
MachineOperatorBuilder* m = mcgraph()->machine();
Int32Matcher mr(right);
- if (mr.HasValue()) {
- if (mr.Value() == 0) {
+ if (mr.HasResolvedValue()) {
+ if (mr.ResolvedValue() == 0) {
return mcgraph()->Int32Constant(0);
- } else if (mr.Value() == -1) {
+ } else if (mr.ResolvedValue() == -1) {
// The result is the negation of the left input.
return graph()->NewNode(m->Int32Sub(), mcgraph()->Int32Constant(0), left);
}
@@ -2400,8 +2395,8 @@ Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
Node* const zero = mcgraph()->Int32Constant(0);
Int32Matcher mr(right);
- if (mr.HasValue()) {
- if (mr.Value() == 0 || mr.Value() == -1) {
+ if (mr.HasResolvedValue()) {
+ if (mr.ResolvedValue() == 0 || mr.ResolvedValue() == -1) {
return zero;
}
return graph()->NewNode(m->Int32Mod(), left, right, control());
@@ -2672,7 +2667,7 @@ Node* WasmGraphBuilder::BuildWasmCall(const wasm::FunctionSig* sig,
wasm::WasmCodePosition position,
Node* instance_node,
UseRetpoline use_retpoline) {
- auto call_descriptor =
+ CallDescriptor* call_descriptor =
GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline);
const Operator* op = mcgraph()->common()->Call(call_descriptor);
Node* call = BuildCallNode(sig, args, position, instance_node, op);
@@ -2699,7 +2694,7 @@ Node* WasmGraphBuilder::BuildWasmReturnCall(const wasm::FunctionSig* sig,
wasm::WasmCodePosition position,
Node* instance_node,
UseRetpoline use_retpoline) {
- auto call_descriptor =
+ CallDescriptor* call_descriptor =
GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline);
const Operator* op = mcgraph()->common()->TailCall(call_descriptor);
Node* call = BuildCallNode(sig, args, position, instance_node, op);
@@ -2878,7 +2873,7 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
// Bounds check against the table size.
Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, ift_size);
- TrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
+ TrapIfFalse(wasm::kTrapTableOutOfBounds, in_bounds, position);
// Mask the key to prevent SSCA.
if (untrusted_code_mitigations_) {
@@ -2896,20 +2891,27 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
Node* int32_scaled_key = Uint32ToUintptr(
graph()->NewNode(machine->Word32Shl(), key, Int32Constant(2)));
+ Node* loaded_sig = SetEffect(
+ graph()->NewNode(machine->Load(MachineType::Int32()), ift_sig_ids,
+ int32_scaled_key, effect(), control()));
// Check that the dynamic type of the function is a subtype of its static
// (table) type. Currently, the only subtyping between function types is
// $t <: funcref for all $t: function_type.
// TODO(7748): Expand this with function subtyping.
- if (env_->module->tables[table_index].type == wasm::kWasmFuncRef) {
- int32_t expected_sig_id = env_->module->signature_ids[sig_index];
-
- Node* loaded_sig = SetEffect(
- graph()->NewNode(machine->Load(MachineType::Int32()), ift_sig_ids,
- int32_scaled_key, effect(), control()));
- Node* sig_match = graph()->NewNode(machine->WordEqual(), loaded_sig,
+ const bool needs_typechecking =
+ env_->module->tables[table_index].type == wasm::kWasmFuncRef;
+ if (needs_typechecking) {
+ int32_t expected_sig_id = env_->module->canonicalized_type_ids[sig_index];
+ Node* sig_match = graph()->NewNode(machine->Word32Equal(), loaded_sig,
Int32Constant(expected_sig_id));
-
TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
+ } else {
+ // We still have to check that the entry is initialized.
+ // TODO(9495): Skip this check for non-nullable tables when they are
+ // allowed.
+ Node* function_is_null =
+ graph()->NewNode(machine->Word32Equal(), loaded_sig, Int32Constant(-1));
+ TrapIfTrue(wasm::kTrapNullDereference, function_is_null, position);
}
Node* tagged_scaled_key;
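
A scalar sketch of the dispatch-table check assembled above (the flat table layout here is an assumption for illustration): each slot of the indirect function table carries a canonicalized signature id, with -1 marking an uninitialized entry, so non-funcref tables only need the null check while funcref tables compare against the caller's expected id.

    #include <cstdint>
    #include <stdexcept>

    int32_t CheckIndirectCallEntry(const int32_t* ift_sig_ids, uint32_t key,
                                   int32_t expected_sig_id,
                                   bool needs_typecheck) {
      int32_t loaded_sig = ift_sig_ids[key];  // the Load(Int32) hoisted above
      if (needs_typecheck) {
        if (loaded_sig != expected_sig_id)
          throw std::runtime_error("kTrapFuncSigMismatch");
      } else if (loaded_sig == -1) {
        throw std::runtime_error("kTrapNullDereference");  // uninitialized
      }
      return loaded_sig;
    }
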
@@ -2953,10 +2955,9 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
}
}
-Node* WasmGraphBuilder::BuildLoadFunctionDataFromExportedFunction(
- Node* closure) {
+Node* WasmGraphBuilder::BuildLoadFunctionDataFromJSFunction(Node* js_function) {
Node* shared = gasm_->Load(
- MachineType::AnyTagged(), closure,
+ MachineType::AnyTagged(), js_function,
wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction());
return gasm_->Load(MachineType::AnyTagged(), shared,
SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag);
@@ -3001,7 +3002,7 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
const wasm::FunctionSig* sig = env_->module->signature(sig_index);
- Node* function_data = BuildLoadFunctionDataFromExportedFunction(args[0]);
+ Node* function_data = BuildLoadFunctionDataFromJSFunction(args[0]);
Node* is_js_function =
HasInstanceType(gasm_.get(), function_data, WASM_JS_FUNCTION_DATA_TYPE);
@@ -3078,13 +3079,30 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
}
{
- // Call to a WasmJSFunction.
- // The call target is the wasm-to-js wrapper code.
+ // Call to a WasmJSFunction. The call target is
+ // function_data->wasm_to_js_wrapper_code()->instruction_start().
+ // The instance_node is the pair
+ // (current WasmInstanceObject, function_data->callable()).
gasm_->Bind(&js_label);
- // TODO(9495): Implement when the interaction with the type reflection
- // proposal is clear.
- TrapIfTrue(wasm::kTrapWasmJSFunction, gasm_->Int32Constant(1), position);
- gasm_->Goto(&end_label, args[0], RefNull() /* Dummy value */);
+
+ Node* wrapper_code =
+ gasm_->Load(MachineType::TaggedPointer(), function_data,
+ wasm::ObjectAccess::ToTagged(
+ WasmJSFunctionData::kWasmToJsWrapperCodeOffset));
+ Node* call_target = gasm_->IntAdd(
+ wrapper_code,
+ gasm_->IntPtrConstant(wasm::ObjectAccess::ToTagged(Code::kHeaderSize)));
+
+ Node* callable = gasm_->Load(
+ MachineType::TaggedPointer(), function_data,
+ wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset));
+ // TODO(manoskouk): Find an elegant way to avoid allocating this pair for
+ // every call.
+ Node* function_instance_node = CALL_BUILTIN(
+ WasmAllocatePair, instance_node_.get(), callable,
+ LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
+
+ gasm_->Goto(&end_label, call_target, function_instance_node);
}
gasm_->Bind(&end_label);
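
The call target above is derived from a tagged Code pointer. A sketch of that address arithmetic (kHeapObjectTag is 1 in V8; the header size below is a placeholder, not the real Code::kHeaderSize value):

    #include <cstdint>

    constexpr intptr_t kHeapObjectTag = 1;
    constexpr intptr_t kCodeHeaderSize = 64;  // placeholder for illustration

    // ObjectAccess::ToTagged(offset) subtracts the heap-object tag, so adding
    // the adjusted header size to the tagged pointer yields the address of
    // the first instruction, i.e. instruction_start().
    intptr_t InstructionStart(intptr_t tagged_code_pointer) {
      return tagged_code_pointer + (kCodeHeaderSize - kHeapObjectTag);
    }
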
@@ -3158,9 +3176,9 @@ Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
// Implement Rol by Ror since TurboFan does not have Rol opcode.
// TODO(weiliang): support Word32Rol opcode in TurboFan.
Int32Matcher m(right);
- if (m.HasValue()) {
+ if (m.HasResolvedValue()) {
return Binop(wasm::kExprI32Ror, left,
- mcgraph()->Int32Constant(32 - (m.Value() & 0x1F)));
+ mcgraph()->Int32Constant(32 - (m.ResolvedValue() & 0x1F)));
} else {
return Binop(wasm::kExprI32Ror, left,
Binop(wasm::kExprI32Sub, mcgraph()->Int32Constant(32), right));
@@ -3172,8 +3190,8 @@ Node* WasmGraphBuilder::BuildI64Rol(Node* left, Node* right) {
// TODO(weiliang): support Word64Rol opcode in TurboFan.
Int64Matcher m(right);
Node* inv_right =
- m.HasValue()
- ? mcgraph()->Int64Constant(64 - (m.Value() & 0x3F))
+ m.HasResolvedValue()
+ ? mcgraph()->Int64Constant(64 - (m.ResolvedValue() & 0x3F))
: Binop(wasm::kExprI64Sub, mcgraph()->Int64Constant(64), right);
return Binop(wasm::kExprI64Ror, left, inv_right);
}
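
The identity behind both helpers above, checked in standalone C++: rotating left by n equals rotating right by (width - n) mod width, so only a Ror opcode is needed at the machine level.

    #include <cassert>
    #include <cstdint>

    uint32_t Ror32(uint32_t x, uint32_t n) {
      n &= 31;
      return (x >> n) | (x << ((32 - n) & 31));
    }

    uint32_t RolViaRor32(uint32_t x, uint32_t n) {
      return Ror32(x, (32 - n) & 31);
    }

    int main() {
      assert(RolViaRor32(0x80000001u, 1) == 0x00000003u);
      assert(RolViaRor32(0x12345678u, 8) == 0x34567812u);
      return 0;
    }
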
@@ -3183,59 +3201,44 @@ Node* WasmGraphBuilder::Invert(Node* node) {
}
Node* WasmGraphBuilder::BuildTruncateIntPtrToInt32(Node* value) {
- if (mcgraph()->machine()->Is64()) {
- value =
- graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), value);
- }
- return value;
+ return mcgraph()->machine()->Is64() ? gasm_->TruncateInt64ToInt32(value)
+ : value;
}
Node* WasmGraphBuilder::BuildChangeInt32ToIntPtr(Node* value) {
- if (mcgraph()->machine()->Is64()) {
- value = graph()->NewNode(mcgraph()->machine()->ChangeInt32ToInt64(), value);
- }
- return value;
+ return mcgraph()->machine()->Is64() ? gasm_->ChangeInt32ToInt64(value)
+ : value;
}
Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
// With pointer compression, only the lower 32 bits are used.
- if (COMPRESS_POINTERS_BOOL) {
- return graph()->NewNode(mcgraph()->machine()->Word32Shl(), value,
- BuildSmiShiftBitsConstant32());
- }
- value = BuildChangeInt32ToIntPtr(value);
- return graph()->NewNode(mcgraph()->machine()->WordShl(), value,
- BuildSmiShiftBitsConstant());
+ return COMPRESS_POINTERS_BOOL
+ ? gasm_->Word32Shl(value, BuildSmiShiftBitsConstant32())
+ : gasm_->WordShl(BuildChangeInt32ToIntPtr(value),
+ BuildSmiShiftBitsConstant());
}
Node* WasmGraphBuilder::BuildChangeUint31ToSmi(Node* value) {
- if (COMPRESS_POINTERS_BOOL) {
- return graph()->NewNode(mcgraph()->machine()->Word32Shl(), value,
- BuildSmiShiftBitsConstant32());
- }
- return graph()->NewNode(mcgraph()->machine()->WordShl(),
- Uint32ToUintptr(value), BuildSmiShiftBitsConstant());
+ return COMPRESS_POINTERS_BOOL
+ ? gasm_->Word32Shl(value, BuildSmiShiftBitsConstant32())
+ : graph()->NewNode(mcgraph()->machine()->WordShl(),
+ Uint32ToUintptr(value),
+ BuildSmiShiftBitsConstant());
}
Node* WasmGraphBuilder::BuildSmiShiftBitsConstant() {
- return mcgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+ return gasm_->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
Node* WasmGraphBuilder::BuildSmiShiftBitsConstant32() {
- return mcgraph()->Int32Constant(kSmiShiftSize + kSmiTagSize);
+ return gasm_->Int32Constant(kSmiShiftSize + kSmiTagSize);
}
Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) {
- if (COMPRESS_POINTERS_BOOL) {
- value =
- graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), value);
- value = graph()->NewNode(mcgraph()->machine()->Word32Sar(), value,
- BuildSmiShiftBitsConstant32());
- } else {
- value = BuildChangeSmiToIntPtr(value);
- value = BuildTruncateIntPtrToInt32(value);
- }
- return value;
+ return COMPRESS_POINTERS_BOOL
+ ? gasm_->Word32Sar(gasm_->TruncateInt64ToInt32(value),
+ BuildSmiShiftBitsConstant32())
+ : BuildTruncateIntPtrToInt32(BuildChangeSmiToIntPtr(value));
}
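
For reference, the shift amounts used by the helpers above (stated here as assumptions for the usual 64-bit configuration): kSmiTagSize is 1, and kSmiShiftSize is 0 with pointer compression (31-bit payload in the low word) or 31 without it (32-bit payload in the high word). A standalone model of the tag/untag pair:

    #include <cstdint>

    constexpr int kSmiTagSize = 1;
    constexpr int kSmiShiftSize = 31;  // assuming no pointer compression

    int64_t Int32ToSmi(int32_t value) {
      // Shift in the unsigned domain to avoid signed-overflow UB.
      return static_cast<int64_t>(static_cast<uint64_t>(value)
                                  << (kSmiShiftSize + kSmiTagSize));
    }

    int32_t SmiToInt32(int64_t smi) {
      // The arithmetic right shift undoes the tagging shift.
      return static_cast<int32_t>(smi >> (kSmiShiftSize + kSmiTagSize));
    }
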
Node* WasmGraphBuilder::BuildChangeSmiToIntPtr(Node* value) {
@@ -3250,7 +3253,7 @@ Node* WasmGraphBuilder::BuildChangeSmiToIntPtr(Node* value) {
Node* WasmGraphBuilder::BuildConvertUint32ToSmiWithSaturation(Node* value,
uint32_t maxval) {
DCHECK(Smi::IsValid(maxval));
- Node* max = Uint32Constant(maxval);
+ Node* max = mcgraph()->Uint32Constant(maxval);
Node* check = graph()->NewNode(mcgraph()->machine()->Uint32LessThanOrEqual(),
value, max);
Node* valsmi = BuildChangeUint31ToSmi(value);
@@ -3469,13 +3472,12 @@ void WasmGraphBuilder::GetBaseAndOffsetForImportedMutableExternRefGlobal(
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)));
}
-Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
+Node* WasmGraphBuilder::MemBuffer(uintptr_t offset) {
DCHECK_NOT_NULL(instance_cache_);
Node* mem_start = instance_cache_->mem_start;
DCHECK_NOT_NULL(mem_start);
if (offset == 0) return mem_start;
- return graph()->NewNode(mcgraph()->machine()->IntAdd(), mem_start,
- mcgraph()->IntPtrConstant(offset));
+ return gasm_->IntAdd(mem_start, gasm_->UintPtrConstant(offset));
}
Node* WasmGraphBuilder::CurrentMemoryPages() {
@@ -3612,7 +3614,7 @@ Node* WasmGraphBuilder::TableGet(uint32_t table_index, Node* index,
return SetEffectControl(graph()->NewNode(
mcgraph()->common()->Call(call_descriptor), call_target,
- IntPtrConstant(table_index), index, effect(), control()));
+ mcgraph()->IntPtrConstant(table_index), index, effect(), control()));
}
Node* WasmGraphBuilder::TableSet(uint32_t table_index, Node* index, Node* val,
@@ -3624,13 +3626,12 @@ Node* WasmGraphBuilder::TableSet(uint32_t table_index, Node* index, Node* val,
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmTableSet, RelocInfo::WASM_STUB_CALL);
- return SetEffectControl(graph()->NewNode(
- mcgraph()->common()->Call(call_descriptor), call_target,
- IntPtrConstant(table_index), index, val, effect(), control()));
+ return gasm_->Call(call_descriptor, call_target,
+ gasm_->IntPtrConstant(table_index), index, val);
}
Node* WasmGraphBuilder::CheckBoundsAndAlignment(
- uint8_t access_size, Node* index, uint32_t offset,
+ int8_t access_size, Node* index, uint64_t offset,
wasm::WasmCodePosition position) {
// Atomic operations need bounds checks until the backend can emit protected
// loads.
@@ -3639,11 +3640,13 @@ Node* WasmGraphBuilder::CheckBoundsAndAlignment(
const uintptr_t align_mask = access_size - 1;
+ // {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
+ uintptr_t capped_offset = static_cast<uintptr_t>(offset);
// Don't emit an alignment check if the index is a constant.
// TODO(wasm): a constant match is also done above in {BoundsCheckMem}.
UintPtrMatcher match(index);
- if (match.HasValue()) {
- uintptr_t effective_offset = match.Value() + offset;
+ if (match.HasResolvedValue()) {
+ uintptr_t effective_offset = match.ResolvedValue() + capped_offset;
if ((effective_offset & align_mask) != 0) {
// statically known to be unaligned; trap.
TrapIfEq32(wasm::kTrapUnalignedAccess, Int32Constant(0), 0, position);
@@ -3654,15 +3657,12 @@ Node* WasmGraphBuilder::CheckBoundsAndAlignment(
// Unlike regular memory accesses, atomic memory accesses should trap if
// the effective offset is misaligned.
// TODO(wasm): this addition is redundant with one inserted by {MemBuffer}.
- Node* effective_offset = graph()->NewNode(mcgraph()->machine()->IntAdd(),
- MemBuffer(offset), index);
+ Node* effective_offset = gasm_->IntAdd(MemBuffer(capped_offset), index);
- Node* cond = graph()->NewNode(mcgraph()->machine()->WordAnd(),
- effective_offset, IntPtrConstant(align_mask));
+ Node* cond =
+ gasm_->WordAnd(effective_offset, gasm_->IntPtrConstant(align_mask));
TrapIfFalse(wasm::kTrapUnalignedAccess,
- graph()->NewNode(mcgraph()->machine()->Word32Equal(), cond,
- mcgraph()->Int32Constant(0)),
- position);
+ gasm_->Word32Equal(cond, gasm_->Int32Constant(0)), position);
return index;
}
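
A scalar version of the alignment test built above: an access of power-of-two size is aligned iff the low bits of the effective address are zero.

    #include <cstdint>

    bool IsAligned(uintptr_t mem_start, uintptr_t offset, uintptr_t index,
                   uintptr_t access_size) {
      uintptr_t align_mask = access_size - 1;  // access_size must be 2^k
      uintptr_t effective_address = mem_start + offset + index;
      return (effective_address & align_mask) == 0;  // else trap: unaligned
    }
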
@@ -3688,7 +3688,7 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
env_->max_memory_size)) {
// The access will be out of bounds, even for the largest memory.
TrapIfEq32(wasm::kTrapMemOutOfBounds, Int32Constant(0), 0, position);
- return mcgraph()->UintPtrConstant(0);
+ return gasm_->UintPtrConstant(0);
}
uintptr_t end_offset = offset + access_size - 1u;
Node* end_offset_node = mcgraph_->UintPtrConstant(end_offset);
@@ -3702,19 +3702,18 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
// - computing {effective_size} as {mem_size - end_offset} and
// - checking that {index < effective_size}.
- auto m = mcgraph()->machine();
Node* mem_size = instance_cache_->mem_size;
if (end_offset >= env_->min_memory_size) {
// The end offset is larger than the smallest memory.
// Dynamically check the end offset against the dynamic memory size.
- Node* cond = graph()->NewNode(m->UintLessThan(), end_offset_node, mem_size);
+ Node* cond = gasm_->UintLessThan(end_offset_node, mem_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
} else {
// The end offset is smaller than the smallest memory, so only one check is
// required. Check to see if the index is also a constant.
UintPtrMatcher match(index);
- if (match.HasValue()) {
- uintptr_t index_val = match.Value();
+ if (match.HasResolvedValue()) {
+ uintptr_t index_val = match.ResolvedValue();
if (index_val < env_->min_memory_size - end_offset) {
// The input index is a constant and everything is statically within
// bounds of the smallest possible memory.
@@ -3724,18 +3723,17 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
}
// This produces a positive number, since {end_offset < min_size <= mem_size}.
- Node* effective_size =
- graph()->NewNode(m->IntSub(), mem_size, end_offset_node);
+ Node* effective_size = gasm_->IntSub(mem_size, end_offset_node);
// Introduce the actual bounds check.
- Node* cond = graph()->NewNode(m->UintLessThan(), index, effective_size);
+ Node* cond = gasm_->UintLessThan(index, effective_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
if (untrusted_code_mitigations_) {
// In the fallthrough case, condition the index with the memory mask.
Node* mem_mask = instance_cache_->mem_mask;
DCHECK_NOT_NULL(mem_mask);
- index = graph()->NewNode(m->WordAnd(), index, mem_mask);
+ index = gasm_->WordAnd(index, mem_mask);
}
return index;
}
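
A scalar sketch of the single-comparison bounds check above, assuming offset + access_size does not overflow (the callers validate this): with end_offset = offset + access_size - 1, the access [index + offset, index + offset + access_size) lies inside memory iff end_offset < mem_size and index < mem_size - end_offset.

    #include <cstdint>

    bool AccessInBounds(uintptr_t index, uintptr_t offset,
                        uintptr_t access_size, uintptr_t mem_size) {
      uintptr_t end_offset = offset + access_size - 1;
      if (end_offset >= mem_size) return false;  // skipped if statically safe
      uintptr_t effective_size = mem_size - end_offset;  // positive by above
      return index < effective_size;
    }
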
@@ -3828,20 +3826,20 @@ Node* WasmGraphBuilder::TraceFunctionExit(Vector<Node*> vals,
Node* WasmGraphBuilder::TraceMemoryOperation(bool is_store,
MachineRepresentation rep,
- Node* index, uint32_t offset,
+ Node* index, uintptr_t offset,
wasm::WasmCodePosition position) {
int kAlign = 4; // Ensure that the LSB is 0, such that this looks like a Smi.
TNode<RawPtrT> info =
gasm_->StackSlot(sizeof(wasm::MemoryTracingInfo), kAlign);
- Node* address = gasm_->Int32Add(Int32Constant(offset), index);
- auto store = [&](int offset, MachineRepresentation rep, Node* data) {
+ Node* effective_offset = gasm_->IntAdd(gasm_->UintPtrConstant(offset), index);
+ auto store = [&](int field_offset, MachineRepresentation rep, Node* data) {
gasm_->Store(StoreRepresentation(rep, kNoWriteBarrier), info,
- gasm_->Int32Constant(offset), data);
+ gasm_->Int32Constant(field_offset), data);
};
- // Store address, is_store, and mem_rep.
- store(offsetof(wasm::MemoryTracingInfo, address),
- MachineRepresentation::kWord32, address);
+ // Store effective_offset, is_store, and mem_rep.
+ store(offsetof(wasm::MemoryTracingInfo, offset),
+ MachineType::PointerRepresentation(), effective_offset);
store(offsetof(wasm::MemoryTracingInfo, is_store),
MachineRepresentation::kWord8,
mcgraph()->Int32Constant(is_store ? 1 : 0));
@@ -3862,55 +3860,56 @@ LoadTransformation GetLoadTransformation(
switch (transform) {
case wasm::LoadTransformationKind::kSplat: {
if (memtype == MachineType::Int8()) {
- return LoadTransformation::kS8x16LoadSplat;
+ return LoadTransformation::kS128Load8Splat;
} else if (memtype == MachineType::Int16()) {
- return LoadTransformation::kS16x8LoadSplat;
+ return LoadTransformation::kS128Load16Splat;
} else if (memtype == MachineType::Int32()) {
- return LoadTransformation::kS32x4LoadSplat;
+ return LoadTransformation::kS128Load32Splat;
} else if (memtype == MachineType::Int64()) {
- return LoadTransformation::kS64x2LoadSplat;
+ return LoadTransformation::kS128Load64Splat;
}
break;
}
case wasm::LoadTransformationKind::kExtend: {
if (memtype == MachineType::Int8()) {
- return LoadTransformation::kI16x8Load8x8S;
+ return LoadTransformation::kS128Load8x8S;
} else if (memtype == MachineType::Uint8()) {
- return LoadTransformation::kI16x8Load8x8U;
+ return LoadTransformation::kS128Load8x8U;
} else if (memtype == MachineType::Int16()) {
- return LoadTransformation::kI32x4Load16x4S;
+ return LoadTransformation::kS128Load16x4S;
} else if (memtype == MachineType::Uint16()) {
- return LoadTransformation::kI32x4Load16x4U;
+ return LoadTransformation::kS128Load16x4U;
} else if (memtype == MachineType::Int32()) {
- return LoadTransformation::kI64x2Load32x2S;
+ return LoadTransformation::kS128Load32x2S;
} else if (memtype == MachineType::Uint32()) {
- return LoadTransformation::kI64x2Load32x2U;
+ return LoadTransformation::kS128Load32x2U;
}
break;
}
case wasm::LoadTransformationKind::kZeroExtend: {
if (memtype == MachineType::Int32()) {
- return LoadTransformation::kS128LoadMem32Zero;
+ return LoadTransformation::kS128Load32Zero;
} else if (memtype == MachineType::Int64()) {
- return LoadTransformation::kS128LoadMem64Zero;
+ return LoadTransformation::kS128Load64Zero;
}
+ break;
}
}
UNREACHABLE();
}
-LoadKind GetLoadKind(MachineGraph* mcgraph, MachineType memtype,
- bool use_trap_handler) {
+MemoryAccessKind GetMemoryAccessKind(MachineGraph* mcgraph, MachineType memtype,
+ bool use_trap_handler) {
if (memtype.representation() == MachineRepresentation::kWord8 ||
mcgraph->machine()->UnalignedLoadSupported(memtype.representation())) {
if (use_trap_handler) {
- return LoadKind::kProtected;
+ return MemoryAccessKind::kProtected;
}
- return LoadKind::kNormal;
+ return MemoryAccessKind::kNormal;
}
// TODO(eholk): Support unaligned loads with trap handlers.
DCHECK(!use_trap_handler);
- return LoadKind::kUnaligned;
+ return MemoryAccessKind::kUnaligned;
}
} // namespace
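
The selection logic above as a standalone decision function: byte-sized accesses are trivially aligned, wider ones need machine support for unaligned access, and the trap handler upgrades a checkable access to a protected one (out-of-bounds is then caught by a signal handler rather than explicit code).

    enum class MemoryAccessKind { kNormal, kUnaligned, kProtected };

    MemoryAccessKind SelectAccessKind(bool is_byte_access,
                                      bool unaligned_supported,
                                      bool use_trap_handler) {
      if (is_byte_access || unaligned_supported) {
        return use_trap_handler ? MemoryAccessKind::kProtected
                                : MemoryAccessKind::kNormal;
      }
      // Unaligned access with trap handlers is unsupported (see the DCHECK).
      return MemoryAccessKind::kUnaligned;
    }
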
@@ -3920,7 +3919,7 @@ LoadKind GetLoadKind(MachineGraph* mcgraph, MachineType memtype,
#if defined(V8_TARGET_BIG_ENDIAN) || defined(V8_TARGET_ARCH_S390_LE_SIM)
Node* WasmGraphBuilder::LoadTransformBigEndian(
wasm::ValueType type, MachineType memtype,
- wasm::LoadTransformationKind transform, Node* index, uint32_t offset,
+ wasm::LoadTransformationKind transform, Node* index, uint64_t offset,
uint32_t alignment, wasm::WasmCodePosition position) {
#define LOAD_EXTEND(num_lanes, bytes_per_load, replace_lane) \
result = graph()->NewNode(mcgraph()->machine()->S128Zero()); \
@@ -3946,41 +3945,55 @@ Node* WasmGraphBuilder::LoadTransformBigEndian(
LoadTransformation transformation = GetLoadTransformation(memtype, transform);
switch (transformation) {
- case LoadTransformation::kS8x16LoadSplat: {
+ case LoadTransformation::kS128Load8Splat: {
result = LoadMem(type, memtype, index, offset, alignment, position);
result = graph()->NewNode(mcgraph()->machine()->I8x16Splat(), result);
break;
}
- case LoadTransformation::kI16x8Load8x8S:
- case LoadTransformation::kI16x8Load8x8U: {
+ case LoadTransformation::kS128Load8x8S:
+ case LoadTransformation::kS128Load8x8U: {
LOAD_EXTEND(8, 1, I16x8ReplaceLane)
break;
}
- case LoadTransformation::kS16x8LoadSplat: {
+ case LoadTransformation::kS128Load16Splat: {
result = LoadMem(type, memtype, index, offset, alignment, position);
result = graph()->NewNode(mcgraph()->machine()->I16x8Splat(), result);
break;
}
- case LoadTransformation::kI32x4Load16x4S:
- case LoadTransformation::kI32x4Load16x4U: {
+ case LoadTransformation::kS128Load16x4S:
+ case LoadTransformation::kS128Load16x4U: {
LOAD_EXTEND(4, 2, I32x4ReplaceLane)
break;
}
- case LoadTransformation::kS32x4LoadSplat: {
+ case LoadTransformation::kS128Load32Splat: {
result = LoadMem(type, memtype, index, offset, alignment, position);
result = graph()->NewNode(mcgraph()->machine()->I32x4Splat(), result);
break;
}
- case LoadTransformation::kI64x2Load32x2S:
- case LoadTransformation::kI64x2Load32x2U: {
+ case LoadTransformation::kS128Load32x2S:
+ case LoadTransformation::kS128Load32x2U: {
LOAD_EXTEND(2, 4, I64x2ReplaceLane)
break;
}
- case LoadTransformation::kS64x2LoadSplat: {
+ case LoadTransformation::kS128Load64Splat: {
result = LoadMem(type, memtype, index, offset, alignment, position);
result = graph()->NewNode(mcgraph()->machine()->I64x2Splat(), result);
break;
}
+ case LoadTransformation::kS128Load32Zero: {
+ result = graph()->NewNode(mcgraph()->machine()->S128Zero());
+ result = graph()->NewNode(
+ mcgraph()->machine()->I32x4ReplaceLane(0), result,
+ LoadMem(type, memtype, index, offset, alignment, position));
+ break;
+ }
+ case LoadTransformation::kS128Load64Zero: {
+ result = graph()->NewNode(mcgraph()->machine()->S128Zero());
+ result = graph()->NewNode(
+ mcgraph()->machine()->I64x2ReplaceLane(0), result,
+ LoadMem(type, memtype, index, offset, alignment, position));
+ break;
+ }
default:
UNREACHABLE();
}
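
A scalar model of the kS128Load32Zero case added above (ignoring the byte-order conversion that LoadMem performs on big-endian targets): the result is an all-zero vector whose lane 0 receives the loaded 32-bit value.

    #include <array>
    #include <cstdint>
    #include <cstring>

    std::array<uint32_t, 4> S128Load32Zero(const uint8_t* mem) {
      std::array<uint32_t, 4> result = {0, 0, 0, 0};   // S128Zero
      std::memcpy(&result[0], mem, sizeof(uint32_t));  // I32x4ReplaceLane(0)
      return result;
    }
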
@@ -3990,14 +4003,44 @@ Node* WasmGraphBuilder::LoadTransformBigEndian(
}
#endif
+Node* WasmGraphBuilder::LoadLane(MachineType memtype, Node* value, Node* index,
+ uint32_t offset, uint8_t laneidx,
+ wasm::WasmCodePosition position) {
+ has_simd_ = true;
+ Node* load;
+ uint8_t access_size = memtype.MemSize();
+ index =
+ BoundsCheckMem(access_size, index, offset, position, kCanOmitBoundsCheck);
+
+ MemoryAccessKind load_kind =
+ GetMemoryAccessKind(mcgraph(), memtype, use_trap_handler());
+
+ load = SetEffect(graph()->NewNode(
+ mcgraph()->machine()->LoadLane(load_kind, memtype, laneidx),
+ MemBuffer(offset), index, value, effect(), control()));
+
+ if (load_kind == MemoryAccessKind::kProtected) {
+ SetSourcePosition(load, position);
+ }
+
+ if (FLAG_trace_wasm_memory) {
+ TraceMemoryOperation(false, memtype.representation(), index, offset,
+ position);
+ }
+
+ return load;
+}
+
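
LoadLane above has the complementary shape: the input vector passes through unchanged except for lane laneidx, which is replaced by the loaded scalar. A one-size scalar model:

    #include <array>
    #include <cstdint>
    #include <cstring>

    std::array<uint32_t, 4> LoadLane32(std::array<uint32_t, 4> value,
                                       const uint8_t* mem, uint8_t laneidx) {
      std::memcpy(&value[laneidx], mem, sizeof(uint32_t));
      return value;
    }
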
Node* WasmGraphBuilder::LoadTransform(wasm::ValueType type, MachineType memtype,
wasm::LoadTransformationKind transform,
- Node* index, uint32_t offset,
+ Node* index, uint64_t offset,
uint32_t alignment,
wasm::WasmCodePosition position) {
has_simd_ = true;
Node* load;
+ // {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
+ uintptr_t capped_offset = static_cast<uintptr_t>(offset);
#if defined(V8_TARGET_BIG_ENDIAN) || defined(V8_TARGET_ARCH_S390_LE_SIM)
// LoadTransform cannot efficiently be executed on BE machines as a
@@ -4005,7 +4048,7 @@ Node* WasmGraphBuilder::LoadTransform(wasm::ValueType type, MachineType memtype,
// therefore we divide them into separate "load" and "operation" nodes.
load = LoadTransformBigEndian(type, memtype, transform, index, offset,
alignment, position);
- USE(GetLoadKind);
+ USE(GetMemoryAccessKind);
#else
// Wasm semantics throw on OOB. Introduce explicit bounds check and
// conditioning when not using the trap handler.
@@ -4018,26 +4061,27 @@ Node* WasmGraphBuilder::LoadTransform(wasm::ValueType type, MachineType memtype,
BoundsCheckMem(access_size, index, offset, position, kCanOmitBoundsCheck);
LoadTransformation transformation = GetLoadTransformation(memtype, transform);
- LoadKind load_kind = GetLoadKind(mcgraph(), memtype, use_trap_handler());
+ MemoryAccessKind load_kind =
+ GetMemoryAccessKind(mcgraph(), memtype, use_trap_handler());
load = SetEffect(graph()->NewNode(
mcgraph()->machine()->LoadTransform(load_kind, transformation),
- MemBuffer(offset), index, effect(), control()));
+ MemBuffer(capped_offset), index, effect(), control()));
- if (load_kind == LoadKind::kProtected) {
+ if (load_kind == MemoryAccessKind::kProtected) {
SetSourcePosition(load, position);
}
#endif
if (FLAG_trace_wasm_memory) {
- TraceMemoryOperation(false, memtype.representation(), index, offset,
+ TraceMemoryOperation(false, memtype.representation(), index, capped_offset,
position);
}
return load;
}
Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
- Node* index, uint32_t offset,
+ Node* index, uint64_t offset,
uint32_t alignment,
wasm::WasmCodePosition position) {
Node* load;
@@ -4051,25 +4095,22 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
index = BoundsCheckMem(memtype.MemSize(), index, offset, position,
kCanOmitBoundsCheck);
+ // {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
+ uintptr_t capped_offset = static_cast<uintptr_t>(offset);
if (memtype.representation() == MachineRepresentation::kWord8 ||
mcgraph()->machine()->UnalignedLoadSupported(memtype.representation())) {
if (use_trap_handler()) {
- load = graph()->NewNode(mcgraph()->machine()->ProtectedLoad(memtype),
- MemBuffer(offset), index, effect(), control());
+ load = gasm_->ProtectedLoad(memtype, MemBuffer(capped_offset), index);
SetSourcePosition(load, position);
} else {
- load = graph()->NewNode(mcgraph()->machine()->Load(memtype),
- MemBuffer(offset), index, effect(), control());
+ load = gasm_->Load(memtype, MemBuffer(capped_offset), index);
}
} else {
// TODO(eholk): Support unaligned loads with trap handlers.
DCHECK(!use_trap_handler());
- load = graph()->NewNode(mcgraph()->machine()->UnalignedLoad(memtype),
- MemBuffer(offset), index, effect(), control());
+ load = gasm_->LoadUnaligned(memtype, MemBuffer(capped_offset), index);
}
- SetEffect(load);
-
#if defined(V8_TARGET_BIG_ENDIAN)
load = BuildChangeEndiannessLoad(load, memtype, type);
#endif
@@ -4077,26 +4118,53 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
if (type == wasm::kWasmI64 &&
ElementSizeInBytes(memtype.representation()) < 8) {
// TODO(titzer): TF zeroes the upper bits of 64-bit loads for subword sizes.
- if (memtype.IsSigned()) {
- // sign extend
- load = graph()->NewNode(mcgraph()->machine()->ChangeInt32ToInt64(), load);
- } else {
- // zero extend
- load =
- graph()->NewNode(mcgraph()->machine()->ChangeUint32ToUint64(), load);
- }
+ load = memtype.IsSigned()
+ ? gasm_->ChangeInt32ToInt64(load) // sign extend
+ : gasm_->ChangeUint32ToUint64(load); // zero extend
}
if (FLAG_trace_wasm_memory) {
- TraceMemoryOperation(false, memtype.representation(), index, offset,
+ TraceMemoryOperation(false, memtype.representation(), index, capped_offset,
position);
}
return load;
}
+Node* WasmGraphBuilder::StoreLane(MachineRepresentation mem_rep, Node* index,
+ uint32_t offset, uint32_t alignment,
+ Node* val, uint8_t laneidx,
+ wasm::WasmCodePosition position,
+ wasm::ValueType type) {
+ Node* store;
+ has_simd_ = true;
+ index = BoundsCheckMem(i::ElementSizeInBytes(mem_rep), index, offset,
+ position, kCanOmitBoundsCheck);
+
+ MachineType memtype = MachineType(mem_rep, MachineSemantic::kNone);
+ MemoryAccessKind load_kind =
+ GetMemoryAccessKind(mcgraph(), memtype, use_trap_handler());
+
+ // {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
+ uintptr_t capped_offset = static_cast<uintptr_t>(offset);
+
+ store = SetEffect(graph()->NewNode(
+ mcgraph()->machine()->StoreLane(load_kind, mem_rep, laneidx),
+ MemBuffer(capped_offset), index, val, effect(), control()));
+
+ if (load_kind == MemoryAccessKind::kProtected) {
+ SetSourcePosition(store, position);
+ }
+
+ if (FLAG_trace_wasm_memory) {
+ TraceMemoryOperation(true, mem_rep, index, capped_offset, position);
+ }
+
+ return store;
+}
+
Node* WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
- uint32_t offset, uint32_t alignment, Node* val,
+ uint64_t offset, uint32_t alignment, Node* val,
wasm::WasmCodePosition position,
wasm::ValueType type) {
Node* store;
@@ -4112,32 +4180,27 @@ Node* WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
val = BuildChangeEndiannessStore(val, mem_rep, type);
#endif
+ // {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
+ uintptr_t capped_offset = static_cast<uintptr_t>(offset);
if (mem_rep == MachineRepresentation::kWord8 ||
mcgraph()->machine()->UnalignedStoreSupported(mem_rep)) {
if (use_trap_handler()) {
store =
- graph()->NewNode(mcgraph()->machine()->ProtectedStore(mem_rep),
- MemBuffer(offset), index, val, effect(), control());
+ gasm_->ProtectedStore(mem_rep, MemBuffer(capped_offset), index, val);
SetSourcePosition(store, position);
} else {
- StoreRepresentation rep(mem_rep, kNoWriteBarrier);
- store =
- graph()->NewNode(mcgraph()->machine()->Store(rep), MemBuffer(offset),
- index, val, effect(), control());
+ store = gasm_->Store(StoreRepresentation{mem_rep, kNoWriteBarrier},
+ MemBuffer(capped_offset), index, val);
}
} else {
// TODO(eholk): Support unaligned stores with trap handlers.
DCHECK(!use_trap_handler());
UnalignedStoreRepresentation rep(mem_rep);
- store =
- graph()->NewNode(mcgraph()->machine()->UnalignedStore(rep),
- MemBuffer(offset), index, val, effect(), control());
+ store = gasm_->StoreUnaligned(rep, MemBuffer(capped_offset), index, val);
}
- SetEffect(store);
-
if (FLAG_trace_wasm_memory) {
- TraceMemoryOperation(true, mem_rep, index, offset, position);
+ TraceMemoryOperation(true, mem_rep, index, capped_offset, position);
}
return store;
@@ -4200,8 +4263,8 @@ Node* WasmGraphBuilder::Uint32ToUintptr(Node* node) {
if (mcgraph()->machine()->Is32()) return node;
// Fold instances of ChangeUint32ToUint64(IntConstant) directly.
Uint32Matcher matcher(node);
- if (matcher.HasValue()) {
- uintptr_t value = matcher.Value();
+ if (matcher.HasResolvedValue()) {
+ uintptr_t value = matcher.ResolvedValue();
return mcgraph()->IntPtrConstant(bit_cast<intptr_t>(value));
}
return graph()->NewNode(mcgraph()->machine()->ChangeUint32ToUint64(), node);
@@ -4573,6 +4636,20 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
return graph()->NewNode(mcgraph()->machine()->I64x2Splat(), inputs[0]);
case wasm::kExprI64x2Neg:
return graph()->NewNode(mcgraph()->machine()->I64x2Neg(), inputs[0]);
+ case wasm::kExprI64x2SConvertI32x4Low:
+ return graph()->NewNode(mcgraph()->machine()->I64x2SConvertI32x4Low(),
+ inputs[0]);
+ case wasm::kExprI64x2SConvertI32x4High:
+ return graph()->NewNode(mcgraph()->machine()->I64x2SConvertI32x4High(),
+ inputs[0]);
+ case wasm::kExprI64x2UConvertI32x4Low:
+ return graph()->NewNode(mcgraph()->machine()->I64x2UConvertI32x4Low(),
+ inputs[0]);
+ case wasm::kExprI64x2UConvertI32x4High:
+ return graph()->NewNode(mcgraph()->machine()->I64x2UConvertI32x4High(),
+ inputs[0]);
+ case wasm::kExprI64x2BitMask:
+ return graph()->NewNode(mcgraph()->machine()->I64x2BitMask(), inputs[0]);
case wasm::kExprI64x2Shl:
return graph()->NewNode(mcgraph()->machine()->I64x2Shl(), inputs[0],
inputs[1]);
@@ -4588,51 +4665,27 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI64x2Mul:
return graph()->NewNode(mcgraph()->machine()->I64x2Mul(), inputs[0],
inputs[1]);
- case wasm::kExprI64x2MinS:
- return graph()->NewNode(mcgraph()->machine()->I64x2MinS(), inputs[0],
- inputs[1]);
- case wasm::kExprI64x2MaxS:
- return graph()->NewNode(mcgraph()->machine()->I64x2MaxS(), inputs[0],
- inputs[1]);
case wasm::kExprI64x2Eq:
return graph()->NewNode(mcgraph()->machine()->I64x2Eq(), inputs[0],
inputs[1]);
- case wasm::kExprI64x2Ne:
- return graph()->NewNode(mcgraph()->machine()->I64x2Ne(), inputs[0],
- inputs[1]);
- case wasm::kExprI64x2LtS:
- return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[1],
- inputs[0]);
- case wasm::kExprI64x2LeS:
- return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[1],
- inputs[0]);
- case wasm::kExprI64x2GtS:
- return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[0],
- inputs[1]);
- case wasm::kExprI64x2GeS:
- return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[0],
- inputs[1]);
case wasm::kExprI64x2ShrU:
return graph()->NewNode(mcgraph()->machine()->I64x2ShrU(), inputs[0],
inputs[1]);
- case wasm::kExprI64x2MinU:
- return graph()->NewNode(mcgraph()->machine()->I64x2MinU(), inputs[0],
- inputs[1]);
- case wasm::kExprI64x2MaxU:
- return graph()->NewNode(mcgraph()->machine()->I64x2MaxU(), inputs[0],
- inputs[1]);
- case wasm::kExprI64x2LtU:
- return graph()->NewNode(mcgraph()->machine()->I64x2GtU(), inputs[1],
- inputs[0]);
- case wasm::kExprI64x2LeU:
- return graph()->NewNode(mcgraph()->machine()->I64x2GeU(), inputs[1],
- inputs[0]);
- case wasm::kExprI64x2GtU:
- return graph()->NewNode(mcgraph()->machine()->I64x2GtU(), inputs[0],
- inputs[1]);
- case wasm::kExprI64x2GeU:
- return graph()->NewNode(mcgraph()->machine()->I64x2GeU(), inputs[0],
- inputs[1]);
+ case wasm::kExprI64x2ExtMulLowI32x4S:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ExtMulLowI32x4S(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI64x2ExtMulHighI32x4S:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ExtMulHighI32x4S(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI64x2ExtMulLowI32x4U:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ExtMulLowI32x4U(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI64x2ExtMulHighI32x4U:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ExtMulHighI32x4U(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI64x2SignSelect:
+ return graph()->NewNode(mcgraph()->machine()->I64x2SignSelect(),
+ inputs[0], inputs[1], inputs[2]);
case wasm::kExprI32x4Splat:
return graph()->NewNode(mcgraph()->machine()->I32x4Splat(), inputs[0]);
case wasm::kExprI32x4SConvertF32x4:
@@ -4725,6 +4778,27 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI32x4DotI16x8S:
return graph()->NewNode(mcgraph()->machine()->I32x4DotI16x8S(), inputs[0],
inputs[1]);
+ case wasm::kExprI32x4ExtMulLowI16x8S:
+ return graph()->NewNode(mcgraph()->machine()->I32x4ExtMulLowI16x8S(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI32x4ExtMulHighI16x8S:
+ return graph()->NewNode(mcgraph()->machine()->I32x4ExtMulHighI16x8S(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI32x4ExtMulLowI16x8U:
+ return graph()->NewNode(mcgraph()->machine()->I32x4ExtMulLowI16x8U(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI32x4ExtMulHighI16x8U:
+ return graph()->NewNode(mcgraph()->machine()->I32x4ExtMulHighI16x8U(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI32x4SignSelect:
+ return graph()->NewNode(mcgraph()->machine()->I32x4SignSelect(),
+ inputs[0], inputs[1], inputs[2]);
+ case wasm::kExprI32x4ExtAddPairwiseI16x8S:
+ return graph()->NewNode(mcgraph()->machine()->I32x4ExtAddPairwiseI16x8S(),
+ inputs[0]);
+ case wasm::kExprI32x4ExtAddPairwiseI16x8U:
+ return graph()->NewNode(mcgraph()->machine()->I32x4ExtAddPairwiseI16x8U(),
+ inputs[0]);
case wasm::kExprI16x8Splat:
return graph()->NewNode(mcgraph()->machine()->I16x8Splat(), inputs[0]);
case wasm::kExprI16x8SConvertI8x16Low:
@@ -4747,18 +4821,18 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI16x8Add:
return graph()->NewNode(mcgraph()->machine()->I16x8Add(), inputs[0],
inputs[1]);
- case wasm::kExprI16x8AddSaturateS:
- return graph()->NewNode(mcgraph()->machine()->I16x8AddSaturateS(),
- inputs[0], inputs[1]);
+ case wasm::kExprI16x8AddSatS:
+ return graph()->NewNode(mcgraph()->machine()->I16x8AddSatS(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8AddHoriz:
return graph()->NewNode(mcgraph()->machine()->I16x8AddHoriz(), inputs[0],
inputs[1]);
case wasm::kExprI16x8Sub:
return graph()->NewNode(mcgraph()->machine()->I16x8Sub(), inputs[0],
inputs[1]);
- case wasm::kExprI16x8SubSaturateS:
- return graph()->NewNode(mcgraph()->machine()->I16x8SubSaturateS(),
- inputs[0], inputs[1]);
+ case wasm::kExprI16x8SubSatS:
+ return graph()->NewNode(mcgraph()->machine()->I16x8SubSatS(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8Mul:
return graph()->NewNode(mcgraph()->machine()->I16x8Mul(), inputs[0],
inputs[1]);
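
The renames above (AddSaturateS to AddSatS, and so on) appear to track updated SIMD proposal opcode names; the semantics stay lane-wise saturating arithmetic. A one-lane scalar model:

    #include <algorithm>
    #include <cstdint>

    int16_t AddSatS16(int16_t a, int16_t b) {
      int32_t sum = int32_t{a} + int32_t{b};  // widen, then clamp to int16
      return static_cast<int16_t>(
          std::clamp(sum, int32_t{INT16_MIN}, int32_t{INT16_MAX}));
    }
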
@@ -4798,12 +4872,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI16x8ShrU:
return graph()->NewNode(mcgraph()->machine()->I16x8ShrU(), inputs[0],
inputs[1]);
- case wasm::kExprI16x8AddSaturateU:
- return graph()->NewNode(mcgraph()->machine()->I16x8AddSaturateU(),
- inputs[0], inputs[1]);
- case wasm::kExprI16x8SubSaturateU:
- return graph()->NewNode(mcgraph()->machine()->I16x8SubSaturateU(),
- inputs[0], inputs[1]);
+ case wasm::kExprI16x8AddSatU:
+ return graph()->NewNode(mcgraph()->machine()->I16x8AddSatU(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI16x8SubSatU:
+ return graph()->NewNode(mcgraph()->machine()->I16x8SubSatU(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8MinU:
return graph()->NewNode(mcgraph()->machine()->I16x8MinU(), inputs[0],
inputs[1]);
@@ -4825,10 +4899,34 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI16x8RoundingAverageU:
return graph()->NewNode(mcgraph()->machine()->I16x8RoundingAverageU(),
inputs[0], inputs[1]);
+ case wasm::kExprI16x8Q15MulRSatS:
+ return graph()->NewNode(mcgraph()->machine()->I16x8Q15MulRSatS(),
+ inputs[0], inputs[1]);
case wasm::kExprI16x8Abs:
return graph()->NewNode(mcgraph()->machine()->I16x8Abs(), inputs[0]);
case wasm::kExprI16x8BitMask:
return graph()->NewNode(mcgraph()->machine()->I16x8BitMask(), inputs[0]);
+ case wasm::kExprI16x8ExtMulLowI8x16S:
+ return graph()->NewNode(mcgraph()->machine()->I16x8ExtMulLowI8x16S(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI16x8ExtMulHighI8x16S:
+ return graph()->NewNode(mcgraph()->machine()->I16x8ExtMulHighI8x16S(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI16x8ExtMulLowI8x16U:
+ return graph()->NewNode(mcgraph()->machine()->I16x8ExtMulLowI8x16U(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI16x8ExtMulHighI8x16U:
+ return graph()->NewNode(mcgraph()->machine()->I16x8ExtMulHighI8x16U(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI16x8SignSelect:
+ return graph()->NewNode(mcgraph()->machine()->I16x8SignSelect(),
+ inputs[0], inputs[1], inputs[2]);
+ case wasm::kExprI16x8ExtAddPairwiseI8x16S:
+ return graph()->NewNode(mcgraph()->machine()->I16x8ExtAddPairwiseI8x16S(),
+ inputs[0]);
+ case wasm::kExprI16x8ExtAddPairwiseI8x16U:
+ return graph()->NewNode(mcgraph()->machine()->I16x8ExtAddPairwiseI8x16U(),
+ inputs[0]);
case wasm::kExprI8x16Splat:
return graph()->NewNode(mcgraph()->machine()->I8x16Splat(), inputs[0]);
case wasm::kExprI8x16Neg:
@@ -4845,15 +4943,15 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI8x16Add:
return graph()->NewNode(mcgraph()->machine()->I8x16Add(), inputs[0],
inputs[1]);
- case wasm::kExprI8x16AddSaturateS:
- return graph()->NewNode(mcgraph()->machine()->I8x16AddSaturateS(),
- inputs[0], inputs[1]);
+ case wasm::kExprI8x16AddSatS:
+ return graph()->NewNode(mcgraph()->machine()->I8x16AddSatS(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16Sub:
return graph()->NewNode(mcgraph()->machine()->I8x16Sub(), inputs[0],
inputs[1]);
- case wasm::kExprI8x16SubSaturateS:
- return graph()->NewNode(mcgraph()->machine()->I8x16SubSaturateS(),
- inputs[0], inputs[1]);
+ case wasm::kExprI8x16SubSatS:
+ return graph()->NewNode(mcgraph()->machine()->I8x16SubSatS(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16Mul:
return graph()->NewNode(mcgraph()->machine()->I8x16Mul(), inputs[0],
inputs[1]);
@@ -4887,12 +4985,12 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI8x16UConvertI16x8:
return graph()->NewNode(mcgraph()->machine()->I8x16UConvertI16x8(),
inputs[0], inputs[1]);
- case wasm::kExprI8x16AddSaturateU:
- return graph()->NewNode(mcgraph()->machine()->I8x16AddSaturateU(),
- inputs[0], inputs[1]);
- case wasm::kExprI8x16SubSaturateU:
- return graph()->NewNode(mcgraph()->machine()->I8x16SubSaturateU(),
- inputs[0], inputs[1]);
+ case wasm::kExprI8x16AddSatU:
+ return graph()->NewNode(mcgraph()->machine()->I8x16AddSatU(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI8x16SubSatU:
+ return graph()->NewNode(mcgraph()->machine()->I8x16SubSatU(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16MinU:
return graph()->NewNode(mcgraph()->machine()->I8x16MinU(), inputs[0],
inputs[1]);
@@ -4914,10 +5012,15 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprI8x16RoundingAverageU:
return graph()->NewNode(mcgraph()->machine()->I8x16RoundingAverageU(),
inputs[0], inputs[1]);
+ case wasm::kExprI8x16Popcnt:
+ return graph()->NewNode(mcgraph()->machine()->I8x16Popcnt(), inputs[0]);
case wasm::kExprI8x16Abs:
return graph()->NewNode(mcgraph()->machine()->I8x16Abs(), inputs[0]);
case wasm::kExprI8x16BitMask:
return graph()->NewNode(mcgraph()->machine()->I8x16BitMask(), inputs[0]);
+ case wasm::kExprI8x16SignSelect:
+ return graph()->NewNode(mcgraph()->machine()->I8x16SignSelect(),
+ inputs[0], inputs[1], inputs[2]);
case wasm::kExprS128And:
return graph()->NewNode(mcgraph()->machine()->S128And(), inputs[0],
inputs[1]);
@@ -4935,10 +5038,6 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprS128AndNot:
return graph()->NewNode(mcgraph()->machine()->S128AndNot(), inputs[0],
inputs[1]);
- case wasm::kExprV64x2AnyTrue:
- return graph()->NewNode(mcgraph()->machine()->V64x2AnyTrue(), inputs[0]);
- case wasm::kExprV64x2AllTrue:
- return graph()->NewNode(mcgraph()->machine()->V64x2AllTrue(), inputs[0]);
case wasm::kExprV32x4AnyTrue:
return graph()->NewNode(mcgraph()->machine()->V32x4AnyTrue(), inputs[0]);
case wasm::kExprV32x4AllTrue:
@@ -5017,158 +5116,176 @@ Node* WasmGraphBuilder::Simd8x16ShuffleOp(const uint8_t shuffle[16],
inputs[0], inputs[1]);
}
-#define ATOMIC_BINOP_LIST(V) \
- V(I32AtomicAdd, Add, Uint32, Word32) \
- V(I64AtomicAdd, Add, Uint64, Word64) \
- V(I32AtomicAdd8U, Add, Uint8, Word32) \
- V(I32AtomicAdd16U, Add, Uint16, Word32) \
- V(I64AtomicAdd8U, Add, Uint8, Word64) \
- V(I64AtomicAdd16U, Add, Uint16, Word64) \
- V(I64AtomicAdd32U, Add, Uint32, Word64) \
- V(I32AtomicSub, Sub, Uint32, Word32) \
- V(I64AtomicSub, Sub, Uint64, Word64) \
- V(I32AtomicSub8U, Sub, Uint8, Word32) \
- V(I32AtomicSub16U, Sub, Uint16, Word32) \
- V(I64AtomicSub8U, Sub, Uint8, Word64) \
- V(I64AtomicSub16U, Sub, Uint16, Word64) \
- V(I64AtomicSub32U, Sub, Uint32, Word64) \
- V(I32AtomicAnd, And, Uint32, Word32) \
- V(I64AtomicAnd, And, Uint64, Word64) \
- V(I32AtomicAnd8U, And, Uint8, Word32) \
- V(I64AtomicAnd16U, And, Uint16, Word64) \
- V(I32AtomicAnd16U, And, Uint16, Word32) \
- V(I64AtomicAnd8U, And, Uint8, Word64) \
- V(I64AtomicAnd32U, And, Uint32, Word64) \
- V(I32AtomicOr, Or, Uint32, Word32) \
- V(I64AtomicOr, Or, Uint64, Word64) \
- V(I32AtomicOr8U, Or, Uint8, Word32) \
- V(I32AtomicOr16U, Or, Uint16, Word32) \
- V(I64AtomicOr8U, Or, Uint8, Word64) \
- V(I64AtomicOr16U, Or, Uint16, Word64) \
- V(I64AtomicOr32U, Or, Uint32, Word64) \
- V(I32AtomicXor, Xor, Uint32, Word32) \
- V(I64AtomicXor, Xor, Uint64, Word64) \
- V(I32AtomicXor8U, Xor, Uint8, Word32) \
- V(I32AtomicXor16U, Xor, Uint16, Word32) \
- V(I64AtomicXor8U, Xor, Uint8, Word64) \
- V(I64AtomicXor16U, Xor, Uint16, Word64) \
- V(I64AtomicXor32U, Xor, Uint32, Word64) \
- V(I32AtomicExchange, Exchange, Uint32, Word32) \
- V(I64AtomicExchange, Exchange, Uint64, Word64) \
- V(I32AtomicExchange8U, Exchange, Uint8, Word32) \
- V(I32AtomicExchange16U, Exchange, Uint16, Word32) \
- V(I64AtomicExchange8U, Exchange, Uint8, Word64) \
- V(I64AtomicExchange16U, Exchange, Uint16, Word64) \
- V(I64AtomicExchange32U, Exchange, Uint32, Word64)
-
-#define ATOMIC_CMP_EXCHG_LIST(V) \
- V(I32AtomicCompareExchange, Uint32, Word32) \
- V(I64AtomicCompareExchange, Uint64, Word64) \
- V(I32AtomicCompareExchange8U, Uint8, Word32) \
- V(I32AtomicCompareExchange16U, Uint16, Word32) \
- V(I64AtomicCompareExchange8U, Uint8, Word64) \
- V(I64AtomicCompareExchange16U, Uint16, Word64) \
- V(I64AtomicCompareExchange32U, Uint32, Word64)
-
-#define ATOMIC_LOAD_LIST(V) \
- V(I32AtomicLoad, Uint32, Word32) \
- V(I64AtomicLoad, Uint64, Word64) \
- V(I32AtomicLoad8U, Uint8, Word32) \
- V(I32AtomicLoad16U, Uint16, Word32) \
- V(I64AtomicLoad8U, Uint8, Word64) \
- V(I64AtomicLoad16U, Uint16, Word64) \
- V(I64AtomicLoad32U, Uint32, Word64)
-
-#define ATOMIC_STORE_LIST(V) \
- V(I32AtomicStore, Uint32, kWord32, Word32) \
- V(I64AtomicStore, Uint64, kWord64, Word64) \
- V(I32AtomicStore8U, Uint8, kWord8, Word32) \
- V(I32AtomicStore16U, Uint16, kWord16, Word32) \
- V(I64AtomicStore8U, Uint8, kWord8, Word64) \
- V(I64AtomicStore16U, Uint16, kWord16, Word64) \
- V(I64AtomicStore32U, Uint32, kWord32, Word64)
-
Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
- uint32_t alignment, uint32_t offset,
+ uint32_t alignment, uint64_t offset,
wasm::WasmCodePosition position) {
- Node* node;
+ struct AtomicOpInfo {
+ enum Type : int8_t {
+ kNoInput = 0,
+ kOneInput = 1,
+ kTwoInputs = 2,
+ kSpecial
+ };
+
+ using OperatorByType =
+ const Operator* (MachineOperatorBuilder::*)(MachineType);
+ using OperatorByRep =
+ const Operator* (MachineOperatorBuilder::*)(MachineRepresentation);
+
+ const Type type;
+ const MachineType machine_type;
+ const OperatorByType operator_by_type = nullptr;
+ const OperatorByRep operator_by_rep = nullptr;
+
+ constexpr AtomicOpInfo(Type t, MachineType m, OperatorByType o)
+ : type(t), machine_type(m), operator_by_type(o) {}
+ constexpr AtomicOpInfo(Type t, MachineType m, OperatorByRep o)
+ : type(t), machine_type(m), operator_by_rep(o) {}
+
+ // Constexpr, hence just a table lookup in most compilers.
+ static constexpr AtomicOpInfo Get(wasm::WasmOpcode opcode) {
+ switch (opcode) {
+#define CASE(Name, Type, MachType, Op) \
+ case wasm::kExpr##Name: \
+ return {Type, MachineType::MachType(), &MachineOperatorBuilder::Op};
+
+ // Binops.
+ CASE(I32AtomicAdd, kOneInput, Uint32, Word32AtomicAdd)
+ CASE(I64AtomicAdd, kOneInput, Uint64, Word64AtomicAdd)
+ CASE(I32AtomicAdd8U, kOneInput, Uint8, Word32AtomicAdd)
+ CASE(I32AtomicAdd16U, kOneInput, Uint16, Word32AtomicAdd)
+ CASE(I64AtomicAdd8U, kOneInput, Uint8, Word64AtomicAdd)
+ CASE(I64AtomicAdd16U, kOneInput, Uint16, Word64AtomicAdd)
+ CASE(I64AtomicAdd32U, kOneInput, Uint32, Word64AtomicAdd)
+ CASE(I32AtomicSub, kOneInput, Uint32, Word32AtomicSub)
+ CASE(I64AtomicSub, kOneInput, Uint64, Word64AtomicSub)
+ CASE(I32AtomicSub8U, kOneInput, Uint8, Word32AtomicSub)
+ CASE(I32AtomicSub16U, kOneInput, Uint16, Word32AtomicSub)
+ CASE(I64AtomicSub8U, kOneInput, Uint8, Word64AtomicSub)
+ CASE(I64AtomicSub16U, kOneInput, Uint16, Word64AtomicSub)
+ CASE(I64AtomicSub32U, kOneInput, Uint32, Word64AtomicSub)
+ CASE(I32AtomicAnd, kOneInput, Uint32, Word32AtomicAnd)
+ CASE(I64AtomicAnd, kOneInput, Uint64, Word64AtomicAnd)
+ CASE(I32AtomicAnd8U, kOneInput, Uint8, Word32AtomicAnd)
+ CASE(I32AtomicAnd16U, kOneInput, Uint16, Word32AtomicAnd)
+ CASE(I64AtomicAnd8U, kOneInput, Uint8, Word64AtomicAnd)
+ CASE(I64AtomicAnd16U, kOneInput, Uint16, Word64AtomicAnd)
+ CASE(I64AtomicAnd32U, kOneInput, Uint32, Word64AtomicAnd)
+ CASE(I32AtomicOr, kOneInput, Uint32, Word32AtomicOr)
+ CASE(I64AtomicOr, kOneInput, Uint64, Word64AtomicOr)
+ CASE(I32AtomicOr8U, kOneInput, Uint8, Word32AtomicOr)
+ CASE(I32AtomicOr16U, kOneInput, Uint16, Word32AtomicOr)
+ CASE(I64AtomicOr8U, kOneInput, Uint8, Word64AtomicOr)
+ CASE(I64AtomicOr16U, kOneInput, Uint16, Word64AtomicOr)
+ CASE(I64AtomicOr32U, kOneInput, Uint32, Word64AtomicOr)
+ CASE(I32AtomicXor, kOneInput, Uint32, Word32AtomicXor)
+ CASE(I64AtomicXor, kOneInput, Uint64, Word64AtomicXor)
+ CASE(I32AtomicXor8U, kOneInput, Uint8, Word32AtomicXor)
+ CASE(I32AtomicXor16U, kOneInput, Uint16, Word32AtomicXor)
+ CASE(I64AtomicXor8U, kOneInput, Uint8, Word64AtomicXor)
+ CASE(I64AtomicXor16U, kOneInput, Uint16, Word64AtomicXor)
+ CASE(I64AtomicXor32U, kOneInput, Uint32, Word64AtomicXor)
+ CASE(I32AtomicExchange, kOneInput, Uint32, Word32AtomicExchange)
+ CASE(I64AtomicExchange, kOneInput, Uint64, Word64AtomicExchange)
+ CASE(I32AtomicExchange8U, kOneInput, Uint8, Word32AtomicExchange)
+ CASE(I32AtomicExchange16U, kOneInput, Uint16, Word32AtomicExchange)
+ CASE(I64AtomicExchange8U, kOneInput, Uint8, Word64AtomicExchange)
+ CASE(I64AtomicExchange16U, kOneInput, Uint16, Word64AtomicExchange)
+ CASE(I64AtomicExchange32U, kOneInput, Uint32, Word64AtomicExchange)
+
+ // Compare-exchange.
+ CASE(I32AtomicCompareExchange, kTwoInputs, Uint32,
+ Word32AtomicCompareExchange)
+ CASE(I64AtomicCompareExchange, kTwoInputs, Uint64,
+ Word64AtomicCompareExchange)
+ CASE(I32AtomicCompareExchange8U, kTwoInputs, Uint8,
+ Word32AtomicCompareExchange)
+ CASE(I32AtomicCompareExchange16U, kTwoInputs, Uint16,
+ Word32AtomicCompareExchange)
+ CASE(I64AtomicCompareExchange8U, kTwoInputs, Uint8,
+ Word64AtomicCompareExchange)
+ CASE(I64AtomicCompareExchange16U, kTwoInputs, Uint16,
+ Word64AtomicCompareExchange)
+ CASE(I64AtomicCompareExchange32U, kTwoInputs, Uint32,
+ Word64AtomicCompareExchange)
+
+ // Load.
+ CASE(I32AtomicLoad, kNoInput, Uint32, Word32AtomicLoad)
+ CASE(I64AtomicLoad, kNoInput, Uint64, Word64AtomicLoad)
+ CASE(I32AtomicLoad8U, kNoInput, Uint8, Word32AtomicLoad)
+ CASE(I32AtomicLoad16U, kNoInput, Uint16, Word32AtomicLoad)
+ CASE(I64AtomicLoad8U, kNoInput, Uint8, Word64AtomicLoad)
+ CASE(I64AtomicLoad16U, kNoInput, Uint16, Word64AtomicLoad)
+ CASE(I64AtomicLoad32U, kNoInput, Uint32, Word64AtomicLoad)
+
+ // Store.
+ CASE(I32AtomicStore, kOneInput, Uint32, Word32AtomicStore)
+ CASE(I64AtomicStore, kOneInput, Uint64, Word64AtomicStore)
+ CASE(I32AtomicStore8U, kOneInput, Uint8, Word32AtomicStore)
+ CASE(I32AtomicStore16U, kOneInput, Uint16, Word32AtomicStore)
+ CASE(I64AtomicStore8U, kOneInput, Uint8, Word64AtomicStore)
+ CASE(I64AtomicStore16U, kOneInput, Uint16, Word64AtomicStore)
+ CASE(I64AtomicStore32U, kOneInput, Uint32, Word64AtomicStore)
+
+#undef CASE
+
+ case wasm::kExprAtomicNotify:
+ return {kSpecial, MachineType::Int32(), OperatorByType{nullptr}};
+ case wasm::kExprI32AtomicWait:
+ return {kSpecial, MachineType::Int32(), OperatorByType{nullptr}};
+ case wasm::kExprI64AtomicWait:
+ return {kSpecial, MachineType::Int64(), OperatorByType{nullptr}};
+ default:
+#if V8_HAS_CXX14_CONSTEXPR
+ UNREACHABLE();
+#else
+ // Return something for older GCC.
+ return {kSpecial, MachineType::Int64(), OperatorByType{nullptr}};
+#endif
+ }
+ }
+ };
+
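
The AtomicOpInfo struct above replaces the deleted macro lists with a constexpr table lookup plus a pointer-to-member call. A minimal standalone illustration of that pattern (toy types, not the V8 classes):

    #include <cassert>

    struct Builder {
      int Add(int m) { return m + 1; }
      int Sub(int m) { return m - 1; }
    };

    struct OpInfo {
      using Method = int (Builder::*)(int);
      Method method;
      int machine_type;

      static constexpr OpInfo Get(bool is_add) {
        return is_add ? OpInfo{&Builder::Add, 32} : OpInfo{&Builder::Sub, 64};
      }
    };

    int main() {
      Builder b;
      OpInfo info = OpInfo::Get(true);  // table lookup, folded at compile time
      assert((b.*info.method)(info.machine_type) == 33);  // indirect dispatch
      return 0;
    }
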
+ AtomicOpInfo info = AtomicOpInfo::Get(opcode);
+
+ Node* index = CheckBoundsAndAlignment(info.machine_type.MemSize(), inputs[0],
+ offset, position);
+
+ // {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
+ uintptr_t capped_offset = static_cast<uintptr_t>(offset);
+ if (info.type != AtomicOpInfo::kSpecial) {
+ const Operator* op =
+ info.operator_by_type
+ ? (mcgraph()->machine()->*info.operator_by_type)(info.machine_type)
+ : (mcgraph()->machine()->*info.operator_by_rep)(
+ info.machine_type.representation());
+
+ Node* input_nodes[6] = {MemBuffer(capped_offset), index};
+ int num_actual_inputs = info.type;
+ std::copy_n(inputs + 1, num_actual_inputs, input_nodes + 2);
+ input_nodes[num_actual_inputs + 2] = effect();
+ input_nodes[num_actual_inputs + 3] = control();
+ return gasm_->AddNode(
+ graph()->NewNode(op, num_actual_inputs + 4, input_nodes));
+ }
+
+ // After we've bounds-checked, compute the effective offset.
+ Node* effective_offset =
+ gasm_->IntAdd(gasm_->UintPtrConstant(capped_offset), index);
+
switch (opcode) {
-#define BUILD_ATOMIC_BINOP(Name, Operation, Type, Prefix) \
- case wasm::kExpr##Name: { \
- Node* index = CheckBoundsAndAlignment(MachineType::Type().MemSize(), \
- inputs[0], offset, position); \
- node = graph()->NewNode( \
- mcgraph()->machine()->Prefix##Atomic##Operation(MachineType::Type()), \
- MemBuffer(offset), index, inputs[1], effect(), control()); \
- break; \
- }
- ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP)
-#undef BUILD_ATOMIC_BINOP
-
-#define BUILD_ATOMIC_CMP_EXCHG(Name, Type, Prefix) \
- case wasm::kExpr##Name: { \
- Node* index = CheckBoundsAndAlignment(MachineType::Type().MemSize(), \
- inputs[0], offset, position); \
- node = graph()->NewNode( \
- mcgraph()->machine()->Prefix##AtomicCompareExchange( \
- MachineType::Type()), \
- MemBuffer(offset), index, inputs[1], inputs[2], effect(), control()); \
- break; \
- }
- ATOMIC_CMP_EXCHG_LIST(BUILD_ATOMIC_CMP_EXCHG)
-#undef BUILD_ATOMIC_CMP_EXCHG
-
-#define BUILD_ATOMIC_LOAD_OP(Name, Type, Prefix) \
- case wasm::kExpr##Name: { \
- Node* index = CheckBoundsAndAlignment(MachineType::Type().MemSize(), \
- inputs[0], offset, position); \
- node = graph()->NewNode( \
- mcgraph()->machine()->Prefix##AtomicLoad(MachineType::Type()), \
- MemBuffer(offset), index, effect(), control()); \
- break; \
- }
- ATOMIC_LOAD_LIST(BUILD_ATOMIC_LOAD_OP)
-#undef BUILD_ATOMIC_LOAD_OP
-
-#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep, Prefix) \
- case wasm::kExpr##Name: { \
- Node* index = CheckBoundsAndAlignment(MachineType::Type().MemSize(), \
- inputs[0], offset, position); \
- node = graph()->NewNode( \
- mcgraph()->machine()->Prefix##AtomicStore(MachineRepresentation::Rep), \
- MemBuffer(offset), index, inputs[1], effect(), control()); \
- break; \
- }
- ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP)
-#undef BUILD_ATOMIC_STORE_OP
case wasm::kExprAtomicNotify: {
- Node* index = CheckBoundsAndAlignment(MachineType::Uint32().MemSize(),
- inputs[0], offset, position);
- // Now that we've bounds-checked, compute the effective address.
- Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
- Uint32Constant(offset), index);
- WasmAtomicNotifyDescriptor interface_descriptor;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- mcgraph()->zone(), interface_descriptor,
- interface_descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- StubCallMode::kCallWasmRuntimeStub);
+ auto* call_descriptor =
+ GetBuiltinCallDescriptor<WasmAtomicNotifyDescriptor>(
+ this, StubCallMode::kCallWasmRuntimeStub);
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmAtomicNotify, RelocInfo::WASM_STUB_CALL);
- node = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
- call_target, address, inputs[1], effect(),
- control());
- break;
+ return gasm_->Call(call_descriptor, call_target, effective_offset,
+ inputs[1]);
}
case wasm::kExprI32AtomicWait: {
- Node* index = CheckBoundsAndAlignment(MachineType::Uint32().MemSize(),
- inputs[0], offset, position);
- // Now that we've bounds-checked, compute the effective address.
- Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
- Uint32Constant(offset), index);
-
- auto call_descriptor = GetI32AtomicWaitCallDescriptor();
+ auto* call_descriptor = GetI32AtomicWaitCallDescriptor();
intptr_t target = mcgraph()->machine()->Is64()
? wasm::WasmCode::kWasmI32AtomicWait64
@@ -5176,20 +5293,12 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
target, RelocInfo::WASM_STUB_CALL);
- node = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
- call_target, address, inputs[1], inputs[2],
- effect(), control());
- break;
+ return gasm_->Call(call_descriptor, call_target, effective_offset,
+ inputs[1], inputs[2]);
}
case wasm::kExprI64AtomicWait: {
- Node* index = CheckBoundsAndAlignment(MachineType::Uint64().MemSize(),
- inputs[0], offset, position);
- // Now that we've bounds-checked, compute the effective address.
- Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
- Uint32Constant(offset), index);
-
- CallDescriptor* call_descriptor = GetI64AtomicWaitCallDescriptor();
+ auto* call_descriptor = GetI64AtomicWaitCallDescriptor();
intptr_t target = mcgraph()->machine()->Is64()
? wasm::WasmCode::kWasmI64AtomicWait64
@@ -5197,16 +5306,13 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
target, RelocInfo::WASM_STUB_CALL);
- node = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
- call_target, address, inputs[1], inputs[2],
- effect(), control());
- break;
+ return gasm_->Call(call_descriptor, call_target, effective_offset,
+ inputs[1], inputs[2]);
}
default:
FATAL_UNSUPPORTED_OPCODE(opcode);
}
- return SetEffect(node);
}
Node* WasmGraphBuilder::AtomicFence() {
@@ -5214,11 +5320,6 @@ Node* WasmGraphBuilder::AtomicFence() {
effect(), control()));
}
-#undef ATOMIC_BINOP_LIST
-#undef ATOMIC_CMP_EXCHG_LIST
-#undef ATOMIC_LOAD_LIST
-#undef ATOMIC_STORE_LIST
-
Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
@@ -6157,9 +6258,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
wasm::ValueType type) {
// Make sure ValueType fits in a Smi.
STATIC_ASSERT(wasm::ValueType::kLastUsedBit + 1 <= kSmiValueSize);
- Node* inputs[] = {
- instance_node_.get(), input,
- IntPtrConstant(IntToSmi(static_cast<int>(type.raw_bit_field())))};
+ Node* inputs[] = {instance_node_.get(), input,
+ mcgraph()->IntPtrConstant(
+ IntToSmi(static_cast<int>(type.raw_bit_field())))};
Node* check = BuildChangeSmiToInt32(SetEffect(BuildCallToRuntimeWithContext(
Runtime::kWasmIsValidRefValue, js_context, inputs, 3)));
@@ -6169,7 +6270,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
SetControl(type_check.if_false);
Node* old_effect = effect();
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context,
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowJSTypeError, js_context,
nullptr, 0);
SetEffectControl(type_check.EffectPhi(old_effect, effect()),
@@ -6241,14 +6342,38 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
BuildChangeSmiToInt32(input));
}
+ Node* HeapNumberToFloat64(Node* input) {
+ return gasm_->Load(MachineType::Float64(), input,
+ wasm::ObjectAccess::ToTagged(HeapNumber::kValueOffset));
+ }
+
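
FromJSFast below branches on the JS number representation: a Smi converts via an integer-to-float change, while a HeapNumber loads its boxed float64 payload (truncated to float32 for kF32). A scalar sketch, with a hypothetical tagged-value struct standing in for the real representation:

    #include <cstdint>

    struct TaggedNumber {  // hypothetical, for illustration only
      bool is_smi;
      int32_t smi;         // valid when is_smi
      double heap_value;   // otherwise: the HeapNumber's float64 payload
    };

    double FromJSFastF64(const TaggedNumber& input) {
      return input.is_smi ? static_cast<double>(input.smi)  // SmiToFloat64
                          : input.heap_value;               // boxed payload
    }

    float FromJSFastF32(const TaggedNumber& input) {
      return input.is_smi
                 ? static_cast<float>(input.smi)
                 : static_cast<float>(input.heap_value);  // truncate to f32
    }
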
Node* FromJSFast(Node* input, wasm::ValueType type) {
switch (type.kind()) {
case wasm::ValueType::kI32:
return BuildChangeSmiToInt32(input);
- case wasm::ValueType::kF32:
- return SmiToFloat32(input);
- case wasm::ValueType::kF64:
- return SmiToFloat64(input);
+ case wasm::ValueType::kF32: {
+ auto done = gasm_->MakeLabel(MachineRepresentation::kFloat32);
+ auto heap_number = gasm_->MakeLabel();
+ gasm_->GotoIfNot(IsSmi(input), &heap_number);
+ gasm_->Goto(&done, SmiToFloat32(input));
+ gasm_->Bind(&heap_number);
+ Node* value =
+ graph()->NewNode(mcgraph()->machine()->TruncateFloat64ToFloat32(),
+ HeapNumberToFloat64(input));
+ gasm_->Goto(&done, value);
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+ }
+ case wasm::ValueType::kF64: {
+ auto done = gasm_->MakeLabel(MachineRepresentation::kFloat64);
+ auto heap_number = gasm_->MakeLabel();
+ gasm_->GotoIfNot(IsSmi(input), &heap_number);
+ gasm_->Goto(&done, SmiToFloat64(input));
+ gasm_->Bind(&heap_number);
+ gasm_->Goto(&done, HeapNumberToFloat64(input));
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+ }
case wasm::ValueType::kRef:
case wasm::ValueType::kOptRef:
case wasm::ValueType::kI64:
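For the two new float cases above: the fast path no longer requires a Smi. If the input is a heap object, it is (per CanTransformFast, below) a HeapNumber, whose IEEE-754 payload is read directly via the new HeapNumberToFloat64 helper. A scalar model of the kF64 case, illustrative only, assuming 32-bit Smis (low tag bit 0) and a heap-object tag of 1; the real tags and offsets come from V8's object layout, not this sketch:

    double FromJSFastF64(uintptr_t tagged, ptrdiff_t value_offset) {
      if ((tagged & 1) == 0) {
        // Smi: untag with an arithmetic shift, then widen to double.
        return static_cast<double>(static_cast<intptr_t>(tagged) >> 1);
      }
      // HeapNumber: read the double payload at its value offset.
      return *reinterpret_cast<const double*>(tagged - 1 + value_offset);
    }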
@@ -6313,7 +6438,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig,
Node* iterable, Node* context) {
Node* length = BuildChangeUint31ToSmi(
- Uint32Constant(static_cast<uint32_t>(sig->return_count())));
+ mcgraph()->Uint32Constant(static_cast<uint32_t>(sig->return_count())));
return CALL_BUILTIN(IterableToFixedArrayForWasm, iterable, length, context);
}
@@ -6420,12 +6545,30 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
gasm_->Int32Constant(0));
}
- Node* CanTransformFast(Node* input, wasm::ValueType type) {
+ void CanTransformFast(
+ Node* input, wasm::ValueType type,
+ v8::internal::compiler::GraphAssemblerLabel<0>* slow_path) {
switch (type.kind()) {
- case wasm::ValueType::kI32:
- case wasm::ValueType::kF64:
+ case wasm::ValueType::kI32: {
+ gasm_->GotoIfNot(IsSmi(input), slow_path);
+ return;
+ }
case wasm::ValueType::kF32:
- return IsSmi(input);
+ case wasm::ValueType::kF64: {
+ auto done = gasm_->MakeLabel();
+ gasm_->GotoIf(IsSmi(input), &done);
+ Node* map =
+ gasm_->Load(MachineType::TaggedPointer(), input,
+ wasm::ObjectAccess::ToTagged(HeapObject::kMapOffset));
+ Node* heap_number_map = LOAD_FULL_POINTER(
+ BuildLoadIsolateRoot(),
+ IsolateData::root_slot_offset(RootIndex::kHeapNumberMap));
+ Node* is_heap_number = gasm_->WordEqual(heap_number_map, map);
+ gasm_->GotoIf(is_heap_number, &done);
+ gasm_->Goto(slow_path);
+ gasm_->Bind(&done);
+ return;
+ }
case wasm::ValueType::kRef:
case wasm::ValueType::kOptRef:
case wasm::ValueType::kI64:
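CanTransformFast changes shape here: instead of returning a bool for the caller to branch on, it jumps to {slow_path} itself, and the float cases now admit HeapNumbers by comparing the object's map against the HeapNumberMap root. A scalar model of the new control flow (Value, ValueKind, Label, Goto and IsHeapNumber are placeholders for this sketch, not V8 API):

    void CanTransformFast(Value input, ValueKind kind, Label* slow_path) {
      switch (kind) {
        case kI32:
          if (!IsSmi(input)) Goto(slow_path);  // i32 needs a Smi
          return;
        case kF32:
        case kF64:
          // Floats accept a Smi or a HeapNumber; anything else is slow.
          if (!IsSmi(input) && !IsHeapNumber(input)) Goto(slow_path);
          return;
        default:
          Goto(slow_path);  // conservative: no fast path for other kinds
      }
    }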
@@ -6460,7 +6603,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// an actual reference to an instance or a placeholder reference,
// called {WasmExportedFunction} via the {WasmExportedFunctionData}
// structure.
- Node* function_data = BuildLoadFunctionDataFromExportedFunction(js_closure);
+ Node* function_data = BuildLoadFunctionDataFromJSFunction(js_closure);
instance_node_.set(
BuildLoadInstanceFromExportedFunctionData(function_data));
@@ -6468,7 +6611,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Throw a TypeError. Use the js_context of the calling javascript
// function (passed as a parameter), such that the generated code is
// js_context independent.
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context,
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowJSTypeError, js_context,
nullptr, 0);
TerminateThrow(effect(), control());
return;
@@ -6494,8 +6637,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// fast is encountered, skip checking the rest and fall back to the slow
// path.
for (int i = 0; i < wasm_count; ++i) {
- gasm_->GotoIfNot(CanTransformFast(params[i + 1], sig_->GetParam(i)),
- &slow_path);
+ CanTransformFast(params[i + 1], sig_->GetParam(i), &slow_path);
}
// Convert JS parameters to wasm numbers using the fast transformation
// and build the call.
@@ -6557,7 +6699,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
global_proxy);
}
- bool BuildWasmImportCallWrapper(WasmImportCallKind kind, int expected_arity) {
+ bool BuildWasmToJSWrapper(WasmImportCallKind kind, int expected_arity) {
int wasm_count = static_cast<int>(sig_->parameter_count());
// Build the start and the parameter nodes.
@@ -6572,7 +6714,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// =======================================================================
// === Runtime TypeError =================================================
// =======================================================================
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError,
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowJSTypeError,
native_context, nullptr, 0);
TerminateThrow(effect(), control());
return false;
@@ -6621,8 +6763,45 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args.begin());
break;
}
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
// =======================================================================
- // === JS Functions with arguments adapter ===============================
+ // === JS Functions with mismatching arity ===============================
+ // =======================================================================
+ case WasmImportCallKind::kJSFunctionArityMismatch: {
+ int pushed_count = std::max(expected_arity, wasm_count);
+ base::SmallVector<Node*, 16> args(pushed_count + 7);
+ int pos = 0;
+
+ args[pos++] = callable_node; // target callable.
+ // Determine receiver at runtime.
+ args[pos++] =
+ BuildReceiverNode(callable_node, native_context, undefined_node);
+
+ // Convert wasm numbers to JS values.
+ pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_);
+ for (int i = wasm_count; i < expected_arity; ++i) {
+ args[pos++] = undefined_node;
+ }
+ args[pos++] = undefined_node; // new target
+ args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
+
+ Node* function_context =
+ gasm_->Load(MachineType::TaggedPointer(), callable_node,
+ wasm::ObjectAccess::ContextOffsetInTaggedJSFunction());
+ args[pos++] = function_context;
+ args[pos++] = effect();
+ args[pos++] = control();
+ DCHECK_EQ(pos, args.size());
+
+ auto call_descriptor = Linkage::GetJSCallDescriptor(
+ graph()->zone(), false, pushed_count + 1, CallDescriptor::kNoFlags);
+ call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
+ args.begin());
+ break;
+ }
+#else
+ // =======================================================================
+ // === JS Functions with mismatching arity ===============================
// =======================================================================
case WasmImportCallKind::kJSFunctionArityMismatch: {
base::SmallVector<Node*, 16> args(wasm_count + 9);
@@ -6630,9 +6809,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* function_context =
gasm_->Load(MachineType::TaggedPointer(), callable_node,
wasm::ObjectAccess::ContextOffsetInTaggedJSFunction());
- args[pos++] = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kArgumentsAdaptorTrampoline,
- RelocInfo::WASM_STUB_CALL);
+ args[pos++] =
+ GetBuiltinPointerTarget(Builtins::kArgumentsAdaptorTrampoline);
args[pos++] = callable_node; // target callable
args[pos++] = undefined_node; // new target
args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
@@ -6657,7 +6835,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), ArgumentsAdaptorDescriptor{}, 1 + wasm_count,
CallDescriptor::kNoFlags, Operator::kNoProperties,
- StubCallMode::kCallWasmRuntimeStub);
+ StubCallMode::kCallBuiltinPointer);
// Convert wasm numbers to JS values.
pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_);
@@ -6670,47 +6848,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args.begin());
break;
}
- // =======================================================================
- // === JS Functions without arguments adapter ============================
- // =======================================================================
- case WasmImportCallKind::kJSFunctionArityMismatchSkipAdaptor: {
- base::SmallVector<Node*, 16> args(expected_arity + 7);
- int pos = 0;
- Node* function_context =
- gasm_->Load(MachineType::TaggedPointer(), callable_node,
- wasm::ObjectAccess::ContextOffsetInTaggedJSFunction());
- args[pos++] = callable_node; // target callable.
-
- // Determine receiver at runtime.
- args[pos++] =
- BuildReceiverNode(callable_node, native_context, undefined_node);
-
- auto call_descriptor = Linkage::GetJSCallDescriptor(
- graph()->zone(), false, expected_arity + 1,
- CallDescriptor::kNoFlags);
-
- // Convert wasm numbers to JS values.
- if (expected_arity <= wasm_count) {
- pos = AddArgumentNodes(VectorOf(args), pos, expected_arity, sig_);
- } else {
- pos = AddArgumentNodes(VectorOf(args), pos, wasm_count, sig_);
- for (int i = wasm_count; i < expected_arity; ++i) {
- args[pos++] = undefined_node;
- }
- }
-
- args[pos++] = undefined_node; // new target
- args[pos++] =
- mcgraph()->Int32Constant(expected_arity); // argument count
- args[pos++] = function_context;
- args[pos++] = effect();
- args[pos++] = control();
-
- DCHECK_EQ(pos, args.size());
- call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
- args.begin());
- break;
- }
+#endif
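Both #ifdef branches above compile the same import-call kind. With V8_NO_ARGUMENTS_ADAPTOR the wrapper adapts the arity itself: it pushes max(expected_arity, wasm_count) arguments, pads with undefined, passes the real argument count, and calls the JSFunction through a plain JS call descriptor. Without it, the call still goes through the ArgumentsAdaptorTrampoline, now addressed as a builtin pointer rather than a wasm runtime stub. Sketch of the padding step (converted_args stands in for the nodes produced by AddArgumentNodes):

    int pushed_count = std::max(expected_arity, wasm_count);
    for (int i = 0; i < wasm_count; ++i) {
      args[pos++] = converted_args[i];           // actual arguments
    }
    for (int i = wasm_count; i < expected_arity; ++i) {
      args[pos++] = undefined_node;              // padding up to declared arity
    }
    args[pos++] = undefined_node;                        // new target
    args[pos++] = mcgraph()->Int32Constant(wasm_count);  // real argc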
// =======================================================================
// === General case of unknown callable ==================================
// =======================================================================
@@ -6837,11 +6975,11 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
BuildModifyThreadInWasmFlag(true);
- Node* exception_branch =
- graph()->NewNode(mcgraph()->common()->Branch(BranchHint::kTrue),
- graph()->NewNode(mcgraph()->machine()->WordEqual(),
- return_value, IntPtrConstant(0)),
- control());
+ Node* exception_branch = graph()->NewNode(
+ mcgraph()->common()->Branch(BranchHint::kTrue),
+ graph()->NewNode(mcgraph()->machine()->WordEqual(), return_value,
+ mcgraph()->IntPtrConstant(0)),
+ control());
SetControl(
graph()->NewNode(mcgraph()->common()->IfFalse(), exception_branch));
WasmThrowDescriptor interface_descriptor;
@@ -6897,7 +7035,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Throw a TypeError if the signature is incompatible with JavaScript.
if (!wasm::IsJSCompatibleSignature(sig_, module_, enabled_features_)) {
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, context,
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowJSTypeError, context,
nullptr, 0);
TerminateThrow(effect(), control());
return;
@@ -7114,17 +7252,17 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
const wasm::WasmFeatures& enabled_features) {
if (WasmExportedFunction::IsWasmExportedFunction(*callable)) {
auto imported_function = Handle<WasmExportedFunction>::cast(callable);
- auto func_index = imported_function->function_index();
- auto module = imported_function->instance().module();
- const wasm::FunctionSig* imported_sig = module->functions[func_index].sig;
- if (*imported_sig != *expected_sig) {
+ if (!imported_function->MatchesSignature(module, expected_sig)) {
return std::make_pair(WasmImportCallKind::kLinkError, callable);
}
- if (static_cast<uint32_t>(func_index) >= module->num_imported_functions) {
+ uint32_t func_index =
+ static_cast<uint32_t>(imported_function->function_index());
+ if (func_index >=
+ imported_function->instance().module()->num_imported_functions) {
return std::make_pair(WasmImportCallKind::kWasmToWasm, callable);
}
Isolate* isolate = callable->GetIsolate();
- // Resolve the short-cut to the underlying callable and continue.
+ // Resolve the shortcut to the underlying callable and continue.
Handle<WasmInstanceObject> instance(imported_function->instance(), isolate);
ImportedFunctionEntry entry(instance, func_index);
callable = handle(entry.callable(), isolate);
@@ -7224,14 +7362,6 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
Compiler::Compile(function, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope);
}
-#ifndef V8_REVERSE_JSARGS
- // This optimization is disabled when the arguments are reversed. It will be
- // subsumed when the arguments adaptor frame is removed.
- if (shared->is_safe_to_skip_arguments_adaptor()) {
- return std::make_pair(
- WasmImportCallKind::kJSFunctionArityMismatchSkipAdaptor, callable);
- }
-#endif
return std::make_pair(WasmImportCallKind::kJSFunctionArityMismatch,
callable);
@@ -7378,7 +7508,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
WasmWrapperGraphBuilder builder(
&zone, mcgraph, sig, env->module, source_position_table,
StubCallMode::kCallWasmRuntimeStub, env->enabled_features);
- builder.BuildWasmImportCallWrapper(kind, expected_arity);
+ builder.BuildWasmToJSWrapper(kind, expected_arity);
// Build a name in the form "wasm-to-js-<kind>-<signature>".
constexpr size_t kMaxNameLen = 128;
@@ -7455,6 +7585,57 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
return native_module->PublishCode(std::move(wasm_code));
}
+MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
+ const wasm::FunctionSig* sig,
+ WasmImportCallKind kind,
+ int expected_arity) {
+ std::unique_ptr<Zone> zone = std::make_unique<Zone>(
+ isolate->allocator(), ZONE_NAME, kCompressGraphZone);
+
+ // Create the Graph
+ Graph* graph = zone->New<Graph>(zone.get());
+ CommonOperatorBuilder* common = zone->New<CommonOperatorBuilder>(zone.get());
+ MachineOperatorBuilder* machine = zone->New<MachineOperatorBuilder>(
+ zone.get(), MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements());
+ MachineGraph* mcgraph = zone->New<MachineGraph>(graph, common, machine);
+
+ WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, nullptr, nullptr,
+ StubCallMode::kCallWasmRuntimeStub,
+ wasm::WasmFeatures::FromIsolate(isolate));
+ builder.BuildWasmToJSWrapper(kind, expected_arity);
+
+ // Build a name in the form "wasm-to-js-<kind>-<signature>".
+ constexpr size_t kMaxNameLen = 128;
+ constexpr size_t kNamePrefixLen = 11;
+ auto name_buffer = std::unique_ptr<char[]>(new char[kMaxNameLen]);
+ memcpy(name_buffer.get(), "wasm-to-js:", kNamePrefixLen);
+ PrintSignature(VectorOf(name_buffer.get(), kMaxNameLen) + kNamePrefixLen,
+ sig);
+
+ // Generate the call descriptor.
+ CallDescriptor* incoming =
+ GetWasmCallDescriptor(zone.get(), sig, WasmGraphBuilder::kNoRetpoline,
+ WasmCallKind::kWasmImportWrapper);
+
+ // Run the compilation job synchronously.
+ std::unique_ptr<OptimizedCompilationJob> job(
+ Pipeline::NewWasmHeapStubCompilationJob(
+ isolate, isolate->wasm_engine(), incoming, std::move(zone), graph,
+ CodeKind::WASM_TO_JS_FUNCTION, std::move(name_buffer),
+ AssemblerOptions::Default(isolate)));
+
+ // Compile the wrapper
+ if (job->ExecuteJob(isolate->counters()->runtime_call_stats()) ==
+ CompilationJob::FAILED ||
+ job->FinalizeJob(isolate) == CompilationJob::FAILED) {
+ return Handle<Code>();
+ }
+ Handle<Code> code = job->compilation_info()->code();
+ return code;
+}
+
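CompileWasmToJSWrapper follows the same structure as the other wrapper compilers in this file, but runs its Turbofan job synchronously on the calling thread; an empty handle reports failure. The failure-handling idiom, condensed from the body above:

    if (job->ExecuteJob(isolate->counters()->runtime_call_stats()) ==
            CompilationJob::FAILED ||
        job->FinalizeJob(isolate) == CompilationJob::FAILED) {
      return {};  // empty MaybeHandle<Code>: the caller must check ToHandle()
    }
    return job->compilation_info()->code();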
MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
const wasm::FunctionSig* sig,
const wasm::WasmModule* module) {
@@ -7547,7 +7728,7 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, const wasm::FunctionSig* sig,
CodeKind::C_WASM_ENTRY, std::move(name_buffer),
AssemblerOptions::Default(isolate)));
- CHECK_NE(job->ExecuteJob(isolate->counters()->runtime_call_stats()),
+ CHECK_NE(job->ExecuteJob(isolate->counters()->runtime_call_stats(), nullptr),
CompilationJob::FAILED);
CHECK_NE(job->FinalizeJob(isolate), CompilationJob::FAILED);
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index ab42610239..c431f53efe 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -58,15 +58,12 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
// type of the target function/callable and whether the signature matches the
// argument arity.
enum class WasmImportCallKind : uint8_t {
- kLinkError, // static Wasm->Wasm type error
- kRuntimeTypeError, // runtime Wasm->JS type error
- kWasmToCapi, // fast Wasm->C-API call
- kWasmToWasm, // fast Wasm->Wasm call
- kJSFunctionArityMatch, // fast Wasm->JS call
- kJSFunctionArityMismatch, // Wasm->JS, needs adapter frame
- kJSFunctionArityMismatchSkipAdaptor, // Wasm->JS, arity mismatch calling
- // strict mode function where we don't
- // need the ArgumentsAdaptorTrampoline.
+ kLinkError, // static Wasm->Wasm type error
+ kRuntimeTypeError, // runtime Wasm->JS type error
+ kWasmToCapi, // fast Wasm->C-API call
+ kWasmToWasm, // fast Wasm->Wasm call
+ kJSFunctionArityMatch, // fast Wasm->JS call
+ kJSFunctionArityMismatch, // Wasm->JS, needs adapter frame
// Math functions imported from JavaScript that are intrinsified
kFirstMathIntrinsic,
kF64Acos = kFirstMathIntrinsic,
@@ -126,6 +123,11 @@ std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
const wasm::FunctionSig* sig, const wasm::WasmModule* module,
bool is_import, const wasm::WasmFeatures& enabled_features);
+MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
+ const wasm::FunctionSig* sig,
+ WasmImportCallKind kind,
+ int expected_arity);
+
// Compiles a stub with JS linkage that serves as an adapter for function
// objects constructed via {WebAssembly.Function}. It performs a round-trip
// simulating a JS-to-Wasm-to-JS coercion of parameter and return values.
@@ -206,10 +208,8 @@ class WasmGraphBuilder {
Node* RefNull();
Node* RefFunc(uint32_t function_index);
Node* RefAsNonNull(Node* arg, wasm::WasmCodePosition position);
- Node* Uint32Constant(uint32_t value);
Node* Int32Constant(int32_t value);
Node* Int64Constant(int64_t value);
- Node* IntPtrConstant(intptr_t value);
Node* Float32Constant(float value);
Node* Float64Constant(double value);
Node* Simd128Constant(const uint8_t value[16]);
@@ -303,23 +303,28 @@ class WasmGraphBuilder {
//-----------------------------------------------------------------------
Node* CurrentMemoryPages();
Node* TraceMemoryOperation(bool is_store, MachineRepresentation, Node* index,
- uint32_t offset, wasm::WasmCodePosition);
+ uintptr_t offset, wasm::WasmCodePosition);
Node* LoadMem(wasm::ValueType type, MachineType memtype, Node* index,
- uint32_t offset, uint32_t alignment,
+ uint64_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
#if defined(V8_TARGET_BIG_ENDIAN) || defined(V8_TARGET_ARCH_S390_LE_SIM)
Node* LoadTransformBigEndian(wasm::ValueType type, MachineType memtype,
wasm::LoadTransformationKind transform,
- Node* index, uint32_t offset, uint32_t alignment,
+ Node* index, uint64_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
#endif
Node* LoadTransform(wasm::ValueType type, MachineType memtype,
wasm::LoadTransformationKind transform, Node* index,
- uint32_t offset, uint32_t alignment,
+ uint64_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
- Node* StoreMem(MachineRepresentation mem_rep, Node* index, uint32_t offset,
+ Node* LoadLane(MachineType memtype, Node* value, Node* index, uint32_t offset,
+ uint8_t laneidx, wasm::WasmCodePosition position);
+ Node* StoreMem(MachineRepresentation mem_rep, Node* index, uint64_t offset,
uint32_t alignment, Node* val, wasm::WasmCodePosition position,
wasm::ValueType type);
+ Node* StoreLane(MachineRepresentation mem_rep, Node* index, uint32_t offset,
+ uint32_t alignment, Node* val, uint8_t laneidx,
+ wasm::WasmCodePosition position, wasm::ValueType type);
static void PrintDebugName(Node* node);
void set_instance_node(Node* instance_node) {
@@ -382,7 +387,7 @@ class WasmGraphBuilder {
Node* Simd8x16ShuffleOp(const uint8_t shuffle[16], Node* const* inputs);
Node* AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
- uint32_t alignment, uint32_t offset,
+ uint32_t alignment, uint64_t offset,
wasm::WasmCodePosition position);
Node* AtomicFence();
@@ -455,7 +460,10 @@ class WasmGraphBuilder {
Node* BuildLoadIsolateRoot();
- Node* MemBuffer(uint32_t offset);
+ // MemBuffer is only called with valid offsets (after bounds checking), so the
+ // offset fits in a platform-dependent uintptr_t.
+ Node* MemBuffer(uintptr_t offset);
+
// BoundsCheckMem receives a uint32 {index} node and returns a ptrsize index.
Node* BoundsCheckMem(uint8_t access_size, Node* index, uint64_t offset,
wasm::WasmCodePosition, EnforceBoundsCheck);
@@ -470,8 +478,8 @@ class WasmGraphBuilder {
// partially out-of-bounds, traps if it is completely out-of-bounds.
Node* BoundsCheckMemRange(Node** start, Node** size, wasm::WasmCodePosition);
- Node* CheckBoundsAndAlignment(uint8_t access_size, Node* index,
- uint32_t offset, wasm::WasmCodePosition);
+ Node* CheckBoundsAndAlignment(int8_t access_size, Node* index,
+ uint64_t offset, wasm::WasmCodePosition);
Node* Uint32ToUintptr(Node*);
const Operator* GetSafeLoadOperator(int offset, wasm::ValueType type);
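The theme of this header hunk: memory offsets widen from uint32_t to uint64_t at the graph-builder interface (LoadMem, LoadTransform, StoreMem, AtomicOp, CheckBoundsAndAlignment), while MemBuffer deliberately narrows to uintptr_t. Per the new MemBuffer comment, narrowing is only legal once a bounds check has proven the access in range. In sketch form (enforce_check is a placeholder for the EnforceBoundsCheck argument):

    // {offset} arrives as uint64_t from the decoded instruction...
    Node* checked_index =
        BoundsCheckMem(access_size, index, offset, position, enforce_check);
    // ...and only now provably fits in a platform word:
    Node* base = MemBuffer(static_cast<uintptr_t>(offset));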
@@ -612,7 +620,7 @@ class WasmGraphBuilder {
Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig,
Node* iterable, Node* context);
- Node* BuildLoadFunctionDataFromExportedFunction(Node* closure);
+ Node* BuildLoadFunctionDataFromJSFunction(Node* closure);
Node* BuildLoadJumpTableOffsetFromExportedFunctionData(Node* function_data);
Node* BuildLoadFunctionIndexFromExportedFunctionData(Node* function_data);
diff --git a/deps/v8/src/compiler/zone-stats.h b/deps/v8/src/compiler/zone-stats.h
index a272a674d4..8e0f4fa72a 100644
--- a/deps/v8/src/compiler/zone-stats.h
+++ b/deps/v8/src/compiler/zone-stats.h
@@ -28,6 +28,9 @@ class V8_EXPORT_PRIVATE ZoneStats final {
support_zone_compression_(support_zone_compression) {}
~Scope() { Destroy(); }
+ Scope(const Scope&) = delete;
+ Scope& operator=(const Scope&) = delete;
+
Zone* zone() {
if (zone_ == nullptr)
zone_ =
@@ -46,13 +49,14 @@ class V8_EXPORT_PRIVATE ZoneStats final {
ZoneStats* const zone_stats_;
Zone* zone_;
const bool support_zone_compression_;
- DISALLOW_COPY_AND_ASSIGN(Scope);
};
class V8_EXPORT_PRIVATE StatsScope final {
public:
explicit StatsScope(ZoneStats* zone_stats);
~StatsScope();
+ StatsScope(const StatsScope&) = delete;
+ StatsScope& operator=(const StatsScope&) = delete;
size_t GetMaxAllocatedBytes();
size_t GetCurrentAllocatedBytes();
@@ -68,12 +72,12 @@ class V8_EXPORT_PRIVATE ZoneStats final {
InitialValues initial_values_;
size_t total_allocated_bytes_at_start_;
size_t max_allocated_bytes_;
-
- DISALLOW_COPY_AND_ASSIGN(StatsScope);
};
explicit ZoneStats(AccountingAllocator* allocator);
~ZoneStats();
+ ZoneStats(const ZoneStats&) = delete;
+ ZoneStats& operator=(const ZoneStats&) = delete;
size_t GetMaxAllocatedBytes() const;
size_t GetTotalAllocatedBytes() const;
@@ -92,8 +96,6 @@ class V8_EXPORT_PRIVATE ZoneStats final {
size_t max_allocated_bytes_;
size_t total_deleted_bytes_;
AccountingAllocator* allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(ZoneStats);
};
} // namespace compiler
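These zone-stats hunks swap the DISALLOW_COPY_AND_ASSIGN macro for explicitly deleted copy members, part of a V8-wide cleanup visible throughout this update. A self-contained illustration of the idiom:

    class NonCopyable {
     public:
      NonCopyable() = default;
      // Equivalent to DISALLOW_COPY_AND_ASSIGN(NonCopyable) in a private
      // section, but the deleted operations stay visible in the interface.
      NonCopyable(const NonCopyable&) = delete;
      NonCopyable& operator=(const NonCopyable&) = delete;
    };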
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index dc17a69697..da7f0e4dd1 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -540,8 +540,8 @@ class StreamingCompileTask final : public v8::Task {
StreamingCompileTask(Isolate* isolate,
v8::ScriptCompiler::StreamedSource* streamed_source)
: isolate_(isolate),
- script_streaming_task_(v8::ScriptCompiler::StartStreamingScript(
- isolate, streamed_source)) {
+ script_streaming_task_(
+ v8::ScriptCompiler::StartStreaming(isolate, streamed_source)) {
Shell::NotifyStartStreamingTask(isolate_);
}
@@ -835,7 +835,8 @@ MaybeLocal<Module> ResolveModuleCallback(Local<Context> context,
} // anonymous namespace
-MaybeLocal<Module> Shell::FetchModuleTree(Local<Context> context,
+MaybeLocal<Module> Shell::FetchModuleTree(Local<Module> referrer,
+ Local<Context> context,
const std::string& file_name) {
DCHECK(IsAbsolutePath(file_name));
Isolate* isolate = context->GetIsolate();
@@ -848,8 +849,16 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Context> context,
source_text = ReadFile(isolate, fallback_file_name.c_str());
}
}
+
+ ModuleEmbedderData* d = GetModuleDataFromContext(context);
if (source_text.IsEmpty()) {
- std::string msg = "d8: Error reading module from " + file_name;
+ std::string msg = "d8: Error reading module from " + file_name;
+ if (!referrer.IsEmpty()) {
+ auto specifier_it =
+ d->module_to_specifier_map.find(Global<Module>(isolate, referrer));
+ CHECK(specifier_it != d->module_to_specifier_map.end());
+ msg += "\n imported by " + specifier_it->second;
+ }
Throw(isolate, msg.c_str());
return MaybeLocal<Module>();
}
@@ -863,7 +872,6 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Context> context,
return MaybeLocal<Module>();
}
- ModuleEmbedderData* d = GetModuleDataFromContext(context);
CHECK(d->specifier_to_module_map
.insert(std::make_pair(file_name, Global<Module>(isolate, module)))
.second);
@@ -878,7 +886,7 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Context> context,
std::string absolute_path =
NormalizePath(ToSTLString(isolate, name), dir_name);
if (d->specifier_to_module_map.count(absolute_path)) continue;
- if (FetchModuleTree(context, absolute_path).IsEmpty()) {
+ if (FetchModuleTree(module, context, absolute_path).IsEmpty()) {
return MaybeLocal<Module>();
}
}
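FetchModuleTree now threads the referrer module through the recursion so that a failed read can name its importer via module_to_specifier_map (note the ModuleEmbedderData lookup moving up accordingly). With hypothetical paths, the error grows from one line to two:

    d8: Error reading module from /src/missing.mjs
        imported by /src/main.mjs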
@@ -1023,7 +1031,8 @@ void Shell::DoHostImportModuleDynamically(void* import_data) {
auto module_it = d->specifier_to_module_map.find(absolute_path);
if (module_it != d->specifier_to_module_map.end()) {
root_module = module_it->second.Get(isolate);
- } else if (!FetchModuleTree(realm, absolute_path).ToLocal(&root_module)) {
+ } else if (!FetchModuleTree(Local<Module>(), realm, absolute_path)
+ .ToLocal(&root_module)) {
CHECK(try_catch.HasCaught());
resolver->Reject(realm, try_catch.Exception()).ToChecked();
return;
@@ -1090,7 +1099,8 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
Local<Module> root_module;
- if (!FetchModuleTree(realm, absolute_path).ToLocal(&root_module)) {
+ if (!FetchModuleTree(Local<Module>(), realm, absolute_path)
+ .ToLocal(&root_module)) {
CHECK(try_catch.HasCaught());
ReportException(isolate, &try_catch);
return false;
@@ -1210,6 +1220,7 @@ int PerIsolateData::HandleUnhandledPromiseRejections() {
Shell::ReportException(isolate_, message, value);
}
unhandled_promises_.clear();
+ ignore_unhandled_promises_ = false;
return static_cast<int>(i);
}
@@ -1313,10 +1324,14 @@ void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
Throw(args.GetIsolate(), "Invalid argument");
return;
}
- int index = data->RealmFind(args[0]
- ->ToObject(isolate->GetCurrentContext())
- .ToLocalChecked()
- ->CreationContext());
+ Local<Object> object =
+ args[0]->ToObject(isolate->GetCurrentContext()).ToLocalChecked();
+ i::Handle<i::JSReceiver> i_object = Utils::OpenHandle(*object);
+ if (i_object->IsJSGlobalProxy() &&
+ i::Handle<i::JSGlobalProxy>::cast(i_object)->IsDetached()) {
+ return;
+ }
+ int index = data->RealmFind(object->CreationContext());
if (index == -1) return;
args.GetReturnValue().Set(index);
}
@@ -2140,7 +2155,10 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
global_template->Set(isolate, "performance",
Shell::CreatePerformanceTemplate(isolate));
global_template->Set(isolate, "Worker", Shell::CreateWorkerTemplate(isolate));
- global_template->Set(isolate, "os", Shell::CreateOSTemplate(isolate));
+ // Prevent fuzzers from creating side effects.
+ if (!i::FLAG_fuzzing) {
+ global_template->Set(isolate, "os", Shell::CreateOSTemplate(isolate));
+ }
global_template->Set(isolate, "d8", Shell::CreateD8Template(isolate));
#ifdef V8_FUZZILLI
@@ -3571,7 +3589,6 @@ int Shell::RunMain(Isolate* isolate, bool last_run) {
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
if (!options.isolate_sources[0].Execute(isolate)) success = false;
if (!CompleteMessageLoop(isolate)) success = false;
- if (!HandleUnhandledPromiseRejections(isolate)) success = false;
}
if (!use_existing_context) {
DisposeModuleEmbedderData(context);
@@ -3603,6 +3620,9 @@ int Shell::RunMain(Isolate* isolate, bool last_run) {
printf("%i pending unhandled Promise rejection(s) detected.\n",
Shell::unhandled_promise_rejections_.load());
success = false;
+ // RunMain may be executed multiple times, e.g. in REPRL mode, so we have to
+ // reset this counter.
+ Shell::unhandled_promise_rejections_.store(0);
}
// In order to finish successfully, success must be != expected_to_throw.
return success == Shell::options.expected_to_throw ? 1 : 0;
diff --git a/deps/v8/src/d8/d8.h b/deps/v8/src/d8/d8.h
index 11ec47d815..e0fb8d5892 100644
--- a/deps/v8/src/d8/d8.h
+++ b/deps/v8/src/d8/d8.h
@@ -608,7 +608,8 @@ class Shell : public i::AllStatic {
v8::MaybeLocal<Value> global_object);
static void DisposeRealm(const v8::FunctionCallbackInfo<v8::Value>& args,
int index);
- static MaybeLocal<Module> FetchModuleTree(v8::Local<v8::Context> context,
+ static MaybeLocal<Module> FetchModuleTree(v8::Local<v8::Module> origin_module,
+ v8::Local<v8::Context> context,
const std::string& file_name);
static ScriptCompiler::CachedData* LookupCodeCache(Isolate* isolate,
Local<Value> name);
diff --git a/deps/v8/src/date/DIR_METADATA b/deps/v8/src/date/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/date/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
diff --git a/deps/v8/src/date/OWNERS b/deps/v8/src/date/OWNERS
index 6edeeae0ea..3e6f2b948d 100644
--- a/deps/v8/src/date/OWNERS
+++ b/deps/v8/src/date/OWNERS
@@ -2,5 +2,3 @@ ishell@chromium.org
jshin@chromium.org
ulan@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/debug/DIR_METADATA b/deps/v8/src/debug/DIR_METADATA
new file mode 100644
index 0000000000..3ba1106a5f
--- /dev/null
+++ b/deps/v8/src/debug/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Platform>DevTools>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/debug/OWNERS b/deps/v8/src/debug/OWNERS
index 6c13c38e48..5b93352039 100644
--- a/deps/v8/src/debug/OWNERS
+++ b/deps/v8/src/debug/OWNERS
@@ -4,5 +4,3 @@ mvstanton@chromium.org
szuend@chromium.org
verwaest@chromium.org
yangguo@chromium.org
-
-# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index c7d0a890c4..7fb0b3723f 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -17,10 +17,26 @@
#include "src/interpreter/bytecodes.h"
#include "src/objects/contexts.h"
#include "src/snapshot/snapshot.h"
+#include "src/wasm/wasm-debug.h"
+#include "src/wasm/wasm-js.h"
namespace v8 {
namespace internal {
+namespace {
+static MaybeHandle<SharedFunctionInfo> GetFunctionInfo(Isolate* isolate,
+ Handle<String> source,
+ REPLMode repl_mode) {
+ Compiler::ScriptDetails script_details(isolate->factory()->empty_string());
+ script_details.repl_mode = repl_mode;
+ ScriptOriginOptions origin_options(false, true);
+ return Compiler::GetSharedFunctionInfoForScript(
+ isolate, source, script_details, origin_options, nullptr, nullptr,
+ ScriptCompiler::kNoCompileOptions, ScriptCompiler::kNoCacheNoReason,
+ NOT_NATIVES_CODE);
+}
+} // namespace
+
MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
Handle<String> source,
debug::EvaluateGlobalMode mode,
@@ -32,19 +48,12 @@ MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
mode ==
debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect);
- Handle<Context> context = isolate->native_context();
- Compiler::ScriptDetails script_details(isolate->factory()->empty_string());
- script_details.repl_mode = repl_mode;
- ScriptOriginOptions origin_options(false, true);
- MaybeHandle<SharedFunctionInfo> maybe_function_info =
- Compiler::GetSharedFunctionInfoForScript(
- isolate, source, script_details, origin_options, nullptr, nullptr,
- ScriptCompiler::kNoCompileOptions, ScriptCompiler::kNoCacheNoReason,
- NOT_NATIVES_CODE);
-
Handle<SharedFunctionInfo> shared_info;
- if (!maybe_function_info.ToHandle(&shared_info)) return MaybeHandle<Object>();
+ if (!GetFunctionInfo(isolate, source, repl_mode).ToHandle(&shared_info)) {
+ return MaybeHandle<Object>();
+ }
+ Handle<Context> context = isolate->native_context();
Handle<JSFunction> fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(shared_info,
context);
@@ -91,6 +100,39 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
return maybe_result;
}
+V8_EXPORT MaybeHandle<Object> DebugEvaluate::WebAssembly(
+ Handle<WasmInstanceObject> instance, StackFrameId frame_id,
+ Handle<String> source, bool throw_on_side_effect) {
+ Isolate* isolate = instance->GetIsolate();
+
+ StackTraceFrameIterator it(isolate, frame_id);
+ if (!it.is_wasm()) return isolate->factory()->undefined_value();
+ WasmFrame* frame = WasmFrame::cast(it.frame());
+
+ Handle<JSProxy> context_extension = WasmJs::GetJSDebugProxy(frame);
+
+ DisableBreak disable_break_scope(isolate->debug(), /*disable=*/true);
+
+ Handle<SharedFunctionInfo> shared_info;
+ if (!GetFunctionInfo(isolate, source, REPLMode::kNo).ToHandle(&shared_info)) {
+ return {};
+ }
+
+ Handle<ScopeInfo> scope_info =
+ ScopeInfo::CreateForWithScope(isolate, Handle<ScopeInfo>::null());
+ Handle<Context> context = isolate->factory()->NewWithContext(
+ isolate->native_context(), scope_info, context_extension);
+
+ Handle<Object> result;
+ if (!DebugEvaluate::Evaluate(isolate, shared_info, context, context_extension,
+ source, throw_on_side_effect)
+ .ToHandle(&result)) {
+ return {};
+ }
+
+ return result;
+}
+
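DebugEvaluate::WebAssembly compiles {source} as an ordinary script but evaluates it inside a with-context whose extension object is the frame's JS debug proxy, so free identifiers in the expression resolve through the proxy first, roughly as if wrapped in with (debugProxy) { ... }. DisableBreak prevents re-entering the debugger during evaluation. Condensed shape, using only identifiers from the hunk above:

    Handle<JSProxy> extension = WasmJs::GetJSDebugProxy(frame);
    Handle<Context> context = isolate->factory()->NewWithContext(
        isolate->native_context(),
        ScopeInfo::CreateForWithScope(isolate, Handle<ScopeInfo>::null()),
        extension);
    return DebugEvaluate::Evaluate(isolate, shared_info, context, extension,
                                   source, throw_on_side_effect);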
MaybeHandle<Object> DebugEvaluate::WithTopmostArguments(Isolate* isolate,
Handle<String> source) {
// Handle the processing of break.
@@ -321,6 +363,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ObjectEntries) \
V(ObjectEntriesSkipFastPath) \
V(ObjectHasOwnProperty) \
+ V(ObjectKeys) \
V(ObjectValues) \
V(ObjectValuesSkipFastPath) \
V(ObjectGetOwnPropertyNames) \
@@ -511,6 +554,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kObjectIsExtensible:
case Builtins::kObjectIsFrozen:
case Builtins::kObjectIsSealed:
+ case Builtins::kObjectKeys:
case Builtins::kObjectPrototypeValueOf:
case Builtins::kObjectValues:
case Builtins::kObjectPrototypeHasOwnProperty:
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index aa1abba115..2f4cc2da4e 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -37,6 +37,10 @@ class DebugEvaluate : public AllStatic {
Handle<String> source,
bool throw_on_side_effect);
+ static V8_EXPORT MaybeHandle<Object> WebAssembly(
+ Handle<WasmInstanceObject> instance, StackFrameId frame_id,
+ Handle<String> source, bool throw_on_side_effect);
+
// This is used for break-at-entry for builtins and API functions.
// Evaluate a piece of JavaScript in the native context, but with the
// materialized arguments object and receiver of the current call.
@@ -102,7 +106,6 @@ class DebugEvaluate : public AllStatic {
bool throw_on_side_effect);
};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index d0ee6bda42..4c8da809bb 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -11,7 +11,7 @@
namespace v8 {
namespace internal {
-FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
+FrameInspector::FrameInspector(CommonFrame* frame, int inlined_frame_index,
Isolate* isolate)
: frame_(frame),
inlined_frame_index_(inlined_frame_index),
@@ -54,14 +54,10 @@ JavaScriptFrame* FrameInspector::javascript_frame() {
: JavaScriptFrame::cast(frame_);
}
-int FrameInspector::GetParametersCount() {
- if (is_optimized_) return deoptimized_frame_->parameters_count();
- return frame_->ComputeParametersCount();
-}
-
Handle<Object> FrameInspector::GetParameter(int index) {
if (is_optimized_) return deoptimized_frame_->GetParameter(index);
- return handle(frame_->GetParameter(index), isolate_);
+ DCHECK(IsJavaScript());
+ return handle(javascript_frame()->GetParameter(index), isolate_);
}
Handle<Object> FrameInspector::GetExpression(int index) {
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 541ee1dc1d..c554ca17b5 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -16,17 +16,15 @@ namespace v8 {
namespace internal {
class JavaScriptFrame;
-class StandardFrame;
+class CommonFrame;
class WasmFrame;
class FrameInspector {
public:
- FrameInspector(StandardFrame* frame, int inlined_frame_index,
- Isolate* isolate);
+ FrameInspector(CommonFrame* frame, int inlined_frame_index, Isolate* isolate);
~FrameInspector();
- int GetParametersCount();
Handle<JSFunction> GetFunction() const { return function_; }
Handle<Script> GetScript() { return script_; }
Handle<Object> GetParameter(int index);
@@ -49,7 +47,7 @@ class FrameInspector {
bool ParameterIsShadowedByContextLocal(Handle<ScopeInfo> info,
Handle<String> parameter_name);
- StandardFrame* frame_;
+ CommonFrame* frame_;
int inlined_frame_index_;
std::unique_ptr<DeoptimizedFrameInfo> deoptimized_frame_;
Isolate* isolate_;
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 9234fe35ac..ded8a31639 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -24,6 +24,7 @@ struct CoverageScript;
struct TypeProfileEntry;
struct TypeProfileScript;
class Coverage;
+class DisableBreak;
class PostponeInterruptsScope;
class Script;
class TypeProfile;
@@ -541,6 +542,15 @@ class PostponeInterruptsScope {
std::unique_ptr<i::PostponeInterruptsScope> scope_;
};
+class DisableBreakScope {
+ public:
+ explicit DisableBreakScope(v8::Isolate* isolate);
+ ~DisableBreakScope();
+
+ private:
+ std::unique_ptr<i::DisableBreak> scope_;
+};
+
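DisableBreakScope is a public RAII facade over the internal DisableBreak scope forward-declared above. Hypothetical usage, assuming the v8::debug namespace this header exposes:

    {
      v8::debug::DisableBreakScope no_breaks(isolate);
      // Breakpoints cannot fire here; the previous break state is
      // restored when {no_breaks} is destroyed.
    }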
class WeakMap : public v8::Object {
public:
WeakMap() = delete;
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index 28d595853c..ea0f4d3fc9 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -166,7 +166,7 @@ v8::Local<v8::Function> DebugStackTraceIterator::GetFunction() const {
std::unique_ptr<v8::debug::ScopeIterator>
DebugStackTraceIterator::GetScopeIterator() const {
DCHECK(!Done());
- StandardFrame* frame = iterator_.frame();
+ CommonFrame* frame = iterator_.frame();
if (frame->is_wasm()) {
return std::make_unique<DebugWasmScopeIterator>(isolate_,
WasmFrame::cast(frame));
@@ -184,11 +184,25 @@ v8::MaybeLocal<v8::Value> DebugStackTraceIterator::Evaluate(
v8::Local<v8::String> source, bool throw_on_side_effect) {
DCHECK(!Done());
Handle<Object> value;
+
i::SafeForInterruptsScope safe_for_interrupt_scope(isolate_);
- if (!DebugEvaluate::Local(isolate_, iterator_.frame()->id(),
- inlined_frame_index_, Utils::OpenHandle(*source),
- throw_on_side_effect)
- .ToHandle(&value)) {
+ bool success = false;
+ if (iterator_.is_wasm()) {
+ FrameSummary summary = FrameSummary::Get(iterator_.frame(), 0);
+ const FrameSummary::WasmFrameSummary& wasmSummary = summary.AsWasm();
+ Handle<WasmInstanceObject> instance = wasmSummary.wasm_instance();
+
+ success = DebugEvaluate::WebAssembly(instance, iterator_.frame()->id(),
+ Utils::OpenHandle(*source),
+ throw_on_side_effect)
+ .ToHandle(&value);
+ } else {
+ success = DebugEvaluate::Local(
+ isolate_, iterator_.frame()->id(), inlined_frame_index_,
+ Utils::OpenHandle(*source), throw_on_side_effect)
+ .ToHandle(&value);
+ }
+ if (!success) {
isolate_->OptionalRescheduleException(false);
return v8::MaybeLocal<v8::Value>();
}
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index d86a7ff5b8..a65c1b3bfd 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -1041,7 +1041,7 @@ void Debug::PrepareStep(StepAction step_action) {
thread_local_.last_step_action_ = step_action;
StackTraceFrameIterator frames_it(isolate_, frame_id);
- StandardFrame* frame = frames_it.frame();
+ CommonFrame* frame = frames_it.frame();
BreakLocation location = BreakLocation::Invalid();
Handle<SharedFunctionInfo> shared;
@@ -1327,7 +1327,8 @@ void Debug::InstallDebugBreakTrampoline() {
}
} else if (obj.IsJSObject()) {
JSObject object = JSObject::cast(obj);
- DescriptorArray descriptors = object.map().instance_descriptors();
+ DescriptorArray descriptors =
+ object.map().instance_descriptors(kRelaxedLoad);
for (InternalIndex i : object.map().IterateOwnDescriptors()) {
if (descriptors.GetDetails(i).kind() == PropertyKind::kAccessor) {
@@ -1703,7 +1704,8 @@ void Debug::FreeDebugInfoListNode(DebugInfoListNode* prev,
// Pack script back into the
// SFI::script_or_debug_info field.
Handle<DebugInfo> debug_info(node->debug_info());
- debug_info->shared().set_script_or_debug_info(debug_info->script());
+ debug_info->shared().set_script_or_debug_info(debug_info->script(),
+ kReleaseStore);
delete node;
}
@@ -1969,7 +1971,7 @@ bool Debug::ShouldBeSkipped() {
DisableBreak no_recursive_break(this);
StackTraceFrameIterator iterator(isolate_);
- StandardFrame* frame = iterator.frame();
+ CommonFrame* frame = iterator.frame();
FrameSummary summary = FrameSummary::GetTop(frame);
Handle<Object> script_obj = summary.script();
if (!script_obj->IsScript()) return false;
@@ -2135,7 +2137,7 @@ void Debug::PrintBreakLocation() {
HandleScope scope(isolate_);
StackTraceFrameIterator iterator(isolate_);
if (iterator.done()) return;
- StandardFrame* frame = iterator.frame();
+ CommonFrame* frame = iterator.frame();
FrameSummary summary = FrameSummary::GetTop(frame);
summary.EnsureSourcePositionsAvailable();
int source_position = summary.SourcePosition();
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index e62fbab04a..981692c3bb 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -1025,7 +1025,7 @@ void TranslateSourcePositionTable(Isolate* isolate, Handle<BytecodeArray> code,
Handle<ByteArray> new_source_position_table(
builder.ToSourcePositionTable(isolate));
- code->set_synchronized_source_position_table(*new_source_position_table);
+ code->set_source_position_table(*new_source_position_table, kReleaseStore);
LOG_CODE_EVENT(isolate,
CodeLinePosInfoRecordEvent(code->GetFirstBytecodeAddress(),
*new_source_position_table));
diff --git a/deps/v8/src/debug/ppc/OWNERS b/deps/v8/src/debug/ppc/OWNERS
index 6edd45a6ef..02c2cd757c 100644
--- a/deps/v8/src/debug/ppc/OWNERS
+++ b/deps/v8/src/debug/ppc/OWNERS
@@ -2,3 +2,4 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
+vasili.skurydzin@ibm.com
diff --git a/deps/v8/src/debug/wasm/gdb-server/DIR_METADATA b/deps/v8/src/debug/wasm/gdb-server/DIR_METADATA
new file mode 100644
index 0000000000..3b428d9660
--- /dev/null
+++ b/deps/v8/src/debug/wasm/gdb-server/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>WebAssembly"
+}
\ No newline at end of file
diff --git a/deps/v8/src/debug/wasm/gdb-server/OWNERS b/deps/v8/src/debug/wasm/gdb-server/OWNERS
index 4b8c1919e8..e2c94e8d24 100644
--- a/deps/v8/src/debug/wasm/gdb-server/OWNERS
+++ b/deps/v8/src/debug/wasm/gdb-server/OWNERS
@@ -1,3 +1 @@
paolosev@microsoft.com
-
-# COMPONENT: Blink>JavaScript>WebAssembly
\ No newline at end of file
diff --git a/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
index 5074acbb63..f0b77bc096 100644
--- a/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
+++ b/deps/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
@@ -98,7 +98,7 @@ std::vector<wasm_addr_t> WasmModuleDebug::GetCallStack(
case StackFrame::WASM: {
// A standard frame may include many summarized frames, due to inlining.
std::vector<FrameSummary> frames;
- StandardFrame::cast(frame)->Summarize(&frames);
+ CommonFrame::cast(frame)->Summarize(&frames);
for (size_t i = frames.size(); i-- != 0;) {
int offset = 0;
Handle<Script> script;
@@ -156,7 +156,7 @@ std::vector<FrameSummary> WasmModuleDebug::FindWasmFrame(
case StackFrame::WASM: {
// A standard frame may include many summarized frames, due to inlining.
std::vector<FrameSummary> frames;
- StandardFrame::cast(frame)->Summarize(&frames);
+ CommonFrame::cast(frame)->Summarize(&frames);
const size_t frame_count = frames.size();
DCHECK_GT(frame_count, 0);
diff --git a/deps/v8/src/deoptimizer/DIR_METADATA b/deps/v8/src/deoptimizer/DIR_METADATA
new file mode 100644
index 0000000000..fc018666b1
--- /dev/null
+++ b/deps/v8/src/deoptimizer/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Compiler"
+}
\ No newline at end of file
diff --git a/deps/v8/src/deoptimizer/OWNERS b/deps/v8/src/deoptimizer/OWNERS
index e4ff70c640..eae6bba0ae 100644
--- a/deps/v8/src/deoptimizer/OWNERS
+++ b/deps/v8/src/deoptimizer/OWNERS
@@ -3,5 +3,3 @@ neis@chromium.org
nicohartmann@chromium.org
sigurds@chromium.org
tebbi@chromium.org
-
-# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
index c04e49282e..27684d9e39 100644
--- a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
@@ -2,246 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/codegen/assembler-inl.h"
-#include "src/codegen/macro-assembler.h"
-#include "src/codegen/register-configuration.h"
-#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
-#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
-const int Deoptimizer::kNonLazyDeoptExitSize = 0;
-const int Deoptimizer::kLazyDeoptExitSize = 0;
-
-#define __ masm->
-
-// This code tries to be close to ia32 code so that any changes can be
-// easily ported.
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind deopt_kind) {
- NoRootArrayScope no_root_array(masm);
-
- // Save all general purpose registers before messing with them.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- // Everything but pc, lr and ip which will be saved but not restored.
- RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
-
- const int kDoubleRegsSize = kDoubleSize * DwVfpRegister::kNumRegisters;
-
- // Save all allocatable VFP registers before messing with them.
- {
- // We use a run-time check for VFP32DREGS.
- CpuFeatureScope scope(masm, VFP32DREGS,
- CpuFeatureScope::kDontCheckSupported);
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
-
- // Check CPU flags for number of registers, setting the Z condition flag.
- __ CheckFor32DRegs(scratch);
-
- // Push registers d0-d15, and possibly d16-d31, on the stack.
- // If d16-d31 are not pushed, decrease the stack pointer instead.
- __ vstm(db_w, sp, d16, d31, ne);
- // Okay to not call AllocateStackSpace here because the size is a known
- // small number and we need to use condition codes.
- __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
- __ vstm(db_w, sp, d0, d15);
- }
-
- // Push all 16 registers (needed to populate FrameDescription::registers_).
- // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
- // handle this a bit differently.
- __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
-
- {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
- __ mov(scratch, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate)));
- __ str(fp, MemOperand(scratch));
- }
-
- const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
-
- // Get the bailout id, which is passed as r10 by the caller.
- __ mov(r2, r10);
-
- // Get the address of the location in the code object (r3) (return
- // address for lazy deoptimization) and compute the fp-to-sp delta in
- // register r4.
- __ mov(r3, lr);
- __ add(r4, sp, Operand(kSavedRegistersAreaSize));
- __ sub(r4, fp, r4);
-
- // Allocate a new deoptimizer object.
- // Pass four arguments in r0 to r3 and fifth argument on stack.
- __ PrepareCallCFunction(6);
- __ mov(r0, Operand(0));
- Label context_check;
- __ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(r1, &context_check);
- __ ldr(r0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ bind(&context_check);
- __ mov(r1, Operand(static_cast<int>(deopt_kind)));
- // r2: bailout id already loaded.
- // r3: code address or 0 already loaded.
- __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
- __ mov(r5, Operand(ExternalReference::isolate_address(isolate)));
- __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
- // Call Deoptimizer::New().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
- }
-
- // Preserve "deoptimizer" object in register r0 and get the input
- // frame descriptor pointer to r1 (deoptimizer->input_);
- __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
-
- // Copy core registers into FrameDescription::registers_[kNumRegisters].
- DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ ldr(r2, MemOperand(sp, i * kPointerSize));
- __ str(r2, MemOperand(r1, offset));
- }
-
- // Copy VFP registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
- int double_regs_offset = FrameDescription::double_registers_offset();
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ vldr(d0, sp, src_offset);
- __ vstr(d0, r1, dst_offset);
- }
-
- // Mark the stack as not iterable for the CPU profiler which won't be able to
- // walk the stack without the return address.
- {
- UseScratchRegisterScope temps(masm);
- Register is_iterable = temps.Acquire();
- Register zero = r4;
- __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
- __ mov(zero, Operand(0));
- __ strb(zero, MemOperand(is_iterable));
- }
-
- // Remove the saved registers from the stack.
- __ add(sp, sp, Operand(kSavedRegistersAreaSize));
-
- // Compute a pointer to the unwinding limit in register r2; that is
- // the first stack slot not part of the input frame.
- __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
- __ add(r2, r2, sp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
- Label pop_loop;
- Label pop_loop_header;
- __ b(&pop_loop_header);
- __ bind(&pop_loop);
- __ pop(r4);
- __ str(r4, MemOperand(r3, 0));
- __ add(r3, r3, Operand(sizeof(uint32_t)));
- __ bind(&pop_loop_header);
- __ cmp(r2, sp);
- __ b(ne, &pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(r0); // Preserve deoptimizer object across call.
- // r0: deoptimizer object; r1: scratch.
- __ PrepareCallCFunction(1);
- // Call Deoptimizer::ComputeOutputFrames().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
- }
- __ pop(r0); // Restore deoptimizer object (class Deoptimizer).
-
- __ ldr(sp, MemOperand(r0, Deoptimizer::caller_frame_top_offset()));
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
- // Outer loop state: r4 = current "FrameDescription** output_",
- // r1 = one past the last FrameDescription**.
- __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
- __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_.
- __ add(r1, r4, Operand(r1, LSL, 2));
- __ jmp(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
- __ ldr(r2, MemOperand(r4, 0)); // output_[ix]
- __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
- __ jmp(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ sub(r3, r3, Operand(sizeof(uint32_t)));
- __ add(r6, r2, Operand(r3));
- __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
- __ push(r6);
- __ bind(&inner_loop_header);
- __ cmp(r3, Operand::Zero());
- __ b(ne, &inner_push_loop); // test for gt?
- __ add(r4, r4, Operand(kPointerSize));
- __ bind(&outer_loop_header);
- __ cmp(r4, r1);
- __ b(lt, &outer_push_loop);
-
- __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- DwVfpRegister reg = DwVfpRegister::from_code(code);
- int src_offset = code * kDoubleSize + double_regs_offset;
- __ vldr(reg, r1, src_offset);
- }
-
- // Push pc and continuation from the last output frame.
- __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
- __ push(r6);
- __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
- __ push(r6);
-
- // Push the registers from the last output frame.
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ ldr(r6, MemOperand(r2, offset));
- __ push(r6);
- }
-
- // Restore the registers from the stack.
- __ ldm(ia_w, sp, restored_regs); // all but pc registers.
-
- {
- UseScratchRegisterScope temps(masm);
- Register is_iterable = temps.Acquire();
- Register one = r4;
- __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
- __ mov(one, Operand(1));
- __ strb(one, MemOperand(is_iterable));
- }
-
- // Remove sp, lr and pc.
- __ Drop(3);
- {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
- __ pop(scratch); // get continuation, leave pc on stack
- __ pop(lr);
- __ Jump(scratch);
- }
-
- __ stop();
-}
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 2 * kInstrSize;
+const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
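The roughly 230 deleted lines were ARM's hand-written GenerateDeoptimizationEntries stub. With kSupportsFixedDeoptExitSizes flipped to true, every deopt exit the compiler emits has a fixed size (two instructions, lazy and eager alike), so the deoptimizer can recover which exit fired from the return address alone instead of dispatching through generated per-kind entry stubs. Illustrative arithmetic only; these names are placeholders, not V8 API:

    // With fixed-size exits, the exit index is simple pointer arithmetic:
    int exit_index = static_cast<int>(
        (return_address - deopt_exit_start) / Deoptimizer::kLazyDeoptExitSize);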
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
const int kShift = n % 2 == 0 ? 0 : 32;
@@ -265,7 +33,5 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
-#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
index 21a75d024d..c695347a0b 100644
--- a/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
@@ -3,12 +3,7 @@
// found in the LICENSE file.
#include "src/api/api.h"
-#include "src/codegen/arm64/assembler-arm64-inl.h"
-#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
-#include "src/codegen/register-configuration.h"
-#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
-#include "src/execution/frame-constants.h"
#include "src/execution/pointer-authentication.h"
namespace v8 {
@@ -22,286 +17,6 @@ const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 1 * kInstrSize;
#endif
-#define __ masm->
-
-namespace {
-
-void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
- int dst_offset, const CPURegList& reg_list,
- const Register& temp0, const Register& temp1,
- int src_offset = 0) {
- DCHECK_EQ(reg_list.Count() % 2, 0);
- UseScratchRegisterScope temps(masm);
- CPURegList copy_to_input = reg_list;
- int reg_size = reg_list.RegisterSizeInBytes();
- DCHECK_EQ(temp0.SizeInBytes(), reg_size);
- DCHECK_EQ(temp1.SizeInBytes(), reg_size);
-
- // Compute some temporary addresses to avoid having the macro assembler set
- // up a temp with an offset for accesses out of the range of the addressing
- // mode.
- Register src = temps.AcquireX();
- masm->Add(src, sp, src_offset);
- masm->Add(dst, dst, dst_offset);
-
- // Write reg_list into the frame pointed to by dst.
- for (int i = 0; i < reg_list.Count(); i += 2) {
- masm->Ldp(temp0, temp1, MemOperand(src, i * reg_size));
-
- CPURegister reg0 = copy_to_input.PopLowestIndex();
- CPURegister reg1 = copy_to_input.PopLowestIndex();
- int offset0 = reg0.code() * reg_size;
- int offset1 = reg1.code() * reg_size;
-
- // Pair up adjacent stores, otherwise write them separately.
- if (offset1 == offset0 + reg_size) {
- masm->Stp(temp0, temp1, MemOperand(dst, offset0));
- } else {
- masm->Str(temp0, MemOperand(dst, offset0));
- masm->Str(temp1, MemOperand(dst, offset1));
- }
- }
- masm->Sub(dst, dst, dst_offset);
-}
-
-void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
- const Register& src_base, int src_offset) {
- DCHECK_EQ(reg_list.Count() % 2, 0);
- UseScratchRegisterScope temps(masm);
- CPURegList restore_list = reg_list;
- int reg_size = restore_list.RegisterSizeInBytes();
-
- // Compute a temporary address to avoid having the macro assembler set
- // up a temp with an offset for accesses out of the range of the addressing
- // mode.
- Register src = temps.AcquireX();
- masm->Add(src, src_base, src_offset);
-
- // No need to restore padreg.
- restore_list.Remove(padreg);
-
- // Restore every register in restore_list from src.
- while (!restore_list.IsEmpty()) {
- CPURegister reg0 = restore_list.PopLowestIndex();
- CPURegister reg1 = restore_list.PopLowestIndex();
- int offset0 = reg0.code() * reg_size;
-
- if (reg1 == NoCPUReg) {
- masm->Ldr(reg0, MemOperand(src, offset0));
- break;
- }
-
- int offset1 = reg1.code() * reg_size;
-
- // Pair up adjacent loads, otherwise read them separately.
- if (offset1 == offset0 + reg_size) {
- masm->Ldp(reg0, reg1, MemOperand(src, offset0));
- } else {
- masm->Ldr(reg0, MemOperand(src, offset0));
- masm->Ldr(reg1, MemOperand(src, offset1));
- }
- }
-}
-} // namespace
-
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind deopt_kind) {
- NoRootArrayScope no_root_array(masm);
-
- // TODO(all): This code needs to be revisited. We probably only need to save
- // caller-saved registers here. Callee-saved registers can be stored directly
- // in the input frame.
-
- // Save all allocatable double registers.
- CPURegList saved_double_registers(
- CPURegister::kVRegister, kDRegSizeInBits,
- RegisterConfiguration::Default()->allocatable_double_codes_mask());
- DCHECK_EQ(saved_double_registers.Count() % 2, 0);
- __ PushCPURegList(saved_double_registers);
-
- // We save all the registers except sp, lr, platform register (x18) and the
- // masm scratches.
- CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 28);
- saved_registers.Remove(ip0);
- saved_registers.Remove(ip1);
- saved_registers.Remove(x18);
- saved_registers.Combine(fp);
- saved_registers.Align();
- DCHECK_EQ(saved_registers.Count() % 2, 0);
- __ PushCPURegList(saved_registers);
-
- __ Mov(x3, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate)));
- __ Str(fp, MemOperand(x3));
-
- const int kSavedRegistersAreaSize =
- (saved_registers.Count() * kXRegSize) +
- (saved_double_registers.Count() * kDRegSize);
-
- // Floating point registers are saved on the stack above core registers.
- const int kDoubleRegistersOffset = saved_registers.Count() * kXRegSize;
-
- // We don't use a bailout id for arm64, because we can compute the id from the
- // address. Pass kMaxUInt32 instead to signify this.
- Register bailout_id = x2;
- __ Mov(bailout_id, kMaxUInt32);
-
- Register code_object = x3;
- Register fp_to_sp = x4;
- // Get the address of the location in the code object. This is the return
- // address for lazy deoptimization.
- __ Mov(code_object, lr);
- // Compute the fp-to-sp delta.
- __ Add(fp_to_sp, sp, kSavedRegistersAreaSize);
- __ Sub(fp_to_sp, fp, fp_to_sp);
-
- // Allocate a new deoptimizer object.
- __ Ldr(x1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
-
- // Ensure we can safely load from below fp.
- DCHECK_GT(kSavedRegistersAreaSize, -StandardFrameConstants::kFunctionOffset);
- __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-
- // If x1 is a smi, zero x0.
- __ Tst(x1, kSmiTagMask);
- __ CzeroX(x0, eq);
-
- __ Mov(x1, static_cast<int>(deopt_kind));
- // Following arguments are already loaded:
- // - x2: bailout id
- // - x3: code object address
- // - x4: fp-to-sp delta
- __ Mov(x5, ExternalReference::isolate_address(isolate));
-
- {
- // Call Deoptimizer::New().
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
- }
-
- // Preserve "deoptimizer" object in register x0.
- Register deoptimizer = x0;
-
- // Get the input frame descriptor pointer.
- __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
-
- // Copy core registers into the input frame.
- CopyRegListToFrame(masm, x1, FrameDescription::registers_offset(),
- saved_registers, x2, x3);
-
- // Copy double registers to the input frame.
- CopyRegListToFrame(masm, x1, FrameDescription::double_registers_offset(),
- saved_double_registers, x2, x3, kDoubleRegistersOffset);
-
- // Mark the stack as not iterable for the CPU profiler which won't be able to
- // walk the stack without the return address.
- {
- UseScratchRegisterScope temps(masm);
- Register is_iterable = temps.AcquireX();
- __ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
- __ strb(xzr, MemOperand(is_iterable));
- }
-
- // Remove the saved registers from the stack.
- DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0);
- __ Drop(kSavedRegistersAreaSize / kXRegSize);
-
- // Compute a pointer to the unwinding limit in register x2; that is
- // the first stack slot not part of the input frame.
- Register unwind_limit = x2;
- __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ Add(x3, x1, FrameDescription::frame_content_offset());
- __ SlotAddress(x1, 0);
- __ Lsr(unwind_limit, unwind_limit, kSystemPointerSizeLog2);
- __ Mov(x5, unwind_limit);
- __ CopyDoubleWords(x3, x1, x5);
- __ Drop(unwind_limit);
-
- // Compute the output frame in the deoptimizer.
- __ Push(padreg, x0); // Preserve deoptimizer object across call.
- {
- // Call Deoptimizer::ComputeOutputFrames().
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
- }
- __ Pop(x4, padreg); // Restore deoptimizer object (class Deoptimizer).
-
- {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.AcquireX();
- __ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
- __ Mov(sp, scratch);
- }
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, outer_loop_header;
- __ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
- __ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
- __ Add(x1, x0, Operand(x1, LSL, kSystemPointerSizeLog2));
- __ B(&outer_loop_header);
-
- __ Bind(&outer_push_loop);
- Register current_frame = x2;
- Register frame_size = x3;
- __ Ldr(current_frame, MemOperand(x0, kSystemPointerSize, PostIndex));
- __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
- __ Lsr(frame_size, x3, kSystemPointerSizeLog2);
- __ Claim(frame_size);
-
- __ Add(x7, current_frame, FrameDescription::frame_content_offset());
- __ SlotAddress(x6, 0);
- __ CopyDoubleWords(x6, x7, frame_size);
-
- __ Bind(&outer_loop_header);
- __ Cmp(x0, x1);
- __ B(lt, &outer_push_loop);
-
- __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
- RestoreRegList(masm, saved_double_registers, x1,
- FrameDescription::double_registers_offset());
-
- {
- UseScratchRegisterScope temps(masm);
- Register is_iterable = temps.AcquireX();
- Register one = x4;
- __ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
- __ Mov(one, Operand(1));
- __ strb(one, MemOperand(is_iterable));
- }
-
- // TODO(all): ARM copies a lot (if not all) of the last output frame onto the
- // stack, then pops it all into registers. Here, we try to load it directly
- // into the relevant registers. Is this correct? If so, we should improve the
- // ARM code.
-
- // Restore registers from the last output frame.
- // Note that lr is not in the list of saved_registers and will be restored
- // later. We can use it to hold the address of the last output frame while
- // reloading the other registers.
- DCHECK(!saved_registers.IncludesAliasOf(lr));
- Register last_output_frame = lr;
- __ Mov(last_output_frame, current_frame);
-
- RestoreRegList(masm, saved_registers, last_output_frame,
- FrameDescription::registers_offset());
-
- UseScratchRegisterScope temps(masm);
- temps.Exclude(x17);
- Register continuation = x17;
- __ Ldr(continuation, MemOperand(last_output_frame,
- FrameDescription::continuation_offset()));
- __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
-#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- __ Autibsp();
-#endif
- __ Br(continuation);
-}
-
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
static_cast<uint32_t>(double_registers_[n].get_bits()));
@@ -331,7 +46,5 @@ void FrameDescription::SetPc(intptr_t pc) {
pc_ = pc;
}
-#undef __
-
} // namespace internal
} // namespace v8
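
For readers following the removed arm64 assembly, a minimal C++ rendering of the outer_push_loop it performed when replacing the input frame with the output frames (struct and function names below are invented for illustration; the real work now happens in the DeoptimizationEntry builtin):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct FrameDescription {         // illustrative stand-in for the V8 class
      size_t frame_size;              // FrameDescription::frame_size_offset()
      const uint64_t* frame_content;  // FrameDescription::frame_content_offset()
    };

    // Walk output_[0..output_count) and copy each frame's contents onto the
    // stack, which is what the Claim/CopyDoubleWords pair did per iteration.
    void ReplaceInputFrame(FrameDescription* const* output, int output_count,
                           uint64_t*& sp) {
      for (int i = 0; i < output_count; ++i) {
        const FrameDescription* frame = output[i];
        size_t slots = frame->frame_size / sizeof(uint64_t);  // Lsr by kSystemPointerSizeLog2
        sp -= slots;                                          // __ Claim(frame_size)
        std::memcpy(sp, frame->frame_content, frame->frame_size);  // __ CopyDoubleWords
      }
    }
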
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index c2b4d402ee..63b4431128 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -23,11 +23,12 @@
#include "src/interpreter/interpreter.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
+#include "src/objects/arguments.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/smi.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/tracing/trace-event.h"
-#include "torque-generated/exported-class-definitions.h"
// Has to be the last include (doesn't have include guards)
#include "src/objects/object-macros.h"
@@ -103,7 +104,6 @@ class FrameWriter {
void PushStackJSArguments(TranslatedFrame::iterator& iterator,
int parameters_count) {
-#ifdef V8_REVERSE_JSARGS
std::vector<TranslatedFrame::iterator> parameters;
parameters.reserve(parameters_count);
for (int i = 0; i < parameters_count; ++i, ++iterator) {
@@ -112,11 +112,6 @@ class FrameWriter {
for (auto& parameter : base::Reversed(parameters)) {
PushTranslatedValue(parameter, "stack parameter");
}
-#else
- for (int i = 0; i < parameters_count; ++i, ++iterator) {
- PushTranslatedValue(iterator, "stack parameter");
- }
-#endif
}
unsigned top_offset() const { return top_offset_; }
@@ -179,25 +174,6 @@ class FrameWriter {
unsigned top_offset_;
};
-DeoptimizerData::DeoptimizerData(Heap* heap) : heap_(heap), current_(nullptr) {
- Code* start = &deopt_entry_code_[0];
- Code* end = &deopt_entry_code_[DeoptimizerData::kLastDeoptimizeKind + 1];
- strong_roots_entry_ =
- heap_->RegisterStrongRoots(FullObjectSlot(start), FullObjectSlot(end));
-}
-
-DeoptimizerData::~DeoptimizerData() {
- heap_->UnregisterStrongRoots(strong_roots_entry_);
-}
-
-Code DeoptimizerData::deopt_entry_code(DeoptimizeKind kind) {
- return deopt_entry_code_[static_cast<int>(kind)];
-}
-
-void DeoptimizerData::set_deopt_entry_code(DeoptimizeKind kind, Code code) {
- deopt_entry_code_[static_cast<int>(kind)] = code;
-}
-
Code Deoptimizer::FindDeoptimizingCode(Address addr) {
if (function_.IsHeapObject()) {
// Search all deoptimizing code in the native context of the function.
@@ -214,7 +190,7 @@ Code Deoptimizer::FindDeoptimizingCode(Address addr) {
return Code();
}
-// We rely on this function not causing a GC. It is called from generated code
+// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
Deoptimizer* Deoptimizer::New(Address raw_function, DeoptimizeKind kind,
unsigned bailout_id, Address from,
@@ -222,16 +198,13 @@ Deoptimizer* Deoptimizer::New(Address raw_function, DeoptimizeKind kind,
JSFunction function = JSFunction::cast(Object(raw_function));
Deoptimizer* deoptimizer = new Deoptimizer(isolate, function, kind,
bailout_id, from, fp_to_sp_delta);
- CHECK_NULL(isolate->deoptimizer_data()->current_);
- isolate->deoptimizer_data()->current_ = deoptimizer;
+ isolate->set_current_deoptimizer(deoptimizer);
return deoptimizer;
}
Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
- Deoptimizer* result = isolate->deoptimizer_data()->current_;
- CHECK_NOT_NULL(result);
+ Deoptimizer* result = isolate->GetAndClearCurrentDeoptimizer();
result->DeleteFrameDescriptions();
- isolate->deoptimizer_data()->current_ = nullptr;
return result;
}
@@ -294,6 +267,7 @@ class ActivationsFinder : public ThreadVisitor {
SafepointEntry safepoint = code.GetSafepointEntry(it.frame()->pc());
int trampoline_pc = safepoint.trampoline_pc();
DCHECK_IMPLIES(code == topmost_, safe_to_deopt_);
+ CHECK_GE(trampoline_pc, 0);
// Replace the current pc on the stack with the trampoline.
// TODO(v8:10026): avoid replacing a signed pointer.
Address* pc_addr = it.frame()->pc_address();
@@ -498,8 +472,6 @@ const char* Deoptimizer::MessageFor(DeoptimizeKind kind, bool reuse_code) {
case DeoptimizeKind::kBailout:
return "bailout";
}
- FATAL("Unsupported deopt kind");
- return nullptr;
}
namespace {
@@ -542,6 +514,9 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
deoptimizing_throw_ = true;
}
+ DCHECK(bailout_id_ == kFixedExitSizeMarker ||
+ bailout_id_ < kMaxNumberOfEntries);
+
DCHECK_NE(from, kNullAddress);
compiled_code_ = FindOptimizedCode();
DCHECK(!compiled_code_.is_null());
@@ -570,7 +545,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
input_ = new (size) FrameDescription(size, parameter_count);
if (kSupportsFixedDeoptExitSizes) {
- DCHECK_EQ(bailout_id_, kMaxUInt32);
+ DCHECK_EQ(bailout_id_, kFixedExitSizeMarker);
// Calculate bailout id from return address.
DCHECK_GT(kNonLazyDeoptExitSize, 0);
DCHECK_GT(kLazyDeoptExitSize, 0);
@@ -582,7 +557,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
Address lazy_deopt_start =
deopt_start + non_lazy_deopt_count * kNonLazyDeoptExitSize;
// The deoptimization exits are sorted so that lazy deopt exits appear last.
- static_assert(DeoptimizeKind::kLazy == DeoptimizeKind::kLastDeoptimizeKind,
+ static_assert(DeoptimizeKind::kLazy == kLastDeoptimizeKind,
"lazy deopts are expected to be emitted last");
// from_ is the value of the link register after the call to the
// deoptimizer, so for the last lazy deopt, from_ points to the first
@@ -641,42 +616,44 @@ void Deoptimizer::DeleteFrameDescriptions() {
#endif // DEBUG
}
-Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
- DeoptimizeKind kind) {
- DeoptimizerData* data = isolate->deoptimizer_data();
- CHECK_LE(kind, DeoptimizerData::kLastDeoptimizeKind);
- CHECK(!data->deopt_entry_code(kind).is_null());
- return data->deopt_entry_code(kind).raw_instruction_start();
-}
-
-bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
- DeoptimizeKind type) {
- DeoptimizerData* data = isolate->deoptimizer_data();
- CHECK_LE(type, DeoptimizerData::kLastDeoptimizeKind);
- Code code = data->deopt_entry_code(type);
- if (code.is_null()) return false;
- return addr == code.raw_instruction_start();
+Builtins::Name Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
+ DeoptimizeKind kind) {
+ switch (kind) {
+ case DeoptimizeKind::kEager:
+ return Builtins::kDeoptimizationEntry_Eager;
+ case DeoptimizeKind::kSoft:
+ return Builtins::kDeoptimizationEntry_Soft;
+ case DeoptimizeKind::kBailout:
+ return Builtins::kDeoptimizationEntry_Bailout;
+ case DeoptimizeKind::kLazy:
+ return Builtins::kDeoptimizationEntry_Lazy;
+ }
}
bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
- DeoptimizeKind* type) {
- if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kEager)) {
- *type = DeoptimizeKind::kEager;
- return true;
- }
- if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kSoft)) {
- *type = DeoptimizeKind::kSoft;
- return true;
- }
- if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kLazy)) {
- *type = DeoptimizeKind::kLazy;
- return true;
- }
- if (IsDeoptimizationEntry(isolate, addr, DeoptimizeKind::kBailout)) {
- *type = DeoptimizeKind::kBailout;
- return true;
+ DeoptimizeKind* type_out) {
+ Code maybe_code = InstructionStream::TryLookupCode(isolate, addr);
+ if (maybe_code.is_null()) return false;
+
+ Code code = maybe_code;
+ switch (code.builtin_index()) {
+ case Builtins::kDeoptimizationEntry_Eager:
+ *type_out = DeoptimizeKind::kEager;
+ return true;
+ case Builtins::kDeoptimizationEntry_Soft:
+ *type_out = DeoptimizeKind::kSoft;
+ return true;
+ case Builtins::kDeoptimizationEntry_Bailout:
+ *type_out = DeoptimizeKind::kBailout;
+ return true;
+ case Builtins::kDeoptimizationEntry_Lazy:
+ *type_out = DeoptimizeKind::kLazy;
+ return true;
+ default:
+ return false;
}
- return false;
+
+ UNREACHABLE();
}
int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
@@ -763,10 +740,10 @@ void Deoptimizer::TraceMarkForDeoptimization(Code code, const char* reason) {
DeoptimizationData deopt_data = DeoptimizationData::cast(maybe_data);
CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(scope.file(), "[marking dependent code " V8PRIxPTR_FMT " ",
+ PrintF(scope.file(), "[marking dependent code " V8PRIxPTR_FMT " (",
code.ptr());
deopt_data.SharedFunctionInfo().ShortPrint(scope.file());
- PrintF(" (opt id %d) for deoptimization, reason: %s]\n",
+ PrintF(") (opt id %d) for deoptimization, reason: %s]\n",
deopt_data.OptimizationId().value(), reason);
{
AllowHeapAllocation yes_gc;
@@ -824,8 +801,8 @@ void Deoptimizer::TraceDeoptMarked(Isolate* isolate) {
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
// When we call this function, the return address of the previous frame has
- // been removed from the stack by GenerateDeoptimizationEntries() so the stack
- // is not iterable by the SafeStackFrameIterator.
+ // been removed from the stack by the DeoptimizationEntry builtin, so the
+ // stack is not iterable by the SafeStackFrameIterator.
#if V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK
DCHECK_EQ(0, isolate()->isolate_data()->stack_is_iterable());
#endif
@@ -887,7 +864,7 @@ void Deoptimizer::DoComputeOutputFrames() {
// Do the input frame to output frame(s) translation.
size_t count = translated_state_.frames().size();
// If we are supposed to go to the catch handler, find the catching frame
- // for the catch and make sure we only deoptimize upto that frame.
+ // for the catch and make sure we only deoptimize up to that frame.
if (deoptimizing_throw_) {
size_t catch_handler_frame_index = count;
for (size_t i = count; i-- > 0;) {
@@ -986,9 +963,22 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
goto_catch_handler ? catch_handler_pc_offset_ : real_bytecode_offset;
const int parameters_count = InternalFormalParameterCountWithReceiver(shared);
+
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // If this is the bottommost frame or the previous frame was the arguments
+ // adaptor fake frame, then we already have extra arguments on the stack
+ // (including any extra padding). Therefore we should not try to add any
+ // padding.
+ bool should_pad_arguments =
+ !is_bottommost && (translated_state_.frames()[frame_index - 1]).kind() !=
+ TranslatedFrame::kArgumentsAdaptor;
+#else
+ bool should_pad_arguments = true;
+#endif
+
const int locals_count = translated_frame->height();
- InterpretedFrameInfo frame_info =
- InterpretedFrameInfo::Precise(parameters_count, locals_count, is_topmost);
+ InterpretedFrameInfo frame_info = InterpretedFrameInfo::Precise(
+ parameters_count, locals_count, is_topmost, should_pad_arguments);
const uint32_t output_frame_size = frame_info.frame_size_in_bytes();
TranslatedFrame::iterator function_iterator = value_iterator++;
@@ -1020,9 +1010,10 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// Compute the incoming parameter translation.
ReadOnlyRoots roots(isolate());
- if (ShouldPadArguments(parameters_count)) {
+ if (should_pad_arguments && ShouldPadArguments(parameters_count)) {
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
+
// Note: parameters_count includes the receiver.
if (verbose_tracing_enabled() && is_bottommost &&
actual_argument_count_ > parameters_count - 1) {
@@ -1032,7 +1023,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
}
frame_writer.PushStackJSArguments(value_iterator, parameters_count);
- DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(),
+ DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(should_pad_arguments),
frame_writer.top_offset());
if (verbose_tracing_enabled()) {
PrintF(trace_scope()->file(), " -------------------------\n");
@@ -1218,7 +1209,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
: builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
if (is_topmost) {
// Only the pc of the topmost frame needs to be signed since it is
- // authenticated at the end of GenerateDeoptimizationEntries.
+ // authenticated at the end of the DeoptimizationEntry builtin.
const intptr_t top_most_pc = PointerAuthentication::SignAndCheckPC(
static_cast<intptr_t>(dispatch_builtin.InstructionStart()),
frame_writer.frame()->GetTop());
@@ -1274,9 +1265,13 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
translated_frame->raw_shared_info().internal_formal_parameter_count();
const int extra_argument_count =
argument_count_without_receiver - formal_parameter_count;
-
+ // The number of pushed arguments is the maximum of the actual argument count
+ // and the formal parameter count, plus one for the receiver.
+ const bool should_pad_args = ShouldPadArguments(
+ std::max(argument_count_without_receiver, formal_parameter_count) + 1);
const int output_frame_size =
- std::max(0, extra_argument_count * kSystemPointerSize);
+ std::max(0, extra_argument_count * kSystemPointerSize) +
+ (should_pad_args ? kSystemPointerSize : 0);
if (verbose_tracing_enabled()) {
PrintF(trace_scope_->file(),
" translating arguments adaptor => variable_size=%d\n",
@@ -1296,14 +1291,14 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
output_frame->SetFp(output_[frame_index - 1]->GetFp());
output_[frame_index] = output_frame;
- if (extra_argument_count > 0) {
- FrameWriter frame_writer(this, output_frame, verbose_trace_scope());
+ FrameWriter frame_writer(this, output_frame, verbose_trace_scope());
- ReadOnlyRoots roots(isolate());
- if (ShouldPadArguments(extra_argument_count)) {
- frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
- }
+ ReadOnlyRoots roots(isolate());
+ if (should_pad_args) {
+ frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
+ }
+ if (extra_argument_count > 0) {
// The receiver and arguments with index below the formal parameter
// count are in the fake adaptor frame, because they are used to create the
// arguments object. We should however not push them, since the interpreter
@@ -1545,7 +1540,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
intptr_t pc_value = static_cast<intptr_t>(start + pc_offset);
if (is_topmost) {
// Only the pc of the topmost frame needs to be signed since it is
- // authenticated at the end of GenerateDeoptimizationEntries.
+ // authenticated at the end of the DeoptimizationEntry builtin.
output_frame->SetPc(PointerAuthentication::SignAndCheckPC(
pc_value, frame_writer.frame()->GetTop()));
} else {
@@ -1765,7 +1760,6 @@ void Deoptimizer::DoComputeBuiltinContinuation(
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
-#ifdef V8_REVERSE_JSARGS
if (mode == BuiltinContinuationMode::STUB) {
DCHECK_EQ(Builtins::CallInterfaceDescriptorFor(builtin_name)
.GetStackArgumentOrder(),
@@ -1805,34 +1799,6 @@ void Deoptimizer::DoComputeBuiltinContinuation(
frame_writer.PushStackJSArguments(
value_iterator, frame_info.translated_stack_parameter_count());
}
-#else
- for (uint32_t i = 0; i < frame_info.translated_stack_parameter_count();
- ++i, ++value_iterator) {
- frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
- }
-
- switch (mode) {
- case BuiltinContinuationMode::STUB:
- break;
- case BuiltinContinuationMode::JAVASCRIPT:
- break;
- case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH: {
- frame_writer.PushRawObject(roots.the_hole_value(),
- "placeholder for exception on lazy deopt\n");
- } break;
- case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION: {
- intptr_t accumulator_value =
- input_->GetRegister(kInterpreterAccumulatorRegister.code());
- frame_writer.PushRawObject(Object(accumulator_value),
- "exception (from accumulator)\n");
- } break;
- }
-
- if (frame_info.frame_has_result_stack_slot()) {
- frame_writer.PushRawObject(roots.the_hole_value(),
- "placeholder for return result on lazy deopt\n");
- }
-#endif
DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(),
frame_writer.top_offset());
@@ -1975,7 +1941,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
mode, frame_info.frame_has_result_stack_slot()));
if (is_topmost) {
// Only the pc of the topmost frame needs to be signed since it is
- // authenticated at the end of GenerateDeoptimizationEntries.
+ // authenticated at the end of the DeoptimizationEntry builtin.
const intptr_t top_most_pc = PointerAuthentication::SignAndCheckPC(
static_cast<intptr_t>(continue_to_builtin.InstructionStart()),
frame_writer.frame()->GetTop());
@@ -2068,43 +2034,12 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
// static
unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo shared) {
int parameter_slots = InternalFormalParameterCountWithReceiver(shared);
+#ifndef V8_NO_ARGUMENTS_ADAPTOR
if (ShouldPadArguments(parameter_slots)) parameter_slots++;
+#endif
return parameter_slots * kSystemPointerSize;
}
-void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
- DeoptimizeKind kind) {
- CHECK(kind == DeoptimizeKind::kEager || kind == DeoptimizeKind::kSoft ||
- kind == DeoptimizeKind::kLazy || kind == DeoptimizeKind::kBailout);
- DeoptimizerData* data = isolate->deoptimizer_data();
- if (!data->deopt_entry_code(kind).is_null()) return;
-
- MacroAssembler masm(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(16 * KB));
- masm.set_emit_debug_code(false);
- GenerateDeoptimizationEntries(&masm, masm.isolate(), kind);
- CodeDesc desc;
- masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
-
- // Allocate the code as immovable since the entry addresses will be used
- // directly and there is no support for relocating them.
- Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::STUB)
- .set_immovable()
- .Build();
- CHECK(isolate->heap()->IsImmovable(*code));
-
- CHECK(data->deopt_entry_code(kind).is_null());
- data->set_deopt_entry_code(kind, *code);
-}
-
-void Deoptimizer::EnsureCodeForDeoptimizationEntries(Isolate* isolate) {
- EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kEager);
- EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kLazy);
- EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kSoft);
- EnsureCodeForDeoptimizationEntry(isolate, DeoptimizeKind::kBailout);
-}
-
FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count)
: frame_size_(frame_size),
parameter_count_(parameter_count),
@@ -3182,25 +3117,19 @@ void TranslatedState::CreateArgumentsElementsTranslatedValues(
if (type == CreateArgumentsType::kMappedArguments) {
// If the actual number of arguments is less than the number of formal
// parameters, we have fewer holes to fill to not overshoot the length.
- number_of_holes = Min(formal_parameter_count_, length);
+ number_of_holes = std::min(formal_parameter_count_, length);
}
for (int i = 0; i < number_of_holes; ++i) {
frame.Add(TranslatedValue::NewTagged(this, roots.the_hole_value()));
}
int argc = length - number_of_holes;
-#ifdef V8_REVERSE_JSARGS
int start_index = number_of_holes;
if (type == CreateArgumentsType::kRestParameter) {
start_index = std::max(0, formal_parameter_count_);
}
-#endif
for (int i = 0; i < argc; i++) {
// Skip the receiver.
-#ifdef V8_REVERSE_JSARGS
int offset = i + start_index + 1;
-#else
- int offset = argc - i - 1;
-#endif
#ifdef V8_NO_ARGUMENTS_ADAPTOR
Address arguments_frame = offset > formal_parameter_count_
? stack_frame_pointer_
@@ -3556,12 +3485,12 @@ TranslatedState::TranslatedState(const JavaScriptFrame* frame) {
DCHECK(!data.is_null() && deopt_index != Safepoint::kNoDeoptimizationIndex);
TranslationIterator it(data.TranslationByteArray(),
data.TranslationIndex(deopt_index).value());
-#ifdef V8_NO_ARGUMENT_ADAPTOR
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
int actual_argc = frame->GetActualArgumentCount();
#else
int actual_argc = 0;
#endif
- Init(frame->isolate(), frame->fp(), kNullAddress, &it, data.LiteralArray(),
+ Init(frame->isolate(), frame->fp(), frame->fp(), &it, data.LiteralArray(),
nullptr /* registers */, nullptr /* trace file */,
frame->function().shared().internal_formal_parameter_count(),
actual_argc);
@@ -3999,7 +3928,8 @@ void TranslatedState::EnsurePropertiesAllocatedAndMarked(
properties_slot->set_storage(object_storage);
// Set markers for out-of-object properties.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate());
for (InternalIndex i : map->IterateOwnDescriptors()) {
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
Representation representation = descriptors->GetDetails(i).representation();
@@ -4032,7 +3962,8 @@ void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot,
Handle<ByteArray> object_storage = AllocateStorageFor(slot);
// Now we handle the interesting (JSObject) case.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate());
// Set markers for in-object properties.
for (InternalIndex i : map->IterateOwnDescriptors()) {
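
With the per-kind stubs replaced by builtins, resolving a deopt entry becomes a lookup through the isolate's builtin table rather than a cached immovable Code object. A hedged usage sketch (the helper name is hypothetical; builtins() and InstructionStart() are used elsewhere in this diff):

    #include "src/builtins/builtins.h"
    #include "src/deoptimizer/deoptimizer.h"

    // Inside v8::internal: resolve the lazy deopt entry's pc via the new API.
    Address LazyDeoptEntryPC(Isolate* isolate) {
      Builtins::Name name =
          Deoptimizer::GetDeoptimizationEntry(isolate, DeoptimizeKind::kLazy);
      Code entry = isolate->builtins()->builtin(name);
      return entry.InstructionStart();  // no immovable STUB lookup needed
    }
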
diff --git a/deps/v8/src/deoptimizer/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h
index 152e5e510e..df13009acd 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.h
+++ b/deps/v8/src/deoptimizer/deoptimizer.h
@@ -500,12 +500,13 @@ class Deoptimizer : public Malloced {
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
- static Address GetDeoptimizationEntry(Isolate* isolate, DeoptimizeKind kind);
+ V8_EXPORT_PRIVATE static Builtins::Name GetDeoptimizationEntry(
+ Isolate* isolate, DeoptimizeKind kind);
// Returns true if {addr} is a deoptimization entry and stores its type in
- // {type}. Returns false if {addr} is not a deoptimization entry.
+ // {type_out}. Returns false if {addr} is not a deoptimization entry.
static bool IsDeoptimizationEntry(Isolate* isolate, Address addr,
- DeoptimizeKind* type);
+ DeoptimizeKind* type_out);
// Code generation support.
static int input_offset() { return offsetof(Deoptimizer, input_); }
@@ -520,25 +521,26 @@ class Deoptimizer : public Malloced {
V8_EXPORT_PRIVATE static int GetDeoptimizedCodeCount(Isolate* isolate);
- static const int kNotDeoptimizationEntry = -1;
-
- static void EnsureCodeForDeoptimizationEntry(Isolate* isolate,
- DeoptimizeKind kind);
- static void EnsureCodeForDeoptimizationEntries(Isolate* isolate);
-
Isolate* isolate() const { return isolate_; }
- static const int kMaxNumberOfEntries = 16384;
+ static constexpr int kMaxNumberOfEntries = 16384;
+
+ // This marker is passed to Deoptimizer::New as {bailout_id} on platforms
+ // that have fixed deopt sizes (see also kSupportsFixedDeoptExitSizes). The
+ // actual deoptimization id is then calculated from the return address.
+ static constexpr unsigned kFixedExitSizeMarker = kMaxUInt32;
// Set to true when the architecture supports deoptimization exit sequences
// of a fixed size, that can be sorted so that the deoptimization index is
// deduced from the address of the deoptimization exit.
- static const bool kSupportsFixedDeoptExitSizes;
+ // TODO(jgruber): Remove this, and support for variable deopt exit sizes,
+ // once all architectures use fixed exit sizes.
+ V8_EXPORT_PRIVATE static const bool kSupportsFixedDeoptExitSizes;
// Size of deoptimization exit sequence. This is only meaningful when
// kSupportsFixedDeoptExitSizes is true.
- static const int kNonLazyDeoptExitSize;
- static const int kLazyDeoptExitSize;
+ V8_EXPORT_PRIVATE static const int kNonLazyDeoptExitSize;
+ V8_EXPORT_PRIVATE static const int kLazyDeoptExitSize;
// Tracing.
static void TraceMarkForDeoptimization(Code code, const char* reason);
@@ -555,9 +557,6 @@ class Deoptimizer : public Malloced {
Code FindOptimizedCode();
void DeleteFrameDescriptions();
- static bool IsDeoptimizationEntry(Isolate* isolate, Address addr,
- DeoptimizeKind type);
-
void DoComputeOutputFrames();
void DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
int frame_index, bool goto_catch_handler);
@@ -579,10 +578,6 @@ class Deoptimizer : public Malloced {
static unsigned ComputeIncomingArgumentSize(SharedFunctionInfo shared);
static unsigned ComputeOutgoingArgumentSize(Code code, unsigned bailout_id);
- static void GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind kind);
-
static void MarkAllCodeForContext(NativeContext native_context);
static void DeoptimizeMarkedCodeForContext(NativeContext native_context);
// Searches the list of known deoptimizing code for a Code object
@@ -717,15 +712,23 @@ class FrameDescription {
return *GetFrameSlotPointer(offset);
}
- unsigned GetLastArgumentSlotOffset() {
+ unsigned GetLastArgumentSlotOffset(bool pad_arguments = true) {
int parameter_slots = parameter_count();
- if (ShouldPadArguments(parameter_slots)) parameter_slots++;
+ if (pad_arguments && ShouldPadArguments(parameter_slots)) parameter_slots++;
return GetFrameSize() - parameter_slots * kSystemPointerSize;
}
Address GetFramePointerAddress() {
- int fp_offset =
- GetLastArgumentSlotOffset() - StandardFrameConstants::kCallerSPOffset;
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // We should not pad arguments in the bottom frame, since it
+ // already contains padding if necessary and might contain
+ // extra arguments (actual argument count > parameter count).
+ const bool pad_arguments_bottom_frame = false;
+#else
+ const bool pad_arguments_bottom_frame = true;
+#endif
+ int fp_offset = GetLastArgumentSlotOffset(pad_arguments_bottom_frame) -
+ StandardFrameConstants::kCallerSPOffset;
return reinterpret_cast<Address>(GetFrameSlotPointer(fp_offset));
}
@@ -779,7 +782,7 @@ class FrameDescription {
return offsetof(FrameDescription, register_values_.registers_);
}
- static int double_registers_offset() {
+ static constexpr int double_registers_offset() {
return offsetof(FrameDescription, register_values_.double_registers_);
}
@@ -827,36 +830,6 @@ class FrameDescription {
}
};
-class DeoptimizerData {
- public:
- explicit DeoptimizerData(Heap* heap);
- ~DeoptimizerData();
-
-#ifdef DEBUG
- bool IsDeoptEntryCode(Code code) const {
- for (int i = 0; i < kLastDeoptimizeKind + 1; i++) {
- if (code == deopt_entry_code_[i]) return true;
- }
- return false;
- }
-#endif // DEBUG
-
- private:
- Heap* heap_;
- static const int kLastDeoptimizeKind =
- static_cast<int>(DeoptimizeKind::kLastDeoptimizeKind);
- Code deopt_entry_code_[kLastDeoptimizeKind + 1];
- Code deopt_entry_code(DeoptimizeKind kind);
- void set_deopt_entry_code(DeoptimizeKind kind, Code code);
-
- Deoptimizer* current_;
- StrongRootsEntry* strong_roots_entry_;
-
- friend class Deoptimizer;
-
- DISALLOW_COPY_AND_ASSIGN(DeoptimizerData);
-};
-
class TranslationBuffer {
public:
explicit TranslationBuffer(Zone* zone) : contents_(zone) {}
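
The new pad_arguments parameter on GetLastArgumentSlotOffset changes the slot arithmetic only when the target pads arguments for stack alignment. A worked sketch, assuming 8-byte slots padded on an odd parameter-slot count as on arm64 (ShouldPadArgumentsStandIn is an illustrative stand-in for the real predicate):

    constexpr unsigned kSystemPointerSize = 8;

    constexpr bool ShouldPadArgumentsStandIn(int slots) {
      return (slots % 2) != 0;  // keep the stack 16-byte aligned
    }

    constexpr unsigned LastArgumentSlotOffset(unsigned frame_size,
                                              int parameter_slots,
                                              bool pad_arguments) {
      if (pad_arguments && ShouldPadArgumentsStandIn(parameter_slots)) {
        parameter_slots++;
      }
      return frame_size - parameter_slots * kSystemPointerSize;
    }

    static_assert(LastArgumentSlotOffset(96, 3, true) == 64, "padded: 4 slots");
    static_assert(LastArgumentSlotOffset(96, 3, false) == 72, "unpadded: 3 slots");
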
diff --git a/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
index 19be03c1e3..4fcb22c209 100644
--- a/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
@@ -4,201 +4,14 @@
#if V8_TARGET_ARCH_IA32
-#include "src/codegen/macro-assembler.h"
-#include "src/codegen/register-configuration.h"
-#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
-#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
-const int Deoptimizer::kNonLazyDeoptExitSize = 0;
-const int Deoptimizer::kLazyDeoptExitSize = 0;
-
-#define __ masm->
-
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind deopt_kind) {
- NoRootArrayScope no_root_array(masm);
-
- // Save all general purpose registers before messing with them.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
- __ AllocateStackSpace(kDoubleRegsSize);
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- XMMRegister xmm_reg = XMMRegister::from_code(code);
- int offset = code * kDoubleSize;
- __ movsd(Operand(esp, offset), xmm_reg);
- }
-
- __ pushad();
-
- ExternalReference c_entry_fp_address =
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate);
- __ mov(masm->ExternalReferenceAsOperand(c_entry_fp_address, esi), ebp);
-
- const int kSavedRegistersAreaSize =
- kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;
-
- // The bailout id is passed in ebx by the caller.
-
- // Get the address of the location in the code object
- // and compute the fp-to-sp delta in register edx.
- __ mov(ecx, Operand(esp, kSavedRegistersAreaSize));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kSystemPointerSize));
-
- __ sub(edx, ebp);
- __ neg(edx);
-
- // Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6, eax);
- __ mov(eax, Immediate(0));
- Label context_check;
- __ mov(edi, Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(edi, &context_check);
- __ mov(eax, Operand(ebp, StandardFrameConstants::kFunctionOffset));
- __ bind(&context_check);
- __ mov(Operand(esp, 0 * kSystemPointerSize), eax); // Function.
- __ mov(Operand(esp, 1 * kSystemPointerSize),
- Immediate(static_cast<int>(deopt_kind)));
- __ mov(Operand(esp, 2 * kSystemPointerSize), ebx); // Bailout id.
- __ mov(Operand(esp, 3 * kSystemPointerSize), ecx); // Code address or 0.
- __ mov(Operand(esp, 4 * kSystemPointerSize), edx); // Fp-to-sp delta.
- __ mov(Operand(esp, 5 * kSystemPointerSize),
- Immediate(ExternalReference::isolate_address(isolate)));
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
- }
-
- // Preserve deoptimizer object in register eax and get the input
- // frame descriptor pointer.
- __ mov(esi, Operand(eax, Deoptimizer::input_offset()));
-
- // Fill in the input registers.
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset =
- (i * kSystemPointerSize) + FrameDescription::registers_offset();
- __ pop(Operand(esi, offset));
- }
-
- int double_regs_offset = FrameDescription::double_registers_offset();
- // Fill in the double input registers.
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset = code * kDoubleSize;
- __ movsd(xmm0, Operand(esp, src_offset));
- __ movsd(Operand(esi, dst_offset), xmm0);
- }
-
- // Clear all FPU exceptions.
- // TODO(ulan): Find out why the TOP register is not zero here in some cases,
- // and check that the generated code never deoptimizes with unbalanced stack.
- __ fnclex();
-
- // Mark the stack as not iterable for the CPU profiler which won't be able to
- // walk the stack without the return address.
- __ mov_b(__ ExternalReferenceAsOperand(
- ExternalReference::stack_is_iterable_address(isolate), edx),
- Immediate(0));
-
- // Remove the return address and the double registers.
- __ add(esp, Immediate(kDoubleRegsSize + 1 * kSystemPointerSize));
-
- // Compute a pointer to the unwinding limit in register ecx; that is
- // the first stack slot not part of the input frame.
- __ mov(ecx, Operand(esi, FrameDescription::frame_size_offset()));
- __ add(ecx, esp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ lea(edx, Operand(esi, FrameDescription::frame_content_offset()));
- Label pop_loop_header;
- __ jmp(&pop_loop_header);
- Label pop_loop;
- __ bind(&pop_loop);
- __ pop(Operand(edx, 0));
- __ add(edx, Immediate(sizeof(uint32_t)));
- __ bind(&pop_loop_header);
- __ cmp(ecx, esp);
- __ j(not_equal, &pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(eax);
- __ PrepareCallCFunction(1, esi);
- __ mov(Operand(esp, 0 * kSystemPointerSize), eax);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
- }
- __ pop(eax);
-
- __ mov(esp, Operand(eax, Deoptimizer::caller_frame_top_offset()));
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
- // Outer loop state: eax = current FrameDescription**, edx = one
- // past the last FrameDescription**.
- __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
- __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
- __ lea(edx, Operand(eax, edx, times_system_pointer_size, 0));
- __ jmp(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: esi = current FrameDescription*, ecx = loop
- // index.
- __ mov(esi, Operand(eax, 0));
- __ mov(ecx, Operand(esi, FrameDescription::frame_size_offset()));
- __ jmp(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ sub(ecx, Immediate(sizeof(uint32_t)));
- __ push(Operand(esi, ecx, times_1, FrameDescription::frame_content_offset()));
- __ bind(&inner_loop_header);
- __ test(ecx, ecx);
- __ j(not_zero, &inner_push_loop);
- __ add(eax, Immediate(kSystemPointerSize));
- __ bind(&outer_loop_header);
- __ cmp(eax, edx);
- __ j(below, &outer_push_loop);
-
- // In case of a failed STUB, we have to restore the XMM registers.
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- XMMRegister xmm_reg = XMMRegister::from_code(code);
- int src_offset = code * kDoubleSize + double_regs_offset;
- __ movsd(xmm_reg, Operand(esi, src_offset));
- }
-
- // Push pc and continuation from the last output frame.
- __ push(Operand(esi, FrameDescription::pc_offset()));
- __ push(Operand(esi, FrameDescription::continuation_offset()));
-
- // Push the registers from the last output frame.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset =
- (i * kSystemPointerSize) + FrameDescription::registers_offset();
- __ push(Operand(esi, offset));
- }
-
- __ mov_b(__ ExternalReferenceAsOperand(
- ExternalReference::stack_is_iterable_address(isolate), edx),
- Immediate(1));
-
- // Restore the registers from the stack.
- __ popad();
-
- __ InitializeRootRegister();
-
- // Return to the continuation point.
- __ ret(0);
-}
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 5;
+const int Deoptimizer::kLazyDeoptExitSize = 5;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
@@ -220,8 +33,6 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
-#undef __
-
} // namespace internal
} // namespace v8
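
The new ia32 exit size of 5 bytes is consistent with each exit being a single near call (one opcode byte plus a 32-bit displacement). The stack-is-iterable handshake the removed stub performed around frame rewriting, so the CPU profiler skips stacks with no return address in place, survives in the builtin; a hypothetical C++ wrapper around the external reference used above (the wrapper itself is illustrative only):

    #include <cstdint>
    #include "src/codegen/external-reference.h"

    void SetStackIterable(v8::internal::Isolate* isolate, bool iterable) {
      auto ref =
          v8::internal::ExternalReference::stack_is_iterable_address(isolate);
      // 0 while frames are being rewritten, 1 once the stack is walkable again.
      *reinterpret_cast<uint8_t*>(ref.address()) = iterable ? 1 : 0;
    }
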
diff --git a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
index 80221c5cbe..532f7a9b54 100644
--- a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
@@ -2,215 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/codegen/macro-assembler.h"
-#include "src/codegen/register-configuration.h"
-#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
-const int Deoptimizer::kNonLazyDeoptExitSize = 0;
-const int Deoptimizer::kLazyDeoptExitSize = 0;
-
-#define __ masm->
-
-// This code tries to be close to ia32 code so that any changes can be
-// easily ported.
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind deopt_kind) {
- NoRootArrayScope no_root_array(masm);
-
- // Unlike on ARM we don't save all the registers, just the useful ones.
- // For the rest, there are gaps on the stack, so the offsets remain the same.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- RegList restored_regs = kJSCallerSaved | kCalleeSaved;
- RegList saved_regs = restored_regs | sp.bit() | ra.bit();
-
- const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
-
- // Save all FPU registers before messing with them.
- __ Subu(sp, sp, Operand(kDoubleRegsSize));
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
- int offset = code * kDoubleSize;
- __ Sdc1(fpu_reg, MemOperand(sp, offset));
- }
-
- // Push saved_regs (needed to populate FrameDescription::registers_).
- // Leave gaps for other registers.
- __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
- for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
- if ((saved_regs & (1 << i)) != 0) {
- __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
- }
- }
-
- __ li(a2, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate)));
- __ sw(fp, MemOperand(a2));
-
- const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
-
- // The bailout id is passed in kRootRegister by the caller.
- __ mov(a2, kRootRegister);
-
- // Get the address of the location in the code object (a3) (return
- // address for lazy deoptimization) and compute the fp-to-sp delta in
- // register t0.
- __ mov(a3, ra);
- __ Addu(t0, sp, Operand(kSavedRegistersAreaSize));
- __ Subu(t0, fp, t0);
-
- // Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6, t1);
- // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
- __ mov(a0, zero_reg);
- Label context_check;
- __ lw(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(a1, &context_check);
- __ lw(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ bind(&context_check);
- __ li(a1, Operand(static_cast<int>(deopt_kind)));
- // a2: bailout id already loaded.
- // a3: code address or 0 already loaded.
- __ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
- __ li(t1, Operand(ExternalReference::isolate_address(isolate)));
- __ sw(t1, CFunctionArgumentOperand(6)); // Isolate.
- // Call Deoptimizer::New().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
- }
-
- // Preserve "deoptimizer" object in register v0 and get the input
- // frame descriptor pointer to a1 (deoptimizer->input_);
- // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
- __ mov(a0, v0);
- __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
-
- // Copy core registers into FrameDescription::registers_[kNumRegisters].
- DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((saved_regs & (1 << i)) != 0) {
- __ lw(a2, MemOperand(sp, i * kPointerSize));
- __ sw(a2, MemOperand(a1, offset));
- } else if (FLAG_debug_code) {
- __ li(a2, kDebugZapValue);
- __ sw(a2, MemOperand(a1, offset));
- }
- }
-
- int double_regs_offset = FrameDescription::double_registers_offset();
- // Copy FPU registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset =
- code * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ Ldc1(f0, MemOperand(sp, src_offset));
- __ Sdc1(f0, MemOperand(a1, dst_offset));
- }
-
- // Remove the saved registers from the stack.
- __ Addu(sp, sp, Operand(kSavedRegistersAreaSize));
-
- // Compute a pointer to the unwinding limit in register a2; that is
- // the first stack slot not part of the input frame.
- __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
- __ Addu(a2, a2, sp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
- Label pop_loop;
- Label pop_loop_header;
- __ BranchShort(&pop_loop_header);
- __ bind(&pop_loop);
- __ pop(t0);
- __ sw(t0, MemOperand(a3, 0));
- __ addiu(a3, a3, sizeof(uint32_t));
- __ bind(&pop_loop_header);
- __ BranchShort(&pop_loop, ne, a2, Operand(sp));
-
- // Compute the output frame in the deoptimizer.
- __ push(a0); // Preserve deoptimizer object across call.
- // a0: deoptimizer object; a1: scratch.
- __ PrepareCallCFunction(1, a1);
- // Call Deoptimizer::ComputeOutputFrames().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
- }
- __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
-
- __ lw(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
- // Outer loop state: t0 = current "FrameDescription** output_",
- // a1 = one past the last FrameDescription**.
- __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
- __ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_.
- __ Lsa(a1, t0, a1, kPointerSizeLog2);
- __ BranchShort(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
- __ lw(a2, MemOperand(t0, 0)); // output_[ix]
- __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
- __ BranchShort(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ Subu(a3, a3, Operand(sizeof(uint32_t)));
- __ Addu(t2, a2, Operand(a3));
- __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
- __ push(t3);
- __ bind(&inner_loop_header);
- __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
-
- __ Addu(t0, t0, Operand(kPointerSize));
- __ bind(&outer_loop_header);
- __ BranchShort(&outer_push_loop, lt, t0, Operand(a1));
-
- __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
- int src_offset = code * kDoubleSize + double_regs_offset;
- __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
- }
-
- // Push pc and continuation from the last output frame.
- __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
- __ push(t2);
- __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
- __ push(t2);
-
- // Technically restoring 'at' should work unless zero_reg is also restored
- // but it's safer to check for this.
- DCHECK(!(at.bit() & restored_regs));
- // Restore the registers from the last output frame.
- __ mov(at, a2);
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((restored_regs & (1 << i)) != 0) {
- __ lw(ToRegister(i), MemOperand(at, offset));
- }
- }
-
- __ pop(at); // Get continuation, leave pc on stack.
- __ pop(ra);
- __ Jump(at);
- __ stop();
-}
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 3 * kInstrSize;
+const int Deoptimizer::kLazyDeoptExitSize = 3 * kInstrSize;
// Maximum size of a table entry generated below.
#ifdef _MIPS_ARCH_MIPS32R6
@@ -239,7 +38,5 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
-#undef __
-
} // namespace internal
} // namespace v8
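
The mips exit sequence is three 4-byte instructions; combined with the kMaxNumberOfEntries bound from deoptimizer.h, a back-of-the-envelope sketch of the worst-case space the fixed-size exits can occupy in a code object:

    constexpr int kInstrSize = 4;                          // 4-byte MIPS instructions
    constexpr int kNonLazyDeoptExitSize = 3 * kInstrSize;  // from the hunk above
    constexpr int kMaxNumberOfEntries = 16384;             // from deoptimizer.h
    static_assert(kMaxNumberOfEntries * kNonLazyDeoptExitSize == 192 * 1024,
                  "16384 exits x 12 bytes = at most 192 KiB of exit code");
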
diff --git a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
index 3b8b1b9659..227c002b88 100644
--- a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
@@ -2,215 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/codegen/macro-assembler.h"
-#include "src/codegen/register-configuration.h"
-#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
-const int Deoptimizer::kNonLazyDeoptExitSize = 0;
-const int Deoptimizer::kLazyDeoptExitSize = 0;
-
-#define __ masm->
-
-// This code tries to be close to ia32 code so that any changes can be
-// easily ported.
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind deopt_kind) {
- NoRootArrayScope no_root_array(masm);
-
- // Unlike on ARM we don't save all the registers, just the useful ones.
- // For the rest, there are gaps on the stack, so the offsets remain the same.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- RegList restored_regs = kJSCallerSaved | kCalleeSaved;
- RegList saved_regs = restored_regs | sp.bit() | ra.bit();
-
- const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
-
- // Save all double FPU registers before messing with them.
- __ Dsubu(sp, sp, Operand(kDoubleRegsSize));
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
- int offset = code * kDoubleSize;
- __ Sdc1(fpu_reg, MemOperand(sp, offset));
- }
-
- // Push saved_regs (needed to populate FrameDescription::registers_).
- // Leave gaps for other registers.
- __ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize);
- for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
- if ((saved_regs & (1 << i)) != 0) {
- __ Sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
- }
- }
-
- __ li(a2, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate)));
- __ Sd(fp, MemOperand(a2));
-
- const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
-
- // The bailout id is passed as kRootRegister by the caller.
- __ mov(a2, kRootRegister);
-
- // Get the address of the location in the code object (a3) (return
- // address for lazy deoptimization) and compute the fp-to-sp delta in
- // register a4.
- __ mov(a3, ra);
- __ Daddu(a4, sp, Operand(kSavedRegistersAreaSize));
-
- __ Dsubu(a4, fp, a4);
-
- // Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6, a5);
- // Pass six arguments, according to n64 ABI.
- __ mov(a0, zero_reg);
- Label context_check;
- __ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(a1, &context_check);
- __ Ld(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ bind(&context_check);
- __ li(a1, Operand(static_cast<int>(deopt_kind)));
- // a2: bailout id already loaded.
- // a3: code address or 0 already loaded.
- // a4: already has fp-to-sp delta.
- __ li(a5, Operand(ExternalReference::isolate_address(isolate)));
-
- // Call Deoptimizer::New().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
- }
-
- // Preserve "deoptimizer" object in register v0 and get the input
- // frame descriptor pointer to a1 (deoptimizer->input_);
- // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
- __ mov(a0, v0);
- __ Ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
-
- // Copy core registers into FrameDescription::registers_[kNumRegisters].
- DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((saved_regs & (1 << i)) != 0) {
- __ Ld(a2, MemOperand(sp, i * kPointerSize));
- __ Sd(a2, MemOperand(a1, offset));
- } else if (FLAG_debug_code) {
- __ li(a2, kDebugZapValue);
- __ Sd(a2, MemOperand(a1, offset));
- }
- }
-
- int double_regs_offset = FrameDescription::double_registers_offset();
- // Copy FPU registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset =
- code * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ Ldc1(f0, MemOperand(sp, src_offset));
- __ Sdc1(f0, MemOperand(a1, dst_offset));
- }
-
- // Remove the saved registers from the stack.
- __ Daddu(sp, sp, Operand(kSavedRegistersAreaSize));
-
- // Compute a pointer to the unwinding limit in register a2; that is
- // the first stack slot not part of the input frame.
- __ Ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
- __ Daddu(a2, a2, sp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ Daddu(a3, a1, Operand(FrameDescription::frame_content_offset()));
- Label pop_loop;
- Label pop_loop_header;
- __ BranchShort(&pop_loop_header);
- __ bind(&pop_loop);
- __ pop(a4);
- __ Sd(a4, MemOperand(a3, 0));
- __ daddiu(a3, a3, sizeof(uint64_t));
- __ bind(&pop_loop_header);
- __ BranchShort(&pop_loop, ne, a2, Operand(sp));
- // Compute the output frame in the deoptimizer.
- __ push(a0); // Preserve deoptimizer object across call.
- // a0: deoptimizer object; a1: scratch.
- __ PrepareCallCFunction(1, a1);
- // Call Deoptimizer::ComputeOutputFrames().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
- }
- __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
-
- __ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
- // Outer loop state: a4 = current "FrameDescription** output_",
- // a1 = one past the last FrameDescription**.
- __ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
- __ Ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
- __ Dlsa(a1, a4, a1, kPointerSizeLog2);
- __ BranchShort(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
- __ Ld(a2, MemOperand(a4, 0)); // output_[ix]
- __ Ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
- __ BranchShort(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
- __ Daddu(a6, a2, Operand(a3));
- __ Ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
- __ push(a7);
- __ bind(&inner_loop_header);
- __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
-
- __ Daddu(a4, a4, Operand(kPointerSize));
- __ bind(&outer_loop_header);
- __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
-
- __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
- int src_offset = code * kDoubleSize + double_regs_offset;
- __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
- }
-
- // Push pc and continuation from the last output frame.
- __ Ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
- __ push(a6);
- __ Ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
- __ push(a6);
-
- // Technically, restoring 'at' should work unless zero_reg is also
- // restored, but it's safer to check for this.
- DCHECK(!(at.bit() & restored_regs));
- // Restore the registers from the last output frame.
- __ mov(at, a2);
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((restored_regs & (1 << i)) != 0) {
- __ Ld(ToRegister(i), MemOperand(at, offset));
- }
- }
-
- __ pop(at); // Get continuation, leave pc on stack.
- __ pop(ra);
- __ Jump(at);
- __ stop();
-}
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 3 * kInstrSize;
+const int Deoptimizer::kLazyDeoptExitSize = 3 * kInstrSize;
// Maximum size of a table entry generated below.
#ifdef _MIPS_ARCH_MIPS64R6
@@ -239,7 +38,5 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
-#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
index f8959752b7..817c301431 100644
--- a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
@@ -11,238 +11,9 @@
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
-const int Deoptimizer::kNonLazyDeoptExitSize = 0;
-const int Deoptimizer::kLazyDeoptExitSize = 0;
-
-#define __ masm->
-
-// This code tries to be close to ia32 code so that any changes can be
-// easily ported.
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind deopt_kind) {
- NoRootArrayScope no_root_array(masm);
-
- // Unlike on ARM we don't save all the registers, just the useful ones.
- // For the rest, there are gaps on the stack, so the offsets remain the same.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- RegList restored_regs = kJSCallerSaved | kCalleeSaved;
- RegList saved_regs = restored_regs | sp.bit();
-
- const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
-
- // Save all double registers before messing with them.
- __ subi(sp, sp, Operand(kDoubleRegsSize));
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- const DoubleRegister dreg = DoubleRegister::from_code(code);
- int offset = code * kDoubleSize;
- __ stfd(dreg, MemOperand(sp, offset));
- }
-
- // Push saved_regs (needed to populate FrameDescription::registers_).
- // Leave gaps for other registers.
- __ subi(sp, sp, Operand(kNumberOfRegisters * kSystemPointerSize));
- for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
- if ((saved_regs & (1 << i)) != 0) {
- __ StoreP(ToRegister(i), MemOperand(sp, kSystemPointerSize * i));
- }
- }
- {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
- __ mov(scratch, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate)));
- __ StoreP(fp, MemOperand(scratch));
- }
- const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
-
- // The bailout id is passed as r29 by the caller.
- __ mr(r5, r29);
-
- // Get the address of the location in the code object (r6) (return
- // address for lazy deoptimization) and compute the fp-to-sp delta in
- // register r7.
- __ mflr(r6);
- __ addi(r7, sp, Operand(kSavedRegistersAreaSize));
- __ sub(r7, fp, r7);
-
- // Allocate a new deoptimizer object.
- // Pass six arguments in r3 to r8.
- __ PrepareCallCFunction(6, r8);
- __ li(r3, Operand::Zero());
- Label context_check;
- __ LoadP(r4, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(r4, &context_check);
- __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ bind(&context_check);
- __ li(r4, Operand(static_cast<int>(deopt_kind)));
- // r5: bailout id already loaded.
- // r6: code address or 0 already loaded.
- // r7: Fp-to-sp delta.
- __ mov(r8, Operand(ExternalReference::isolate_address(isolate)));
- // Call Deoptimizer::New().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
- }
-
- // Preserve "deoptimizer" object in register r3 and get the input
- // frame descriptor pointer to r4 (deoptimizer->input_);
- __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
-
- // Copy core registers into FrameDescription::registers_[kNumRegisters].
- DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset =
- (i * kSystemPointerSize) + FrameDescription::registers_offset();
- __ LoadP(r5, MemOperand(sp, i * kSystemPointerSize));
- __ StoreP(r5, MemOperand(r4, offset));
- }
-
- int double_regs_offset = FrameDescription::double_registers_offset();
- // Copy double registers to
- // double_registers_[DoubleRegister::kNumRegisters]
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset =
- code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
- __ lfd(d0, MemOperand(sp, src_offset));
- __ stfd(d0, MemOperand(r4, dst_offset));
- }
-
- // Mark the stack as not iterable for the CPU profiler which won't be able to
- // walk the stack without the return address.
- {
- UseScratchRegisterScope temps(masm);
- Register is_iterable = temps.Acquire();
- Register zero = r7;
- __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
- __ li(zero, Operand(0));
- __ stb(zero, MemOperand(is_iterable));
- }
-
- // Remove the saved registers from the stack.
- __ addi(sp, sp, Operand(kSavedRegistersAreaSize));
-
- // Compute a pointer to the unwinding limit in register r5; that is
- // the first stack slot not part of the input frame.
- __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
- __ add(r5, r5, sp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ addi(r6, r4, Operand(FrameDescription::frame_content_offset()));
- Label pop_loop;
- Label pop_loop_header;
- __ b(&pop_loop_header);
- __ bind(&pop_loop);
- __ pop(r7);
- __ StoreP(r7, MemOperand(r6, 0));
- __ addi(r6, r6, Operand(kSystemPointerSize));
- __ bind(&pop_loop_header);
- __ cmp(r5, sp);
- __ bne(&pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(r3); // Preserve deoptimizer object across call.
- // r3: deoptimizer object; r4: scratch.
- __ PrepareCallCFunction(1, r4);
- // Call Deoptimizer::ComputeOutputFrames().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
- }
- __ pop(r3); // Restore deoptimizer object (class Deoptimizer).
-
- __ LoadP(sp, MemOperand(r3, Deoptimizer::caller_frame_top_offset()));
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
- // Outer loop state: r7 = current "FrameDescription** output_",
- // r4 = one past the last FrameDescription**.
- __ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
- __ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_.
- __ ShiftLeftImm(r4, r4, Operand(kSystemPointerSizeLog2));
- __ add(r4, r7, r4);
- __ b(&outer_loop_header);
-
- __ bind(&outer_push_loop);
- // Inner loop state: r5 = current FrameDescription*, r6 = loop index.
- __ LoadP(r5, MemOperand(r7, 0)); // output_[ix]
- __ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset()));
- __ b(&inner_loop_header);
-
- __ bind(&inner_push_loop);
- __ addi(r6, r6, Operand(-sizeof(intptr_t)));
- __ add(r9, r5, r6);
- __ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset()));
- __ push(r9);
-
- __ bind(&inner_loop_header);
- __ cmpi(r6, Operand::Zero());
- __ bne(&inner_push_loop); // test for gt?
-
- __ addi(r7, r7, Operand(kSystemPointerSize));
- __ bind(&outer_loop_header);
- __ cmp(r7, r4);
- __ blt(&outer_push_loop);
-
- __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- const DoubleRegister dreg = DoubleRegister::from_code(code);
- int src_offset = code * kDoubleSize + double_regs_offset;
- __ lfd(dreg, MemOperand(r4, src_offset));
- }
-
- // Push pc and continuation from the last output frame.
- __ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset()));
- __ push(r9);
- __ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset()));
- __ push(r9);
-
- // Restore the registers from the last output frame.
- {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
- DCHECK(!(scratch.bit() & restored_regs));
- __ mr(scratch, r5);
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset =
- (i * kSystemPointerSize) + FrameDescription::registers_offset();
- if ((restored_regs & (1 << i)) != 0) {
- __ LoadP(ToRegister(i), MemOperand(scratch, offset));
- }
- }
- }
-
- {
- UseScratchRegisterScope temps(masm);
- Register is_iterable = temps.Acquire();
- Register one = r7;
- __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
- __ li(one, Operand(1));
- __ stb(one, MemOperand(is_iterable));
- }
-
- {
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
- __ pop(scratch); // get continuation, leave pc on stack
- __ pop(r0);
- __ mtlr(r0);
- __ Jump(scratch);
- }
-
- __ stop();
-}
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 3 * kInstrSize;
+const int Deoptimizer::kLazyDeoptExitSize = 3 * kInstrSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
float float_val = static_cast<float>(double_registers_[n].get_scalar());
@@ -264,6 +35,5 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
-#undef __
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
index 66d9d0db8e..358450c091 100644
--- a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
@@ -2,240 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/codegen/macro-assembler.h"
-#include "src/codegen/register-configuration.h"
-#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
-const int Deoptimizer::kNonLazyDeoptExitSize = 0;
-const int Deoptimizer::kLazyDeoptExitSize = 0;
-
-#define __ masm->
-
-// This code tries to be close to ia32 code so that any changes can be
-// easily ported.
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind deopt_kind) {
- NoRootArrayScope no_root_array(masm);
-
- // Save all the registers onto the stack
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- RegList restored_regs = kJSCallerSaved | kCalleeSaved;
-
- const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
-
- // Save all double registers before messing with them.
- __ lay(sp, MemOperand(sp, -kDoubleRegsSize));
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- const DoubleRegister dreg = DoubleRegister::from_code(code);
- int offset = code * kDoubleSize;
- __ StoreDouble(dreg, MemOperand(sp, offset));
- }
-
- // Push all GPRs onto the stack
- __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kSystemPointerSize));
- __ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers
-
- __ mov(r1, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate)));
- __ StoreP(fp, MemOperand(r1));
-
- const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
-
- // The bailout id is passed using r10
- __ LoadRR(r4, r10);
-
- // Cleanse the return address for 31-bit mode.
- __ CleanseP(r14);
-
- // Get the address of the location in the code object (r5) (return
- // address for lazy deoptimization) and compute the fp-to-sp delta in
- // register r6.
- __ LoadRR(r5, r14);
-
- __ la(r6, MemOperand(sp, kSavedRegistersAreaSize));
- __ SubP(r6, fp, r6);
-
- // Allocate a new deoptimizer object.
- // Pass six arguments in r2 to r7.
- __ PrepareCallCFunction(6, r7);
- __ LoadImmP(r2, Operand::Zero());
- Label context_check;
- __ LoadP(r3, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(r3, &context_check);
- __ LoadP(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ bind(&context_check);
- __ LoadImmP(r3, Operand(static_cast<int>(deopt_kind)));
- // r4: bailout id already loaded.
- // r5: code address or 0 already loaded.
- // r6: Fp-to-sp delta.
- // Parm6: isolate is passed on the stack.
- __ mov(r7, Operand(ExternalReference::isolate_address(isolate)));
- __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
-
- // Call Deoptimizer::New().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
- }
-
- // Preserve "deoptimizer" object in register r2 and get the input
- // frame descriptor pointer to r3 (deoptimizer->input_);
- __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
-
- // Copy core registers into FrameDescription::registers_[kNumRegisters].
- // DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
- // __ mvc(MemOperand(r3, FrameDescription::registers_offset()),
- // MemOperand(sp), kNumberOfRegisters * kSystemPointerSize);
- // Copy core registers into FrameDescription::registers_[kNumRegisters].
- // TODO(john.yan): optimize the following code by using mvc instruction
- DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset =
- (i * kSystemPointerSize) + FrameDescription::registers_offset();
- __ LoadP(r4, MemOperand(sp, i * kSystemPointerSize));
- __ StoreP(r4, MemOperand(r3, offset));
- }
-
- int double_regs_offset = FrameDescription::double_registers_offset();
- // Copy double registers to
- // double_registers_[DoubleRegister::kNumRegisters]
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- int dst_offset = code * kDoubleSize + double_regs_offset;
- int src_offset =
- code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
- // TODO(joransiu): MVC opportunity
- __ LoadDouble(d0, MemOperand(sp, src_offset));
- __ StoreDouble(d0, MemOperand(r3, dst_offset));
- }
-
- // Mark the stack as not iterable for the CPU profiler which won't be able to
- // walk the stack without the return address.
- {
- UseScratchRegisterScope temps(masm);
- Register is_iterable = temps.Acquire();
- Register zero = r6;
- __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
- __ lhi(zero, Operand(0));
- __ StoreByte(zero, MemOperand(is_iterable));
- }
-
- // Remove the saved registers from the stack.
- __ la(sp, MemOperand(sp, kSavedRegistersAreaSize));
-
- // Compute a pointer to the unwinding limit in register r4; that is
- // the first stack slot not part of the input frame.
- __ LoadP(r4, MemOperand(r3, FrameDescription::frame_size_offset()));
- __ AddP(r4, sp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ la(r5, MemOperand(r3, FrameDescription::frame_content_offset()));
- Label pop_loop;
- Label pop_loop_header;
- __ b(&pop_loop_header, Label::kNear);
- __ bind(&pop_loop);
- __ pop(r6);
- __ StoreP(r6, MemOperand(r5, 0));
- __ la(r5, MemOperand(r5, kSystemPointerSize));
- __ bind(&pop_loop_header);
- __ CmpP(r4, sp);
- __ bne(&pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(r2); // Preserve deoptimizer object across call.
- // r2: deoptimizer object; r3: scratch.
- __ PrepareCallCFunction(1, r3);
- // Call Deoptimizer::ComputeOutputFrames().
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
- }
- __ pop(r2); // Restore deoptimizer object (class Deoptimizer).
-
- __ LoadP(sp, MemOperand(r2, Deoptimizer::caller_frame_top_offset()));
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
- // Outer loop state: r6 = current "FrameDescription** output_",
- // r3 = one past the last FrameDescription**.
- __ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
- __ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_.
- __ ShiftLeftP(r3, r3, Operand(kSystemPointerSizeLog2));
- __ AddP(r3, r6, r3);
- __ b(&outer_loop_header, Label::kNear);
-
- __ bind(&outer_push_loop);
- // Inner loop state: r4 = current FrameDescription*, r5 = loop index.
- __ LoadP(r4, MemOperand(r6, 0)); // output_[ix]
- __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
- __ b(&inner_loop_header, Label::kNear);
-
- __ bind(&inner_push_loop);
- __ SubP(r5, Operand(sizeof(intptr_t)));
- __ AddP(r8, r4, r5);
- __ LoadP(r8, MemOperand(r8, FrameDescription::frame_content_offset()));
- __ push(r8);
-
- __ bind(&inner_loop_header);
- __ CmpP(r5, Operand::Zero());
- __ bne(&inner_push_loop); // test for gt?
-
- __ AddP(r6, r6, Operand(kSystemPointerSize));
- __ bind(&outer_loop_header);
- __ CmpP(r6, r3);
- __ blt(&outer_push_loop);
-
- __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- const DoubleRegister dreg = DoubleRegister::from_code(code);
- int src_offset = code * kDoubleSize + double_regs_offset;
- __ ld(dreg, MemOperand(r3, src_offset));
- }
-
- // Push pc and continuation from the last output frame.
- __ LoadP(r8, MemOperand(r4, FrameDescription::pc_offset()));
- __ push(r8);
- __ LoadP(r8, MemOperand(r4, FrameDescription::continuation_offset()));
- __ push(r8);
-
- // Restore the registers from the last output frame.
- __ LoadRR(r1, r4);
- for (int i = kNumberOfRegisters - 1; i > 0; i--) {
- int offset =
- (i * kSystemPointerSize) + FrameDescription::registers_offset();
- if ((restored_regs & (1 << i)) != 0) {
- __ LoadP(ToRegister(i), MemOperand(r1, offset));
- }
- }
-
- {
- UseScratchRegisterScope temps(masm);
- Register is_iterable = temps.Acquire();
- Register one = r6;
- __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
- __ lhi(one, Operand(1));
- __ StoreByte(one, MemOperand(is_iterable));
- }
-
- __ pop(ip); // get continuation, leave pc on stack
- __ pop(r14);
- __ Jump(ip);
-
- __ stop();
-}
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 6 + 2;
+const int Deoptimizer::kLazyDeoptExitSize = 6 + 2;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
@@ -257,7 +31,5 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
-#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
index ea13361341..6f621ed34e 100644
--- a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
@@ -4,217 +4,14 @@
#if V8_TARGET_ARCH_X64
-#include "src/codegen/macro-assembler.h"
-#include "src/codegen/register-configuration.h"
-#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/deoptimizer.h"
-#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
-const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
-const int Deoptimizer::kNonLazyDeoptExitSize = 0;
-const int Deoptimizer::kLazyDeoptExitSize = 0;
-
-#define __ masm->
-
-void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
- Isolate* isolate,
- DeoptimizeKind deopt_kind) {
- NoRootArrayScope no_root_array(masm);
-
- // Save all general purpose registers before messing with them.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
- __ AllocateStackSpace(kDoubleRegsSize);
-
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- XMMRegister xmm_reg = XMMRegister::from_code(code);
- int offset = code * kDoubleSize;
- __ Movsd(Operand(rsp, offset), xmm_reg);
- }
-
- // We push all registers onto the stack, even though we do not need
- // to restore all later.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- Register r = Register::from_code(i);
- __ pushq(r);
- }
-
- const int kSavedRegistersAreaSize =
- kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;
-
- __ Store(
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate),
- rbp);
-
- // We use this to keep the value of the fifth argument temporarily.
- // Unfortunately we can't store it directly in r8 (used for passing
- // this on linux), since it is another parameter passing register on windows.
- Register arg5 = r11;
-
- // The bailout id is passed by the caller in r13.
- __ movq(arg_reg_3, r13);
-
- // Get the address of the location in the code object
- // and compute the fp-to-sp delta in register arg5.
- __ movq(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize));
- __ leaq(arg5, Operand(rsp, kSavedRegistersAreaSize + kPCOnStackSize));
-
- __ subq(arg5, rbp);
- __ negq(arg5);
-
- // Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6);
- __ movq(rax, Immediate(0));
- Label context_check;
- __ movq(rdi, Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(rdi, &context_check);
- __ movq(rax, Operand(rbp, StandardFrameConstants::kFunctionOffset));
- __ bind(&context_check);
- __ movq(arg_reg_1, rax);
- __ Set(arg_reg_2, static_cast<int>(deopt_kind));
- // Args 3 and 4 are already in the right registers.
-
- // On windows put the arguments on the stack (PrepareCallCFunction
- // has created space for this). On linux pass the arguments in r8 and r9.
-#ifdef V8_TARGET_OS_WIN
- __ movq(Operand(rsp, 4 * kSystemPointerSize), arg5);
- __ LoadAddress(arg5, ExternalReference::isolate_address(isolate));
- __ movq(Operand(rsp, 5 * kSystemPointerSize), arg5);
-#else
- __ movq(r8, arg5);
- __ LoadAddress(r9, ExternalReference::isolate_address(isolate));
-#endif
-
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
- }
- // Preserve deoptimizer object in register rax and get the input
- // frame descriptor pointer.
- __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
-
- // Fill in the input registers.
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset =
- (i * kSystemPointerSize) + FrameDescription::registers_offset();
- __ PopQuad(Operand(rbx, offset));
- }
-
- // Fill in the double input registers.
- int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- __ popq(Operand(rbx, dst_offset));
- }
-
- // Mark the stack as not iterable for the CPU profiler which won't be able to
- // walk the stack without the return address.
- __ movb(__ ExternalReferenceAsOperand(
- ExternalReference::stack_is_iterable_address(isolate)),
- Immediate(0));
-
- // Remove the return address from the stack.
- __ addq(rsp, Immediate(kPCOnStackSize));
-
- // Compute a pointer to the unwinding limit in register rcx; that is
- // the first stack slot not part of the input frame.
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
- __ addq(rcx, rsp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ leaq(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
- Label pop_loop_header;
- __ jmp(&pop_loop_header);
- Label pop_loop;
- __ bind(&pop_loop);
- __ Pop(Operand(rdx, 0));
- __ addq(rdx, Immediate(sizeof(intptr_t)));
- __ bind(&pop_loop_header);
- __ cmpq(rcx, rsp);
- __ j(not_equal, &pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ pushq(rax);
- __ PrepareCallCFunction(2);
- __ movq(arg_reg_1, rax);
- __ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate));
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compute_output_frames_function(), 2);
- }
- __ popq(rax);
-
- __ movq(rsp, Operand(rax, Deoptimizer::caller_frame_top_offset()));
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
- // Outer loop state: rax = current FrameDescription**, rdx = one past the
- // last FrameDescription**.
- __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
- __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
- __ leaq(rdx, Operand(rax, rdx, times_system_pointer_size, 0));
- __ jmp(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
- __ movq(rbx, Operand(rax, 0));
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
- __ jmp(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ subq(rcx, Immediate(sizeof(intptr_t)));
- __ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
- __ bind(&inner_loop_header);
- __ testq(rcx, rcx);
- __ j(not_zero, &inner_push_loop);
- __ addq(rax, Immediate(kSystemPointerSize));
- __ bind(&outer_loop_header);
- __ cmpq(rax, rdx);
- __ j(below, &outer_push_loop);
-
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- XMMRegister xmm_reg = XMMRegister::from_code(code);
- int src_offset = code * kDoubleSize + double_regs_offset;
- __ Movsd(xmm_reg, Operand(rbx, src_offset));
- }
-
- // Push pc and continuation from the last output frame.
- __ PushQuad(Operand(rbx, FrameDescription::pc_offset()));
- __ PushQuad(Operand(rbx, FrameDescription::continuation_offset()));
-
- // Push the registers from the last output frame.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset =
- (i * kSystemPointerSize) + FrameDescription::registers_offset();
- __ PushQuad(Operand(rbx, offset));
- }
-
- // Restore the registers from the stack.
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- Register r = Register::from_code(i);
- // Do not restore rsp, simply pop the value into the next register
- // and overwrite this afterwards.
- if (r == rsp) {
- DCHECK_GT(i, 0);
- r = Register::from_code(i - 1);
- }
- __ popq(r);
- }
-
- __ movb(__ ExternalReferenceAsOperand(
- ExternalReference::stack_is_iterable_address(isolate)),
- Immediate(1));
-
- // Return to the continuation point.
- __ ret(0);
-}
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 7;
+const int Deoptimizer::kLazyDeoptExitSize = 7;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
@@ -236,8 +33,6 @@ void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
-#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/diagnostics/arm/disasm-arm.cc b/deps/v8/src/diagnostics/arm/disasm-arm.cc
index 190640d527..83cdca3c03 100644
--- a/deps/v8/src/diagnostics/arm/disasm-arm.cc
+++ b/deps/v8/src/diagnostics/arm/disasm-arm.cc
@@ -111,6 +111,20 @@ class Decoder {
void DecodeSpecialCondition(Instruction* instr);
+ // F4.1.14 Floating-point data-processing.
+ void DecodeFloatingPointDataProcessing(Instruction* instr);
+ // F4.1.18 Unconditional instructions.
+ void DecodeUnconditional(Instruction* instr);
+ // F4.1.20 Advanced SIMD data-processing.
+ void DecodeAdvancedSIMDDataProcessing(Instruction* instr);
+ // F4.1.21 Advanced SIMD two registers, or three registers of different
+ // lengths.
+ void DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr);
+ // F4.1.23 Memory hints and barriers.
+ void DecodeMemoryHintsAndBarriers(Instruction* instr);
+ // F4.1.24 Advanced SIMD element or structure load/store.
+ void DecodeAdvancedSIMDElementOrStructureLoadStore(Instruction* instr);
+
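+  // Each Decode* routine above mirrors one hierarchical A32 encoding table
+  // (the F4.1.x references) of the Arm Architecture Reference Manual: it
+  // extracts that table's op0/op1 selector bits and dispatches one level
+  // down, replacing the old flat switch on SpecialValue().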
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
@@ -565,11 +579,18 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
PrintSoftwareInterrupt(instr->SvcValue());
return 3;
} else if (format[1] == 'i') { // 'sign: signed extra loads and stores
- DCHECK(STRING_STARTS_WITH(format, "sign"));
- if (instr->HasSign()) {
- Print("s");
+ if (format[2] == 'g') {
+ DCHECK(STRING_STARTS_WITH(format, "sign"));
+ if (instr->HasSign()) {
+ Print("s");
+ }
+ return 4;
+ } else { // 'size, for Advanced SIMD instructions
+ DCHECK(STRING_STARTS_WITH(format, "size"));
+ int sz = 8 << instr->Bits(21, 20);
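+      // E.g. instr->Bits(21, 20) == 0b10 yields 8 << 2 == 32, so a NEON
+      // integer add disassembles as "vadd.i32".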
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sz);
+ return 4;
}
- return 4;
} else if (format[1] == 'p') {
if (format[8] == '_') { // 'spec_reg_fields
DCHECK(STRING_STARTS_WITH(format, "spec_reg_fields"));
@@ -1859,594 +1880,307 @@ static const char* const barrier_option_names[] = {
};
void Decoder::DecodeSpecialCondition(Instruction* instr) {
- switch (instr->SpecialValue()) {
- case 4: {
- int Vd, Vm, Vn;
- if (instr->Bit(6) == 0) {
- Vd = instr->VFPDRegValue(kDoublePrecision);
- Vm = instr->VFPMRegValue(kDoublePrecision);
- Vn = instr->VFPNRegValue(kDoublePrecision);
- } else {
- Vd = instr->VFPDRegValue(kSimd128Precision);
- Vm = instr->VFPMRegValue(kSimd128Precision);
- Vn = instr->VFPNRegValue(kSimd128Precision);
- }
- int size = kBitsPerByte * (1 << instr->Bits(21, 20));
- switch (instr->Bits(11, 8)) {
- case 0x0: {
- if (instr->Bit(4) == 1) {
- // vqadd.s<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vqadd.s%d q%d, q%d, q%d", size, Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0x1: {
- if (instr->Bits(21, 20) == 2 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- if (Vm == Vn) {
- // vmov Qd, Qm
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmov q%d, q%d", Vd, Vm);
- } else {
- // vorr Qd, Qm, Qn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vorr q%d, q%d, q%d", Vd, Vn, Vm);
- }
- } else if (instr->Bits(21, 20) == 1 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- // vbic Qd, Qn, Qm
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vbic q%d, q%d, q%d", Vd, Vn, Vm);
- } else if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- // vand Qd, Qm, Qn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vand q%d, q%d, q%d", Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0x2: {
- if (instr->Bit(4) == 1) {
- // vqsub.s<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vqsub.s%d q%d, q%d, q%d", size, Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0x3: {
- const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
- // vcge/vcgt.s<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d",
- op, size, Vd, Vn, Vm);
- break;
- }
- case 0x4: {
- if (instr->Bit(4) == 0) {
- // vshl.s<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vshl.s%d q%d, q%d, q%d", size, Vd, Vm, Vn);
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0x6: {
- // vmin/vmax.s<size> Qd, Qm, Qn.
- const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d",
- op, size, Vd, Vn, Vm);
- break;
+ int op0 = instr->Bits(25, 24);
+ int op1 = instr->Bits(11, 9);
+ int op2 = instr->Bit(4);
+
+ if (instr->Bit(27) == 0) {
+ DecodeUnconditional(instr);
+ } else if ((instr->Bits(27, 26) == 0b11) && (op0 == 0b10) &&
+ ((op1 >> 1) == 0b10) && !op2) {
+ DecodeFloatingPointDataProcessing(instr);
+ } else {
+ Unknown(instr);
+ }
+}
+
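+// A sketch of the dispatch above as a single mask/value test on the raw
+// instruction word (invented helper; assumes this path is reached only for
+// instructions whose condition field is the special 0b1111 pattern):
+static bool IsFPDataProcessingSketch(uint32_t bits) {
+  // cond == 0b1111, Bits(27, 26) == 0b11, Bits(25, 24) == 0b10,
+  // Bits(11, 10) == 0b10 (i.e. op1 >> 1 == 0b10), Bit(4) == 0.
+  return (bits & 0xFF000C10u) == 0xFE000800u;
+}
+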
+void Decoder::DecodeFloatingPointDataProcessing(Instruction* instr) {
+ // Floating-point data processing, F4.1.14.
+ int op0 = instr->Bits(23, 20);
+ int op1 = instr->Bits(19, 16);
+ int op2 = instr->Bits(9, 8);
+ int op3 = instr->Bit(6);
+ if (((op0 & 0b1000) == 0) && op2 && !op3) {
+ // Floating-point conditional select.
+ // VSEL* (floating-point)
+ bool dp_operation = (instr->SzValue() == 1);
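+    // Bits(21, 20) encode the condition: 0b00 eq, 0b01 vs, 0b10 ge,
+    // 0b11 gt. VSEL only encodes these four; the inverse conditions are
+    // obtained by swapping the source operands.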
+ switch (instr->Bits(21, 20)) {
+ case 0x0:
+ if (dp_operation) {
+ Format(instr, "vseleq.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vseleq.f32 'Sd, 'Sn, 'Sm");
}
- case 0x8: {
- const char* op = (instr->Bit(4) == 0) ? "vadd" : "vtst";
- // vadd/vtst.i<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.i%d q%d, q%d, q%d",
- op, size, Vd, Vn, Vm);
- break;
+ break;
+ case 0x1:
+ if (dp_operation) {
+ Format(instr, "vselvs.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vselvs.f32 'Sd, 'Sn, 'Sm");
}
- case 0x9: {
- if (instr->Bit(6) == 1 && instr->Bit(4) == 1) {
- // vmul.i<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmul.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
+ break;
+ case 0x2:
+ if (dp_operation) {
+ Format(instr, "vselge.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vselge.f32 'Sd, 'Sn, 'Sm");
}
- case 0xA: {
- // vpmin/vpmax.s<size> Dd, Dm, Dn.
- const char* op = instr->Bit(4) == 1 ? "vpmin" : "vpmax";
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d d%d, d%d, d%d",
- op, size, Vd, Vn, Vm);
- break;
+ break;
+ case 0x3:
+ if (dp_operation) {
+ Format(instr, "vselgt.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vselgt.f32 'Sd, 'Sn, 'Sm");
}
- case 0xB: {
- // vpadd.i<size> Dd, Dm, Dn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vpadd.i%d d%d, d%d, d%d",
- size, Vd, Vn, Vm);
- break;
+ break;
+ default:
+ UNREACHABLE(); // Case analysis is exhaustive.
+ break;
+ }
+ } else if (instr->Opc1Value() == 0x4 && op2) {
+ // Floating-point minNum/maxNum.
+ // VMAXNM, VMINNM (floating-point)
+ if (instr->SzValue() == 0x1) {
+ if (instr->Bit(6) == 0x1) {
+ Format(instr, "vminnm.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vmaxnm.f64 'Dd, 'Dn, 'Dm");
+ }
+ } else {
+ if (instr->Bit(6) == 0x1) {
+ Format(instr, "vminnm.f32 'Sd, 'Sn, 'Sm");
+ } else {
+ Format(instr, "vmaxnm.f32 'Sd, 'Sn, 'Sm");
+ }
+ }
+ } else if (instr->Opc1Value() == 0x7 && (op1 >> 3) && op2 && op3) {
+ // Floating-point directed convert to integer.
+ // VRINTA, VRINTN, VRINTP, VRINTM (floating-point)
+ bool dp_operation = (instr->SzValue() == 1);
+ int rounding_mode = instr->Bits(17, 16);
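+    // 0b00: ties-to-away (vrinta), 0b01: ties-to-even (vrintn),
+    // 0b10: toward +infinity (vrintp), 0b11: toward -infinity (vrintm).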
+ switch (rounding_mode) {
+ case 0x0:
+ if (dp_operation) {
+ Format(instr, "vrinta.f64.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vrinta.f32.f32 'Sd, 'Sm");
}
- case 0xD: {
- if (instr->Bit(4) == 0) {
- const char* op = (instr->Bits(21, 20) == 0) ? "vadd" : "vsub";
- // vadd/vsub.f32 Qd, Qm, Qn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
+ break;
+ case 0x1:
+ if (dp_operation) {
+ Format(instr, "vrintn.f64.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vrintn.f32.f32 'Sd, 'Sm");
}
- case 0xE: {
- if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 0) {
- // vceq.f32 Qd, Qm, Qn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vceq.f32 q%d, q%d, q%d", Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
+ break;
+ case 0x2:
+ if (dp_operation) {
+ Format(instr, "vrintp.f64.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vrintp.f32.f32 'Sd, 'Sm");
}
- case 0xF: {
- if (instr->Bit(20) == 0 && instr->Bit(6) == 1) {
- if (instr->Bit(4) == 1) {
- // vrecps/vrsqrts.f32 Qd, Qm, Qn.
- const char* op = instr->Bit(21) == 0 ? "vrecps" : "vrsqrts";
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
- } else {
- // vmin/max.f32 Qd, Qm, Qn.
- const char* op = instr->Bit(21) == 1 ? "vmin" : "vmax";
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
- }
- } else {
- Unknown(instr);
- }
- break;
+ break;
+ case 0x3:
+ if (dp_operation) {
+ Format(instr, "vrintm.f64.f64 'Dd, 'Dm");
+ } else {
+ Format(instr, "vrintm.f32.f32 'Sd, 'Sm");
}
- default:
- Unknown(instr);
- break;
- }
- break;
+ break;
+ default:
+ UNREACHABLE(); // Case analysis is exhaustive.
+ break;
}
- case 5:
- if (instr->Bit(23) == 1 && instr->Bits(21, 19) == 0 &&
- instr->Bit(7) == 0 && instr->Bit(4) == 1) {
- // One register and a modified immediate value, see ARM DDI 0406C.d
- // A7.4.6.
- DecodeVmovImmediate(instr);
- } else if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
- (instr->Bit(4) == 1)) {
- // vmovl signed
- if ((instr->VdValue() & 1) != 0) Unknown(instr);
- int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
- int Vm = (instr->Bit(5) << 4) | instr->VmValue();
- int imm3 = instr->Bits(21, 19);
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmovl.s%d q%d, d%d", imm3 * 8, Vd, Vm);
- } else if (instr->Bits(21, 20) == 3 && instr->Bit(4) == 0) {
- // vext.8 Qd, Qm, Qn, imm4
- int imm4 = instr->Bits(11, 8);
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vext.8 q%d, q%d, q%d, #%d",
- Vd, Vn, Vm, imm4);
- } else if (instr->Bits(11, 8) == 5 && instr->Bit(4) == 1) {
- // vshl.i<size> Qd, Qm, shift
- int imm7 = instr->Bits(21, 16);
- if (instr->Bit(7) != 0) imm7 += 64;
- int size = base::bits::RoundDownToPowerOfTwo32(imm7);
- int shift = imm7 - size;
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vshl.i%d q%d, q%d, #%d",
- size, Vd, Vm, shift);
- } else if (instr->Bits(11, 8) == 0 && instr->Bit(4) == 1) {
+ } else {
+ Unknown(instr);
+ }
+  // One class of decoding is missing here: floating-point extraction and
+  // insertion. It is currently unused by V8 and therefore omitted.
+}
+
+void Decoder::DecodeUnconditional(Instruction* instr) {
+ // This follows the decoding in F4.1.18 Unconditional instructions.
+ int op0 = instr->Bits(26, 25);
+ int op1 = instr->Bit(20);
+
+ // Four classes of decoding:
+ // - Miscellaneous (omitted, no instructions used in V8).
+ // - Advanced SIMD data-processing.
+ // - Memory hints and barriers.
+ // - Advanced SIMD element or structure load/store.
+ if (op0 == 0b01) {
+ DecodeAdvancedSIMDDataProcessing(instr);
+ } else if ((op0 & 0b10) == 0b10 && op1) {
+ DecodeMemoryHintsAndBarriers(instr);
+ } else if (op0 == 0b10 && !op1) {
+ DecodeAdvancedSIMDElementOrStructureLoadStore(instr);
+ } else {
+ Unknown(instr);
+ }
+}
+
+void Decoder::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
+ int op0 = instr->Bit(23);
+ int op1 = instr->Bit(4);
+ if (op0 == 0) {
+ // Advanced SIMD three registers of same length.
+ int Vd, Vm, Vn;
+ if (instr->Bit(6) == 0) {
+ Vd = instr->VFPDRegValue(kDoublePrecision);
+ Vm = instr->VFPMRegValue(kDoublePrecision);
+ Vn = instr->VFPNRegValue(kDoublePrecision);
+ } else {
+ Vd = instr->VFPDRegValue(kSimd128Precision);
+ Vm = instr->VFPMRegValue(kSimd128Precision);
+ Vn = instr->VFPNRegValue(kSimd128Precision);
+ }
+
+ int u = instr->Bit(24);
+ int opc = instr->Bits(11, 8);
+ int q = instr->Bit(6);
+ int sz = instr->Bits(21, 20);
+
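+    // Example: u == 0, opc == 0, op1 == 1 with sz == 0b01 takes the first
+    // arm below and prints "vqadd.s16 q<d>, q<n>, q<m>".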
+ if (!u && opc == 0 && op1) {
+ Format(instr, "vqadd.s'size 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 1 && sz == 2 && q && op1) {
+ if (Vm == Vn) {
+ Format(instr, "vmov 'Qd, 'Qm");
+ } else {
+ Format(instr, "vorr 'Qd, 'Qn, 'Qm");
+ }
+ } else if (!u && opc == 1 && sz == 1 && q && op1) {
+ Format(instr, "vbic 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 1 && sz == 0 && q && op1) {
+ Format(instr, "vand 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 2 && op1) {
+ Format(instr, "vqsub.s'size 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 3 && op1) {
+ Format(instr, "vcge.s'size 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 3 && !op1) {
+ Format(instr, "vcgt.s'size 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 4 && !op1) {
+ Format(instr, "vshl.s'size 'Qd, 'Qm, 'Qn");
+ } else if (!u && opc == 6 && op1) {
+ Format(instr, "vmin.s'size 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 6 && !op1) {
+ Format(instr, "vmax.s'size 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 8 && op1) {
+ Format(instr, "vtst.i'size 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 8 && !op1) {
+ Format(instr, "vadd.i'size 'Qd, 'Qn, 'Qm");
+ } else if (opc == 9 && op1) {
+ Format(instr, "vmul.i'size 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 0xA && op1) {
+ Format(instr, "vpmin.s'size 'Dd, 'Dn, 'Dm");
+ } else if (!u && opc == 0xA && !op1) {
+ Format(instr, "vpmax.s'size 'Dd, 'Dn, 'Dm");
+ } else if (!u && opc == 0xB) {
+ Format(instr, "vpadd.i'size 'Dd, 'Dn, 'Dm");
+ } else if (!u && !(sz >> 1) && opc == 0xD && !op1) {
+ Format(instr, "vadd.f32 'Qd, 'Qn, 'Qm");
+ } else if (!u && (sz >> 1) && opc == 0xD && !op1) {
+ Format(instr, "vsub.f32 'Qd, 'Qn, 'Qm");
+ } else if (!u && opc == 0xE && !sz && !op1) {
+ Format(instr, "vceq.f32 'Qd, 'Qn, 'Qm");
+ } else if (!u && !(sz >> 1) && opc == 0xF && op1) {
+ Format(instr, "vrecps.f32 'Qd, 'Qn, 'Qm");
+ } else if (!u && (sz >> 1) && opc == 0xF && op1) {
+ Format(instr, "vrsqrts.f32 'Qd, 'Qn, 'Qm");
+ } else if (!u && !(sz >> 1) && opc == 0xF && !op1) {
+ Format(instr, "vmax.f32 'Qd, 'Qn, 'Qm");
+ } else if (!u && (sz >> 1) && opc == 0xF && !op1) {
+ Format(instr, "vmin.f32 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 0 && op1) {
+ Format(instr, "vqadd.u'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 1 && sz == 1 && op1) {
+ Format(instr, "vbsl 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 1 && sz == 0 && q && op1) {
+ Format(instr, "veor 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 1 && sz == 0 && !q && op1) {
+ Format(instr, "veor 'Dd, 'Dn, 'Dm");
+ } else if (u && opc == 1 && !op1) {
+ Format(instr, "vrhadd.u'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 2 && op1) {
+ Format(instr, "vqsub.u'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 3 && op1) {
+ Format(instr, "vcge.u'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 3 && !op1) {
+ Format(instr, "vcgt.u'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 4 && !op1) {
+ Format(instr, "vshl.u'size 'Qd, 'Qm, 'Qn");
+ } else if (u && opc == 6 && op1) {
+ Format(instr, "vmin.u'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 6 && !op1) {
+ Format(instr, "vmax.u'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 8 && op1) {
+ Format(instr, "vceq.i'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 8 && !op1) {
+ Format(instr, "vsub.i'size 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 0xA && op1) {
+ Format(instr, "vpmin.u'size 'Dd, 'Dn, 'Dm");
+ } else if (u && opc == 0xA && !op1) {
+ Format(instr, "vpmax.u'size 'Dd, 'Dn, 'Dm");
+ } else if (u && opc == 0xD && sz == 0 && q && op1) {
+ Format(instr, "vmul.f32 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 0xD && sz == 0 && !q && !op1) {
+ Format(instr, "vpadd.f32 'Dd, 'Dn, 'Dm");
+ } else if (u && opc == 0xE && !(sz >> 1) && !op1) {
+ Format(instr, "vcge.f32 'Qd, 'Qn, 'Qm");
+ } else if (u && opc == 0xE && (sz >> 1) && !op1) {
+ Format(instr, "vcgt.f32 'Qd, 'Qn, 'Qm");
+ } else {
+ Unknown(instr);
+ }
+ } else if (op0 == 1 && op1 == 0) {
+ DecodeAdvancedSIMDTwoOrThreeRegisters(instr);
+ } else if (op0 == 1 && op1 == 1) {
+ // Advanced SIMD shifts and immediate generation.
+ if (instr->Bits(21, 19) == 0 && instr->Bit(7) == 0) {
+ // Advanced SIMD one register and modified immediate.
+ DecodeVmovImmediate(instr);
+ } else {
+ // Advanced SIMD two registers and shift amount.
+ int u = instr->Bit(24);
+ int imm3H = instr->Bits(21, 19);
+ int imm3L = instr->Bits(18, 16);
+ int opc = instr->Bits(11, 8);
+ int l = instr->Bit(7);
+ int q = instr->Bit(6);
+ int imm3H_L = imm3H << 1 | l;
+
+ if (imm3H_L != 0 && opc == 0) {
// vshr.s<size> Qd, Qm, shift
- int imm7 = instr->Bits(21, 16);
- if (instr->Bit(7) != 0) imm7 += 64;
+ int imm7 = (l << 6) | instr->Bits(21, 16);
int size = base::bits::RoundDownToPowerOfTwo32(imm7);
int shift = 2 * size - imm7;
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vshr.s%d q%d, q%d, #%d",
- size, Vd, Vm, shift);
- } else if (instr->Bits(11, 8) == 0xC && instr->Bit(6) == 0 &&
- instr->Bit(4) == 0) {
- // vmull.s<size> Qd, Dn, Dm
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vshr.%s%d q%d, q%d, #%d",
+ u ? "u" : "s", size, Vd, Vm, shift);
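+        // Example: imm7 == 120 decodes as size 64 and shift 128 - 120 == 8,
+        // i.e. "vshr.s64 q<d>, q<m>, #8".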
+ } else if (imm3H_L != 0 && imm3L == 0 && opc == 0b1010 && !q) {
+ // vmovl
+ if ((instr->VdValue() & 1) != 0) Unknown(instr);
int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kDoublePrecision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
- int size = 8 << instr->Bits(21, 20);
+ int imm3H = instr->Bits(21, 19);
out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vmull.s%d q%d, d%d, d%d",
- size, Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
- case 6: {
- int Vd, Vm, Vn;
- if (instr->Bit(6) == 0) {
- Vd = instr->VFPDRegValue(kDoublePrecision);
- Vm = instr->VFPMRegValue(kDoublePrecision);
- Vn = instr->VFPNRegValue(kDoublePrecision);
- } else {
- Vd = instr->VFPDRegValue(kSimd128Precision);
- Vm = instr->VFPMRegValue(kSimd128Precision);
- Vn = instr->VFPNRegValue(kSimd128Precision);
- }
- int size = kBitsPerByte * (1 << instr->Bits(21, 20));
- switch (instr->Bits(11, 8)) {
- case 0x0: {
- if (instr->Bit(4) == 1) {
- // vqadd.u<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vqadd.u%d q%d, q%d, q%d", size, Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0x1: {
- if (instr->Bits(21, 20) == 1 && instr->Bit(4) == 1) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vbsl q%d, q%d, q%d", Vd, Vn, Vm);
- } else if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 1) {
- if (instr->Bit(6) == 0) {
- // veor Dd, Dn, Dm
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "veor d%d, d%d, d%d", Vd, Vn, Vm);
-
- } else {
- // veor Qd, Qn, Qm
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "veor q%d, q%d, q%d", Vd, Vn, Vm);
- }
- } else if (instr->Bit(4) == 0) {
- if (instr->Bit(6) == 1) {
- // vrhadd.u<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vrhadd.u%d q%d, q%d, q%d", size, Vd, Vn, Vm);
- } else {
- // vrhadd.u<size> Dd, Dm, Dn.
- Unknown(instr);
- }
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0x2: {
- if (instr->Bit(4) == 1) {
- // vqsub.u<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vqsub.u%d q%d, q%d, q%d", size, Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0x3: {
- const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
- // vcge/vcgt.u<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d",
- op, size, Vd, Vn, Vm);
- break;
- }
- case 0x4: {
- if (instr->Bit(4) == 0) {
- // vshl.u<size> Qd, Qm, Qn.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vshl.u%d q%d, q%d, q%d", size, Vd, Vm, Vn);
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0x6: {
- // vmin/vmax.u<size> Qd, Qm, Qn.
- const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d",
- op, size, Vd, Vn, Vm);
- break;
- }
- case 0x8: {
- if (instr->Bit(4) == 0) {
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vsub.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
- } else {
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_,
- "vceq.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
- }
- break;
- }
- case 0xA: {
- // vpmin/vpmax.u<size> Dd, Dm, Dn.
- const char* op = instr->Bit(4) == 1 ? "vpmin" : "vpmax";
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d d%d, d%d, d%d",
- op, size, Vd, Vn, Vm);
- break;
- }
- case 0xD: {
- if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- // vmul.f32 Qd, Qm, Qn
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmul.f32 q%d, q%d, q%d", Vd, Vn, Vm);
- } else if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 0 &&
- instr->Bit(4) == 0) {
- // vpadd.f32 Dd, Dm, Dn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vpadd.f32 d%d, d%d, d%d", Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
- }
- case 0xE: {
- if (instr->Bit(20) == 0 && instr->Bit(4) == 0) {
- const char* op = (instr->Bit(21) == 0) ? "vcge" : "vcgt";
- // vcge/vcgt.f32 Qd, Qm, Qn.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
- } else {
- Unknown(instr);
- }
- break;
- }
- default:
- Unknown(instr);
- break;
- }
- break;
- }
- case 7:
- if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
- (instr->Bit(4) == 1)) {
- // vmovl unsigned
- if ((instr->VdValue() & 1) != 0) Unknown(instr);
- int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
- int Vm = (instr->Bit(5) << 4) | instr->VmValue();
- int imm3 = instr->Bits(21, 19);
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmovl.u%d q%d, d%d", imm3 * 8, Vd, Vm);
- } else if (instr->Opc1Value() == 7 && instr->Bit(4) == 0) {
- if (instr->Bits(11, 7) == 0x18) {
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- int imm4 = instr->Bits(19, 16);
- int size = 0, index = 0;
- if ((imm4 & 0x1) != 0) {
- size = 8;
- index = imm4 >> 1;
- } else if ((imm4 & 0x2) != 0) {
- size = 16;
- index = imm4 >> 2;
- } else {
- size = 32;
- index = imm4 >> 3;
- }
- if (instr->Bit(6) == 0) {
- int Vd = instr->VFPDRegValue(kDoublePrecision);
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vdup.%i d%d, d%d[%d]",
- size, Vd, Vm, index);
- } else {
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vdup.%i q%d, d%d[%d]",
- size, Vd, Vm, index);
- }
- } else if (instr->Bits(11, 10) == 0x2) {
- int Vd = instr->VFPDRegValue(kDoublePrecision);
- int Vn = instr->VFPNRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- int len = instr->Bits(9, 8);
- NeonListOperand list(DwVfpRegister::from_code(Vn), len + 1);
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s d%d, ",
- instr->Bit(6) == 0 ? "vtbl.8" : "vtbx.8", Vd);
- FormatNeonList(Vn, list.type());
- Print(", ");
- PrintDRegister(Vm);
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 8) == 0x2 &&
- instr->Bits(7, 6) != 0) {
- // vqmov{u}n.<type><size> Dd, Qm.
- int Vd = instr->VFPDRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int op = instr->Bits(7, 6);
- const char* name = op == 0b01 ? "vqmovun" : "vqmovn";
- char type = op == 0b11 ? 'u' : 's';
- int size = 2 * kBitsPerByte * (1 << instr->Bits(19, 18));
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%s.%c%i d%d, q%d", name,
- type, size, Vd, Vm);
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bit(10) == 1) {
- // NEON vrintm, vrintn, vrintp, vrintz.
- bool dp_op = instr->Bit(6) == 0;
- int rounding_mode = instr->Bits(9, 7);
- switch (rounding_mode) {
- case 0:
- if (dp_op) {
- Format(instr, "vrintn.f32 'Dd, 'Dm");
- } else {
- Format(instr, "vrintn.f32 'Qd, 'Qm");
- }
- break;
- case 3:
- if (dp_op) {
- Format(instr, "vrintz.f32 'Dd, 'Dm");
- } else {
- Format(instr, "vrintz.f32 'Qd, 'Qm");
- }
- break;
- case 5:
- if (dp_op) {
- Format(instr, "vrintm.f32 'Dd, 'Dm");
- } else {
- Format(instr, "vrintm.f32 'Qd, 'Qm");
- }
- break;
- case 7:
- if (dp_op) {
- Format(instr, "vrintp.f32 'Dd, 'Dm");
- } else {
- Format(instr, "vrintp.f32 'Qd, 'Qm");
- }
- break;
- default:
- UNIMPLEMENTED();
- }
- } else {
- int Vd, Vm;
- if (instr->Bit(6) == 0) {
- Vd = instr->VFPDRegValue(kDoublePrecision);
- Vm = instr->VFPMRegValue(kDoublePrecision);
- } else {
- Vd = instr->VFPDRegValue(kSimd128Precision);
- Vm = instr->VFPMRegValue(kSimd128Precision);
- }
- if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0) {
- if (instr->Bit(6) == 0) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vswp d%d, d%d", Vd, Vm);
- } else {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vswp q%d, q%d", Vd, Vm);
- }
- } else if (instr->Bits(19, 16) == 0 && instr->Bits(11, 6) == 0x17) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vmvn q%d, q%d", Vd, Vm);
- } else if (instr->Bits(19, 16) == 0xB && instr->Bits(11, 9) == 0x3 &&
- instr->Bit(6) == 1) {
- const char* suffix = nullptr;
- int op = instr->Bits(8, 7);
- switch (op) {
- case 0:
- suffix = "f32.s32";
- break;
- case 1:
- suffix = "f32.u32";
- break;
- case 2:
- suffix = "s32.f32";
- break;
- case 3:
- suffix = "u32.f32";
- break;
- }
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vcvt.%s q%d, q%d", suffix, Vd, Vm);
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 8) == 0x1) {
- int size = kBitsPerByte * (1 << instr->Bits(19, 18));
- const char* op = instr->Bit(7) != 0 ? "vzip" : "vuzp";
- if (instr->Bit(6) == 0) {
- // vzip/vuzp.<size> Dd, Dm.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.%d d%d, d%d", op, size, Vd, Vm);
- } else {
- // vzip/vuzp.<size> Qd, Qm.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.%d q%d, q%d", op, size, Vd, Vm);
- }
- } else if (instr->Bits(17, 16) == 0 && instr->Bits(11, 9) == 0 &&
- instr->Bit(6) == 1) {
- int size = kBitsPerByte * (1 << instr->Bits(19, 18));
- int op = kBitsPerByte
- << (static_cast<int>(Neon64) - instr->Bits(8, 7));
- // vrev<op>.<size> Qd, Qm.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vrev%d.%d q%d, q%d", op, size, Vd, Vm);
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0x1) {
- int size = kBitsPerByte * (1 << instr->Bits(19, 18));
- if (instr->Bit(6) == 0) {
- // vtrn.<size> Dd, Dm.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vtrn.%d d%d, d%d", size, Vd, Vm);
- } else {
- // vtrn.<size> Qd, Qm.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vtrn.%d q%d, q%d", size, Vd, Vm);
- }
- } else if (instr->Bits(17, 16) == 0x1 && instr->Bit(11) == 0 &&
- instr->Bit(6) == 1) {
- int size = kBitsPerByte * (1 << instr->Bits(19, 18));
- char type = instr->Bit(10) != 0 ? 'f' : 's';
- if (instr->Bits(9, 6) == 0xD) {
- // vabs<type>.<size> Qd, Qm.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vabs.%c%d q%d, q%d",
- type, size, Vd, Vm);
- } else if (instr->Bits(9, 6) == 0xF) {
- // vneg<type>.<size> Qd, Qm.
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vneg.%c%d q%d, q%d",
- type, size, Vd, Vm);
- } else {
- Unknown(instr);
- }
- } else if (instr->Bits(19, 18) == 0x2 && instr->Bits(11, 8) == 0x5 &&
- instr->Bit(6) == 1) {
- // vrecpe/vrsqrte.f32 Qd, Qm.
- const char* op = instr->Bit(7) == 0 ? "vrecpe" : "vrsqrte";
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s.f32 q%d, q%d", op, Vd, Vm);
- } else {
- Unknown(instr);
- }
- }
- } else if (instr->Bits(11, 8) == 0 && instr->Bit(4) == 1 &&
- instr->Bit(6) == 1) {
- // vshr.u<size> Qd, Qm, shift
- int imm7 = instr->Bits(21, 16);
- if (instr->Bit(7) != 0) imm7 += 64;
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vmovl.%s%d q%d, d%d",
+ u ? "u" : "s", imm3H * 8, Vd, Vm);
+ } else if (!u && imm3H_L != 0 && opc == 0b0101) {
+ // vshl.i<size> Qd, Qm, shift
+ int imm7 = (l << 6) | instr->Bits(21, 16);
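+        // The most significant set bit of imm7 encodes the element size and
+        // the bits below it the shift amount, so size = 2^k and
+        // shift = imm7 - size. For example, imm7 = 0b0010011 encodes a
+        // 16-bit element shifted left by 3.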
int size = base::bits::RoundDownToPowerOfTwo32(imm7);
- int shift = 2 * size - imm7;
+ int shift = imm7 - size;
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vshr.u%d q%d, q%d, #%d",
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vshl.i%d q%d, q%d, #%d",
size, Vd, Vm, shift);
- } else if (instr->Bit(10) == 1 && instr->Bit(6) == 0 &&
- instr->Bit(4) == 1) {
+ } else if (u && imm3H_L != 0 && (opc & 0b1110) == 0b0100) {
// vsli.<size> Dd, Dm, shift
// vsri.<size> Dd, Dm, shift
- int imm7 = instr->Bits(21, 16);
- if (instr->Bit(7) != 0) imm7 += 64;
+ int imm7 = (l << 6) | instr->Bits(21, 16);
int size = base::bits::RoundDownToPowerOfTwo32(imm7);
int shift;
char direction;
@@ -2462,208 +2196,299 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vs%ci.%d d%d, d%d, #%d",
direction, size, Vd, Vm, shift);
- } else if (instr->Bits(11, 8) == 0x8 && instr->Bit(6) == 0 &&
- instr->Bit(4) == 0) {
- // vmlal.u<size> <Qd>, <Dn>, <Dm>
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- int size = 8 << instr->Bits(21, 20);
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vmlal.u%d q%d, d%d, d%d",
- size, Vd, Vn, Vm);
- } else if (instr->Bits(11, 8) == 0xC && instr->Bit(6) == 0 &&
- instr->Bit(4) == 0) {
- // vmull.u<size> <Qd>, <Dn>, <Dm>
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- int size = 8 << instr->Bits(21, 20);
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "vmull.u%d q%d, d%d, d%d",
- size, Vd, Vn, Vm);
- } else if (instr->Bits(21, 19) == 0 && instr->Bit(7) == 0 &&
- instr->Bit(4) == 1) {
- // One register and a modified immediate value, see ARM DDI 0406C.d
- // A7.4.6.
- DecodeVmovImmediate(instr);
- } else {
- Unknown(instr);
}
- break;
- case 8:
- if (instr->Bits(21, 20) == 0) {
- // vst1
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
- int Rn = instr->VnValue();
- int type = instr->Bits(11, 8);
- int size = instr->Bits(7, 6);
- int align = instr->Bits(5, 4);
- int Rm = instr->VmValue();
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "vst1.%d ",
- (1 << size) << 3);
- FormatNeonList(Vd, type);
- Print(", ");
- FormatNeonMemory(Rn, align, Rm);
- } else if (instr->Bits(21, 20) == 2) {
- // vld1
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
- int Rn = instr->VnValue();
- int type = instr->Bits(11, 8);
- int size = instr->Bits(7, 6);
- int align = instr->Bits(5, 4);
- int Rm = instr->VmValue();
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "vld1.%d ",
- (1 << size) << 3);
- FormatNeonList(Vd, type);
- Print(", ");
- FormatNeonMemory(Rn, align, Rm);
+ }
+ } else {
+ Unknown(instr);
+ }
+}
+
+void Decoder::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
+ // Advanced SIMD two registers, or three registers of different lengths.
+ int op0 = instr->Bit(24);
+ int op1 = instr->Bits(21, 20);
+ int op2 = instr->Bits(11, 10);
+ int op3 = instr->Bit(6);
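+  // op0-op3 jointly select the sub-encoding; compare the Advanced SIMD
+  // encoding tables in ARM DDI 0406C.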
+ if (!op0 && op1 == 0b11) {
+    // vext.8 Qd, Qn, Qm, #imm4
+ int imm4 = instr->Bits(11, 8);
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vext.8 q%d, q%d, q%d, #%d", Vd, Vn, Vm, imm4);
+ } else if (op0 && op1 == 0b11 && ((op2 >> 1) == 0)) {
+ // Advanced SIMD two registers misc
+ int size = instr->Bits(19, 18);
+ int opc1 = instr->Bits(17, 16);
+ int opc2 = instr->Bits(10, 7);
+ int q = instr->Bit(6);
+
+ int Vd, Vm;
+ if (q) {
+ Vd = instr->VFPDRegValue(kSimd128Precision);
+ Vm = instr->VFPMRegValue(kSimd128Precision);
+ } else {
+ Vd = instr->VFPDRegValue(kDoublePrecision);
+ Vm = instr->VFPMRegValue(kDoublePrecision);
+ }
+
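+    // Element size in bits: 8 << size, i.e. 8, 16, 32, or 64.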
+ int esize = kBitsPerByte * (1 << size);
+ if (opc1 == 0 && (opc2 >> 2) == 0) {
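+      // Bits 8:7 select vrev64, vrev32, or vrev16; op is the reversal
+      // region width: 64, 32, or 16 bits.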
+ int op = kBitsPerByte << (static_cast<int>(Neon64) - instr->Bits(8, 7));
+ // vrev<op>.<esize> Qd, Qm.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vrev%d.%d q%d, q%d", op, esize, Vd, Vm);
+ } else if (size == 0 && opc1 == 0b10 && opc2 == 0) {
+ Format(instr, q ? "vswp 'Qd, 'Qm" : "vswp 'Dd, 'Dm");
+ } else if (opc1 == 0 && opc2 == 0b1011) {
+ Format(instr, "vmvn 'Qd, 'Qm");
+ } else if (opc1 == 0b01 && (opc2 & 0b0111) == 0b110) {
+ // vabs<type>.<esize> Qd, Qm.
+ char type = instr->Bit(10) != 0 ? 'f' : 's';
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vabs.%c%d q%d, q%d", type, esize, Vd, Vm);
+ } else if (opc1 == 0b01 && (opc2 & 0b0111) == 0b111) {
+ // vneg<type>.<esize> Qd, Qm.
+ char type = instr->Bit(10) != 0 ? 'f' : 's';
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vneg.%c%d q%d, q%d", type, esize, Vd, Vm);
+ } else if (opc1 == 0b10 && opc2 == 0b0001) {
+ if (q) {
+ // vtrn.<esize> Qd, Qm.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vtrn.%d q%d, q%d", esize, Vd, Vm);
} else {
- Unknown(instr);
+ // vtrn.<esize> Dd, Dm.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vtrn.%d d%d, d%d", esize, Vd, Vm);
}
- break;
- case 0xA:
- case 0xB:
- if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xF)) {
- const char* rn_name = converter_.NameOfCPURegister(instr->Bits(19, 16));
- int offset = instr->Bits(11, 0);
- if (offset == 0) {
- out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "pld [%s]", rn_name);
- } else if (instr->Bit(23) == 0) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "pld [%s, #-%d]", rn_name, offset);
- } else {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "pld [%s, #+%d]", rn_name, offset);
- }
- } else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) {
- int option = instr->Bits(3, 0);
- switch (instr->Bits(7, 4)) {
- case 4:
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "dsb %s",
- barrier_option_names[option]);
- break;
- case 5:
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "dmb %s",
- barrier_option_names[option]);
- break;
- case 6:
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "isb %s",
- barrier_option_names[option]);
- break;
- default:
- Unknown(instr);
- }
+ } else if (opc1 == 0b10 && (opc2 & 0b1110) == 0b0010) {
+ const char* op = instr->Bit(7) != 0 ? "vzip" : "vuzp";
+ if (q) {
+ // vzip/vuzp.<esize> Qd, Qm.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s.%d q%d, q%d", op, esize, Vd, Vm);
} else {
- Unknown(instr);
+ // vzip/vuzp.<esize> Dd, Dm.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s.%d d%d, d%d", op, esize, Vd, Vm);
+ }
+ } else if (opc1 == 0b10 && (opc2 & 0b1110) == 0b0100) {
+ // vqmov{u}n.<type><esize> Dd, Qm.
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int op = instr->Bits(7, 6);
+ const char* name = op == 0b01 ? "vqmovun" : "vqmovn";
+ char type = op == 0b11 ? 'u' : 's';
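+      // The mnemonic suffix names the source element width (esize << 1);
+      // each element is narrowed to half that width.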
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s.%c%i d%d, q%d", name,
+ type, esize << 1, Vd, Vm);
+ } else if (opc1 == 0b10 && opc2 == 0b1000) {
+ Format(instr, q ? "vrintn.f32 'Qd, 'Qm" : "vrintn.f32 'Dd, 'Dm");
+ } else if (opc1 == 0b10 && opc2 == 0b1011) {
+ Format(instr, q ? "vrintz.f32 'Qd, 'Qm" : "vrintz.f32 'Dd, 'Dm");
+ } else if (opc1 == 0b10 && opc2 == 0b1101) {
+      Format(instr, q ? "vrintm.f32 'Qd, 'Qm" : "vrintm.f32 'Dd, 'Dm");
+ } else if (opc1 == 0b10 && opc2 == 0b1111) {
+      Format(instr, q ? "vrintp.f32 'Qd, 'Qm" : "vrintp.f32 'Dd, 'Dm");
+ } else if (opc1 == 0b11 && (opc2 & 0b1101) == 0b1000) {
+ Format(instr, "vrecpe.f32 'Qd, 'Qm");
+ } else if (opc1 == 0b11 && (opc2 & 0b1101) == 0b1001) {
+ Format(instr, "vrsqrte.f32 'Qd, 'Qm");
+ } else if (opc1 == 0b11 && (opc2 & 0b1100) == 0b1100) {
+ const char* suffix = nullptr;
+ int op = instr->Bits(8, 7);
+ switch (op) {
+ case 0:
+ suffix = "f32.s32";
+ break;
+ case 1:
+ suffix = "f32.u32";
+ break;
+ case 2:
+ suffix = "s32.f32";
+ break;
+ case 3:
+ suffix = "u32.f32";
+ break;
}
- break;
- case 0x1D:
- if (instr->Opc1Value() == 0x7 && instr->Bits(19, 18) == 0x2 &&
- instr->Bits(11, 9) == 0x5 && instr->Bits(7, 6) == 0x1 &&
- instr->Bit(4) == 0x0) {
- // VRINTA, VRINTN, VRINTP, VRINTM (floating-point)
- bool dp_operation = (instr->SzValue() == 1);
- int rounding_mode = instr->Bits(17, 16);
- switch (rounding_mode) {
- case 0x0:
- if (dp_operation) {
- Format(instr, "vrinta.f64.f64 'Dd, 'Dm");
- } else {
- Format(instr, "vrinta.f32.f32 'Sd, 'Sm");
- }
- break;
- case 0x1:
- if (dp_operation) {
- Format(instr, "vrintn.f64.f64 'Dd, 'Dm");
- } else {
- Format(instr, "vrintn.f32.f32 'Sd, 'Sm");
- }
- break;
- case 0x2:
- if (dp_operation) {
- Format(instr, "vrintp.f64.f64 'Dd, 'Dm");
- } else {
- Format(instr, "vrintp.f32.f32 'Sd, 'Sm");
- }
- break;
- case 0x3:
- if (dp_operation) {
- Format(instr, "vrintm.f64.f64 'Dd, 'Dm");
- } else {
- Format(instr, "vrintm.f32.f32 'Sd, 'Sm");
- }
- break;
- default:
- UNREACHABLE(); // Case analysis is exhaustive.
- break;
- }
- } else if ((instr->Opc1Value() == 0x4) && (instr->Bits(11, 9) == 0x5) &&
- (instr->Bit(4) == 0x0)) {
- // VMAXNM, VMINNM (floating-point)
- if (instr->SzValue() == 0x1) {
- if (instr->Bit(6) == 0x1) {
- Format(instr, "vminnm.f64 'Dd, 'Dn, 'Dm");
- } else {
- Format(instr, "vmaxnm.f64 'Dd, 'Dn, 'Dm");
- }
- } else {
- if (instr->Bit(6) == 0x1) {
- Format(instr, "vminnm.f32 'Sd, 'Sn, 'Sm");
- } else {
- Format(instr, "vmaxnm.f32 'Sd, 'Sn, 'Sm");
- }
- }
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vcvt.%s q%d, q%d", suffix, Vd, Vm);
+ }
+ } else if (op0 && op1 == 0b11 && op2 == 0b10) {
+ // VTBL, VTBX
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vn = instr->VFPNRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ int len = instr->Bits(9, 8);
+ NeonListOperand list(DwVfpRegister::from_code(Vn), len + 1);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s d%d, ",
+ instr->Bit(6) == 0 ? "vtbl.8" : "vtbx.8", Vd);
+ FormatNeonList(Vn, list.type());
+ Print(", ");
+ PrintDRegister(Vm);
+ } else if (op0 && op1 == 0b11 && op2 == 0b11) {
+ // Advanced SIMD duplicate (scalar)
+ if (instr->Bits(9, 7) == 0) {
+ // VDUP (scalar)
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ int imm4 = instr->Bits(19, 16);
+ int esize = 0, index = 0;
+ if ((imm4 & 0x1) != 0) {
+ esize = 8;
+ index = imm4 >> 1;
+ } else if ((imm4 & 0x2) != 0) {
+ esize = 16;
+ index = imm4 >> 2;
} else {
- Unknown(instr);
+ esize = 32;
+ index = imm4 >> 3;
}
- break;
- case 0x1C:
- if ((instr->Bits(11, 9) == 0x5) && (instr->Bit(6) == 0) &&
- (instr->Bit(4) == 0)) {
- // VSEL* (floating-point)
- bool dp_operation = (instr->SzValue() == 1);
- switch (instr->Bits(21, 20)) {
- case 0x0:
- if (dp_operation) {
- Format(instr, "vseleq.f64 'Dd, 'Dn, 'Dm");
- } else {
- Format(instr, "vseleq.f32 'Sd, 'Sn, 'Sm");
- }
- break;
- case 0x1:
- if (dp_operation) {
- Format(instr, "vselvs.f64 'Dd, 'Dn, 'Dm");
- } else {
- Format(instr, "vselvs.f32 'Sd, 'Sn, 'Sm");
- }
- break;
- case 0x2:
- if (dp_operation) {
- Format(instr, "vselge.f64 'Dd, 'Dn, 'Dm");
- } else {
- Format(instr, "vselge.f32 'Sd, 'Sn, 'Sm");
- }
- break;
- case 0x3:
- if (dp_operation) {
- Format(instr, "vselgt.f64 'Dd, 'Dn, 'Dm");
- } else {
- Format(instr, "vselgt.f32 'Sd, 'Sn, 'Sm");
- }
- break;
- default:
- UNREACHABLE(); // Case analysis is exhaustive.
- break;
- }
+ if (instr->Bit(6) == 0) {
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vdup.%i d%d, d%d[%d]",
+ esize, Vd, Vm, index);
} else {
- Unknown(instr);
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vdup.%i q%d, d%d[%d]",
+ esize, Vd, Vm, index);
}
- break;
- default:
+ } else {
Unknown(instr);
- break;
+ }
+ } else if (op1 != 0b11 && !op3) {
+ // Advanced SIMD three registers of different lengths.
+ int u = instr->Bit(24);
+ int opc = instr->Bits(11, 8);
+ if (opc == 0b1000) {
+ // vmlal.u<esize> <Qd>, <Dn>, <Dm>
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ int esize = 8 << instr->Bits(21, 20);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmlal.u%d q%d, d%d, d%d", esize, Vd, Vn, Vm);
+ } else if (opc == 0b1100) {
+ // vmull.s/u<esize> Qd, Dn, Dm
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ int esize = 8 << instr->Bits(21, 20);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vmull.%s%d q%d, d%d, d%d",
+ u ? "u" : "s", esize, Vd, Vn, Vm);
+ }
+ } else if (op1 != 0b11 && op3) {
+ // The instructions specified by this encoding are not used in V8.
+ Unknown(instr);
+ } else {
+ Unknown(instr);
+ }
+}
+
+void Decoder::DecodeMemoryHintsAndBarriers(Instruction* instr) {
+ int op0 = instr->Bits(25, 21);
+ if (op0 == 0b01011) {
+ // Barriers.
+ int option = instr->Bits(3, 0);
+ switch (instr->Bits(7, 4)) {
+ case 4:
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "dsb %s",
+ barrier_option_names[option]);
+ break;
+ case 5:
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "dmb %s",
+ barrier_option_names[option]);
+ break;
+ case 6:
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "isb %s",
+ barrier_option_names[option]);
+ break;
+ default:
+ Unknown(instr);
+ }
+ } else if ((op0 & 0b10001) == 0b00000 && !instr->Bit(4)) {
+ // Preload (immediate).
+ const char* rn_name = converter_.NameOfCPURegister(instr->Bits(19, 16));
+ int offset = instr->Bits(11, 0);
+ if (offset == 0) {
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "pld [%s]", rn_name);
+ } else if (instr->Bit(23) == 0) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [%s, #-%d]", rn_name, offset);
+ } else {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "pld [%s, #+%d]", rn_name, offset);
+ }
+ } else {
+ Unknown(instr);
+ }
+}
+
+void Decoder::DecodeAdvancedSIMDElementOrStructureLoadStore(
+ Instruction* instr) {
+ int op0 = instr->Bit(23);
+ int op1 = instr->Bits(11, 10);
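+  // Bit 21 (l) selects load (vld*) when set and store (vst*) when clear.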
+ int l = instr->Bit(21);
+ int n = instr->Bits(9, 8);
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Rn = instr->VnValue();
+ int Rm = instr->VmValue();
+
+ if (op0 == 0) {
+ // Advanced SIMD load/store multiple structures.
+ int itype = instr->Bits(11, 8);
+ if (itype == 0b0010) {
+ // vld1/vst1
+ int size = instr->Bits(7, 6);
+ int align = instr->Bits(5, 4);
+ const char* op = l ? "vld1.%d " : "vst1.%d ";
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, op, (1 << size) << 3);
+ FormatNeonList(Vd, itype);
+ Print(", ");
+ FormatNeonMemory(Rn, align, Rm);
+ } else {
+ Unknown(instr);
+ }
+ } else if (op1 == 0b11) {
+ // Advanced SIMD load single structure to all lanes.
+ if (l && n == 0b00) {
+      // vld1r: load one element and replicate it to all lanes.
+ int size = instr->Bits(7, 6);
+ DCHECK_NE(0b11, size);
+ int type = instr->Bit(5) ? nlt_2 : nlt_1;
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vld1.%d ", (1 << size) << 3);
+ FormatNeonList(Vd, type);
+ DCHECK_EQ(0, instr->Bit(4)); // Alignment not supported.
+ Print(", ");
+ FormatNeonMemory(Rn, 0, Rm);
+ } else {
+ Unknown(instr);
+ }
+ } else if (op1 != 0b11) {
+ // Advanced SIMD load/store single structure to one lane.
+ int size = op1; // size and op1 occupy the same bits in decoding.
+ if (l && n == 0b00) {
+ // VLD1 (single element to one lane) - A1, A2, A3
+ int index_align = instr->Bits(7, 4);
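+      // The lane index occupies the top bits of index_align; the low
+      // size + 1 bits encode alignment, which is not printed here.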
+ int index = index_align >> (size + 1);
+ // Omit alignment.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vld1.%d {d%d[%d]}",
+ (1 << size) << 3, Vd, index);
+ Print(", ");
+ FormatNeonMemory(Rn, 0, Rm);
+ } else {
+ Unknown(instr);
+ }
+ } else {
+ Unknown(instr);
}
}
diff --git a/deps/v8/src/diagnostics/arm/unwinder-arm.cc b/deps/v8/src/diagnostics/arm/unwinder-arm.cc
new file mode 100644
index 0000000000..171a258a0c
--- /dev/null
+++ b/deps/v8/src/diagnostics/arm/unwinder-arm.cc
@@ -0,0 +1,37 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8-unwinder-state.h"
+#include "src/diagnostics/unwinder.h"
+#include "src/execution/frame-constants.h"
+
+namespace v8 {
+
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {
+ const i::Address base_addr =
+ reinterpret_cast<i::Address>(fp) +
+ i::EntryFrameConstants::kDirectCallerRRegistersOffset;
+
+ if (!register_state->callee_saved) {
+ register_state->callee_saved = std::make_unique<CalleeSavedRegisters>();
+ }
+
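+  // The JSEntry stub spills r4-r10 at a fixed offset from fp; read each
+  // pointer-sized slot back into the register state.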
+ register_state->callee_saved->arm_r4 =
+ reinterpret_cast<void*>(Load(base_addr + 0 * i::kSystemPointerSize));
+ register_state->callee_saved->arm_r5 =
+ reinterpret_cast<void*>(Load(base_addr + 1 * i::kSystemPointerSize));
+ register_state->callee_saved->arm_r6 =
+ reinterpret_cast<void*>(Load(base_addr + 2 * i::kSystemPointerSize));
+ register_state->callee_saved->arm_r7 =
+ reinterpret_cast<void*>(Load(base_addr + 3 * i::kSystemPointerSize));
+ register_state->callee_saved->arm_r8 =
+ reinterpret_cast<void*>(Load(base_addr + 4 * i::kSystemPointerSize));
+ register_state->callee_saved->arm_r9 =
+ reinterpret_cast<void*>(Load(base_addr + 5 * i::kSystemPointerSize));
+ register_state->callee_saved->arm_r10 =
+ reinterpret_cast<void*>(Load(base_addr + 6 * i::kSystemPointerSize));
+}
+
+} // namespace v8
diff --git a/deps/v8/src/diagnostics/arm64/unwinder-arm64.cc b/deps/v8/src/diagnostics/arm64/unwinder-arm64.cc
new file mode 100644
index 0000000000..5a92512a17
--- /dev/null
+++ b/deps/v8/src/diagnostics/arm64/unwinder-arm64.cc
@@ -0,0 +1,12 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/diagnostics/unwinder.h"
+
+namespace v8 {
+
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+
+} // namespace v8
diff --git a/deps/v8/src/diagnostics/basic-block-profiler.cc b/deps/v8/src/diagnostics/basic-block-profiler.cc
index 95e2cb8dae..22ba4c6da1 100644
--- a/deps/v8/src/diagnostics/basic-block-profiler.cc
+++ b/deps/v8/src/diagnostics/basic-block-profiler.cc
@@ -10,7 +10,7 @@
#include "src/base/lazy-instance.h"
#include "src/heap/heap-inl.h"
-#include "torque-generated/exported-class-definitions-inl.h"
+#include "src/objects/shared-function-info-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/diagnostics/basic-block-profiler.h b/deps/v8/src/diagnostics/basic-block-profiler.h
index 41d0e65ccb..706505939b 100644
--- a/deps/v8/src/diagnostics/basic-block-profiler.h
+++ b/deps/v8/src/diagnostics/basic-block-profiler.h
@@ -14,11 +14,13 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
-#include "torque-generated/exported-class-definitions.h"
+#include "src/objects/shared-function-info.h"
namespace v8 {
namespace internal {
+class OnHeapBasicBlockProfilerData;
+
class BasicBlockProfilerData {
public:
explicit BasicBlockProfilerData(size_t n_blocks);
diff --git a/deps/v8/src/diagnostics/disassembler.cc b/deps/v8/src/diagnostics/disassembler.cc
index 8c7cab195b..a26a4134c2 100644
--- a/deps/v8/src/diagnostics/disassembler.cc
+++ b/deps/v8/src/diagnostics/disassembler.cc
@@ -253,8 +253,7 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
host.as_wasm_code()->native_module()->GetRuntimeStubId(
relocinfo->wasm_stub_call_address()));
out->AddFormatted(" ;; wasm stub: %s", runtime_stub_name);
- } else if (RelocInfo::IsRuntimeEntry(rmode) && isolate &&
- isolate->deoptimizer_data() != nullptr) {
+ } else if (RelocInfo::IsRuntimeEntry(rmode) && isolate != nullptr) {
// A runtime entry relocinfo might be a deoptimization bailout.
Address addr = relocinfo->target_address();
DeoptimizeKind type;
@@ -426,6 +425,8 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
int Disassembler::Decode(Isolate* isolate, std::ostream* os, byte* begin,
byte* end, CodeReference code, Address current_pc) {
+ DCHECK_WITH_MSG(FLAG_text_is_readable,
+ "Builtins disassembly requires a readable .text section");
V8NameConverter v8NameConverter(isolate, code);
if (isolate) {
// We have an isolate, so support external reference names.
diff --git a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
index 80ab5663aa..3dbde536de 100644
--- a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
+++ b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
@@ -2152,37 +2152,21 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
} else if (*data == 0x3A) {
data++;
- if (*data == 0x08) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("roundps %s,%s,%d", NameOfXMMRegister(regop),
- NameOfXMMRegister(rm), static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x09) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("roundpd %s,%s,%d", NameOfXMMRegister(regop),
- NameOfXMMRegister(rm), static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x0A) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("roundss %s,%s,%d", NameOfXMMRegister(regop),
- NameOfXMMRegister(rm), static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x0B) {
+ if (*data >= 0x08 && *data <= 0x0B) {
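+      // The four SSE4.1 rounding instructions occupy consecutive opcodes
+      // 0x08-0x0B in this escape, so one mnemonic table covers them all.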
+ const char* const pseudo_op[] = {
+ "roundps",
+ "roundpd",
+ "roundss",
+ "roundsd",
+ };
+ byte op = *data;
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("roundsd %s,%s,%d", NameOfXMMRegister(regop),
- NameOfXMMRegister(rm), static_cast<int>(imm8));
+ AppendToBuffer("%s %s,%s,%d", pseudo_op[op - 0x08],
+ NameOfXMMRegister(regop), NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
data += 2;
} else if (*data == 0x0E) {
data++;
diff --git a/deps/v8/src/diagnostics/ia32/unwinder-ia32.cc b/deps/v8/src/diagnostics/ia32/unwinder-ia32.cc
new file mode 100644
index 0000000000..5a92512a17
--- /dev/null
+++ b/deps/v8/src/diagnostics/ia32/unwinder-ia32.cc
@@ -0,0 +1,12 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/diagnostics/unwinder.h"
+
+namespace v8 {
+
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+
+} // namespace v8
diff --git a/deps/v8/src/diagnostics/mips/unwinder-mips.cc b/deps/v8/src/diagnostics/mips/unwinder-mips.cc
new file mode 100644
index 0000000000..5a92512a17
--- /dev/null
+++ b/deps/v8/src/diagnostics/mips/unwinder-mips.cc
@@ -0,0 +1,12 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/diagnostics/unwinder.h"
+
+namespace v8 {
+
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+
+} // namespace v8
diff --git a/deps/v8/src/diagnostics/mips64/unwinder-mips64.cc b/deps/v8/src/diagnostics/mips64/unwinder-mips64.cc
new file mode 100644
index 0000000000..5a92512a17
--- /dev/null
+++ b/deps/v8/src/diagnostics/mips64/unwinder-mips64.cc
@@ -0,0 +1,12 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/diagnostics/unwinder.h"
+
+namespace v8 {
+
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+
+} // namespace v8
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index 83a1ac3a9f..6ee2d39f45 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -27,6 +27,7 @@
#include "src/objects/free-space-inl.h"
#include "src/objects/function-kind.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/instance-type.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/layout-descriptor.h"
#include "src/objects/objects-inl.h"
@@ -66,14 +67,15 @@
#include "src/objects/property-descriptor-object-inl.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/struct-inl.h"
+#include "src/objects/synthetic-module-inl.h"
#include "src/objects/template-objects-inl.h"
+#include "src/objects/torque-defined-classes-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/regexp/regexp.h"
#include "src/utils/ostreams.h"
#include "src/wasm/wasm-objects-inl.h"
#include "torque-generated/class-verifiers.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/internal-class-definitions-inl.h"
+#include "torque-generated/runtime-macros.h"
namespace v8 {
namespace internal {
@@ -291,9 +293,11 @@ void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
CHECK(IsBytecodeArray(isolate));
CHECK(constant_pool(isolate).IsFixedArray(isolate));
VerifyHeapPointer(isolate, constant_pool(isolate));
- CHECK(synchronized_source_position_table(isolate).IsUndefined(isolate) ||
- synchronized_source_position_table(isolate).IsException(isolate) ||
- synchronized_source_position_table(isolate).IsByteArray(isolate));
+ {
+ Object table = source_position_table(isolate, kAcquireLoad);
+ CHECK(table.IsUndefined(isolate) || table.IsException(isolate) ||
+ table.IsByteArray(isolate));
+ }
CHECK(handler_table(isolate).IsByteArray(isolate));
for (int i = 0; i < constant_pool(isolate).length(); ++i) {
// No ThinStrings in the constant pool.
@@ -303,7 +307,7 @@ void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
USE_TORQUE_VERIFIER(JSReceiver)
-bool JSObject::ElementsAreSafeToExamine(const Isolate* isolate) const {
+bool JSObject::ElementsAreSafeToExamine(IsolateRoot isolate) const {
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
return elements(isolate) !=
@@ -371,7 +375,7 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
int delta = actual_unused_property_fields - map().UnusedPropertyFields();
CHECK_EQ(0, delta % JSObject::kFieldsAdded);
}
- DescriptorArray descriptors = map().instance_descriptors();
+ DescriptorArray descriptors = map().instance_descriptors(kRelaxedLoad);
bool is_transitionable_fast_elements_kind =
IsTransitionableFastElementsKind(map().elements_kind());
@@ -445,13 +449,13 @@ void Map::MapVerify(Isolate* isolate) {
// Root maps must not have descriptors in the descriptor array that do not
// belong to the map.
CHECK_EQ(NumberOfOwnDescriptors(),
- instance_descriptors().number_of_descriptors());
+ instance_descriptors(kRelaxedLoad).number_of_descriptors());
} else {
// If there is a parent map it must be non-stable.
Map parent = Map::cast(GetBackPointer());
CHECK(!parent.is_stable());
- DescriptorArray descriptors = instance_descriptors();
- if (descriptors == parent.instance_descriptors()) {
+ DescriptorArray descriptors = instance_descriptors(kRelaxedLoad);
+ if (descriptors == parent.instance_descriptors(kRelaxedLoad)) {
if (NumberOfOwnDescriptors() == parent.NumberOfOwnDescriptors() + 1) {
// Descriptors sharing through property transitions takes over
// ownership from the parent map.
@@ -469,14 +473,14 @@ void Map::MapVerify(Isolate* isolate) {
}
}
}
- SLOW_DCHECK(instance_descriptors().IsSortedNoDuplicates());
+ SLOW_DCHECK(instance_descriptors(kRelaxedLoad).IsSortedNoDuplicates());
DisallowHeapAllocation no_gc;
SLOW_DCHECK(
TransitionsAccessor(isolate, *this, &no_gc).IsSortedNoDuplicates());
SLOW_DCHECK(TransitionsAccessor(isolate, *this, &no_gc)
.IsConsistentWithBackPointers());
SLOW_DCHECK(!FLAG_unbox_double_fields ||
- layout_descriptor().IsConsistentWithMap(*this));
+ layout_descriptor(kAcquireLoad).IsConsistentWithMap(*this));
// Only JSFunction maps have has_prototype_slot() bit set and constructible
// JSFunction objects must have prototype slot.
CHECK_IMPLIES(has_prototype_slot(), instance_type() == JS_FUNCTION_TYPE);
@@ -484,7 +488,7 @@ void Map::MapVerify(Isolate* isolate) {
CHECK(!has_named_interceptor());
CHECK(!is_dictionary_map());
CHECK(!is_access_check_needed());
- DescriptorArray const descriptors = instance_descriptors();
+ DescriptorArray const descriptors = instance_descriptors(kRelaxedLoad);
for (InternalIndex i : IterateOwnDescriptors()) {
CHECK(!descriptors.GetKey(i).IsInterestingSymbol());
}
@@ -508,7 +512,7 @@ void Map::DictionaryMapVerify(Isolate* isolate) {
CHECK(is_dictionary_map());
CHECK_EQ(kInvalidEnumCacheSentinel, EnumLength());
CHECK_EQ(ReadOnlyRoots(isolate).empty_descriptor_array(),
- instance_descriptors());
+ instance_descriptors(kRelaxedLoad));
CHECK_EQ(0, UnusedPropertyFields());
CHECK_EQ(Map::GetVisitorId(*this), visitor_id());
}
@@ -574,7 +578,7 @@ void NativeContext::NativeContextVerify(Isolate* isolate) {
}
void FeedbackMetadata::FeedbackMetadataVerify(Isolate* isolate) {
- if (slot_count() == 0 && closure_feedback_cell_count() == 0) {
+ if (slot_count() == 0 && create_closure_slot_count() == 0) {
CHECK_EQ(ReadOnlyRoots(isolate).empty_feedback_metadata(), *this);
} else {
FeedbackMetadataIterator iter(*this);
@@ -820,7 +824,7 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(LocalIsolate* isolate) {
}
void SharedFunctionInfo::SharedFunctionInfoVerify(ReadOnlyRoots roots) {
- Object value = name_or_scope_info();
+ Object value = name_or_scope_info(kAcquireLoad);
if (value.IsScopeInfo()) {
CHECK_LT(0, ScopeInfo::cast(value).length());
CHECK_NE(value, roots.empty_scope_info());
@@ -832,8 +836,11 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(ReadOnlyRoots roots) {
HasUncompiledDataWithoutPreparseData() || HasWasmJSFunctionData() ||
HasWasmCapiFunctionData());
- CHECK(script_or_debug_info().IsUndefined(roots) ||
- script_or_debug_info().IsScript() || HasDebugInfo());
+ {
+ auto script = script_or_debug_info(kAcquireLoad);
+ CHECK(script.IsUndefined(roots) || script.IsScript() ||
+ script.IsDebugInfo());
+ }
if (!is_compiled()) {
CHECK(!HasFeedbackMetadata());
@@ -865,11 +872,6 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(ReadOnlyRoots roots) {
CHECK(!construct_as_builtin());
}
}
-
- // At this point we only support skipping arguments adaptor frames
- // for strict mode functions (see https://crbug.com/v8/8895).
- CHECK_IMPLIES(is_safe_to_skip_arguments_adaptor(),
- language_mode() == LanguageMode::kStrict);
}
void JSGlobalProxy::JSGlobalProxyVerify(Isolate* isolate) {
@@ -944,13 +946,16 @@ void CodeDataContainer::CodeDataContainerVerify(Isolate* isolate) {
}
void Code::CodeVerify(Isolate* isolate) {
- CHECK_IMPLIES(
- has_safepoint_table(),
- IsAligned(safepoint_table_offset(), static_cast<unsigned>(kIntSize)));
+ CHECK(IsAligned(InstructionSize(),
+ static_cast<unsigned>(Code::kMetadataAlignment)));
+ CHECK_EQ(safepoint_table_offset(), 0);
CHECK_LE(safepoint_table_offset(), handler_table_offset());
CHECK_LE(handler_table_offset(), constant_pool_offset());
CHECK_LE(constant_pool_offset(), code_comments_offset());
- CHECK_LE(code_comments_offset(), InstructionSize());
+ CHECK_LE(code_comments_offset(), unwinding_info_offset());
+ CHECK_LE(unwinding_info_offset(), MetadataSize());
+ CHECK_IMPLIES(!ReadOnlyHeap::Contains(*this),
+ IsAligned(InstructionStart(), kCodeAlignment));
CHECK_IMPLIES(!ReadOnlyHeap::Contains(*this),
IsAligned(raw_instruction_start(), kCodeAlignment));
// TODO(delphick): Refactor Factory::CodeBuilder::BuildInternal, so that the
@@ -959,7 +964,8 @@ void Code::CodeVerify(Isolate* isolate) {
// everything is set up.
// CHECK_EQ(ReadOnlyHeap::Contains(*this), !IsExecutable());
relocation_info().ObjectVerify(isolate);
- CHECK(Code::SizeFor(body_size()) <= kMaxRegularHeapObjectSize ||
+ CHECK(V8_ENABLE_THIRD_PARTY_HEAP_BOOL ||
+ CodeSize() <= MemoryChunkLayout::MaxRegularCodeObjectSize() ||
isolate->heap()->InSpace(*this, CODE_LO_SPACE));
Address last_gc_pc = kNullAddress;
@@ -1174,6 +1180,7 @@ void SmallOrderedHashTable<Derived>::SmallOrderedHashTableVerify(
}
}
void SmallOrderedHashMap::SmallOrderedHashMapVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::SmallOrderedHashMapVerify(*this, isolate);
SmallOrderedHashTable<SmallOrderedHashMap>::SmallOrderedHashTableVerify(
isolate);
for (int entry = NumberOfElements(); entry < NumberOfDeletedElements();
@@ -1186,6 +1193,7 @@ void SmallOrderedHashMap::SmallOrderedHashMapVerify(Isolate* isolate) {
}
void SmallOrderedHashSet::SmallOrderedHashSetVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::SmallOrderedHashSetVerify(*this, isolate);
SmallOrderedHashTable<SmallOrderedHashSet>::SmallOrderedHashTableVerify(
isolate);
for (int entry = NumberOfElements(); entry < NumberOfDeletedElements();
@@ -1199,6 +1207,8 @@ void SmallOrderedHashSet::SmallOrderedHashSetVerify(Isolate* isolate) {
void SmallOrderedNameDictionary::SmallOrderedNameDictionaryVerify(
Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::SmallOrderedNameDictionaryVerify(*this,
+ isolate);
SmallOrderedHashTable<
SmallOrderedNameDictionary>::SmallOrderedHashTableVerify(isolate);
for (int entry = NumberOfElements(); entry < NumberOfDeletedElements();
@@ -1375,6 +1385,17 @@ void Module::ModuleVerify(Isolate* isolate) {
CHECK_NE(hash(), 0);
}
+void ModuleRequest::ModuleRequestVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::ModuleRequestVerify(*this, isolate);
+ CHECK_EQ(0, import_assertions().length() % 3);
+
+ for (int i = 0; i < import_assertions().length(); i += 3) {
+ CHECK(import_assertions().get(i).IsString()); // Assertion key
+ CHECK(import_assertions().get(i + 1).IsString()); // Assertion value
+ CHECK(import_assertions().get(i + 2).IsSmi()); // Assertion location
+ }
+}
+
void SourceTextModule::SourceTextModuleVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::SourceTextModuleVerify(*this, isolate);
@@ -1540,8 +1561,6 @@ void CallHandlerInfo::CallHandlerInfoVerify(Isolate* isolate) {
.next_call_side_effect_free_call_handler_info_map());
}
-USE_TORQUE_VERIFIER(WasmCapiFunctionData)
-
USE_TORQUE_VERIFIER(WasmJSFunctionData)
USE_TORQUE_VERIFIER(WasmIndirectFunctionTable)
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index 9afe8e9445..d65c0eeb4b 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -5,73 +5,22 @@
#include <iomanip>
#include <memory>
+#include "src/common/globals.h"
+#include "src/compiler/node.h"
#include "src/diagnostics/disasm.h"
#include "src/diagnostics/disassembler.h"
#include "src/heap/heap-inl.h" // For InOldSpace.
#include "src/heap/heap-write-barrier-inl.h" // For GetIsolateFromWritableObj.
#include "src/init/bootstrapper.h"
#include "src/interpreter/bytecodes.h"
-#include "src/objects/arguments-inl.h"
-#include "src/objects/cell-inl.h"
-#include "src/objects/data-handler-inl.h"
-#include "src/objects/debug-objects-inl.h"
-#include "src/objects/embedder-data-array-inl.h"
-#include "src/objects/embedder-data-slot-inl.h"
-#include "src/objects/feedback-cell-inl.h"
-#include "src/objects/foreign-inl.h"
-#include "src/objects/free-space-inl.h"
-#include "src/objects/hash-table-inl.h"
-#include "src/objects/heap-number-inl.h"
-#include "src/objects/js-array-buffer-inl.h"
-#include "src/objects/js-array-inl.h"
-#include "src/objects/objects-inl.h"
-#include "src/objects/objects.h"
-#include "src/snapshot/embedded/embedded-data.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/objects/js-break-iterator-inl.h"
-#include "src/objects/js-collator-inl.h"
-#endif // V8_INTL_SUPPORT
-#include "src/objects/js-collection-inl.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/objects/js-date-time-format-inl.h"
-#include "src/objects/js-display-names-inl.h"
-#endif // V8_INTL_SUPPORT
-#include "src/objects/js-generator-inl.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/objects/js-list-format-inl.h"
-#include "src/objects/js-locale-inl.h"
-#include "src/objects/js-number-format-inl.h"
-#include "src/objects/js-plural-rules-inl.h"
-#endif // V8_INTL_SUPPORT
-#include "src/objects/js-regexp-inl.h"
-#include "src/objects/js-regexp-string-iterator-inl.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/objects/js-relative-time-format-inl.h"
-#include "src/objects/js-segment-iterator-inl.h"
-#include "src/objects/js-segmenter-inl.h"
-#include "src/objects/js-segments-inl.h"
-#endif // V8_INTL_SUPPORT
-#include "src/compiler/node.h"
-#include "src/objects/js-weak-refs-inl.h"
-#include "src/objects/literal-objects-inl.h"
-#include "src/objects/microtask-inl.h"
-#include "src/objects/module-inl.h"
-#include "src/objects/oddball-inl.h"
-#include "src/objects/promise-inl.h"
-#include "src/objects/property-descriptor-object-inl.h"
-#include "src/objects/stack-frame-info-inl.h"
-#include "src/objects/string-set-inl.h"
-#include "src/objects/struct-inl.h"
-#include "src/objects/template-objects-inl.h"
-#include "src/objects/transitions-inl.h"
+#include "src/objects/all-objects-inl.h"
+#include "src/objects/code-kind.h"
#include "src/regexp/regexp.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/utils/ostreams.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "torque-generated/class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/internal-class-definitions-inl.h"
namespace v8 {
namespace internal {
@@ -284,7 +233,7 @@ void FreeSpace::FreeSpacePrint(std::ostream& os) { // NOLINT
bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
if (HasFastProperties()) {
- DescriptorArray descs = map().instance_descriptors();
+ DescriptorArray descs = map().instance_descriptors(kRelaxedLoad);
int nof_inobject_properties = map().GetInObjectProperties();
for (InternalIndex i : map().IterateOwnDescriptors()) {
os << "\n ";
@@ -307,16 +256,23 @@ bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
}
os << " ";
details.PrintAsFastTo(os, PropertyDetails::kForProperties);
- if (details.location() != kField) continue;
- int field_index = details.field_index();
- if (nof_inobject_properties <= field_index) {
- field_index -= nof_inobject_properties;
- os << " properties[" << field_index << "]";
+ if (details.location() == kField) {
+ int field_index = details.field_index();
+ if (field_index < nof_inobject_properties) {
+ os << ", location: in-object";
+ } else {
+ field_index -= nof_inobject_properties;
+ os << ", location: properties[" << field_index << "]";
+ }
+ } else {
+ os << ", location: descriptor";
}
}
return map().NumberOfOwnDescriptors() > 0;
} else if (IsJSGlobalObject()) {
JSGlobalObject::cast(*this).global_dictionary().Print(os);
+ } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ property_dictionary_ordered().Print(os);
} else {
property_dictionary().Print(os);
}
@@ -468,7 +424,7 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
}
}
-void PrintEmbedderData(const Isolate* isolate, std::ostream& os,
+void PrintEmbedderData(IsolateRoot isolate, std::ostream& os,
EmbedderDataSlot slot) {
DisallowHeapAllocation no_gc;
Object value = slot.load_tagged();
@@ -568,9 +524,10 @@ static void JSObjectPrintBody(std::ostream& os,
if (!properties_or_hash.IsSmi()) {
os << Brief(properties_or_hash);
}
- os << " {";
+ os << "\n - All own properties (excluding elements): {";
if (obj.PrintProperties(os)) os << "\n ";
os << "}\n";
+
if (print_elements) {
size_t length = obj.IsJSTypedArray() ? JSTypedArray::cast(obj).length()
: obj.elements().length();
@@ -578,7 +535,7 @@ static void JSObjectPrintBody(std::ostream& os,
}
int embedder_fields = obj.GetEmbedderFieldCount();
if (embedder_fields > 0) {
- const Isolate* isolate = GetIsolateForPtrCompr(obj);
+ IsolateRoot isolate = GetIsolateForPtrCompr(obj);
os << " - embedder fields = {";
for (int i = 0; i < embedder_fields; i++) {
os << "\n ";
@@ -772,7 +729,7 @@ void PrintWeakArrayElements(std::ostream& os, T* array) {
} // namespace
void EmbedderDataArray::EmbedderDataArrayPrint(std::ostream& os) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
PrintHeader(os, "EmbedderDataArray");
os << "\n - length: " << length();
EmbedderDataSlot start(*this, 0);
@@ -888,14 +845,13 @@ void FeedbackVectorSpec::Print() {
}
void FeedbackVectorSpec::FeedbackVectorSpecPrint(std::ostream& os) { // NOLINT
- int slot_count = slots();
- os << " - slot_count: " << slot_count;
- if (slot_count == 0) {
+ os << " - slot_count: " << slot_count();
+ if (slot_count() == 0) {
os << " (empty)\n";
return;
}
- for (int slot = 0; slot < slot_count;) {
+ for (int slot = 0; slot < slot_count();) {
FeedbackSlotKind kind = GetKind(FeedbackSlot(slot));
int entry_size = FeedbackMetadata::GetSlotSize(kind);
DCHECK_LT(0, entry_size);
@@ -908,6 +864,7 @@ void FeedbackVectorSpec::FeedbackVectorSpecPrint(std::ostream& os) { // NOLINT
void FeedbackMetadata::FeedbackMetadataPrint(std::ostream& os) {
PrintHeader(os, "FeedbackMetadata");
os << "\n - slot_count: " << slot_count();
+ os << "\n - create_closure_slot_count: " << create_closure_slot_count();
FeedbackMetadataIterator iter(*this);
while (iter.HasNext()) {
@@ -931,12 +888,13 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
}
os << "\n - shared function info: " << Brief(shared_function_info());
- os << "\n - optimized code/marker: ";
if (has_optimized_code()) {
- os << Brief(optimized_code());
+ os << "\n - optimized code: " << Brief(optimized_code());
} else {
- os << optimization_marker();
+ os << "\n - no optimized code";
}
+ os << "\n - optimization marker: " << optimization_marker();
+ os << "\n - optimization tier: " << optimization_tier();
os << "\n - invocation count: " << invocation_count();
os << "\n - profiler ticks: " << profiler_ticks();
@@ -1279,13 +1237,12 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
os << "\n - formal_parameter_count: "
<< shared().internal_formal_parameter_count();
- if (shared().is_safe_to_skip_arguments_adaptor()) {
- os << "\n - safe_to_skip_arguments_adaptor";
- }
os << "\n - kind: " << shared().kind();
os << "\n - context: " << Brief(context());
os << "\n - code: " << Brief(code());
- if (ActiveTierIsIgnition()) {
+ if (code().kind() == CodeKind::FOR_TESTING) {
+ os << "\n - FOR_TESTING";
+ } else if (ActiveTierIsIgnition()) {
os << "\n - interpreted";
if (shared().HasBytecodeArray()) {
os << "\n - bytecode: " << shared().GetBytecodeArray();
@@ -1364,12 +1321,9 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
}
os << "\n - function_map_index: " << function_map_index();
os << "\n - formal_parameter_count: " << internal_formal_parameter_count();
- if (is_safe_to_skip_arguments_adaptor()) {
- os << "\n - safe_to_skip_arguments_adaptor";
- }
os << "\n - expected_nof_properties: " << expected_nof_properties();
os << "\n - language_mode: " << language_mode();
- os << "\n - data: " << Brief(function_data());
+ os << "\n - data: " << Brief(function_data(kAcquireLoad));
os << "\n - code (from data): ";
os << Brief(GetCode());
PrintSourceCode(os);
@@ -1913,7 +1867,7 @@ void FunctionTemplateInfo::FunctionTemplateInfoPrint(
os << "\n - tag: " << tag();
os << "\n - serial_number: " << serial_number();
os << "\n - property_list: " << Brief(property_list());
- os << "\n - call_code: " << Brief(call_code());
+ os << "\n - call_code: " << Brief(call_code(kAcquireLoad));
os << "\n - property_accessors: " << Brief(property_accessors());
os << "\n - signature: " << Brief(signature());
os << "\n - cached_property_name: " << Brief(cached_property_name());
@@ -1924,16 +1878,6 @@ void FunctionTemplateInfo::FunctionTemplateInfoPrint(
os << "\n";
}
-void WasmCapiFunctionData::WasmCapiFunctionDataPrint(
- std::ostream& os) { // NOLINT
- PrintHeader(os, "WasmCapiFunctionData");
- os << "\n - call_target: " << call_target();
- os << "\n - embedder_data: " << Brief(embedder_data());
- os << "\n - wrapper_code: " << Brief(wrapper_code());
- os << "\n - serialized_signature: " << Brief(serialized_signature());
- os << "\n";
-}
-
void WasmIndirectFunctionTable::WasmIndirectFunctionTablePrint(
std::ostream& os) {
PrintHeader(os, "WasmIndirectFunctionTable");
@@ -2393,7 +2337,7 @@ int Name::NameShortPrint(Vector<char> str) {
void Map::PrintMapDetails(std::ostream& os) {
DisallowHeapAllocation no_gc;
this->MapPrint(os);
- instance_descriptors().PrintDescriptors(os);
+ instance_descriptors(kRelaxedLoad).PrintDescriptors(os);
}
void Map::MapPrint(std::ostream& os) { // NOLINT
@@ -2447,10 +2391,10 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
os << "\n - prototype_validity cell: " << Brief(prototype_validity_cell());
os << "\n - instance descriptors " << (owns_descriptors() ? "(own) " : "")
<< "#" << NumberOfOwnDescriptors() << ": "
- << Brief(instance_descriptors());
+ << Brief(instance_descriptors(kRelaxedLoad));
if (FLAG_unbox_double_fields) {
os << "\n - layout descriptor: ";
- layout_descriptor().ShortPrint(os);
+ layout_descriptor(kAcquireLoad).ShortPrint(os);
}
// Read-only maps can't have transitions, which is fortunate because we need
@@ -2563,7 +2507,7 @@ void TransitionsAccessor::PrintOneTransition(std::ostream& os, Name key,
DCHECK(!IsSpecialTransition(roots, key));
os << "(transition to ";
InternalIndex descriptor = target.LastAdded();
- DescriptorArray descriptors = target.instance_descriptors();
+ DescriptorArray descriptors = target.instance_descriptors(kRelaxedLoad);
descriptors.PrintDescriptorDetails(os, descriptor,
PropertyDetails::kForTransitions);
os << ")";
@@ -2641,7 +2585,7 @@ void TransitionsAccessor::PrintTransitionTree(std::ostream& os, int level,
DCHECK(!IsSpecialTransition(ReadOnlyRoots(isolate_), key));
os << "to ";
InternalIndex descriptor = target.LastAdded();
- DescriptorArray descriptors = target.instance_descriptors();
+ DescriptorArray descriptors = target.instance_descriptors(kRelaxedLoad);
descriptors.PrintDescriptorDetails(os, descriptor,
PropertyDetails::kForTransitions);
}
diff --git a/deps/v8/src/diagnostics/perf-jit.cc b/deps/v8/src/diagnostics/perf-jit.cc
index 3efdcc08db..4b83325f1a 100644
--- a/deps/v8/src/diagnostics/perf-jit.cc
+++ b/deps/v8/src/diagnostics/perf-jit.cc
@@ -205,7 +205,9 @@ void PerfJitLogger::LogRecordedBuffer(
int length) {
if (FLAG_perf_basic_prof_only_functions &&
(abstract_code->kind() != CodeKind::INTERPRETED_FUNCTION &&
- abstract_code->kind() != CodeKind::OPTIMIZED_FUNCTION)) {
+ abstract_code->kind() != CodeKind::TURBOFAN &&
+ abstract_code->kind() != CodeKind::NATIVE_CONTEXT_INDEPENDENT &&
+ abstract_code->kind() != CodeKind::TURBOPROP)) {
return;
}
@@ -231,14 +233,11 @@ void PerfJitLogger::LogRecordedBuffer(
const char* code_name = name;
uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code->InstructionStart());
- // Code generated by Turbofan will have the safepoint table directly after
- // instructions. There is no need to record the safepoint table itself.
- uint32_t code_size = code->ExecutableInstructionSize();
-
// Unwinding info comes right after debug info.
if (FLAG_perf_prof_unwinding_info) LogWriteUnwindingInfo(*code);
- WriteJitCodeLoadEntry(code_pointer, code_size, code_name, length);
+ WriteJitCodeLoadEntry(code_pointer, code->InstructionSize(), code_name,
+ length);
}
void PerfJitLogger::LogRecordedBuffer(const wasm::WasmCode* code,
diff --git a/deps/v8/src/diagnostics/ppc/unwinder-ppc.cc b/deps/v8/src/diagnostics/ppc/unwinder-ppc.cc
new file mode 100644
index 0000000000..43c6acb609
--- /dev/null
+++ b/deps/v8/src/diagnostics/ppc/unwinder-ppc.cc
@@ -0,0 +1,8 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "src/diagnostics/unwinder.h"
+namespace v8 {
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+} // namespace v8
diff --git a/deps/v8/src/diagnostics/s390/unwinder-s390.cc b/deps/v8/src/diagnostics/s390/unwinder-s390.cc
new file mode 100644
index 0000000000..43c6acb609
--- /dev/null
+++ b/deps/v8/src/diagnostics/s390/unwinder-s390.cc
@@ -0,0 +1,8 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "src/diagnostics/unwinder.h"
+namespace v8 {
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+} // namespace v8
diff --git a/deps/v8/src/diagnostics/unwinder.cc b/deps/v8/src/diagnostics/unwinder.cc
index c4a559c9d9..1dd122a118 100644
--- a/deps/v8/src/diagnostics/unwinder.cc
+++ b/deps/v8/src/diagnostics/unwinder.cc
@@ -2,15 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/diagnostics/unwinder.h"
+
#include <algorithm>
-#include "include/v8.h"
-#include "src/common/globals.h"
-#include "src/execution/frame-constants.h"
#include "src/execution/pointer-authentication.h"
namespace v8 {
+// Architecture specific. Implemented in unwinder-<arch>.cc.
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state);
+
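+// Reads one pointer-sized word from memory; used to fetch saved frame
+// pointers, return addresses, and callee-saved registers from the stack.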
+i::Address Load(i::Address address) {
+ return *reinterpret_cast<i::Address*>(address);
+}
+
namespace {
const i::byte* CalculateEnd(const void* start, size_t length_in_bytes) {
@@ -61,13 +68,15 @@ bool IsInUnsafeJSEntryRange(const JSEntryStubs& entry_stubs, void* pc) {
// within JSEntry.
}
-i::Address Load(i::Address address) {
- return *reinterpret_cast<i::Address*>(address);
+bool AddressIsInStack(const void* address, const void* stack_base,
+ const void* stack_top) {
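+  // The stack grows downwards, so stack_base is the highest address and
+  // both bounds are inclusive.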
+ return address <= stack_base && address >= stack_top;
}
void* GetReturnAddressFromFP(void* fp, void* pc,
const JSEntryStubs& entry_stubs) {
int caller_pc_offset = i::CommonFrameConstants::kCallerPCOffset;
+// TODO(solanes): Implement the JSEntry range case also for x64 here and below.
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
if (IsInJSEntryRange(entry_stubs, pc)) {
caller_pc_offset = i::EntryFrameConstants::kDirectCallerPCOffset;
@@ -100,11 +109,6 @@ void* GetCallerSPFromFP(void* fp, void* pc, const JSEntryStubs& entry_stubs) {
caller_sp_offset);
}
-bool AddressIsInStack(const void* address, const void* stack_base,
- const void* stack_top) {
- return address <= stack_base && address >= stack_top;
-}
-
} // namespace
bool Unwinder::TryUnwindV8Frames(const JSEntryStubs& entry_stubs,
@@ -145,6 +149,10 @@ bool Unwinder::TryUnwindV8Frames(const JSEntryStubs& entry_stubs,
// Link register no longer valid after unwinding.
register_state->lr = nullptr;
+
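+  // If execution stopped inside JSEntry, callee-saved registers were spilled
+  // to the entry frame; recover them for the caller. (The x64, ppc and s390
+  // implementations added here are empty stubs.)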
+ if (IsInJSEntryRange(entry_stubs, pc)) {
+ GetCalleeSavedRegistersFromEntryFrame(current_fp, register_state);
+ }
return true;
}
return false;
diff --git a/deps/v8/src/diagnostics/unwinder.h b/deps/v8/src/diagnostics/unwinder.h
new file mode 100644
index 0000000000..4cad2897fd
--- /dev/null
+++ b/deps/v8/src/diagnostics/unwinder.h
@@ -0,0 +1,17 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DIAGNOSTICS_UNWINDER_H_
+#define V8_DIAGNOSTICS_UNWINDER_H_
+
+#include "include/v8.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+
+i::Address Load(i::Address address);
+
+} // namespace v8
+
+#endif // V8_DIAGNOSTICS_UNWINDER_H_
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index c39adcf710..f3b9a753af 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -16,37 +16,6 @@
#error "Unsupported OS"
#endif // V8_OS_WIN_X64
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
- _Out_ PVOID* DynamicTable,
- _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
- _In_ DWORD EntryCount,
- _In_ DWORD MaximumEntryCount,
- _In_ ULONG_PTR RangeBase,
- _In_ ULONG_PTR RangeEnd
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
- _Inout_ PVOID DynamicTable,
- _In_ DWORD NewEntryCount
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
- _In_ PVOID DynamicTable
- );
-
-
namespace v8 {
namespace internal {
namespace win64_unwindinfo {
diff --git a/deps/v8/src/diagnostics/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index 641db9f4e7..7ae330c3ea 100644
--- a/deps/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -28,7 +28,12 @@ enum OperandType {
// Fixed 8-bit operands.
BYTE_SIZE_OPERAND_FLAG = 4,
BYTE_REG_OPER_OP_ORDER = REG_OPER_OP_ORDER | BYTE_SIZE_OPERAND_FLAG,
- BYTE_OPER_REG_OP_ORDER = OPER_REG_OP_ORDER | BYTE_SIZE_OPERAND_FLAG
+ BYTE_OPER_REG_OP_ORDER = OPER_REG_OP_ORDER | BYTE_SIZE_OPERAND_FLAG,
+ // XMM registers/operands can be mixed with normal operands.
+ OPER_XMMREG_OP_ORDER,
+ XMMREG_OPER_OP_ORDER,
+ XMMREG_XMMOPER_OP_ORDER,
+ XMMOPER_XMMREG_OP_ORDER,
};
//------------------------------------------------------------------
@@ -444,6 +449,7 @@ class DisassemblerX64 {
int PrintImmediateOp(byte* data);
const char* TwoByteMnemonic(byte opcode);
int TwoByteOpcodeInstruction(byte* data);
+ int ThreeByteOpcodeInstruction(byte* data);
int F6F7Instruction(byte* data);
int ShiftInstruction(byte* data);
int JumpShort(byte* data);
@@ -622,6 +628,28 @@ int DisassemblerX64::PrintOperands(const char* mnem, OperandType op_order,
AppendToBuffer(",%s", register_name);
break;
}
+ case XMMREG_XMMOPER_OP_ORDER: {
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ advance = PrintRightXMMOperand(data);
+ break;
+ }
+ case XMMOPER_XMMREG_OP_ORDER: {
+ AppendToBuffer("%s ", mnem);
+ advance = PrintRightXMMOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
+ }
+ case OPER_XMMREG_OP_ORDER: {
+ AppendToBuffer("%s ", mnem);
+ advance = PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
+ }
+ case XMMREG_OPER_OP_ORDER: {
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ advance = PrintRightOperand(data);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -1019,6 +1047,13 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += PrintRightOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
+ case 0x4A: {
+ AppendToBuffer("vblendvps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister((*current++) >> 4));
+ break;
+ }
case 0x4B: {
AppendToBuffer("vblendvpd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1026,6 +1061,13 @@ int DisassemblerX64::AVXInstruction(byte* data) {
AppendToBuffer(",%s", NameOfXMMRegister((*current++) >> 4));
break;
}
+ case 0x4C: {
+ AppendToBuffer("vpblendvb %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister((*current++) >> 4));
+ break;
+ }
default:
UnimplementedInstruction();
}
@@ -1335,11 +1377,32 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
- case 0x16:
- AppendToBuffer("vmovlhps %s,%s,", NameOfXMMRegister(regop),
+ case 0x12:
+ AppendToBuffer("vmovlps %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x13:
+ AppendToBuffer("vmovlps ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
+ case 0x16:
+ if (mod == 0b11) {
+ AppendToBuffer("vmovlhps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ } else {
+ AppendToBuffer("vmovhps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ }
+ break;
+ case 0x17:
+ AppendToBuffer("vmovhps ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
case 0x28:
AppendToBuffer("vmovaps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -1805,432 +1868,271 @@ int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
// Handle all two-byte opcodes, which start with 0x0F.
// These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix.
-// We do not use any three-byte opcodes, which start with 0x0F38 or 0x0F3A.
int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
byte opcode = *(data + 1);
byte* current = data + 2;
// At return, "current" points to the start of the next instruction.
const char* mnemonic = TwoByteMnemonic(opcode);
+ // Not every instruction will use this, but it doesn't hurt to figure it out
+ // here, since it doesn't update any pointers.
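+  // ModRM fields: mod selects the addressing mode, regop names a register (or
+  // an opcode extension), and rm the register/memory operand.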
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
if (operand_size_ == 0x66) {
+ // These are three-byte opcodes, see ThreeByteOpcodeInstruction.
+ DCHECK_NE(0x38, opcode);
+ DCHECK_NE(0x3A, opcode);
// 0x66 0x0F prefix.
- int mod, regop, rm;
- if (opcode == 0x38) {
- byte third_byte = *current;
- current = data + 3;
- get_modrm(*current, &mod, &regop, &rm);
- switch (third_byte) {
- case 0x15: {
- AppendToBuffer("blendvpd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",<xmm0>");
- break;
- }
-#define SSE34_DIS_CASE(instruction, notUsed1, notUsed2, notUsed3, opcode) \
- case 0x##opcode: { \
- AppendToBuffer(#instruction " %s,", NameOfXMMRegister(regop)); \
- current += PrintRightXMMOperand(current); \
- break; \
- }
-
- SSSE3_INSTRUCTION_LIST(SSE34_DIS_CASE)
- SSSE3_UNOP_INSTRUCTION_LIST(SSE34_DIS_CASE)
- SSE4_INSTRUCTION_LIST(SSE34_DIS_CASE)
- SSE4_UNOP_INSTRUCTION_LIST(SSE34_DIS_CASE)
- SSE4_2_INSTRUCTION_LIST(SSE34_DIS_CASE)
-#undef SSE34_DIS_CASE
- default:
- UnimplementedInstruction();
+ if (opcode == 0xC1) {
+ current += PrintOperands("xadd", OPER_REG_OP_ORDER, current);
+ } else if (opcode == 0x1F) {
+ current++;
+ if (rm == 4) { // SIB byte present.
+ current++;
}
- } else if (opcode == 0x3A) {
- byte third_byte = *current;
- current = data + 3;
- if (third_byte == 0x17) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("extractps "); // reg/m32, xmm, imm8
- current += PrintRightOperand(current);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
- current += 1;
- } else if (third_byte == 0x08) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("roundps %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",0x%x", (*current) & 3);
- current += 1;
- } else if (third_byte == 0x09) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("roundpd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",0x%x", (*current) & 3);
- current += 1;
- } else if (third_byte == 0x0A) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("roundss %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",0x%x", (*current) & 3);
- current += 1;
- } else if (third_byte == 0x0B) {
- get_modrm(*current, &mod, &regop, &rm);
- // roundsd xmm, xmm/m64, imm8
- AppendToBuffer("roundsd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",0x%x", (*current) & 3);
- current += 1;
- } else if (third_byte == 0x0E) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pblendw %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",0x%x", *current);
- current += 1;
- } else if (third_byte == 0x0F) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("palignr %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",0x%x", (*current));
- current += 1;
- } else if (third_byte == 0x14) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pextrb "); // reg/m32, xmm, imm8
- current += PrintRightOperand(current);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
- current += 1;
- } else if (third_byte == 0x15) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pextrw "); // reg/m32, xmm, imm8
- current += PrintRightOperand(current);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 7);
- current += 1;
- } else if (third_byte == 0x16) {
- get_modrm(*current, &mod, &regop, &rm);
- // reg/m32/reg/m64, xmm, imm8
- AppendToBuffer("pextr%c ", rex_w() ? 'q' : 'd');
- current += PrintRightOperand(current);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
- current += 1;
- } else if (third_byte == 0x20) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pinsrb "); // xmm, reg/m32, imm8
- AppendToBuffer(" %s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- AppendToBuffer(",%d", (*current) & 3);
- current += 1;
- } else if (third_byte == 0x21) {
- get_modrm(*current, &mod, &regop, &rm);
- // insertps xmm, xmm/m32, imm8
- AppendToBuffer("insertps %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",0x%x", (*current));
- current += 1;
- } else if (third_byte == 0x22) {
- get_modrm(*current, &mod, &regop, &rm);
- // xmm, reg/m32/reg/m64, imm8
- AppendToBuffer("pinsr%c ", rex_w() ? 'q' : 'd');
- AppendToBuffer(" %s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- AppendToBuffer(",%d", (*current) & 3);
+ if (mod == 1) { // Byte displacement.
current += 1;
+ } else if (mod == 2) { // 32-bit displacement.
+ current += 4;
+ } // else no immediate displacement.
+ AppendToBuffer("nop");
+ } else if (opcode == 0x10) {
+ current += PrintOperands("movupd", XMMREG_XMMOPER_OP_ORDER, current);
+ } else if (opcode == 0x11) {
+ current += PrintOperands("movupd", XMMOPER_XMMREG_OP_ORDER, current);
+ } else if (opcode == 0x28) {
+ current += PrintOperands("movapd", XMMREG_XMMOPER_OP_ORDER, current);
+ } else if (opcode == 0x29) {
+ current += PrintOperands("movapd", XMMOPER_XMMREG_OP_ORDER, current);
+ } else if (opcode == 0x6E) {
+ current += PrintOperands(rex_w() ? "movq" : "movd",
+ XMMREG_XMMOPER_OP_ORDER, current);
+ } else if (opcode == 0x6F) {
+ current += PrintOperands("movdqa", XMMREG_XMMOPER_OP_ORDER, current);
+ } else if (opcode == 0x7E) {
+ current += PrintOperands(rex_w() ? "movq" : "movd",
+ XMMOPER_XMMREG_OP_ORDER, current);
+ } else if (opcode == 0x7F) {
+ current += PrintOperands("movdqa", XMMOPER_XMMREG_OP_ORDER, current);
+ } else if (opcode == 0xD6) {
+ current += PrintOperands("movq", XMMOPER_XMMREG_OP_ORDER, current);
+ } else if (opcode == 0x50) {
+ AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x70) {
+ current += PrintOperands("pshufd", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", *current++);
+ } else if (opcode == 0x71) {
+ current += 1;
+ AppendToBuffer("ps%sw %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
+ *current & 0x7F);
+ current += 1;
+ } else if (opcode == 0x72) {
+ current += 1;
+ AppendToBuffer("ps%sd %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
+ *current & 0x7F);
+ current += 1;
+ } else if (opcode == 0x73) {
+ current += 1;
+ AppendToBuffer("ps%sq %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
+ *current & 0x7F);
+ current += 1;
+ } else if (opcode == 0xB1) {
+ current += PrintOperands("cmpxchg", OPER_REG_OP_ORDER, current);
+ } else if (opcode == 0xC4) {
+ current += PrintOperands("pinsrw", XMMREG_OPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", (*current++) & 7);
+ } else {
+ const char* mnemonic;
+ if (opcode == 0x51) {
+ mnemonic = "sqrtpd";
+ } else if (opcode == 0x54) {
+ mnemonic = "andpd";
+ } else if (opcode == 0x55) {
+ mnemonic = "andnpd";
+ } else if (opcode == 0x56) {
+ mnemonic = "orpd";
+ } else if (opcode == 0x57) {
+ mnemonic = "xorpd";
+ } else if (opcode == 0x58) {
+ mnemonic = "addpd";
+ } else if (opcode == 0x59) {
+ mnemonic = "mulpd";
+ } else if (opcode == 0x5B) {
+ mnemonic = "cvtps2dq";
+ } else if (opcode == 0x5C) {
+ mnemonic = "subpd";
+ } else if (opcode == 0x5D) {
+ mnemonic = "minpd";
+ } else if (opcode == 0x5E) {
+ mnemonic = "divpd";
+ } else if (opcode == 0x5F) {
+ mnemonic = "maxpd";
+ } else if (opcode == 0x60) {
+ mnemonic = "punpcklbw";
+ } else if (opcode == 0x61) {
+ mnemonic = "punpcklwd";
+ } else if (opcode == 0x62) {
+ mnemonic = "punpckldq";
+ } else if (opcode == 0x63) {
+ mnemonic = "packsswb";
+ } else if (opcode == 0x64) {
+ mnemonic = "pcmpgtb";
+ } else if (opcode == 0x65) {
+ mnemonic = "pcmpgtw";
+ } else if (opcode == 0x66) {
+ mnemonic = "pcmpgtd";
+ } else if (opcode == 0x67) {
+ mnemonic = "packuswb";
+ } else if (opcode == 0x68) {
+ mnemonic = "punpckhbw";
+ } else if (opcode == 0x69) {
+ mnemonic = "punpckhwd";
+ } else if (opcode == 0x6A) {
+ mnemonic = "punpckhdq";
+ } else if (opcode == 0x6B) {
+ mnemonic = "packssdw";
+ } else if (opcode == 0x6C) {
+ mnemonic = "punpcklqdq";
+ } else if (opcode == 0x6D) {
+ mnemonic = "punpckhqdq";
+ } else if (opcode == 0x2E) {
+ mnemonic = "ucomisd";
+ } else if (opcode == 0x2F) {
+ mnemonic = "comisd";
+ } else if (opcode == 0x74) {
+ mnemonic = "pcmpeqb";
+ } else if (opcode == 0x75) {
+ mnemonic = "pcmpeqw";
+ } else if (opcode == 0x76) {
+ mnemonic = "pcmpeqd";
+ } else if (opcode == 0xC2) {
+ mnemonic = "cmppd";
+ } else if (opcode == 0xD1) {
+ mnemonic = "psrlw";
+ } else if (opcode == 0xD2) {
+ mnemonic = "psrld";
+ } else if (opcode == 0xD3) {
+ mnemonic = "psrlq";
+ } else if (opcode == 0xD4) {
+ mnemonic = "paddq";
+ } else if (opcode == 0xD5) {
+ mnemonic = "pmullw";
+ } else if (opcode == 0xD7) {
+ mnemonic = "pmovmskb";
+ } else if (opcode == 0xD8) {
+ mnemonic = "psubusb";
+ } else if (opcode == 0xD9) {
+ mnemonic = "psubusw";
+ } else if (opcode == 0xDA) {
+ mnemonic = "pminub";
+ } else if (opcode == 0xDB) {
+ mnemonic = "pand";
+ } else if (opcode == 0xDC) {
+ mnemonic = "paddusb";
+ } else if (opcode == 0xDD) {
+ mnemonic = "paddusw";
+ } else if (opcode == 0xDE) {
+ mnemonic = "pmaxub";
+ } else if (opcode == 0xE0) {
+ mnemonic = "pavgb";
+ } else if (opcode == 0xE1) {
+ mnemonic = "psraw";
+ } else if (opcode == 0xE2) {
+ mnemonic = "psrad";
+ } else if (opcode == 0xE3) {
+ mnemonic = "pavgw";
+ } else if (opcode == 0xE8) {
+ mnemonic = "psubsb";
+ } else if (opcode == 0xE9) {
+ mnemonic = "psubsw";
+ } else if (opcode == 0xEA) {
+ mnemonic = "pminsw";
+ } else if (opcode == 0xEB) {
+ mnemonic = "por";
+ } else if (opcode == 0xEC) {
+ mnemonic = "paddsb";
+ } else if (opcode == 0xED) {
+ mnemonic = "paddsw";
+ } else if (opcode == 0xEE) {
+ mnemonic = "pmaxsw";
+ } else if (opcode == 0xEF) {
+ mnemonic = "pxor";
+ } else if (opcode == 0xF1) {
+ mnemonic = "psllw";
+ } else if (opcode == 0xF2) {
+ mnemonic = "pslld";
+ } else if (opcode == 0xF3) {
+ mnemonic = "psllq";
+ } else if (opcode == 0xF4) {
+ mnemonic = "pmuludq";
+ } else if (opcode == 0xF5) {
+ mnemonic = "pmaddwd";
+ } else if (opcode == 0xF8) {
+ mnemonic = "psubb";
+ } else if (opcode == 0xF9) {
+ mnemonic = "psubw";
+ } else if (opcode == 0xFA) {
+ mnemonic = "psubd";
+ } else if (opcode == 0xFB) {
+ mnemonic = "psubq";
+ } else if (opcode == 0xFC) {
+ mnemonic = "paddb";
+ } else if (opcode == 0xFD) {
+ mnemonic = "paddw";
+ } else if (opcode == 0xFE) {
+ mnemonic = "paddd";
} else {
UnimplementedInstruction();
}
- } else if (opcode == 0xC1) {
- current += PrintOperands("xadd", OPER_REG_OP_ORDER, current);
- } else {
- get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x1F) {
- current++;
- if (rm == 4) { // SIB byte present.
- current++;
- }
- if (mod == 1) { // Byte displacement.
- current += 1;
- } else if (mod == 2) { // 32-bit displacement.
- current += 4;
- } // else no immediate displacement.
- AppendToBuffer("nop");
- } else if (opcode == 0x10) {
- AppendToBuffer("movupd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x11) {
- AppendToBuffer("movupd ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (opcode == 0x28) {
- AppendToBuffer("movapd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x29) {
- AppendToBuffer("movapd ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (opcode == 0x6E) {
- AppendToBuffer("mov%c %s,", rex_w() ? 'q' : 'd',
- NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- } else if (opcode == 0x6F) {
- AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x7E) {
- AppendToBuffer("mov%c ", rex_w() ? 'q' : 'd');
- current += PrintRightOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (opcode == 0x7F) {
- AppendToBuffer("movdqa ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (opcode == 0xD6) {
- AppendToBuffer("movq ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (opcode == 0x50) {
- AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x70) {
- AppendToBuffer("pshufd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",0x%x", *current);
- current += 1;
- } else if (opcode == 0x71) {
- current += 1;
- AppendToBuffer("ps%sw %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
- *current & 0x7F);
- current += 1;
- } else if (opcode == 0x72) {
- current += 1;
- AppendToBuffer("ps%sd %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
- *current & 0x7F);
- current += 1;
- } else if (opcode == 0x73) {
- current += 1;
- AppendToBuffer("ps%sq %s,%d", sf_str[regop / 2], NameOfXMMRegister(rm),
- *current & 0x7F);
- current += 1;
- } else if (opcode == 0xB1) {
- current += PrintOperands("cmpxchg", OPER_REG_OP_ORDER, current);
- } else if (opcode == 0xC4) {
- AppendToBuffer("pinsrw %s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- AppendToBuffer(",0x%x", (*current) & 7);
+ // Not every opcode here has an XMM register as the dst operand.
+ const char* regop_reg =
+ opcode == 0xD7 ? NameOfCPURegister(regop) : NameOfXMMRegister(regop);
+ AppendToBuffer("%s %s,", mnemonic, regop_reg);
+ current += PrintRightXMMOperand(current);
+ if (opcode == 0xC2) {
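+        // cmppd xmm, xmm/m128, imm8: the trailing imm8 selects the comparison
+        // predicate named in pseudo_op below.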
+ const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
+ "neq", "nlt", "nle", "ord"};
+ AppendToBuffer(", (%s)", pseudo_op[*current]);
current += 1;
- } else {
- const char* mnemonic;
- if (opcode == 0x51) {
- mnemonic = "sqrtpd";
- } else if (opcode == 0x54) {
- mnemonic = "andpd";
- } else if (opcode == 0x55) {
- mnemonic = "andnpd";
- } else if (opcode == 0x56) {
- mnemonic = "orpd";
- } else if (opcode == 0x57) {
- mnemonic = "xorpd";
- } else if (opcode == 0x58) {
- mnemonic = "addpd";
- } else if (opcode == 0x59) {
- mnemonic = "mulpd";
- } else if (opcode == 0x5B) {
- mnemonic = "cvtps2dq";
- } else if (opcode == 0x5C) {
- mnemonic = "subpd";
- } else if (opcode == 0x5D) {
- mnemonic = "minpd";
- } else if (opcode == 0x5E) {
- mnemonic = "divpd";
- } else if (opcode == 0x5F) {
- mnemonic = "maxpd";
- } else if (opcode == 0x60) {
- mnemonic = "punpcklbw";
- } else if (opcode == 0x61) {
- mnemonic = "punpcklwd";
- } else if (opcode == 0x62) {
- mnemonic = "punpckldq";
- } else if (opcode == 0x63) {
- mnemonic = "packsswb";
- } else if (opcode == 0x64) {
- mnemonic = "pcmpgtb";
- } else if (opcode == 0x65) {
- mnemonic = "pcmpgtw";
- } else if (opcode == 0x66) {
- mnemonic = "pcmpgtd";
- } else if (opcode == 0x67) {
- mnemonic = "packuswb";
- } else if (opcode == 0x68) {
- mnemonic = "punpckhbw";
- } else if (opcode == 0x69) {
- mnemonic = "punpckhwd";
- } else if (opcode == 0x6A) {
- mnemonic = "punpckhdq";
- } else if (opcode == 0x6B) {
- mnemonic = "packssdw";
- } else if (opcode == 0x6C) {
- mnemonic = "punpcklqdq";
- } else if (opcode == 0x6D) {
- mnemonic = "punpckhqdq";
- } else if (opcode == 0x2E) {
- mnemonic = "ucomisd";
- } else if (opcode == 0x2F) {
- mnemonic = "comisd";
- } else if (opcode == 0x74) {
- mnemonic = "pcmpeqb";
- } else if (opcode == 0x75) {
- mnemonic = "pcmpeqw";
- } else if (opcode == 0x76) {
- mnemonic = "pcmpeqd";
- } else if (opcode == 0xC2) {
- mnemonic = "cmppd";
- } else if (opcode == 0xD1) {
- mnemonic = "psrlw";
- } else if (opcode == 0xD2) {
- mnemonic = "psrld";
- } else if (opcode == 0xD3) {
- mnemonic = "psrlq";
- } else if (opcode == 0xD4) {
- mnemonic = "paddq";
- } else if (opcode == 0xD5) {
- mnemonic = "pmullw";
- } else if (opcode == 0xD7) {
- mnemonic = "pmovmskb";
- } else if (opcode == 0xD8) {
- mnemonic = "psubusb";
- } else if (opcode == 0xD9) {
- mnemonic = "psubusw";
- } else if (opcode == 0xDA) {
- mnemonic = "pminub";
- } else if (opcode == 0xDB) {
- mnemonic = "pand";
- } else if (opcode == 0xDC) {
- mnemonic = "paddusb";
- } else if (opcode == 0xDD) {
- mnemonic = "paddusw";
- } else if (opcode == 0xDE) {
- mnemonic = "pmaxub";
- } else if (opcode == 0xE0) {
- mnemonic = "pavgb";
- } else if (opcode == 0xE1) {
- mnemonic = "psraw";
- } else if (opcode == 0xE2) {
- mnemonic = "psrad";
- } else if (opcode == 0xE3) {
- mnemonic = "pavgw";
- } else if (opcode == 0xE8) {
- mnemonic = "psubsb";
- } else if (opcode == 0xE9) {
- mnemonic = "psubsw";
- } else if (opcode == 0xEA) {
- mnemonic = "pminsw";
- } else if (opcode == 0xEB) {
- mnemonic = "por";
- } else if (opcode == 0xEC) {
- mnemonic = "paddsb";
- } else if (opcode == 0xED) {
- mnemonic = "paddsw";
- } else if (opcode == 0xEE) {
- mnemonic = "pmaxsw";
- } else if (opcode == 0xEF) {
- mnemonic = "pxor";
- } else if (opcode == 0xF1) {
- mnemonic = "psllw";
- } else if (opcode == 0xF2) {
- mnemonic = "pslld";
- } else if (opcode == 0xF3) {
- mnemonic = "psllq";
- } else if (opcode == 0xF4) {
- mnemonic = "pmuludq";
- } else if (opcode == 0xF5) {
- mnemonic = "pmaddwd";
- } else if (opcode == 0xF8) {
- mnemonic = "psubb";
- } else if (opcode == 0xF9) {
- mnemonic = "psubw";
- } else if (opcode == 0xFA) {
- mnemonic = "psubd";
- } else if (opcode == 0xFB) {
- mnemonic = "psubq";
- } else if (opcode == 0xFC) {
- mnemonic = "paddb";
- } else if (opcode == 0xFD) {
- mnemonic = "paddw";
- } else if (opcode == 0xFE) {
- mnemonic = "paddd";
- } else {
- UnimplementedInstruction();
- }
- // Not every opcode here has an XMM register as the dst operand.
- const char* regop_reg = opcode == 0xD7 ? NameOfCPURegister(regop)
- : NameOfXMMRegister(regop);
- AppendToBuffer("%s %s,", mnemonic, regop_reg);
- current += PrintRightXMMOperand(current);
- if (opcode == 0xC2) {
- const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
- "neq", "nlt", "nle", "ord"};
- AppendToBuffer(", (%s)", pseudo_op[*current]);
- current += 1;
- }
}
}
} else if (group_1_prefix_ == 0xF2) {
// Beginning of instructions with prefix 0xF2.
-
- if (opcode == 0x11 || opcode == 0x10) {
+ if (opcode == 0x10) {
// MOVSD: Move scalar double-precision fp to/from/between XMM registers.
- AppendToBuffer("movsd ");
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x11) {
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- AppendToBuffer("%s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- }
+ current += PrintOperands("movsd", XMMREG_XMMOPER_OP_ORDER, current);
+ } else if (opcode == 0x11) {
+ current += PrintOperands("movsd", XMMOPER_XMMREG_OP_ORDER, current);
} else if (opcode == 0x12) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movddup %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintOperands("movddup", XMMREG_XMMOPER_OP_ORDER, current);
} else if (opcode == 0x2A) {
// CVTSI2SD: integer to XMM double conversion.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
+ current += PrintOperands(mnemonic, XMMREG_OPER_OP_ORDER, current);
} else if (opcode == 0x2C) {
// CVTTSD2SI:
// Convert with truncation scalar double-precision FP to integer.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("cvttsd2si%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x2D) {
// CVTSD2SI: Convert scalar double-precision FP to integer.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("cvtsd2si%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x5B) {
// CVTTPS2DQ: Convert packed single-precision FP values to packed signed
// doubleword integer values
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("cvttps2dq%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
} else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) {
// XMM arithmetic. Mnemonic was retrieved at the start of this function.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintOperands(mnemonic, XMMREG_XMMOPER_OP_ORDER, current);
} else if (opcode == 0x70) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pshuflw %s, ", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(", %d", (*current) & 7);
- current += 1;
+ current += PrintOperands("pshuflw", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",%d", (*current++) & 7);
} else if (opcode == 0xC2) {
// Intel manual 2A, Table 3-18.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
const char* const pseudo_op[] = {"cmpeqsd", "cmpltsd", "cmplesd",
"cmpunordsd", "cmpneqsd", "cmpnltsd",
"cmpnlesd", "cmpordsd"};
@@ -2238,97 +2140,54 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
NameOfXMMRegister(regop), NameOfXMMRegister(rm));
current += 2;
} else if (opcode == 0xF0) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("lddqu %s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
+ current += PrintOperands("lddqu", XMMREG_OPER_OP_ORDER, current);
} else if (opcode == 0x7C) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("haddps %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintOperands("haddps", XMMREG_XMMOPER_OP_ORDER, current);
} else {
UnimplementedInstruction();
}
} else if (group_1_prefix_ == 0xF3) {
// Instructions with prefix 0xF3.
- if (opcode == 0x11 || opcode == 0x10) {
+ if (opcode == 0x10) {
    // MOVSS: Move scalar single-precision fp to/from/between XMM registers.
- AppendToBuffer("movss ");
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x11) {
- current += PrintRightOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- AppendToBuffer("%s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- }
+ current += PrintOperands("movss", XMMREG_OPER_OP_ORDER, current);
+ } else if (opcode == 0x11) {
+ current += PrintOperands("movss", OPER_XMMREG_OP_ORDER, current);
} else if (opcode == 0x2A) {
// CVTSI2SS: integer to XMM single conversion.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
+ current += PrintOperands(mnemonic, XMMREG_OPER_OP_ORDER, current);
} else if (opcode == 0x2C) {
// CVTTSS2SI:
// Convert with truncation scalar single-precision FP to dword integer.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("cvttss2si%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x70) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pshufhw %s, ", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(", %d", (*current) & 7);
- current += 1;
+ current += PrintOperands("pshufhw", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(", %d", (*current++) & 7);
} else if (opcode == 0x6F) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintOperands("movdqu", XMMREG_XMMOPER_OP_ORDER, current);
} else if (opcode == 0x7E) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movq %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintOperands("movq", XMMREG_XMMOPER_OP_ORDER, current);
} else if (opcode == 0x7F) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movdqu ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintOperands("movdqu", XMMOPER_XMMREG_OP_ORDER, current);
} else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) {
// XMM arithmetic. Mnemonic was retrieved at the start of this function.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintOperands(mnemonic, XMMREG_XMMOPER_OP_ORDER, current);
} else if (opcode == 0xB8) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("popcnt%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
} else if (opcode == 0xBC) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("tzcnt%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
} else if (opcode == 0xBD) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("lzcnt%c %s,", operand_size_code(),
NameOfCPURegister(regop));
current += PrintRightOperand(current);
} else if (opcode == 0xC2) {
// Intel manual 2A, Table 3-18.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
const char* const pseudo_op[] = {"cmpeqss", "cmpltss", "cmpless",
"cmpunordss", "cmpneqss", "cmpnltss",
"cmpnless", "cmpordss"};
@@ -2338,29 +2197,37 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else {
UnimplementedInstruction();
}
- } else if (opcode == 0x10 || opcode == 0x11) {
+ } else if (opcode == 0x10) {
// movups xmm, xmm/m128
+ current += PrintOperands("movups", XMMREG_XMMOPER_OP_ORDER, current);
+ } else if (opcode == 0x11) {
// movups xmm/m128, xmm
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movups ");
- if (opcode == 0x11) {
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- AppendToBuffer("%s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- }
+ current += PrintOperands("movups", XMMOPER_XMMREG_OP_ORDER, current);
+ } else if (opcode == 0x12) {
+ // movlps xmm1, m64
+ current += PrintOperands("movlps", XMMREG_OPER_OP_ORDER, current);
+ } else if (opcode == 0x13) {
+ // movlps m64, xmm1
+ AppendToBuffer("movlps ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x16) {
// movlhps xmm1, xmm2
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movlhps %s,", NameOfXMMRegister(regop));
+ // movhps xmm1, m64
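+    // A register operand (mod == 0b11) decodes as movlhps; a memory operand
+    // decodes as movhps.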
+ if (mod == 0b11) {
+ AppendToBuffer("movlhps ");
+ } else {
+ AppendToBuffer("movhps ");
+ }
+ AppendToBuffer("%s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x17) {
+ // movhps m64, xmm1
+ AppendToBuffer("movhps ");
current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x1F) {
// NOP
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
current++;
if (rm == 4) { // SIB byte present.
current++;
@@ -2374,22 +2241,16 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x28) {
// movaps xmm, xmm/m128
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("movaps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x29) {
// movaps xmm/m128, xmm
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("movaps ");
current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x2E) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("ucomiss %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0xA2) {
@@ -2408,8 +2269,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
"orps", "xorps", "addps", "mulps", "cvtps2pd",
"cvtdq2ps", "subps", "minps", "divps", "maxps",
};
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("%s %s,", pseudo_op[opcode - 0x51],
NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -2421,8 +2280,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintOperands("xadd", OPER_REG_OP_ORDER, current);
} else if (opcode == 0xC2) {
// cmpps xmm, xmm/m128, imm8
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
"neq", "nlt", "nle", "ord"};
AppendToBuffer("cmpps %s, ", NameOfXMMRegister(regop));
@@ -2431,8 +2288,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += 1;
} else if (opcode == 0xC6) {
// shufps xmm, xmm/m128, imm8
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("shufps %s, ", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
AppendToBuffer(", %d", (*current) & 3);
@@ -2443,8 +2298,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer("bswap%c %s", operand_size_code(), NameOfCPURegister(reg));
} else if (opcode == 0x50) {
// movmskps reg, xmm
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("movmskps %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
} else if ((opcode & 0xF0) == 0x80) {
@@ -2463,8 +2316,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
// BT (bit test), SHLD, BTS (bit test and set),
// SHRD (double-precision shift)
AppendToBuffer("%s ", mnemonic);
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
current += PrintRightOperand(current);
if (opcode == 0xAB) {
AppendToBuffer(",%s", NameOfCPURegister(regop));
@@ -2473,8 +2324,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
}
} else if (opcode == 0xBA) {
// BTS / BTR (bit test and set/reset) with immediate
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
mnemonic = regop == 5 ? "bts" : regop == 6 ? "btr" : "?";
AppendToBuffer("%s ", mnemonic);
current += PrintRightOperand(current);
@@ -2482,8 +2331,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0xB8 || opcode == 0xBC || opcode == 0xBD) {
// POPCNT, CTZ, CLZ.
AppendToBuffer("%s%c ", mnemonic, operand_size_code());
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("%s,", NameOfCPURegister(regop));
current += PrintRightOperand(current);
} else if (opcode == 0x0B) {
@@ -2506,6 +2353,102 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
return static_cast<int>(current - data);
}
+// Handle all three-byte opcodes, which start with 0x0F38 or 0x0F3A.
+// These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix, but we
+// only have instructions prefixed with 0x66 for now.
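+// For example, the byte sequence 66 0F 3A 0B /r imm8 is decoded below as
+// "roundsd xmm, xmm/m64, imm8".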
+int DisassemblerX64::ThreeByteOpcodeInstruction(byte* data) {
+ DCHECK_EQ(0x0F, *data);
+ // Only support 3-byte opcodes prefixed with 0x66 for now.
+ DCHECK_EQ(0x66, operand_size_);
+ byte second_byte = *(data + 1);
+ byte third_byte = *(data + 2);
+ byte* current = data + 3;
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ if (second_byte == 0x38) {
+ switch (third_byte) {
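+      // 0x10, 0x14 and 0x15 are the SSE4.1 variable blends, which take xmm0
+      // as an implicit third operand (printed as <xmm0>).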
+ case 0x10: {
+ AppendToBuffer("pblendvb %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",<xmm0>");
+ break;
+ }
+ case 0x14: {
+ AppendToBuffer("blendvps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",<xmm0>");
+ break;
+ }
+ case 0x15: {
+ current += PrintOperands("blendvpd", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",<xmm0>");
+ break;
+ }
+#define SSE34_DIS_CASE(instruction, notUsed1, notUsed2, notUsed3, opcode) \
+ case 0x##opcode: { \
+ current += PrintOperands(#instruction, XMMREG_XMMOPER_OP_ORDER, current); \
+ break; \
+ }
+
+ SSSE3_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSSE3_UNOP_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSE4_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSE4_UNOP_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSE4_2_INSTRUCTION_LIST(SSE34_DIS_CASE)
+#undef SSE34_DIS_CASE
+ default:
+ UnimplementedInstruction();
+ }
+ } else {
+ DCHECK_EQ(0x3A, second_byte);
+ if (third_byte == 0x17) {
+ current += PrintOperands("extractps", OPER_XMMREG_OP_ORDER, current);
+ AppendToBuffer(",%d", (*current++) & 3);
+ } else if (third_byte == 0x08) {
+ current += PrintOperands("roundps", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", (*current++) & 3);
+ } else if (third_byte == 0x09) {
+ current += PrintOperands("roundpd", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", (*current++) & 3);
+ } else if (third_byte == 0x0A) {
+ current += PrintOperands("roundss", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", (*current++) & 3);
+ } else if (third_byte == 0x0B) {
+ current += PrintOperands("roundsd", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", (*current++) & 3);
+ } else if (third_byte == 0x0E) {
+ current += PrintOperands("pblendw", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", *current++);
+ } else if (third_byte == 0x0F) {
+ current += PrintOperands("palignr", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", *current++);
+ } else if (third_byte == 0x14) {
+ current += PrintOperands("pextrb", OPER_XMMREG_OP_ORDER, current);
+ AppendToBuffer(",%d", (*current++) & 0xf);
+ } else if (third_byte == 0x15) {
+ current += PrintOperands("pextrw", OPER_XMMREG_OP_ORDER, current);
+ AppendToBuffer(",%d", (*current++) & 7);
+ } else if (third_byte == 0x16) {
+ const char* mnem = rex_w() ? "pextrq" : "pextrd";
+ current += PrintOperands(mnem, OPER_XMMREG_OP_ORDER, current);
+ AppendToBuffer(",%d", (*current++) & 3);
+ } else if (third_byte == 0x20) {
+ current += PrintOperands("pinsrb", XMMREG_OPER_OP_ORDER, current);
+ AppendToBuffer(",%d", (*current++) & 3);
+ } else if (third_byte == 0x21) {
+ current += PrintOperands("insertps", XMMREG_XMMOPER_OP_ORDER, current);
+ AppendToBuffer(",0x%x", *current++);
+ } else if (third_byte == 0x22) {
+ const char* mnem = rex_w() ? "pinsrq" : "pinsrd";
+ current += PrintOperands(mnem, XMMREG_OPER_OP_ORDER, current);
+ AppendToBuffer(",%d", (*current++) & 3);
+ } else {
+ UnimplementedInstruction();
+ }
+ }
+ return static_cast<int>(current - data);
+}
+
// Mnemonics for two-byte opcode instructions starting with 0x0F.
// The argument is the second byte of the two-byte opcode.
// Returns nullptr if the instruction is not handled here.
@@ -2730,7 +2673,12 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0x0F:
- data += TwoByteOpcodeInstruction(data);
+ // Check for three-byte opcodes, 0x0F38 or 0x0F3A.
+ if (*(data + 1) == 0x38 || *(data + 1) == 0x3A) {
+ data += ThreeByteOpcodeInstruction(data);
+ } else {
+ data += TwoByteOpcodeInstruction(data);
+ }
break;
case 0x8F: {
diff --git a/deps/v8/src/diagnostics/x64/unwinder-x64.cc b/deps/v8/src/diagnostics/x64/unwinder-x64.cc
new file mode 100644
index 0000000000..5a92512a17
--- /dev/null
+++ b/deps/v8/src/diagnostics/x64/unwinder-x64.cc
@@ -0,0 +1,12 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/diagnostics/unwinder.h"
+
+namespace v8 {
+
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+
+} // namespace v8
diff --git a/deps/v8/src/execution/DIR_METADATA b/deps/v8/src/execution/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/execution/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
diff --git a/deps/v8/src/execution/OWNERS b/deps/v8/src/execution/OWNERS
index ea38b071ed..1e89f1e750 100644
--- a/deps/v8/src/execution/OWNERS
+++ b/deps/v8/src/execution/OWNERS
@@ -9,5 +9,3 @@ szuend@chromium.org
verwaest@chromium.org
per-file futex-emulation.*=marja@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/execution/arguments.h b/deps/v8/src/execution/arguments.h
index d2798e6f76..39877cf4d2 100644
--- a/deps/v8/src/execution/arguments.h
+++ b/deps/v8/src/execution/arguments.h
@@ -62,11 +62,9 @@ class Arguments {
inline Address* address_of_arg_at(int index) const {
DCHECK_LE(static_cast<uint32_t>(index), static_cast<uint32_t>(length_));
uintptr_t offset = index * kSystemPointerSize;
-#ifdef V8_REVERSE_JSARGS
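+  // JS arguments are pushed in reverse order, so the physical slot for
+  // |index| counts back from the last argument.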
if (arguments_type == ArgumentsType::kJS) {
offset = (length_ - index - 1) * kSystemPointerSize;
}
-#endif
return reinterpret_cast<Address*>(reinterpret_cast<Address>(arguments_) -
offset);
}
@@ -77,17 +75,13 @@ class Arguments {
// Arguments on the stack are in reverse order (compared to an array).
FullObjectSlot first_slot() const {
int index = length() - 1;
-#ifdef V8_REVERSE_JSARGS
if (arguments_type == ArgumentsType::kJS) index = 0;
-#endif
return slot_at(index);
}
FullObjectSlot last_slot() const {
int index = 0;
-#ifdef V8_REVERSE_JSARGS
if (arguments_type == ArgumentsType::kJS) index = length() - 1;
-#endif
return slot_at(index);
}
diff --git a/deps/v8/src/execution/arm/frame-constants-arm.h b/deps/v8/src/execution/arm/frame-constants-arm.h
index e8bee055d2..47e901ea99 100644
--- a/deps/v8/src/execution/arm/frame-constants-arm.h
+++ b/deps/v8/src/execution/arm/frame-constants-arm.h
@@ -43,12 +43,14 @@ class EntryFrameConstants : public AllStatic {
static constexpr int kArgvOffset = +1 * kSystemPointerSize;
  // These offsets refer to the immediate caller (i.e. a native frame).
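+  // Layout implied by these offsets, from the entry frame upwards: a bad-fp
+  // marker slot, then the callee-saved d8-d15, then r4-r10, and finally the
+  // caller's fp and pc.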
- static constexpr int kDirectCallerFPOffset =
+ static constexpr int kDirectCallerRRegistersOffset =
/* bad frame pointer (-1) */
kPointerSize +
/* d8...d15 */
- kNumDoubleCalleeSaved * kDoubleSize +
- /* r4...r10 (i.e callee saved without fp) */
+ kNumDoubleCalleeSaved * kDoubleSize;
+ static constexpr int kDirectCallerFPOffset =
+ kDirectCallerRRegistersOffset +
+ /* r4...r10 (i.e. callee saved without fp) */
(kNumCalleeSaved - 1) * kPointerSize;
static constexpr int kDirectCallerPCOffset =
kDirectCallerFPOffset + 1 * kSystemPointerSize;
diff --git a/deps/v8/src/execution/arm/simulator-arm.cc b/deps/v8/src/execution/arm/simulator-arm.cc
index 3c6368d8f5..3df283e2fd 100644
--- a/deps/v8/src/execution/arm/simulator-arm.cc
+++ b/deps/v8/src/execution/arm/simulator-arm.cc
@@ -900,10 +900,25 @@ void Simulator::SetFpResult(const double& result) {
}
void Simulator::TrashCallerSaveRegisters() {
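+  // Overwriting the caller-saved registers (done under DEBUG after each
+  // simulated runtime call below) helps catch generated code that wrongly
+  // assumes these registers survive a C++ call.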
- // We don't trash the registers with the return value.
+ // Return registers.
+ registers_[0] = 0x50BAD4U;
+ registers_[1] = 0x50BAD4U;
+ // Caller-saved registers.
registers_[2] = 0x50BAD4U;
registers_[3] = 0x50BAD4U;
registers_[12] = 0x50BAD4U;
+ // This value is a NaN in both 32-bit and 64-bit FP.
+ static const uint64_t v = 0x7ff000007f801000UL;
+ // d0 - d7 are caller-saved.
+ for (int i = 0; i < 8; i++) {
+ set_d_register(i, &v);
+ }
+ if (DoubleRegister::SupportedRegisterCount() > 16) {
+ // d16 - d31 (if supported) are caller-saved.
+ for (int i = 16; i < 32; i++) {
+ set_d_register(i, &v);
+ }
+ }
}
int Simulator::ReadW(int32_t addr) {
@@ -1673,6 +1688,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeCompareCall target =
reinterpret_cast<SimulatorRuntimeCompareCall>(external);
iresult = target(dval0, dval1);
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
set_register(r0, static_cast<int32_t>(iresult));
set_register(r1, static_cast<int32_t>(iresult >> 32));
break;
@@ -1681,6 +1699,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeFPFPCall target =
reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
dresult = target(dval0, dval1);
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
SetFpResult(dresult);
break;
}
@@ -1688,6 +1709,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeFPCall target =
reinterpret_cast<SimulatorRuntimeFPCall>(external);
dresult = target(dval0);
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
SetFpResult(dresult);
break;
}
@@ -1695,6 +1719,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeFPIntCall target =
reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
dresult = target(dval0, ival);
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
SetFpResult(dresult);
break;
}
@@ -1728,6 +1755,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
CHECK(stack_aligned);
UnsafeDirectApiCall(external, arg0);
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
} else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
@@ -1739,6 +1769,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
CHECK(stack_aligned);
UnsafeProfilingApiCall(external, arg0, arg1);
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
} else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
@@ -1750,6 +1783,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
CHECK(stack_aligned);
UnsafeDirectGetterCall(external, arg0, arg1);
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
} else if (redirection->type() ==
ExternalReference::PROFILING_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -1764,6 +1800,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
target(arg0, arg1, Redirection::ReverseRedirection(arg2));
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
} else {
// builtin call.
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
@@ -1783,6 +1822,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
int64_t result =
UnsafeGenericFunctionCall(external, arg0, arg1, arg2, arg3, arg4,
arg5, arg6, arg7, arg8, arg9);
+#ifdef DEBUG
+ TrashCallerSaveRegisters();
+#endif
int32_t lo_res = static_cast<int32_t>(result);
int32_t hi_res = static_cast<int32_t>(result >> 32);
if (::v8::internal::FLAG_trace_sim) {
@@ -3836,6 +3878,32 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
}
}
+// Helper functions for implementing NEON ops. Unop applies a unary op to each
+// lane. Binop applies a binary operation to matching input lanes.
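+// For example, Binop<int32_t> with std::plus<int32_t>() performs a lane-wise
+// add over the four 32-bit lanes of a 128-bit register (see Add below).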
+template <typename T>
+void Unop(Simulator* simulator, int Vd, int Vm, std::function<T(T)> unop) {
+ static const int kLanes = 16 / sizeof(T);
+ T src[kLanes];
+ simulator->get_neon_register(Vm, src);
+ for (int i = 0; i < kLanes; i++) {
+ src[i] = unop(src[i]);
+ }
+ simulator->set_neon_register(Vd, src);
+}
+
+template <typename T>
+void Binop(Simulator* simulator, int Vd, int Vm, int Vn,
+ std::function<T(T, T)> binop) {
+ static const int kLanes = 16 / sizeof(T);
+ T src1[kLanes], src2[kLanes];
+ simulator->get_neon_register(Vn, src1);
+ simulator->get_neon_register(Vm, src2);
+ for (int i = 0; i < kLanes; i++) {
+ src1[i] = binop(src1[i], src2[i]);
+ }
+ simulator->set_neon_register(Vd, src1);
+}
+
// Templated operations for NEON instructions.
template <typename T, typename U>
U Widen(T value) {
@@ -3857,15 +3925,6 @@ U Narrow(T value) {
return static_cast<U>(value);
}
-template <typename T>
-T Clamp(int64_t value) {
- static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
- int64_t min = static_cast<int64_t>(std::numeric_limits<T>::min());
- int64_t max = static_cast<int64_t>(std::numeric_limits<T>::max());
- int64_t clamped = std::max(min, std::min(max, value));
- return static_cast<T>(clamped);
-}
-
template <typename T, typename U>
void Widen(Simulator* simulator, int Vd, int Vm) {
static const int kLanes = 8 / sizeof(T);
@@ -3880,28 +3939,15 @@ void Widen(Simulator* simulator, int Vd, int Vm) {
template <typename T, int SIZE>
void Abs(Simulator* simulator, int Vd, int Vm) {
- static const int kElems = SIZE / sizeof(T);
- T src[kElems];
- simulator->get_neon_register<T, SIZE>(Vm, src);
- for (int i = 0; i < kElems; i++) {
- src[i] = std::abs(src[i]);
- }
- simulator->set_neon_register<T, SIZE>(Vd, src);
+ Unop<T>(simulator, Vd, Vm, [](T x) { return std::abs(x); });
}
template <typename T, int SIZE>
void Neg(Simulator* simulator, int Vd, int Vm) {
- static const int kElems = SIZE / sizeof(T);
- T src[kElems];
- simulator->get_neon_register<T, SIZE>(Vm, src);
- for (int i = 0; i < kElems; i++) {
- if (src[i] != std::numeric_limits<T>::min()) {
- src[i] = -src[i];
- } else {
- // The respective minimum (negative) value maps to itself.
- }
- }
- simulator->set_neon_register<T, SIZE>(Vd, src);
+ Unop<T>(simulator, Vd, Vm, [](T x) {
+ // The respective minimum (negative) value maps to itself.
+ return x == std::numeric_limits<T>::min() ? x : -x;
+ });
}
template <typename T, typename U>
@@ -3911,7 +3957,7 @@ void SaturatingNarrow(Simulator* simulator, int Vd, int Vm) {
U dst[kLanes];
simulator->get_neon_register(Vm, src);
for (int i = 0; i < kLanes; i++) {
- dst[i] = Narrow<T, U>(Clamp<U>(src[i]));
+ dst[i] = Narrow<T, U>(Saturate<U>(src[i]));
}
simulator->set_neon_register<U, kDoubleSize>(Vd, dst);
}
@@ -3923,33 +3969,19 @@ void SaturatingUnsignedNarrow(Simulator* simulator, int Vd, int Vm) {
U dst[kLanes];
simulator->get_neon_register(Vm, src);
for (int i = 0; i < kLanes; i++) {
- dst[i] = Clamp<U>(src[i]);
+ dst[i] = Saturate<U>(src[i]);
}
simulator->set_neon_register<U, kDoubleSize>(Vd, dst);
}
template <typename T>
-void AddSaturate(Simulator* simulator, int Vd, int Vm, int Vn) {
- static const int kLanes = 16 / sizeof(T);
- T src1[kLanes], src2[kLanes];
- simulator->get_neon_register(Vn, src1);
- simulator->get_neon_register(Vm, src2);
- for (int i = 0; i < kLanes; i++) {
- src1[i] = Clamp<T>(Widen<T, int64_t>(src1[i]) + Widen<T, int64_t>(src2[i]));
- }
- simulator->set_neon_register(Vd, src1);
+void AddSat(Simulator* simulator, int Vd, int Vm, int Vn) {
+ Binop<T>(simulator, Vd, Vm, Vn, SaturateAdd<T>);
}
template <typename T>
-void SubSaturate(Simulator* simulator, int Vd, int Vm, int Vn) {
- static const int kLanes = 16 / sizeof(T);
- T src1[kLanes], src2[kLanes];
- simulator->get_neon_register(Vn, src1);
- simulator->get_neon_register(Vm, src2);
- for (int i = 0; i < kLanes; i++) {
- src1[i] = SaturateSub<T>(src1[i], src2[i]);
- }
- simulator->set_neon_register(Vd, src1);
+void SubSat(Simulator* simulator, int Vd, int Vm, int Vn) {
+ Binop<T>(simulator, Vd, Vm, Vn, SaturateSub<T>);
}
template <typename T, int SIZE>
@@ -4002,38 +4034,18 @@ void Transpose(Simulator* simulator, int Vd, int Vm) {
template <typename T, int SIZE>
void Test(Simulator* simulator, int Vd, int Vm, int Vn) {
- static const int kElems = SIZE / sizeof(T);
- T src1[kElems], src2[kElems];
- simulator->get_neon_register<T, SIZE>(Vn, src1);
- simulator->get_neon_register<T, SIZE>(Vm, src2);
- for (int i = 0; i < kElems; i++) {
- src1[i] = (src1[i] & src2[i]) != 0 ? -1 : 0;
- }
- simulator->set_neon_register<T, SIZE>(Vd, src1);
+ auto test = [](T x, T y) { return (x & y) ? -1 : 0; };
+ Binop<T>(simulator, Vd, Vm, Vn, test);
}
template <typename T, int SIZE>
void Add(Simulator* simulator, int Vd, int Vm, int Vn) {
- static const int kElems = SIZE / sizeof(T);
- T src1[kElems], src2[kElems];
- simulator->get_neon_register<T, SIZE>(Vn, src1);
- simulator->get_neon_register<T, SIZE>(Vm, src2);
- for (int i = 0; i < kElems; i++) {
- src1[i] += src2[i];
- }
- simulator->set_neon_register<T, SIZE>(Vd, src1);
+ Binop<T>(simulator, Vd, Vm, Vn, std::plus<T>());
}
template <typename T, int SIZE>
void Sub(Simulator* simulator, int Vd, int Vm, int Vn) {
- static const int kElems = SIZE / sizeof(T);
- T src1[kElems], src2[kElems];
- simulator->get_neon_register<T, SIZE>(Vn, src1);
- simulator->get_neon_register<T, SIZE>(Vm, src2);
- for (int i = 0; i < kElems; i++) {
- src1[i] -= src2[i];
- }
- simulator->set_neon_register<T, SIZE>(Vd, src1);
+ Binop<T>(simulator, Vd, Vm, Vn, std::minus<T>());
}
namespace {
@@ -4048,7 +4060,9 @@ uint16_t Multiply(uint16_t a, uint16_t b) {
void VmovImmediate(Simulator* simulator, Instruction* instr) {
byte cmode = instr->Bits(11, 8);
- int vd = instr->VFPDRegValue(kSimd128Precision);
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ int q = instr->Bit(6);
+ int regs = q ? 2 : 1;
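+  // Q == 1 encodes a 128-bit destination, written as two consecutive 64-bit
+  // D registers; Q == 0 writes a single D register.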
uint8_t imm = instr->Bit(24) << 7; // i
imm |= instr->Bits(18, 16) << 4; // imm3
imm |= instr->Bits(3, 0); // imm4
@@ -4056,14 +4070,20 @@ void VmovImmediate(Simulator* simulator, Instruction* instr) {
case 0: {
      // Set the LSB of each 64-bit half.
uint64_t imm64 = imm;
- simulator->set_neon_register(vd, {imm64, imm64});
+ for (int r = 0; r < regs; r++) {
+ simulator->set_d_register(vd + r, &imm64);
+ }
break;
}
case 0xe: {
uint8_t imms[kSimd128Size];
// Set all bytes of register.
std::fill_n(imms, kSimd128Size, imm);
- simulator->set_neon_register(vd, imms);
+ uint64_t imm64;
+ memcpy(&imm64, imms, 8);
+ for (int r = 0; r < regs; r++) {
+ simulator->set_d_register(vd + r, &imm64);
+ }
break;
}
default: {
@@ -4087,35 +4107,19 @@ void Mul(Simulator* simulator, int Vd, int Vm, int Vn) {
template <typename T, int SIZE>
void ShiftLeft(Simulator* simulator, int Vd, int Vm, int shift) {
- static const int kElems = SIZE / sizeof(T);
- T src[kElems];
- simulator->get_neon_register<T, SIZE>(Vm, src);
- for (int i = 0; i < kElems; i++) {
- src[i] <<= shift;
- }
- simulator->set_neon_register<T, SIZE>(Vd, src);
+ Unop<T>(simulator, Vd, Vm, [shift](T x) { return x << shift; });
}
template <typename T, int SIZE>
void ShiftRight(Simulator* simulator, int Vd, int Vm, int shift) {
- static const int kElems = SIZE / sizeof(T);
- T src[kElems];
- simulator->get_neon_register<T, SIZE>(Vm, src);
- for (int i = 0; i < kElems; i++) {
- src[i] >>= shift;
- }
- simulator->set_neon_register<T, SIZE>(Vd, src);
+ Unop<T>(simulator, Vd, Vm, [shift](T x) { return x >> shift; });
}
template <typename T, int SIZE>
void ArithmeticShiftRight(Simulator* simulator, int Vd, int Vm, int shift) {
- static const int kElems = SIZE / sizeof(T);
- T src[kElems];
- simulator->get_neon_register<T, SIZE>(Vm, src);
- for (int i = 0; i < kElems; i++) {
- src[i] = ArithmeticShiftRight(src[i], shift);
- }
- simulator->set_neon_register<T, SIZE>(Vd, src);
+ auto shift_fn =
+ std::bind(ArithmeticShiftRight<T>, std::placeholders::_1, shift);
+ Unop<T>(simulator, Vd, Vm, shift_fn);
}
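// The shift rewrites use the single-operand counterpart of Binop. Again a
// sketch reconstructed from the deleted loops; the name and functor type
// are assumed to mirror the Binop shape above:
template <typename T, int SIZE = kSimd128Size, typename F>
void Unop(Simulator* simulator, int Vd, int Vm, F unop) {
  static const int kLanes = SIZE / sizeof(T);
  T src[kLanes];
  simulator->get_neon_register<T, SIZE>(Vm, src);
  for (int i = 0; i < kLanes; i++) {
    src[i] = unop(src[i]);
  }
  simulator->set_neon_register<T, SIZE>(Vd, src);
}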
template <typename T, int SIZE>
@@ -4182,29 +4186,16 @@ void ShiftByRegister(Simulator* simulator, int Vd, int Vm, int Vn) {
template <typename T, int SIZE>
void CompareEqual(Simulator* simulator, int Vd, int Vm, int Vn) {
- static const int kElems = SIZE / sizeof(T);
- T src1[kElems], src2[kElems];
- simulator->get_neon_register<T, SIZE>(Vn, src1);
- simulator->get_neon_register<T, SIZE>(Vm, src2);
- for (int i = 0; i < kElems; i++) {
- src1[i] = src1[i] == src2[i] ? -1 : 0;
- }
- simulator->set_neon_register<T, SIZE>(Vd, src1);
+ Binop<T>(simulator, Vd, Vm, Vn, [](T x, T y) { return x == y ? -1 : 0; });
}
template <typename T, int SIZE>
void CompareGreater(Simulator* simulator, int Vd, int Vm, int Vn, bool ge) {
- static const int kElems = SIZE / sizeof(T);
- T src1[kElems], src2[kElems];
- simulator->get_neon_register<T, SIZE>(Vn, src1);
- simulator->get_neon_register<T, SIZE>(Vm, src2);
- for (int i = 0; i < kElems; i++) {
- if (ge)
- src1[i] = src1[i] >= src2[i] ? -1 : 0;
- else
- src1[i] = src1[i] > src2[i] ? -1 : 0;
+ if (ge) {
+ Binop<T>(simulator, Vd, Vm, Vn, [](T x, T y) { return x >= y ? -1 : 0; });
+ } else {
+ Binop<T>(simulator, Vd, Vm, Vn, [](T x, T y) { return x > y ? -1 : 0; });
}
- simulator->set_neon_register<T, SIZE>(Vd, src1);
}
float MinMax(float a, float b, bool is_min) {
@@ -4217,14 +4208,13 @@ T MinMax(T a, T b, bool is_min) {
template <typename T, int SIZE>
void MinMax(Simulator* simulator, int Vd, int Vm, int Vn, bool min) {
- static const int kElems = SIZE / sizeof(T);
- T src1[kElems], src2[kElems];
- simulator->get_neon_register<T, SIZE>(Vn, src1);
- simulator->get_neon_register<T, SIZE>(Vm, src2);
- for (int i = 0; i < kElems; i++) {
- src1[i] = MinMax(src1[i], src2[i], min);
+ if (min) {
+ Binop<T>(simulator, Vd, Vm, Vn,
+ [](auto x, auto y) { return std::min<T>(x, y); });
+ } else {
+ Binop<T>(simulator, Vd, Vm, Vn,
+ [](auto x, auto y) { return std::max<T>(x, y); });
}
- simulator->set_neon_register<T, SIZE>(Vd, src1);
}
template <typename T>
@@ -4259,14 +4249,7 @@ template <typename T, int SIZE = kSimd128Size>
void RoundingAverageUnsigned(Simulator* simulator, int Vd, int Vm, int Vn) {
static_assert(std::is_unsigned<T>::value,
"Implemented only for unsigned types.");
- static const int kElems = SIZE / sizeof(T);
- T src1[kElems], src2[kElems];
- simulator->get_neon_register<T, SIZE>(Vn, src1);
- simulator->get_neon_register<T, SIZE>(Vm, src2);
- for (int i = 0; i < kElems; i++) {
- src1[i] = base::RoundingAverageUnsigned(src1[i], src2[i]);
- }
- simulator->set_neon_register<T, SIZE>(Vd, src1);
+ Binop<T>(simulator, Vd, Vm, Vn, base::RoundingAverageUnsigned<T>);
}
template <typename NarrowType, typename WideType>
@@ -4291,338 +4274,378 @@ void MultiplyLong(Simulator* simulator, int Vd, int Vn, int Vm) {
simulator->set_neon_register<WideType>(Vd, dst);
}
-void Simulator::DecodeSpecialCondition(Instruction* instr) {
- switch (instr->SpecialValue()) {
- case 4: {
- int Vd, Vm, Vn;
- if (instr->Bit(6) == 0) {
- Vd = instr->VFPDRegValue(kDoublePrecision);
- Vm = instr->VFPMRegValue(kDoublePrecision);
- Vn = instr->VFPNRegValue(kDoublePrecision);
- } else {
- Vd = instr->VFPDRegValue(kSimd128Precision);
- Vm = instr->VFPMRegValue(kSimd128Precision);
- Vn = instr->VFPNRegValue(kSimd128Precision);
- }
- switch (instr->Bits(11, 8)) {
- case 0x0: {
- if (instr->Bit(4) == 1) {
- // vqadd.s<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- AddSaturate<int8_t>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- AddSaturate<int16_t>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- AddSaturate<int32_t>(this, Vd, Vm, Vn);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- UNIMPLEMENTED();
+void Simulator::DecodeUnconditional(Instruction* instr) {
+ // This follows the decoding in F4.1.18 Unconditional instructions.
+ int op0 = instr->Bits(26, 25);
+ int op1 = instr->Bit(20);
+
+ // Four classes of decoding:
+ // - Miscellaneous (omitted, no instructions used in V8).
+ // - Advanced SIMD data-processing.
+ // - Memory hints and barriers.
+ // - Advanced SIMD element or structure load/store.
+ if (op0 == 0b01) {
+ DecodeAdvancedSIMDDataProcessing(instr);
+ } else if ((op0 & 0b10) == 0b10 && op1) {
+ DecodeMemoryHintsAndBarriers(instr);
+ } else if (op0 == 0b10 && !op1) {
+ DecodeAdvancedSIMDElementOrStructureLoadStore(instr);
+ } else {
+ UNIMPLEMENTED();
+ }
+}
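// For readers without the Instruction class at hand, the bit-field reads
// above (Bits(26, 25), Bit(20)) behave like this self-contained helper
// (hypothetical name, not a V8 API; valid for hi - lo < 31):
static uint32_t BitRange(uint32_t word, int hi, int lo) {
  return (word >> lo) & ((1u << (hi - lo + 1)) - 1u);
}
// e.g. op0 == BitRange(word, 26, 25) and op1 == BitRange(word, 20, 20).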
+
+void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
+ // Advanced SIMD two registers, or three registers of different lengths.
+ int op0 = instr->Bit(24);
+ int op1 = instr->Bits(21, 20);
+ int op2 = instr->Bits(11, 10);
+ int op3 = instr->Bit(6);
+ if (!op0 && op1 == 0b11) {
+    // vext.8 Qd, Qn, Qm, imm4
+ int imm4 = instr->Bits(11, 8);
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ uint8_t src1[16], src2[16], dst[16];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ int boundary = kSimd128Size - imm4;
+ int i = 0;
+ for (; i < boundary; i++) {
+ dst[i] = src1[i + imm4];
+ }
+ for (; i < 16; i++) {
+ dst[i] = src2[i - boundary];
+ }
+ set_neon_register(Vd, dst);
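+    // e.g. imm4 == 3: boundary == 13, so dst[0..12] = Qn bytes 3..15 and
+    // dst[13..15] = Qm bytes 0..2.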
+ } else if (op0 && op1 == 0b11 && ((op2 >> 1) == 0)) {
+ // Advanced SIMD two registers misc
+ int size = instr->Bits(19, 18);
+ int opc1 = instr->Bits(17, 16);
+ int opc2 = instr->Bits(10, 7);
+ int q = instr->Bit(6);
+
+ if (opc1 == 0 && (opc2 >> 2) == 0) {
+ // vrev<op>.size Qd, Qm
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
+ NeonSize op =
+ static_cast<NeonSize>(static_cast<int>(Neon64) - instr->Bits(8, 7));
+ switch (op) {
+ case Neon16: {
+ DCHECK_EQ(Neon8, size);
+ uint8_t src[16];
+ get_neon_register(Vm, src);
+ for (int i = 0; i < 16; i += 2) {
+ std::swap(src[i], src[i + 1]);
}
+ set_neon_register(Vd, src);
break;
}
- case 0x1: {
- if (instr->Bits(21, 20) == 2 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- // vmov Qd, Qm.
- // vorr, Qd, Qm, Qn.
- uint32_t src1[4];
- get_neon_register(Vm, src1);
- if (Vm != Vn) {
- uint32_t src2[4];
- get_neon_register(Vn, src2);
- for (int i = 0; i < 4; i++) {
- src1[i] = src1[i] | src2[i];
+ case Neon32: {
+ switch (size) {
+ case Neon16: {
+ uint16_t src[8];
+ get_neon_register(Vm, src);
+ for (int i = 0; i < 8; i += 2) {
+ std::swap(src[i], src[i + 1]);
}
+ set_neon_register(Vd, src);
+ break;
}
- set_neon_register(Vd, src1);
- } else if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- // vand Qd, Qm, Qn.
- uint32_t src1[4], src2[4];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- for (int i = 0; i < 4; i++) {
- src1[i] = src1[i] & src2[i];
- }
- set_neon_register(Vd, src1);
- } else if (instr->Bits(21, 20) == 1 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- // vbic Qd, Qm, Qn.
- uint32_t src1[4], src2[4];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- for (int i = 0; i < 4; i++) {
- src1[i] = src1[i] & ~src2[i];
- }
- set_neon_register(Vd, src1);
- } else {
- UNIMPLEMENTED();
- }
- break;
- }
- case 0x2: {
- if (instr->Bit(4) == 1) {
- // vqsub.s<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- SubSaturate<int8_t>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- SubSaturate<int16_t>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- SubSaturate<int32_t>(this, Vd, Vm, Vn);
- break;
- case Neon64:
- SubSaturate<int64_t>(this, Vd, Vm, Vn);
- break;
- default:
- UNREACHABLE();
- break;
+ case Neon8: {
+ uint8_t src[16];
+ get_neon_register(Vm, src);
+ for (int i = 0; i < 4; i++) {
+ std::swap(src[i * 4], src[i * 4 + 3]);
+ std::swap(src[i * 4 + 1], src[i * 4 + 2]);
+ }
+ set_neon_register(Vd, src);
+ break;
}
- } else {
- UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ break;
}
break;
}
- case 0x3: {
- // vcge/vcgt.s<size> Qd, Qm, Qn.
- bool ge = instr->Bit(4) == 1;
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ case Neon64: {
switch (size) {
- case Neon8:
- CompareGreater<int8_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ case Neon32: {
+ uint32_t src[4];
+ get_neon_register(Vm, src);
+ std::swap(src[0], src[1]);
+ std::swap(src[2], src[3]);
+ set_neon_register(Vd, src);
break;
- case Neon16:
- CompareGreater<int16_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ }
+ case Neon16: {
+ uint16_t src[8];
+ get_neon_register(Vm, src);
+ for (int i = 0; i < 2; i++) {
+ std::swap(src[i * 4], src[i * 4 + 3]);
+ std::swap(src[i * 4 + 1], src[i * 4 + 2]);
+ }
+ set_neon_register(Vd, src);
break;
- case Neon32:
- CompareGreater<int32_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ }
+ case Neon8: {
+ uint8_t src[16];
+ get_neon_register(Vm, src);
+ for (int i = 0; i < 4; i++) {
+ std::swap(src[i], src[7 - i]);
+ std::swap(src[i + 8], src[15 - i]);
+ }
+ set_neon_register(Vd, src);
break;
+ }
default:
UNREACHABLE();
break;
}
break;
}
- case 0x4: {
- // vshl s<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (size == 0 && opc1 == 0b10 && opc2 == 0) {
+ if (instr->Bit(6) == 0) {
+ // vswp Dd, Dm.
+ uint64_t dval, mval;
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ get_d_register(vd, &dval);
+ get_d_register(vm, &mval);
+ set_d_register(vm, &dval);
+ set_d_register(vd, &mval);
+ } else {
+ // vswp Qd, Qm.
+ uint32_t dval[4], mval[4];
+ int vd = instr->VFPDRegValue(kSimd128Precision);
+ int vm = instr->VFPMRegValue(kSimd128Precision);
+ get_neon_register(vd, dval);
+ get_neon_register(vm, mval);
+ set_neon_register(vm, dval);
+ set_neon_register(vd, mval);
+ }
+ } else if (opc1 == 0 && opc2 == 0b1011) {
+ // vmvn Qd, Qm.
+ int vd = instr->VFPDRegValue(kSimd128Precision);
+ int vm = instr->VFPMRegValue(kSimd128Precision);
+ uint32_t q_data[4];
+ get_neon_register(vm, q_data);
+ for (int i = 0; i < 4; i++) q_data[i] = ~q_data[i];
+ set_neon_register(vd, q_data);
+ } else if (opc1 == 0b01 && (opc2 & 0b0111) == 0b110) {
+ // vabs<type>.<size> Qd, Qm
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ if (instr->Bit(10) != 0) {
+ // floating point (clear sign bits)
+ uint32_t src[4];
+ get_neon_register(Vm, src);
+ for (int i = 0; i < 4; i++) {
+ src[i] &= ~0x80000000;
+ }
+ set_neon_register(Vd, src);
+ } else {
+ // signed integer
+ switch (size) {
+ case Neon8:
+ Abs<int8_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ case Neon16:
+ Abs<int16_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ case Neon32:
+ Abs<int32_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ }
+ } else if (opc1 == 0b01 && (opc2 & 0b0111) == 0b111) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+      // vneg<type>.<size> Qd, Qm
+ if (instr->Bit(10) != 0) {
+ // floating point (toggle sign bits)
+ uint32_t src[4];
+ get_neon_register(Vm, src);
+ for (int i = 0; i < 4; i++) {
+ src[i] ^= 0x80000000;
+ }
+ set_neon_register(Vd, src);
+ } else {
+ // signed integer
+ switch (size) {
+ case Neon8:
+ Neg<int8_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ case Neon16:
+ Neg<int16_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ case Neon32:
+ Neg<int32_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ }
+ } else if (opc1 == 0b10 && opc2 == 0b0001) {
+ if (q) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ // vtrn.<size> Qd, Qm.
+ switch (size) {
+ case Neon8:
+ Transpose<uint8_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ case Neon16:
+ Transpose<uint16_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ case Neon32:
+ Transpose<uint32_t, kSimd128Size>(this, Vd, Vm);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ // vtrn.<size> Dd, Dm.
+ switch (size) {
+ case Neon8:
+ Transpose<uint8_t, kDoubleSize>(this, Vd, Vm);
+ break;
+ case Neon16:
+ Transpose<uint16_t, kDoubleSize>(this, Vd, Vm);
+ break;
+ case Neon32:
+ Transpose<uint32_t, kDoubleSize>(this, Vd, Vm);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ } else if (opc1 == 0b10 && (opc2 & 0b1110) == 0b0010) {
+ NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
+ if (q) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ if (instr->Bit(7) == 1) {
+ // vzip.<size> Qd, Qm.
switch (size) {
case Neon8:
- ShiftByRegister<int8_t, int8_t, kSimd128Size>(this, Vd, Vm, Vn);
+ Zip<uint8_t, kSimd128Size>(this, Vd, Vm);
break;
case Neon16:
- ShiftByRegister<int16_t, int16_t, kSimd128Size>(this, Vd, Vm, Vn);
+ Zip<uint16_t, kSimd128Size>(this, Vd, Vm);
break;
case Neon32:
- ShiftByRegister<int32_t, int32_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon64:
- ShiftByRegister<int64_t, int64_t, kSimd128Size>(this, Vd, Vm, Vn);
+ Zip<uint32_t, kSimd128Size>(this, Vd, Vm);
break;
default:
UNREACHABLE();
break;
}
- break;
- }
- case 0x6: {
- // vmin/vmax.s<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- bool min = instr->Bit(4) != 0;
+ } else {
+ // vuzp.<size> Qd, Qm.
switch (size) {
case Neon8:
- MinMax<int8_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ Unzip<uint8_t, kSimd128Size>(this, Vd, Vm);
break;
case Neon16:
- MinMax<int16_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ Unzip<uint16_t, kSimd128Size>(this, Vd, Vm);
break;
case Neon32:
- MinMax<int32_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ Unzip<uint32_t, kSimd128Size>(this, Vd, Vm);
break;
default:
UNREACHABLE();
break;
}
- break;
}
- case 0x8: {
- // vadd/vtst
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- if (instr->Bit(4) == 0) {
- // vadd.i<size> Qd, Qm, Qn.
- switch (size) {
- case Neon8:
- Add<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- Add<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- Add<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon64:
- Add<uint64_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- }
- } else {
- // vtst.i<size> Qd, Qm, Qn.
- switch (size) {
- case Neon8:
- Test<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- Test<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- Test<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- break;
- }
- case 0x9: {
- if (instr->Bit(6) == 1 && instr->Bit(4) == 1) {
- // vmul.i<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- Mul<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- Mul<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- Mul<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- UNIMPLEMENTED();
- }
- break;
- }
- case 0xA: {
- // vpmin/vpmax.s<size> Dd, Dm, Dn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- bool min = instr->Bit(4) != 0;
+ } else {
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ if (instr->Bit(7) == 1) {
+ // vzip.<size> Dd, Dm.
switch (size) {
case Neon8:
- PairwiseMinMax<int8_t>(this, Vd, Vm, Vn, min);
+ Zip<uint8_t, kDoubleSize>(this, Vd, Vm);
break;
case Neon16:
- PairwiseMinMax<int16_t>(this, Vd, Vm, Vn, min);
+ Zip<uint16_t, kDoubleSize>(this, Vd, Vm);
break;
case Neon32:
- PairwiseMinMax<int32_t>(this, Vd, Vm, Vn, min);
+ UNIMPLEMENTED();
break;
default:
UNREACHABLE();
break;
}
- break;
- }
- case 0xB: {
- // vpadd.i<size> Dd, Dm, Dn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ } else {
+ // vuzp.<size> Dd, Dm.
switch (size) {
case Neon8:
- PairwiseAdd<int8_t>(this, Vd, Vm, Vn);
+ Unzip<uint8_t, kDoubleSize>(this, Vd, Vm);
break;
case Neon16:
- PairwiseAdd<int16_t>(this, Vd, Vm, Vn);
+ Unzip<uint16_t, kDoubleSize>(this, Vd, Vm);
break;
case Neon32:
- PairwiseAdd<int32_t>(this, Vd, Vm, Vn);
+ UNIMPLEMENTED();
break;
default:
UNREACHABLE();
break;
}
- break;
}
- case 0xD: {
- if (instr->Bit(4) == 0) {
- float src1[4], src2[4];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- for (int i = 0; i < 4; i++) {
- if (instr->Bit(21) == 0) {
- // vadd.f32 Qd, Qm, Qn.
- src1[i] = src1[i] + src2[i];
- } else {
- // vsub.f32 Qd, Qm, Qn.
- src1[i] = src1[i] - src2[i];
- }
- }
- set_neon_register(Vd, src1);
+ }
+ } else if (opc1 == 0b10 && (opc2 & 0b1110) == 0b0100) {
+ // vqmovn.<type><size> Dd, Qm.
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
+ bool dst_unsigned = instr->Bit(6) != 0;
+ bool src_unsigned = instr->Bits(7, 6) == 0b11;
+ DCHECK_IMPLIES(src_unsigned, dst_unsigned);
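+    // Bits(7, 6) selects the variant: 0b01 vqmovun (signed source, unsigned
+    // destination), 0b10 vqmovn on signed lanes, 0b11 vqmovn on unsigned.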
+ switch (size) {
+ case Neon8: {
+ if (src_unsigned) {
+ SaturatingNarrow<uint16_t, uint8_t>(this, Vd, Vm);
+ } else if (dst_unsigned) {
+ SaturatingUnsignedNarrow<int16_t, uint8_t>(this, Vd, Vm);
} else {
- UNIMPLEMENTED();
+ SaturatingNarrow<int16_t, int8_t>(this, Vd, Vm);
}
break;
}
- case 0xE: {
- if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 0) {
- // vceq.f32.
- float src1[4], src2[4];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- uint32_t dst[4];
- for (int i = 0; i < 4; i++) {
- dst[i] = (src1[i] == src2[i]) ? 0xFFFFFFFF : 0;
- }
- set_neon_register(Vd, dst);
+ case Neon16: {
+ if (src_unsigned) {
+ SaturatingNarrow<uint32_t, uint16_t>(this, Vd, Vm);
+ } else if (dst_unsigned) {
+ SaturatingUnsignedNarrow<int32_t, uint16_t>(this, Vd, Vm);
} else {
- UNIMPLEMENTED();
+ SaturatingNarrow<int32_t, int16_t>(this, Vd, Vm);
}
break;
}
- case 0xF: {
- if (instr->Bit(20) == 0 && instr->Bit(6) == 1) {
- float src1[4], src2[4];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- if (instr->Bit(4) == 1) {
- if (instr->Bit(21) == 0) {
- // vrecps.f32 Qd, Qm, Qn.
- for (int i = 0; i < 4; i++) {
- src1[i] = 2.0f - src1[i] * src2[i];
- }
- } else {
- // vrsqrts.f32 Qd, Qm, Qn.
- for (int i = 0; i < 4; i++) {
- src1[i] = (3.0f - src1[i] * src2[i]) * 0.5f;
- }
- }
- } else {
- // vmin/vmax.f32 Qd, Qm, Qn.
- bool min = instr->Bit(21) == 1;
- bool saved = FPSCR_default_NaN_mode_;
- FPSCR_default_NaN_mode_ = true;
- for (int i = 0; i < 4; i++) {
- // vmin returns default NaN if any input is NaN.
- src1[i] = canonicalizeNaN(MinMax(src1[i], src2[i], min));
- }
- FPSCR_default_NaN_mode_ = saved;
- }
- set_neon_register(Vd, src1);
+ case Neon32: {
+ if (src_unsigned) {
+ SaturatingNarrow<uint64_t, uint32_t>(this, Vd, Vm);
+ } else if (dst_unsigned) {
+ SaturatingUnsignedNarrow<int64_t, uint32_t>(this, Vd, Vm);
} else {
- UNIMPLEMENTED();
+ SaturatingNarrow<int64_t, int32_t>(this, Vd, Vm);
}
break;
}
@@ -4630,104 +4653,197 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
UNIMPLEMENTED();
break;
}
- break;
- }
- case 5:
- if (instr->Bit(23) == 1 && instr->Bits(21, 19) == 0 &&
- instr->Bit(7) == 0 && instr->Bit(4) == 1) {
- // One register and a modified immediate value, see ARM DDI 0406C.d
- // A7.4.6. Handles vmov, vorr, vmvn, vbic.
- // Only handle vmov.i32 for now.
- VmovImmediate(this, instr);
- } else if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
- (instr->Bit(4) == 1)) {
- // vmovl signed
- if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- int imm3 = instr->Bits(21, 19);
- switch (imm3) {
- case 1:
- Widen<int8_t, int16_t>(this, Vd, Vm);
- break;
- case 2:
- Widen<int16_t, int32_t>(this, Vd, Vm);
- break;
- case 4:
- Widen<int32_t, int64_t>(this, Vd, Vm);
- break;
- default:
- UNIMPLEMENTED();
- break;
+ } else if (opc1 == 0b10 && instr->Bit(10) == 1) {
+ // vrint<q>.<dt> <Dd>, <Dm>
+ // vrint<q>.<dt> <Qd>, <Qm>
+ // See F6.1.205
+ int regs = instr->Bit(6) + 1;
+ int rounding_mode = instr->Bits(9, 7);
+ float (*fproundint)(float) = nullptr;
+ switch (rounding_mode) {
+ case 0:
+ fproundint = &nearbyintf;
+ break;
+ case 3:
+ fproundint = &truncf;
+ break;
+ case 5:
+ fproundint = &floorf;
+ break;
+ case 7:
+ fproundint = &ceilf;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
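+    // i.e. vrintn (ties to even), vrintz (toward zero), vrintm (toward
+    // minus infinity) and vrintp (toward plus infinity), respectively.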
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+
+ float floats[2];
+ for (int r = 0; r < regs; r++) {
+ // We cannot simply use GetVFPSingleValue since our Q registers
+ // might not map to any S registers at all.
+ get_neon_register<float, kDoubleSize>(vm + r, floats);
+ for (int e = 0; e < 2; e++) {
+ floats[e] = canonicalizeNaN(fproundint(floats[e]));
}
- } else if (instr->Bits(21, 20) == 3 && instr->Bit(4) == 0) {
- // vext.
- int imm4 = instr->Bits(11, 8);
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kSimd128Precision);
- uint8_t src1[16], src2[16], dst[16];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- int boundary = kSimd128Size - imm4;
- int i = 0;
- for (; i < boundary; i++) {
- dst[i] = src1[i + imm4];
+ set_neon_register<float, kDoubleSize>(vd + r, floats);
+ }
+ } else if (opc1 == 0b11 && (opc2 & 0b1100) == 0b1000) {
+ // vrecpe/vrsqrte.f32 Qd, Qm.
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ uint32_t src[4];
+ get_neon_register(Vm, src);
+ if (instr->Bit(7) == 0) {
+ for (int i = 0; i < 4; i++) {
+ float denom = bit_cast<float>(src[i]);
+ div_zero_vfp_flag_ = (denom == 0);
+ float result = 1.0f / denom;
+ result = canonicalizeNaN(result);
+ src[i] = bit_cast<uint32_t>(result);
}
- for (; i < 16; i++) {
- dst[i] = src2[i - boundary];
+ } else {
+ for (int i = 0; i < 4; i++) {
+ float radicand = bit_cast<float>(src[i]);
+ float result = 1.0f / std::sqrt(radicand);
+ result = canonicalizeNaN(result);
+ src[i] = bit_cast<uint32_t>(result);
}
- set_neon_register(Vd, dst);
- } else if (instr->Bits(11, 8) == 5 && instr->Bit(4) == 1) {
- // vshl.i<size> Qd, Qm, shift
- int imm7 = instr->Bits(21, 16);
- if (instr->Bit(7) != 0) imm7 += 64;
- int size = base::bits::RoundDownToPowerOfTwo32(imm7);
- int shift = imm7 - size;
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- NeonSize ns =
- static_cast<NeonSize>(base::bits::WhichPowerOfTwo(size >> 3));
- switch (ns) {
- case Neon8:
- ShiftLeft<uint8_t, kSimd128Size>(this, Vd, Vm, shift);
+ }
+ set_neon_register(Vd, src);
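+    // Note: this computes the exact reciprocal / reciprocal square root,
+    // whereas hardware vrecpe/vrsqrte return a low-precision estimate, so
+    // the low bits may differ from real silicon.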
+ } else if (opc1 == 0b11 && (opc2 & 0b1100) == 0b1100) {
+ // vcvt.<Td>.<Tm> Qd, Qm.
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ uint32_t q_data[4];
+ get_neon_register(Vm, q_data);
+ int op = instr->Bits(8, 7);
+ for (int i = 0; i < 4; i++) {
+ switch (op) {
+ case 0:
+ // f32 <- s32, round towards nearest.
+ q_data[i] = bit_cast<uint32_t>(
+ std::round(static_cast<float>(bit_cast<int32_t>(q_data[i]))));
break;
- case Neon16:
- ShiftLeft<uint16_t, kSimd128Size>(this, Vd, Vm, shift);
+ case 1:
+ // f32 <- u32, round towards nearest.
+ q_data[i] =
+ bit_cast<uint32_t>(std::round(static_cast<float>(q_data[i])));
break;
- case Neon32:
- ShiftLeft<uint32_t, kSimd128Size>(this, Vd, Vm, shift);
+ case 2:
+ // s32 <- f32, round to zero.
+ q_data[i] = static_cast<uint32_t>(
+ ConvertDoubleToInt(bit_cast<float>(q_data[i]), false, RZ));
break;
- case Neon64:
- ShiftLeft<uint64_t, kSimd128Size>(this, Vd, Vm, shift);
+ case 3:
+ // u32 <- f32, round to zero.
+ q_data[i] = static_cast<uint32_t>(
+ ConvertDoubleToInt(bit_cast<float>(q_data[i]), true, RZ));
break;
}
- } else if (instr->Bits(11, 8) == 0 && instr->Bit(4) == 1) {
- // vshr.s<size> Qd, Qm, shift
- int imm7 = instr->Bits(21, 16);
- if (instr->Bit(7) != 0) imm7 += 64;
- int size = base::bits::RoundDownToPowerOfTwo32(imm7);
- int shift = 2 * size - imm7;
+ }
+ set_neon_register(Vd, q_data);
+ }
+ } else if (op0 && op1 == 0b11 && op2 == 0b10) {
+ // vtb[l,x] Dd, <list>, Dm.
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ int vn = instr->VFPNRegValue(kDoublePrecision);
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ int table_len = (instr->Bits(9, 8) + 1) * kDoubleSize;
+ bool vtbx = instr->Bit(6) != 0; // vtbl / vtbx
+ uint64_t destination = 0, indices = 0, result = 0;
+ get_d_register(vd, &destination);
+ get_d_register(vm, &indices);
+ for (int i = 0; i < kDoubleSize; i++) {
+ int shift = i * kBitsPerByte;
+ int index = (indices >> shift) & 0xFF;
+ if (index < table_len) {
+ uint64_t table;
+ get_d_register(vn + index / kDoubleSize, &table);
+ result |= ((table >> ((index % kDoubleSize) * kBitsPerByte)) & 0xFF)
+ << shift;
+ } else if (vtbx) {
+ result |= destination & (0xFFull << shift);
+ }
+ }
+ set_d_register(vd, &result);
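+    // e.g. table_len == 16 (two D registers): an index byte of 9 selects
+    // byte 1 of register vn + 1; indices >= 16 produce 0 for vtbl and
+    // keep the old destination byte for vtbx.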
+ } else if (op0 && op1 == 0b11 && op2 == 0b11) {
+ // Advanced SIMD duplicate (scalar)
+ if (instr->Bits(9, 7) == 0) {
+ // vdup.<size> Dd, Dm[index].
+ // vdup.<size> Qd, Dm[index].
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ int imm4 = instr->Bits(19, 16);
+ int size = 0, index = 0, mask = 0;
+ if ((imm4 & 0x1) != 0) {
+ size = 8;
+ index = imm4 >> 1;
+ mask = 0xFFu;
+ } else if ((imm4 & 0x2) != 0) {
+ size = 16;
+ index = imm4 >> 2;
+ mask = 0xFFFFu;
+ } else {
+ size = 32;
+ index = imm4 >> 3;
+ mask = 0xFFFFFFFFu;
+ }
+ uint64_t d_data;
+ get_d_register(vm, &d_data);
+ uint32_t scalar = (d_data >> (size * index)) & mask;
+ uint32_t duped = scalar;
+ for (int i = 1; i < 32 / size; i++) {
+ scalar <<= size;
+ duped |= scalar;
+ }
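+      // e.g. imm4 == 0b0110 selects the 16-bit lane at index 1; with
+      // d_data == 0x0000000012340000, scalar == 0x1234 and the loop
+      // yields duped == 0x12341234.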
+ uint32_t result[4] = {duped, duped, duped, duped};
+ if (instr->Bit(6) == 0) {
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ set_d_register(vd, result);
+ } else {
+ int vd = instr->VFPDRegValue(kSimd128Precision);
+ set_neon_register(vd, result);
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ } else if (op1 != 0b11 && !op3) {
+ // Advanced SIMD three registers of different lengths.
+ int u = instr->Bit(24);
+ int opc = instr->Bits(11, 8);
+ if (opc == 0b1000) {
+ // vmlal.u<size> Qd, Dn, Dm
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ if (size != Neon32) UNIMPLEMENTED();
+
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ uint64_t src1, src2, dst[2];
+
+ get_neon_register<uint64_t>(Vd, dst);
+ get_d_register(Vn, &src1);
+ get_d_register(Vm, &src2);
+ dst[0] += (src1 & 0xFFFFFFFFULL) * (src2 & 0xFFFFFFFFULL);
+ dst[1] += (src1 >> 32) * (src2 >> 32);
+ set_neon_register<uint64_t>(Vd, dst);
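+      // Each 32-bit lane of Dn and Dm is widened to 64 bits, multiplied,
+      // and accumulated into the matching 64-bit lane of Qd.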
+ } else if (opc == 0b1100) {
+ if (u) {
+ // vmull.u<size> Qd, Dn, Dm
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- NeonSize ns =
- static_cast<NeonSize>(base::bits::WhichPowerOfTwo(size >> 3));
- switch (ns) {
- case Neon8:
- ArithmeticShiftRight<int8_t, kSimd128Size>(this, Vd, Vm, shift);
- break;
- case Neon16:
- ArithmeticShiftRight<int16_t, kSimd128Size>(this, Vd, Vm, shift);
- break;
- case Neon32:
- ArithmeticShiftRight<int32_t, kSimd128Size>(this, Vd, Vm, shift);
- break;
- case Neon64:
- ArithmeticShiftRight<int64_t, kSimd128Size>(this, Vd, Vm, shift);
+ int Vn = instr->VFPNRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ switch (size) {
+ case Neon32: {
+ MultiplyLong<uint32_t, uint64_t>(this, Vd, Vn, Vm);
break;
+ }
+ default:
+ UNIMPLEMENTED();
}
- } else if (instr->Bits(11, 8) == 0xC && instr->Bit(6) == 0 &&
- instr->Bit(4) == 0) {
+ } else {
// vmull.s<size> Qd, Dn, Dm
NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
@@ -4741,920 +4857,716 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
default:
UNIMPLEMENTED();
}
- } else {
- UNIMPLEMENTED();
}
- break;
- case 6: {
- int Vd, Vm, Vn;
- if (instr->Bit(6) == 0) {
- Vd = instr->VFPDRegValue(kDoublePrecision);
- Vm = instr->VFPMRegValue(kDoublePrecision);
- Vn = instr->VFPNRegValue(kDoublePrecision);
- } else {
- Vd = instr->VFPDRegValue(kSimd128Precision);
- Vm = instr->VFPMRegValue(kSimd128Precision);
- Vn = instr->VFPNRegValue(kSimd128Precision);
- }
- switch (instr->Bits(11, 8)) {
- case 0x0: {
- if (instr->Bit(4) == 1) {
- // vqadd.u<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- AddSaturate<uint8_t>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- AddSaturate<uint16_t>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- AddSaturate<uint32_t>(this, Vd, Vm, Vn);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- UNIMPLEMENTED();
- }
- break;
- }
- case 0x1: {
- if (instr->Bits(21, 20) == 1 && instr->Bit(4) == 1) {
- // vbsl.size Qd, Qm, Qn.
- uint32_t dst[4], src1[4], src2[4];
- get_neon_register(Vd, dst);
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- for (int i = 0; i < 4; i++) {
- dst[i] = (dst[i] & src1[i]) | (~dst[i] & src2[i]);
- }
- set_neon_register(Vd, dst);
- } else if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 1) {
- if (instr->Bit(6) == 0) {
- // veor Dd, Dn, Dm
- uint64_t src1, src2;
- get_d_register(Vn, &src1);
- get_d_register(Vm, &src2);
- src1 ^= src2;
- set_d_register(Vd, &src1);
+ }
+ } else if (op1 != 0b11 && op3) {
+ // The instructions specified by this encoding are not used in V8.
+ UNIMPLEMENTED();
+ } else {
+ UNIMPLEMENTED();
+ }
+}
- } else {
- // veor Qd, Qn, Qm
- uint32_t src1[4], src2[4];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- for (int i = 0; i < 4; i++) src1[i] ^= src2[i];
- set_neon_register(Vd, src1);
- }
- } else if (instr->Bit(4) == 0) {
- if (instr->Bit(6) == 0) {
- // vrhadd.u<size> Dd, Dm, Dn.
- UNIMPLEMENTED();
- }
- // vrhadd.u<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- RoundingAverageUnsigned<uint8_t>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- RoundingAverageUnsigned<uint16_t>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- RoundingAverageUnsigned<uint32_t>(this, Vd, Vm, Vn);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- UNIMPLEMENTED();
- }
+void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
+ int op0 = instr->Bit(23);
+ int op1 = instr->Bit(4);
+
+ if (op0 == 0) {
+ // Advanced SIMD three registers of same length.
+ int u = instr->Bit(24);
+ int opc = instr->Bits(11, 8);
+ int q = instr->Bit(6);
+ int sz = instr->Bits(21, 20);
+ int Vd, Vm, Vn;
+ if (q) {
+ Vd = instr->VFPDRegValue(kSimd128Precision);
+ Vm = instr->VFPMRegValue(kSimd128Precision);
+ Vn = instr->VFPNRegValue(kSimd128Precision);
+ } else {
+ Vd = instr->VFPDRegValue(kDoublePrecision);
+ Vm = instr->VFPMRegValue(kDoublePrecision);
+ Vn = instr->VFPNRegValue(kDoublePrecision);
+ }
+
+ if (!u && opc == 0 && op1) {
+ // vqadd.s<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ AddSat<int8_t>(this, Vd, Vm, Vn);
break;
- }
- case 0x2: {
- if (instr->Bit(4) == 1) {
- // vqsub.u<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- SubSaturate<uint8_t>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- SubSaturate<uint16_t>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- SubSaturate<uint32_t>(this, Vd, Vm, Vn);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- UNIMPLEMENTED();
- }
+ case Neon16:
+ AddSat<int16_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ AddSat<int32_t>(this, Vd, Vm, Vn);
break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 1 && sz == 2 && q && op1) {
+ // vmov Qd, Qm.
+      // vorr Qd, Qm, Qn.
+ uint32_t src1[4];
+ get_neon_register(Vm, src1);
+ if (Vm != Vn) {
+ uint32_t src2[4];
+ get_neon_register(Vn, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = src1[i] | src2[i];
}
- case 0x3: {
- // vcge/vcgt.u<size> Qd, Qm, Qn.
- bool ge = instr->Bit(4) == 1;
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- CompareGreater<uint8_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
- break;
- case Neon16:
- CompareGreater<uint16_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
- break;
- case Neon32:
- CompareGreater<uint32_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
- break;
- default:
- UNREACHABLE();
- break;
- }
+ }
+ set_neon_register(Vd, src1);
+ } else if (!u && opc == 1 && sz == 0 && q && op1) {
+ // vand Qd, Qm, Qn.
+ uint32_t src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = src1[i] & src2[i];
+ }
+ set_neon_register(Vd, src1);
+ } else if (!u && opc == 1 && sz == 1 && q && op1) {
+ // vbic Qd, Qm, Qn.
+ uint32_t src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = src1[i] & ~src2[i];
+ }
+ set_neon_register(Vd, src1);
+ } else if (!u && opc == 2 && op1) {
+ // vqsub.s<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ SubSat<int8_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ SubSat<int16_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ SubSat<int32_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon64:
+ SubSat<int64_t>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 3) {
+ // vcge/vcgt.s<size> Qd, Qm, Qn.
+ bool ge = instr->Bit(4) == 1;
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ CompareGreater<int8_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ break;
+ case Neon16:
+ CompareGreater<int16_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ break;
+ case Neon32:
+ CompareGreater<int32_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 4 && !op1) {
+ // vshl s<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ ShiftByRegister<int8_t, int8_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ ShiftByRegister<int16_t, int16_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ ShiftByRegister<int32_t, int32_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon64:
+ ShiftByRegister<int64_t, int64_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 6) {
+ // vmin/vmax.s<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ bool min = instr->Bit(4) != 0;
+ switch (size) {
+ case Neon8:
+ MinMax<int8_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ break;
+ case Neon16:
+ MinMax<int16_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ break;
+ case Neon32:
+ MinMax<int32_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 8 && op1) {
+ // vtst.i<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ Test<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ Test<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ Test<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 8 && !op1) {
+ // vadd.i<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ Add<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ Add<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ Add<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon64:
+ Add<uint64_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ }
+ } else if (opc == 9 && op1) {
+ // vmul.i<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ Mul<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ Mul<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ Mul<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 0xA) {
+ // vpmin/vpmax.s<size> Dd, Dm, Dn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ bool min = instr->Bit(4) != 0;
+ switch (size) {
+ case Neon8:
+ PairwiseMinMax<int8_t>(this, Vd, Vm, Vn, min);
+ break;
+ case Neon16:
+ PairwiseMinMax<int16_t>(this, Vd, Vm, Vn, min);
+ break;
+ case Neon32:
+ PairwiseMinMax<int32_t>(this, Vd, Vm, Vn, min);
break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 0xB) {
+ // vpadd.i<size> Dd, Dm, Dn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ PairwiseAdd<int8_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ PairwiseAdd<int16_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ PairwiseAdd<int32_t>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (!u && opc == 0xD && !op1) {
+ float src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ if (instr->Bit(21) == 0) {
+ // vadd.f32 Qd, Qm, Qn.
+ src1[i] = src1[i] + src2[i];
+ } else {
+ // vsub.f32 Qd, Qm, Qn.
+ src1[i] = src1[i] - src2[i];
}
- case 0x4: {
- // vshl s<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- ShiftByRegister<uint8_t, int8_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- ShiftByRegister<uint16_t, int16_t, kSimd128Size>(this, Vd, Vm,
- Vn);
- break;
- case Neon32:
- ShiftByRegister<uint32_t, int32_t, kSimd128Size>(this, Vd, Vm,
- Vn);
- break;
- case Neon64:
- ShiftByRegister<uint64_t, int64_t, kSimd128Size>(this, Vd, Vm,
- Vn);
- break;
- default:
- UNREACHABLE();
- break;
- }
+ }
+ set_neon_register(Vd, src1);
+ } else if (!u && opc == 0xE && !sz && !op1) {
+ // vceq.f32.
+ float src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ uint32_t dst[4];
+ for (int i = 0; i < 4; i++) {
+ dst[i] = (src1[i] == src2[i]) ? 0xFFFFFFFF : 0;
+ }
+ set_neon_register(Vd, dst);
+ } else if (!u && opc == 0xF && op1) {
+ float src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ if (instr->Bit(21) == 0) {
+ // vrecps.f32 Qd, Qm, Qn.
+ for (int i = 0; i < 4; i++) {
+ src1[i] = 2.0f - src1[i] * src2[i];
+ }
+ } else {
+ // vrsqrts.f32 Qd, Qm, Qn.
+ for (int i = 0; i < 4; i++) {
+ src1[i] = (3.0f - src1[i] * src2[i]) * 0.5f;
+ }
+ }
+ set_neon_register(Vd, src1);
+ } else if (!u && opc == 0xF && !op1) {
+ float src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ // vmin/vmax.f32 Qd, Qm, Qn.
+ bool min = instr->Bit(21) == 1;
+ bool saved = FPSCR_default_NaN_mode_;
+ FPSCR_default_NaN_mode_ = true;
+ for (int i = 0; i < 4; i++) {
+ // vmin returns default NaN if any input is NaN.
+ src1[i] = canonicalizeNaN(MinMax(src1[i], src2[i], min));
+ }
+ FPSCR_default_NaN_mode_ = saved;
+ set_neon_register(Vd, src1);
+ } else if (u && opc == 0 && op1) {
+ // vqadd.u<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ AddSat<uint8_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ AddSat<uint16_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ AddSat<uint32_t>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (u && opc == 1 && sz == 1 && op1) {
+ // vbsl.size Qd, Qm, Qn.
+ uint32_t dst[4], src1[4], src2[4];
+ get_neon_register(Vd, dst);
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ dst[i] = (dst[i] & src1[i]) | (~dst[i] & src2[i]);
+ }
+ set_neon_register(Vd, dst);
+ } else if (u && opc == 1 && sz == 0 && !q && op1) {
+ // veor Dd, Dn, Dm
+ uint64_t src1, src2;
+ get_d_register(Vn, &src1);
+ get_d_register(Vm, &src2);
+ src1 ^= src2;
+ set_d_register(Vd, &src1);
+ } else if (u && opc == 1 && sz == 0 && q && op1) {
+ // veor Qd, Qn, Qm
+ uint32_t src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ for (int i = 0; i < 4; i++) src1[i] ^= src2[i];
+ set_neon_register(Vd, src1);
+ } else if (u && opc == 1 && !op1) {
+ // vrhadd.u<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ RoundingAverageUnsigned<uint8_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ RoundingAverageUnsigned<uint16_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ RoundingAverageUnsigned<uint32_t>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (u && opc == 2 && op1) {
+ // vqsub.u<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ SubSat<uint8_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ SubSat<uint16_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ SubSat<uint32_t>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (u && opc == 3) {
+ // vcge/vcgt.u<size> Qd, Qm, Qn.
+ bool ge = instr->Bit(4) == 1;
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ CompareGreater<uint8_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ break;
+ case Neon16:
+ CompareGreater<uint16_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ break;
+ case Neon32:
+ CompareGreater<uint32_t, kSimd128Size>(this, Vd, Vm, Vn, ge);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (u && opc == 4 && !op1) {
+ // vshl u<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ ShiftByRegister<uint8_t, int8_t, kSimd128Size>(this, Vd, Vm, Vn);
break;
+ case Neon16:
+ ShiftByRegister<uint16_t, int16_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ ShiftByRegister<uint32_t, int32_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon64:
+ ShiftByRegister<uint64_t, int64_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (u && opc == 6) {
+ // vmin/vmax.u<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ bool min = instr->Bit(4) != 0;
+ switch (size) {
+ case Neon8:
+ MinMax<uint8_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ break;
+ case Neon16:
+ MinMax<uint16_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ break;
+ case Neon32:
+ MinMax<uint32_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (u && opc == 8 && !op1) {
+ // vsub.size Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ Sub<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ Sub<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ Sub<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon64:
+ Sub<uint64_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ }
+ } else if (u && opc == 8 && op1) {
+ // vceq.size Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ CompareEqual<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ CompareEqual<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ CompareEqual<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (u && opc == 0xA) {
+ // vpmin/vpmax.u<size> Dd, Dm, Dn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ bool min = instr->Bit(4) != 0;
+ switch (size) {
+ case Neon8:
+ PairwiseMinMax<uint8_t>(this, Vd, Vm, Vn, min);
+ break;
+ case Neon16:
+ PairwiseMinMax<uint16_t>(this, Vd, Vm, Vn, min);
+ break;
+ case Neon32:
+ PairwiseMinMax<uint32_t>(this, Vd, Vm, Vn, min);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (u && opc == 0xD && sz == 0 && q && op1) {
+ // vmul.f32 Qd, Qn, Qm
+ float src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = src1[i] * src2[i];
+ }
+ set_neon_register(Vd, src1);
+ } else if (u && opc == 0xD && sz == 0 && !q && !op1) {
+ // vpadd.f32 Dd, Dn, Dm
+ PairwiseAdd<float>(this, Vd, Vm, Vn);
+ } else if (u && opc == 0xE && !op1) {
+ // vcge/vcgt.f32 Qd, Qm, Qn
+ bool ge = instr->Bit(21) == 0;
+ float src1[4], src2[4];
+ get_neon_register(Vn, src1);
+ get_neon_register(Vm, src2);
+ uint32_t dst[4];
+ for (int i = 0; i < 4; i++) {
+ if (ge) {
+ dst[i] = src1[i] >= src2[i] ? 0xFFFFFFFFu : 0;
+ } else {
+ dst[i] = src1[i] > src2[i] ? 0xFFFFFFFFu : 0;
}
- case 0x6: {
- // vmin/vmax.u<size> Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- bool min = instr->Bit(4) != 0;
- switch (size) {
+ }
+ set_neon_register(Vd, dst);
+ } else {
+ UNIMPLEMENTED();
+ }
+ return;
+ } else if (op0 == 1 && op1 == 0) {
+ DecodeAdvancedSIMDTwoOrThreeRegisters(instr);
+ } else if (op0 == 1 && op1 == 1) {
+ // Advanced SIMD shifts and immediate generation.
+ if (instr->Bits(21, 19) == 0 && instr->Bit(7) == 0) {
+ VmovImmediate(this, instr);
+ } else {
+ // Advanced SIMD two registers and shift amount.
+ int u = instr->Bit(24);
+ int imm3H = instr->Bits(21, 19);
+ int imm3L = instr->Bits(18, 16);
+ int opc = instr->Bits(11, 8);
+ int l = instr->Bit(7);
+ int q = instr->Bit(6);
+ int imm3H_L = imm3H << 1 | l;
+
+ if (imm3H_L != 0 && opc == 0) {
+ // vshr.s<size> Qd, Qm, shift
+ int imm7 = instr->Bits(21, 16);
+ if (instr->Bit(7) != 0) imm7 += 64;
+ int size = base::bits::RoundDownToPowerOfTwo32(imm7);
+ int shift = 2 * size - imm7;
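+        // e.g. imm7 == 60 (0b0111100): size == 32, shift == 64 - 60 == 4,
+        // i.e. shift each 32-bit lane right by four.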
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ NeonSize ns =
+ static_cast<NeonSize>(base::bits::WhichPowerOfTwo(size >> 3));
+ if (u) {
+ switch (ns) {
case Neon8:
- MinMax<uint8_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ ShiftRight<uint8_t, kSimd128Size>(this, Vd, Vm, shift);
break;
case Neon16:
- MinMax<uint16_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ ShiftRight<uint16_t, kSimd128Size>(this, Vd, Vm, shift);
break;
case Neon32:
- MinMax<uint32_t, kSimd128Size>(this, Vd, Vm, Vn, min);
+ ShiftRight<uint32_t, kSimd128Size>(this, Vd, Vm, shift);
break;
- default:
- UNREACHABLE();
+ case Neon64:
+ ShiftRight<uint64_t, kSimd128Size>(this, Vd, Vm, shift);
break;
}
- break;
- }
- case 0x8: {
- if (instr->Bit(4) == 0) {
- // vsub.size Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- Sub<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- Sub<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- Sub<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon64:
- Sub<uint64_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- }
- } else {
- // vceq.size Qd, Qm, Qn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- switch (size) {
- case Neon8:
- CompareEqual<uint8_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon16:
- CompareEqual<uint16_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- case Neon32:
- CompareEqual<uint32_t, kSimd128Size>(this, Vd, Vm, Vn);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- break;
- }
- case 0xA: {
- // vpmin/vpmax.u<size> Dd, Dm, Dn.
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- bool min = instr->Bit(4) != 0;
- switch (size) {
+ } else {
+ switch (ns) {
case Neon8:
- PairwiseMinMax<uint8_t>(this, Vd, Vm, Vn, min);
+ ArithmeticShiftRight<int8_t, kSimd128Size>(this, Vd, Vm, shift);
break;
case Neon16:
- PairwiseMinMax<uint16_t>(this, Vd, Vm, Vn, min);
+ ArithmeticShiftRight<int16_t, kSimd128Size>(this, Vd, Vm, shift);
break;
case Neon32:
- PairwiseMinMax<uint32_t>(this, Vd, Vm, Vn, min);
+ ArithmeticShiftRight<int32_t, kSimd128Size>(this, Vd, Vm, shift);
break;
- default:
- UNREACHABLE();
+ case Neon64:
+ ArithmeticShiftRight<int64_t, kSimd128Size>(this, Vd, Vm, shift);
break;
}
- break;
}
- case 0xD: {
- if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
- instr->Bit(4) == 1) {
- // vmul.f32 Qd, Qn, Qm
- float src1[4], src2[4];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- for (int i = 0; i < 4; i++) {
- src1[i] = src1[i] * src2[i];
- }
- set_neon_register(Vd, src1);
- } else if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 0 &&
- instr->Bit(4) == 0) {
- // vpadd.f32 Dd, Dn, Dm
- PairwiseAdd<float>(this, Vd, Vm, Vn);
- } else {
- UNIMPLEMENTED();
- }
- break;
- }
- case 0xE: {
- if (instr->Bit(20) == 0 && instr->Bit(4) == 0) {
- // vcge/vcgt.f32 Qd, Qm, Qn
- bool ge = instr->Bit(21) == 0;
- float src1[4], src2[4];
- get_neon_register(Vn, src1);
- get_neon_register(Vm, src2);
- uint32_t dst[4];
- for (int i = 0; i < 4; i++) {
- if (ge) {
- dst[i] = src1[i] >= src2[i] ? 0xFFFFFFFFu : 0;
- } else {
- dst[i] = src1[i] > src2[i] ? 0xFFFFFFFFu : 0;
- }
- }
- set_neon_register(Vd, dst);
- } else {
- UNIMPLEMENTED();
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- break;
- }
- case 7:
- if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
- (instr->Bit(4) == 1)) {
- // vmovl unsigned
- if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- int imm3 = instr->Bits(21, 19);
- switch (imm3) {
- case 1:
- Widen<uint8_t, uint16_t>(this, Vd, Vm);
- break;
- case 2:
- Widen<uint16_t, uint32_t>(this, Vd, Vm);
- break;
- case 4:
- Widen<uint32_t, uint64_t>(this, Vd, Vm);
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- } else if (instr->Opc1Value() == 7 && instr->Bit(4) == 0) {
- if (instr->Bits(19, 16) == 0xB && instr->Bits(11, 9) == 0x3 &&
- instr->Bit(6) == 1) {
- // vcvt.<Td>.<Tm> Qd, Qm.
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- uint32_t q_data[4];
- get_neon_register(Vm, q_data);
- int op = instr->Bits(8, 7);
- for (int i = 0; i < 4; i++) {
- switch (op) {
- case 0:
- // f32 <- s32, round towards nearest.
- q_data[i] = bit_cast<uint32_t>(std::round(
- static_cast<float>(bit_cast<int32_t>(q_data[i]))));
- break;
- case 1:
- // f32 <- u32, round towards nearest.
- q_data[i] = bit_cast<uint32_t>(
- std::round(static_cast<float>(q_data[i])));
- break;
- case 2:
- // s32 <- f32, round to zero.
- q_data[i] = static_cast<uint32_t>(
- ConvertDoubleToInt(bit_cast<float>(q_data[i]), false, RZ));
- break;
- case 3:
- // u32 <- f32, round to zero.
- q_data[i] = static_cast<uint32_t>(
- ConvertDoubleToInt(bit_cast<float>(q_data[i]), true, RZ));
- break;
- }
- }
- set_neon_register(Vd, q_data);
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0) {
- if (instr->Bit(6) == 0) {
- // vswp Dd, Dm.
- uint64_t dval, mval;
- int vd = instr->VFPDRegValue(kDoublePrecision);
- int vm = instr->VFPMRegValue(kDoublePrecision);
- get_d_register(vd, &dval);
- get_d_register(vm, &mval);
- set_d_register(vm, &dval);
- set_d_register(vd, &mval);
- } else {
- // vswp Qd, Qm.
- uint32_t dval[4], mval[4];
- int vd = instr->VFPDRegValue(kSimd128Precision);
- int vm = instr->VFPMRegValue(kSimd128Precision);
- get_neon_register(vd, dval);
- get_neon_register(vm, mval);
- set_neon_register(vm, dval);
- set_neon_register(vd, mval);
- }
- } else if (instr->Bits(11, 7) == 0x18) {
- // vdup.<size> Dd, Dm[index].
- // vdup.<size> Qd, Dm[index].
- int vm = instr->VFPMRegValue(kDoublePrecision);
- int imm4 = instr->Bits(19, 16);
- int size = 0, index = 0, mask = 0;
- if ((imm4 & 0x1) != 0) {
- size = 8;
- index = imm4 >> 1;
- mask = 0xFFu;
- } else if ((imm4 & 0x2) != 0) {
- size = 16;
- index = imm4 >> 2;
- mask = 0xFFFFu;
- } else {
- size = 32;
- index = imm4 >> 3;
- mask = 0xFFFFFFFFu;
- }
- uint64_t d_data;
- get_d_register(vm, &d_data);
- uint32_t scalar = (d_data >> (size * index)) & mask;
- uint32_t duped = scalar;
- for (int i = 1; i < 32 / size; i++) {
- scalar <<= size;
- duped |= scalar;
- }
- uint32_t result[4] = {duped, duped, duped, duped};
- if (instr->Bit(6) == 0) {
- int vd = instr->VFPDRegValue(kDoublePrecision);
- set_d_register(vd, result);
- } else {
- int vd = instr->VFPDRegValue(kSimd128Precision);
- set_neon_register(vd, result);
- }
- } else if (instr->Bits(19, 16) == 0 && instr->Bits(11, 6) == 0x17) {
- // vmvn Qd, Qm.
- int vd = instr->VFPDRegValue(kSimd128Precision);
- int vm = instr->VFPMRegValue(kSimd128Precision);
- uint32_t q_data[4];
- get_neon_register(vm, q_data);
- for (int i = 0; i < 4; i++) q_data[i] = ~q_data[i];
- set_neon_register(vd, q_data);
- } else if (instr->Bits(11, 10) == 0x2) {
- // vtb[l,x] Dd, <list>, Dm.
- int vd = instr->VFPDRegValue(kDoublePrecision);
- int vn = instr->VFPNRegValue(kDoublePrecision);
- int vm = instr->VFPMRegValue(kDoublePrecision);
- int table_len = (instr->Bits(9, 8) + 1) * kDoubleSize;
- bool vtbx = instr->Bit(6) != 0; // vtbl / vtbx
- uint64_t destination = 0, indices = 0, result = 0;
- get_d_register(vd, &destination);
- get_d_register(vm, &indices);
- for (int i = 0; i < kDoubleSize; i++) {
- int shift = i * kBitsPerByte;
- int index = (indices >> shift) & 0xFF;
- if (index < table_len) {
- uint64_t table;
- get_d_register(vn + index / kDoubleSize, &table);
- result |=
- ((table >> ((index % kDoubleSize) * kBitsPerByte)) & 0xFF)
- << shift;
- } else if (vtbx) {
- result |= destination & (0xFFull << shift);
- }
- }
- set_d_register(vd, &result);
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 8) == 0x1) {
- NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
- if (instr->Bit(6) == 0) {
- int Vd = instr->VFPDRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- if (instr->Bit(7) == 1) {
- // vzip.<size> Dd, Dm.
- switch (size) {
- case Neon8:
- Zip<uint8_t, kDoubleSize>(this, Vd, Vm);
- break;
- case Neon16:
- Zip<uint16_t, kDoubleSize>(this, Vd, Vm);
- break;
- case Neon32:
- UNIMPLEMENTED();
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- // vuzp.<size> Dd, Dm.
- switch (size) {
- case Neon8:
- Unzip<uint8_t, kDoubleSize>(this, Vd, Vm);
- break;
- case Neon16:
- Unzip<uint16_t, kDoubleSize>(this, Vd, Vm);
- break;
- case Neon32:
- UNIMPLEMENTED();
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- } else {
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- if (instr->Bit(7) == 1) {
- // vzip.<size> Qd, Qm.
- switch (size) {
- case Neon8:
- Zip<uint8_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon16:
- Zip<uint16_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon32:
- Zip<uint32_t, kSimd128Size>(this, Vd, Vm);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- // vuzp.<size> Qd, Qm.
- switch (size) {
- case Neon8:
- Unzip<uint8_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon16:
- Unzip<uint16_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon32:
- Unzip<uint32_t, kSimd128Size>(this, Vd, Vm);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- }
- } else if (instr->Bits(17, 16) == 0 && instr->Bits(11, 9) == 0) {
- // vrev<op>.size Qd, Qm
+ } else if (imm3H_L != 0 && imm3L == 0 && opc == 0b1010 && !q) {
+ if (u) {
+ // vmovl unsigned
+ if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
- NeonSize op = static_cast<NeonSize>(static_cast<int>(Neon64) -
- instr->Bits(8, 7));
- switch (op) {
- case Neon16: {
- DCHECK_EQ(Neon8, size);
- uint8_t src[16];
- get_neon_register(Vm, src);
- for (int i = 0; i < 16; i += 2) {
- std::swap(src[i], src[i + 1]);
- }
- set_neon_register(Vd, src);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ int imm3 = instr->Bits(21, 19);
+ switch (imm3) {
+ case 1:
+ Widen<uint8_t, uint16_t>(this, Vd, Vm);
break;
- }
- case Neon32: {
- switch (size) {
- case Neon16: {
- uint16_t src[8];
- get_neon_register(Vm, src);
- for (int i = 0; i < 8; i += 2) {
- std::swap(src[i], src[i + 1]);
- }
- set_neon_register(Vd, src);
- break;
- }
- case Neon8: {
- uint8_t src[16];
- get_neon_register(Vm, src);
- for (int i = 0; i < 4; i++) {
- std::swap(src[i * 4], src[i * 4 + 3]);
- std::swap(src[i * 4 + 1], src[i * 4 + 2]);
- }
- set_neon_register(Vd, src);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
+ case 2:
+ Widen<uint16_t, uint32_t>(this, Vd, Vm);
break;
- }
- case Neon64: {
- switch (size) {
- case Neon32: {
- uint32_t src[4];
- get_neon_register(Vm, src);
- std::swap(src[0], src[1]);
- std::swap(src[2], src[3]);
- set_neon_register(Vd, src);
- break;
- }
- case Neon16: {
- uint16_t src[8];
- get_neon_register(Vm, src);
- for (int i = 0; i < 2; i++) {
- std::swap(src[i * 4], src[i * 4 + 3]);
- std::swap(src[i * 4 + 1], src[i * 4 + 2]);
- }
- set_neon_register(Vd, src);
- break;
- }
- case Neon8: {
- uint8_t src[16];
- get_neon_register(Vm, src);
- for (int i = 0; i < 4; i++) {
- std::swap(src[i], src[7 - i]);
- std::swap(src[i + 8], src[15 - i]);
- }
- set_neon_register(Vd, src);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
+ case 4:
+ Widen<uint32_t, uint64_t>(this, Vd, Vm);
break;
- }
default:
- UNREACHABLE();
+ UNIMPLEMENTED();
break;
}
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0x1) {
- NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
- if (instr->Bit(6) == 0) {
- int Vd = instr->VFPDRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- // vtrn.<size> Dd, Dm.
- switch (size) {
- case Neon8:
- Transpose<uint8_t, kDoubleSize>(this, Vd, Vm);
- break;
- case Neon16:
- Transpose<uint16_t, kDoubleSize>(this, Vd, Vm);
- break;
- case Neon32:
- Transpose<uint32_t, kDoubleSize>(this, Vd, Vm);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- // vtrn.<size> Qd, Qm.
- switch (size) {
- case Neon8:
- Transpose<uint8_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon16:
- Transpose<uint16_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon32:
- Transpose<uint32_t, kSimd128Size>(this, Vd, Vm);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- } else if (instr->Bits(17, 16) == 0x1 && instr->Bit(11) == 0) {
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
- if (instr->Bits(9, 6) == 0xD) {
- // vabs<type>.<size> Qd, Qm
- if (instr->Bit(10) != 0) {
- // floating point (clear sign bits)
- uint32_t src[4];
- get_neon_register(Vm, src);
- for (int i = 0; i < 4; i++) {
- src[i] &= ~0x80000000;
- }
- set_neon_register(Vd, src);
- } else {
- // signed integer
- switch (size) {
- case Neon8:
- Abs<int8_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon16:
- Abs<int16_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon32:
- Abs<int32_t, kSimd128Size>(this, Vd, Vm);
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- }
- } else if (instr->Bits(9, 6) == 0xF) {
- // vneg<type>.<size> Qd, Qm (signed integer)
- if (instr->Bit(10) != 0) {
- // floating point (toggle sign bits)
- uint32_t src[4];
- get_neon_register(Vm, src);
- for (int i = 0; i < 4; i++) {
- src[i] ^= 0x80000000;
- }
- set_neon_register(Vd, src);
- } else {
- // signed integer
- switch (size) {
- case Neon8:
- Neg<int8_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon16:
- Neg<int16_t, kSimd128Size>(this, Vd, Vm);
- break;
- case Neon32:
- Neg<int32_t, kSimd128Size>(this, Vd, Vm);
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- }
- } else {
- UNIMPLEMENTED();
- }
- } else if (instr->Bits(19, 18) == 0x2 && instr->Bits(17, 16) == 0x3 &&
- instr->Bits(11, 8) == 0x5) {
- // vrecpe/vrsqrte.f32 Qd, Qm.
+ } else {
+ // vmovl signed
+ if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- uint32_t src[4];
- get_neon_register(Vm, src);
- if (instr->Bit(7) == 0) {
- for (int i = 0; i < 4; i++) {
- float denom = bit_cast<float>(src[i]);
- div_zero_vfp_flag_ = (denom == 0);
- float result = 1.0f / denom;
- result = canonicalizeNaN(result);
- src[i] = bit_cast<uint32_t>(result);
- }
- } else {
- for (int i = 0; i < 4; i++) {
- float radicand = bit_cast<float>(src[i]);
- float result = 1.0f / std::sqrt(radicand);
- result = canonicalizeNaN(result);
- src[i] = bit_cast<uint32_t>(result);
- }
- }
- set_neon_register(Vd, src);
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 8) == 0x2 &&
- instr->Bits(7, 6) != 0) {
- // vqmovn.<type><size> Dd, Qm.
- int Vd = instr->VFPDRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kSimd128Precision);
- NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
- bool dst_unsigned = instr->Bit(6) != 0;
- bool src_unsigned = instr->Bits(7, 6) == 0b11;
- DCHECK_IMPLIES(src_unsigned, dst_unsigned);
- switch (size) {
- case Neon8: {
- if (src_unsigned) {
- SaturatingNarrow<uint16_t, uint8_t>(this, Vd, Vm);
- } else if (dst_unsigned) {
- SaturatingUnsignedNarrow<int16_t, uint8_t>(this, Vd, Vm);
- } else {
- SaturatingNarrow<int16_t, int8_t>(this, Vd, Vm);
- }
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ int imm3 = instr->Bits(21, 19);
+ switch (imm3) {
+ case 1:
+ Widen<int8_t, int16_t>(this, Vd, Vm);
break;
- }
- case Neon16: {
- if (src_unsigned) {
- SaturatingNarrow<uint32_t, uint16_t>(this, Vd, Vm);
- } else if (dst_unsigned) {
- SaturatingUnsignedNarrow<int32_t, uint16_t>(this, Vd, Vm);
- } else {
- SaturatingNarrow<int32_t, int16_t>(this, Vd, Vm);
- }
+ case 2:
+ Widen<int16_t, int32_t>(this, Vd, Vm);
break;
- }
- case Neon32: {
- if (src_unsigned) {
- SaturatingNarrow<uint64_t, uint32_t>(this, Vd, Vm);
- } else if (dst_unsigned) {
- SaturatingUnsignedNarrow<int64_t, uint32_t>(this, Vd, Vm);
- } else {
- SaturatingNarrow<int64_t, int32_t>(this, Vd, Vm);
- }
+ case 4:
+ Widen<int32_t, int64_t>(this, Vd, Vm);
break;
- }
default:
UNIMPLEMENTED();
break;
}
- } else if (instr->Bits(17, 16) == 0x2 && instr->Bit(10) == 1) {
- // vrint<q>.<dt> <Dd>, <Dm>
- // vrint<q>.<dt> <Qd>, <Qm>
- // See F6.1.205
- int regs = instr->Bit(6) + 1;
- int rounding_mode = instr->Bits(9, 7);
- float (*fproundint)(float) = nullptr;
- switch (rounding_mode) {
- case 0:
- fproundint = &nearbyintf;
- break;
- case 3:
- fproundint = &truncf;
- break;
- case 5:
- fproundint = &floorf;
- break;
- case 7:
- fproundint = &ceilf;
- break;
- default:
- UNIMPLEMENTED();
- }
- int vm = instr->VFPMRegValue(kDoublePrecision);
- int vd = instr->VFPDRegValue(kDoublePrecision);
-
- float floats[2];
- for (int r = 0; r < regs; r++) {
- // We cannot simply use GetVFPSingleValue since our Q registers
- // might not map to any S registers at all.
- get_neon_register<float, kDoubleSize>(vm + r, floats);
- for (int e = 0; e < 2; e++) {
- floats[e] = canonicalizeNaN(fproundint(floats[e]));
- }
- set_neon_register<float, kDoubleSize>(vd + r, floats);
- }
- } else {
- UNIMPLEMENTED();
}
- } else if (instr->Bits(11, 8) == 0 && instr->Bit(4) == 1) {
- // vshr.u<size> Qd, Qm, shift
+ } else if (!u && imm3H_L != 0 && opc == 0b0101) {
+ // vshl.i<size> Qd, Qm, shift
int imm7 = instr->Bits(21, 16);
if (instr->Bit(7) != 0) imm7 += 64;
int size = base::bits::RoundDownToPowerOfTwo32(imm7);
- int shift = 2 * size - imm7;
+ int shift = imm7 - size;
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
NeonSize ns =
static_cast<NeonSize>(base::bits::WhichPowerOfTwo(size >> 3));
switch (ns) {
case Neon8:
- ShiftRight<uint8_t, kSimd128Size>(this, Vd, Vm, shift);
+ ShiftLeft<uint8_t, kSimd128Size>(this, Vd, Vm, shift);
break;
case Neon16:
- ShiftRight<uint16_t, kSimd128Size>(this, Vd, Vm, shift);
+ ShiftLeft<uint16_t, kSimd128Size>(this, Vd, Vm, shift);
break;
case Neon32:
- ShiftRight<uint32_t, kSimd128Size>(this, Vd, Vm, shift);
+ ShiftLeft<uint32_t, kSimd128Size>(this, Vd, Vm, shift);
break;
case Neon64:
- ShiftRight<uint64_t, kSimd128Size>(this, Vd, Vm, shift);
+ ShiftLeft<uint64_t, kSimd128Size>(this, Vd, Vm, shift);
break;
}
- } else if (instr->Bits(11, 8) == 0x5 && instr->Bit(6) == 0 &&
- instr->Bit(4) == 1) {
- // vsli.<size> Dd, Dm, shift
+ } else if (u && imm3H_L != 0 && opc == 0b0100) {
+ // vsri.<size> Dd, Dm, shift
int imm7 = instr->Bits(21, 16);
if (instr->Bit(7) != 0) imm7 += 64;
int size = base::bits::RoundDownToPowerOfTwo32(imm7);
- int shift = imm7 - size;
+ int shift = 2 * size - imm7;
int Vd = instr->VFPDRegValue(kDoublePrecision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
switch (size) {
case 8:
- ShiftLeftAndInsert<uint8_t, kDoubleSize>(this, Vd, Vm, shift);
+ ShiftRightAndInsert<uint8_t, kDoubleSize>(this, Vd, Vm, shift);
break;
case 16:
- ShiftLeftAndInsert<uint16_t, kDoubleSize>(this, Vd, Vm, shift);
+ ShiftRightAndInsert<uint16_t, kDoubleSize>(this, Vd, Vm, shift);
break;
case 32:
- ShiftLeftAndInsert<uint32_t, kDoubleSize>(this, Vd, Vm, shift);
+ ShiftRightAndInsert<uint32_t, kDoubleSize>(this, Vd, Vm, shift);
break;
case 64:
- ShiftLeftAndInsert<uint64_t, kDoubleSize>(this, Vd, Vm, shift);
+ ShiftRightAndInsert<uint64_t, kDoubleSize>(this, Vd, Vm, shift);
break;
default:
UNREACHABLE();
break;
}
- } else if (instr->Bits(11, 8) == 0x4 && instr->Bit(6) == 0 &&
- instr->Bit(4) == 1) {
- // vsri.<size> Dd, Dm, shift
+ } else if (u && imm3H_L != 0 && opc == 0b0101) {
+ // vsli.<size> Dd, Dm, shift
int imm7 = instr->Bits(21, 16);
if (instr->Bit(7) != 0) imm7 += 64;
int size = base::bits::RoundDownToPowerOfTwo32(imm7);
- int shift = 2 * size - imm7;
+ int shift = imm7 - size;
int Vd = instr->VFPDRegValue(kDoublePrecision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
switch (size) {
case 8:
- ShiftRightAndInsert<uint8_t, kDoubleSize>(this, Vd, Vm, shift);
+ ShiftLeftAndInsert<uint8_t, kDoubleSize>(this, Vd, Vm, shift);
break;
case 16:
- ShiftRightAndInsert<uint16_t, kDoubleSize>(this, Vd, Vm, shift);
+ ShiftLeftAndInsert<uint16_t, kDoubleSize>(this, Vd, Vm, shift);
break;
case 32:
- ShiftRightAndInsert<uint32_t, kDoubleSize>(this, Vd, Vm, shift);
+ ShiftLeftAndInsert<uint32_t, kDoubleSize>(this, Vd, Vm, shift);
break;
case 64:
- ShiftRightAndInsert<uint64_t, kDoubleSize>(this, Vd, Vm, shift);
+ ShiftLeftAndInsert<uint64_t, kDoubleSize>(this, Vd, Vm, shift);
break;
default:
UNREACHABLE();
break;
}
- } else if (instr->Bits(11, 8) == 0x8 && instr->Bit(6) == 0 &&
- instr->Bit(4) == 0) {
- // vmlal.u<size> Qd, Dn, Dm
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- if (size != Neon32) UNIMPLEMENTED();
+ }
+ }
+ return;
+ }
+}
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- uint64_t src1, src2, dst[2];
-
- get_neon_register<uint64_t>(Vd, dst);
- get_d_register(Vn, &src1);
- get_d_register(Vm, &src2);
- dst[0] += (src1 & 0xFFFFFFFFULL) * (src2 & 0xFFFFFFFFULL);
- dst[1] += (src1 >> 32) * (src2 >> 32);
- set_neon_register<uint64_t>(Vd, dst);
- } else if (instr->Bits(11, 8) == 0xC && instr->Bit(6) == 0 &&
- instr->Bit(4) == 0) {
- // vmull.u<size> Qd, Dn, Dm
- NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
- int Vd = instr->VFPDRegValue(kSimd128Precision);
- int Vn = instr->VFPNRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- switch (size) {
- case Neon32: {
- MultiplyLong<uint32_t, uint64_t>(this, Vd, Vn, Vm);
- break;
- }
- default:
- UNIMPLEMENTED();
- }
- } else if (instr->Bits(21, 19) == 0 && instr->Bit(7) == 0 &&
- instr->Bit(4) == 1) {
- // vmov (immediate), see ARM DDI 0487F.b F6.1.134, decoding A4.
- // Similar to vmov (immediate above), but when high bit of immediate is
- // set.
- VmovImmediate(this, instr);
+void Simulator::DecodeMemoryHintsAndBarriers(Instruction* instr) {
+ switch (instr->SpecialValue()) {
+ case 0xA:
+ case 0xB:
+ if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xF)) {
+ // pld: ignore instruction.
+ } else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) {
+ // dsb, dmb, isb: ignore instruction for now.
+ // TODO(binji): implement
+ // Also refer to the ARMv6 CP15 equivalents in DecodeTypeCP15.
} else {
UNIMPLEMENTED();
}
break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void Simulator::DecodeAdvancedSIMDElementOrStructureLoadStore(
+ Instruction* instr) {
+ switch (instr->SpecialValue()) {
case 8:
if (instr->Bits(21, 20) == 0) {
// vst1
@@ -5745,7 +5657,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
case 9: {
if (instr->Bits(21, 20) == 2) {
// Bits(11, 8) is the B field in A7.7 Advanced SIMD element or structure
- // load/store instructions.
+ // load/store instructions. See table A7-21.
if (instr->Bits(11, 8) == 0xC) {
// vld1 (single element to all lanes).
DCHECK_EQ(instr->Bits(11, 8), 0b1100); // Type field.
@@ -5791,6 +5703,53 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
set_register(Rn, get_register(Rn) + get_register(Rm));
}
}
+ } else if (instr->Bits(11, 8) == 8 ||
+ ((instr->Bits(11, 8) & 0b1011) == 0)) {
+ // vld1 (single element to one lane)
+ int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ int Rn = instr->VnValue();
+ int Rm = instr->VmValue();
+ int32_t address = get_register(Rn);
+ int size = instr->Bits(11, 10);
+ uint64_t dreg;
+ get_d_register(Vd, &dreg);
+ switch (size) {
+ case Neon8: {
+ uint64_t data = ReadBU(address);
+ DCHECK_EQ(0, instr->Bit(4));
+ int i = instr->Bits(7, 5) * 8;
+ dreg = (dreg & ~(uint64_t{0xff} << i)) | (data << i);
+ break;
+ }
+ case Neon16: {
+ DCHECK_EQ(0, instr->Bits(5, 4)); // Alignment not supported.
+ uint64_t data = ReadHU(address);
+ int i = instr->Bits(7, 6) * 16;
+ dreg = (dreg & ~(uint64_t{0xffff} << i)) | (data << i);
+ break;
+ }
+ case Neon32: {
+ DCHECK_EQ(0, instr->Bits(6, 4)); // Alignment not supported.
+ uint64_t data = static_cast<unsigned>(ReadW(address));
+ int i = instr->Bit(7) * 32;
+ dreg = (dreg & ~(uint64_t{0xffffffff} << i)) | (data << i);
+ break;
+ }
+ case Neon64: {
+ // Should have been handled by vld1 (single element to all lanes).
+ UNREACHABLE();
+ }
+ }
+ set_d_register(Vd, &dreg);
+
+ // write back
+ if (Rm != 15) {
+ if (Rm == 13) {
+ set_register(Rn, address);
+ } else {
+ set_register(Rn, get_register(Rn) + get_register(Rm));
+ }
+ }
} else {
UNIMPLEMENTED();
}
@@ -5799,18 +5758,13 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
- case 0xA:
- case 0xB:
- if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xF)) {
- // pld: ignore instruction.
- } else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) {
- // dsb, dmb, isb: ignore instruction for now.
- // TODO(binji): implement
- // Also refer to the ARMv6 CP15 equivalents in DecodeTypeCP15.
- } else {
- UNIMPLEMENTED();
- }
- break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void Simulator::DecodeFloatingPointDataProcessing(Instruction* instr) {
+ switch (instr->SpecialValue()) {
case 0x1D:
if (instr->Opc1Value() == 0x7 && instr->Opc3Value() == 0x1 &&
instr->Bits(11, 9) == 0x5 && instr->Bits(19, 18) == 0x2) {
@@ -5979,6 +5933,21 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
}
+void Simulator::DecodeSpecialCondition(Instruction* instr) {
+ int op0 = instr->Bits(25, 24);
+ int op1 = instr->Bits(11, 9);
+ int op2 = instr->Bit(4);
+
+ if (instr->Bit(27) == 0) {
+ DecodeUnconditional(instr);
+ } else if ((instr->Bits(27, 26) == 0b11) && (op0 == 0b10) &&
+ ((op1 >> 1) == 0b10) && !op2) {
+ DecodeFloatingPointDataProcessing(instr);
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
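
Note on the vmovl handling added above: the simulator now routes vmovl.u<size> and vmovl.s<size> through a Widen helper that extends each narrow lane of a D register into a wide lane of a Q register. Below is a minimal standalone sketch of that semantics, assuming plain arrays in place of the simulator's register accessors; the template signature is illustrative, not V8's actual declaration.

#include <cstdint>
#include <cstdio>

// Each narrow source lane is zero- or sign-extended into a wide
// destination lane, matching vmovl.u<size> and vmovl.s<size>.
template <typename Narrow, typename Wide, int kLanes>
void WidenSketch(const Narrow (&src)[kLanes], Wide (&dst)[kLanes]) {
  for (int i = 0; i < kLanes; i++) {
    // Integral conversion zero-extends unsigned types and
    // sign-extends signed ones.
    dst[i] = static_cast<Wide>(src[i]);
  }
}

int main() {
  int8_t d[8] = {-1, 2, -3, 4, -5, 6, -7, 8};
  int16_t q[8];
  WidenSketch(d, q);
  printf("%d %d\n", q[0], q[1]);  // -1 2: sign bits preserved.
  return 0;
}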
diff --git a/deps/v8/src/execution/arm/simulator-arm.h b/deps/v8/src/execution/arm/simulator-arm.h
index e577e0f815..84f857d5da 100644
--- a/deps/v8/src/execution/arm/simulator-arm.h
+++ b/deps/v8/src/execution/arm/simulator-arm.h
@@ -386,6 +386,13 @@ class Simulator : public SimulatorBase {
void DecodeType6CoprocessorIns(Instruction* instr);
void DecodeSpecialCondition(Instruction* instr);
+ void DecodeFloatingPointDataProcessing(Instruction* instr);
+ void DecodeUnconditional(Instruction* instr);
+ void DecodeAdvancedSIMDDataProcessing(Instruction* instr);
+ void DecodeMemoryHintsAndBarriers(Instruction* instr);
+ void DecodeAdvancedSIMDElementOrStructureLoadStore(Instruction* instr);
+ void DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr);
+
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
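
The Decode* helpers declared above split the former monolithic DecodeSpecialCondition into per-group routines selected by instruction bit fields. A rough standalone sketch of that dispatch shape, using the op0/op1/op2 fields exactly as in the new DecodeSpecialCondition; the handlers are reduced to prints and everything else is simplified.

#include <cstdint>
#include <cstdio>

// Extract bits hi..lo (inclusive) of an instruction word.
inline int Bits(uint32_t instr, int hi, int lo) {
  return (instr >> lo) & ((1u << (hi - lo + 1)) - 1);
}

void Decode(uint32_t instr) {
  int op0 = Bits(instr, 25, 24);
  int op1 = Bits(instr, 11, 9);
  int op2 = Bits(instr, 4, 4);
  if (Bits(instr, 27, 27) == 0) {
    printf("unconditional\n");
  } else if (Bits(instr, 27, 26) == 0b11 && op0 == 0b10 &&
             (op1 >> 1) == 0b10 && op2 == 0) {
    printf("floating-point data processing\n");
  } else {
    printf("unimplemented\n");
  }
}

int main() {
  Decode(0x0E000800);  // Falls in the floating-point group above.
  return 0;
}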
diff --git a/deps/v8/src/execution/arm64/frame-constants-arm64.h b/deps/v8/src/execution/arm64/frame-constants-arm64.h
index 409fcec504..fba69f917d 100644
--- a/deps/v8/src/execution/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/execution/arm64/frame-constants-arm64.h
@@ -15,6 +15,7 @@ namespace internal {
// The layout of an EntryFrame is as follows:
//
+// BOTTOM OF THE STACK HIGHEST ADDRESS
// slot Entry frame
// +---------------------+-----------------------
// -20 | saved register d15 |
@@ -45,6 +46,7 @@ namespace internal {
// |- - - - - - - - - - -|
// 5 | padding | <-- stack ptr
// -----+---------------------+-----------------------
+// TOP OF THE STACK LOWEST ADDRESS
//
class EntryFrameConstants : public AllStatic {
public:
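
The two comment lines added above pin down the diagram's orientation: on arm64 the stack grows toward lower addresses, so the bottom row of the diagram is the highest address. A tiny demonstration of that convention follows; the observed ordering is platform-typical rather than guaranteed by the C++ standard.

#include <cstdio>

void Callee(const int* caller_local) {
  int callee_local = 0;
  // On a downward-growing stack the callee's locals sit at lower
  // addresses than the caller's.
  printf("caller %p callee %p\n", (const void*)caller_local,
         (const void*)&callee_local);
}

int main() {
  int caller_local = 0;
  Callee(&caller_local);
  return 0;
}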
diff --git a/deps/v8/src/execution/arm64/pointer-auth-arm64.cc b/deps/v8/src/execution/arm64/pointer-auth-arm64.cc
index eaa88445ec..7f4eeeb0ac 100644
--- a/deps/v8/src/execution/arm64/pointer-auth-arm64.cc
+++ b/deps/v8/src/execution/arm64/pointer-auth-arm64.cc
@@ -232,6 +232,9 @@ uint64_t Simulator::AuthPAC(uint64_t ptr, uint64_t context, PACKey key,
} else {
int error_lsb = GetTopPACBit(ptr, type) - 2;
uint64_t error_mask = UINT64_C(0x3) << error_lsb;
+ if (FLAG_sim_abort_on_bad_auth) {
+ FATAL("Pointer authentication failure.");
+ }
return (original_ptr & ~error_mask) | (error_code << error_lsb);
}
}
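
For context on the branch this hunk touches: when authentication fails and the new FLAG_sim_abort_on_bad_auth is not set, the simulator deliberately corrupts two bits just below the top PAC bit so that a later dereference of the pointer faults. A standalone sketch of that poisoning step; the bit position and error code are illustrative, the real values come from GetTopPACBit and the surrounding code.

#include <cstdint>
#include <cstdio>

// Overwrites the two bits below the top PAC bit with an error code, so
// a later dereference of the badly "authenticated" pointer faults.
uint64_t PoisonOnAuthFailure(uint64_t ptr, int top_pac_bit,
                             uint64_t error_code) {
  int error_lsb = top_pac_bit - 2;
  uint64_t error_mask = UINT64_C(0x3) << error_lsb;
  return (ptr & ~error_mask) | (error_code << error_lsb);
}

int main() {
  uint64_t ptr = UINT64_C(0x00007fffdeadbeef);
  printf("%016llx\n",
         (unsigned long long)PoisonOnAuthFailure(ptr, 55, 1));
  return 0;
}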
diff --git a/deps/v8/src/execution/execution.cc b/deps/v8/src/execution/execution.cc
index d780074861..3da4cbdbaf 100644
--- a/deps/v8/src/execution/execution.cc
+++ b/deps/v8/src/execution/execution.cc
@@ -194,10 +194,10 @@ MaybeHandle<Context> NewScriptContext(Isolate* isolate,
// If envRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
// exception.
MessageLocation location(script, 0, 1);
- isolate->ThrowAt(isolate->factory()->NewSyntaxError(
- MessageTemplate::kVarRedeclaration, name),
- &location);
- return MaybeHandle<Context>();
+ return isolate->ThrowAt<Context>(
+ isolate->factory()->NewSyntaxError(
+ MessageTemplate::kVarRedeclaration, name),
+ &location);
}
}
}
@@ -216,10 +216,10 @@ MaybeHandle<Context> NewScriptContext(Isolate* isolate,
// ES#sec-globaldeclarationinstantiation 5.d:
// If hasRestrictedGlobal is true, throw a SyntaxError exception.
MessageLocation location(script, 0, 1);
- isolate->ThrowAt(isolate->factory()->NewSyntaxError(
- MessageTemplate::kVarRedeclaration, name),
- &location);
- return MaybeHandle<Context>();
+ return isolate->ThrowAt<Context>(
+ isolate->factory()->NewSyntaxError(
+ MessageTemplate::kVarRedeclaration, name),
+ &location);
}
JSGlobalObject::InvalidatePropertyCell(global_object, name);
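
The change above folds the throw and the empty-MaybeHandle return into a single expression via a templated ThrowAt<Context>. A minimal sketch of the underlying pattern, with std::optional standing in for V8's MaybeHandle machinery; the types and names here are simplified stand-ins.

#include <cstdio>
#include <optional>
#include <string>

template <typename T>
using MaybeHandle = std::optional<T>;  // Stand-in for V8's MaybeHandle.

struct Context {
  std::string name;
};

MaybeHandle<Context> ThrowSyntaxError(const char* msg) {
  fprintf(stderr, "SyntaxError: %s\n", msg);
  return std::nullopt;  // Emptiness signals the pending exception.
}

MaybeHandle<Context> NewScriptContext(bool redeclared) {
  if (redeclared) {
    // One expression both records the error and propagates emptiness,
    // mirroring `return isolate->ThrowAt<Context>(...)` above.
    return ThrowSyntaxError("Identifier has already been declared");
  }
  return Context{"script"};
}

int main() {
  return NewScriptContext(/*redeclared=*/true).has_value() ? 1 : 0;
}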
diff --git a/deps/v8/src/execution/external-pointer-table.cc b/deps/v8/src/execution/external-pointer-table.cc
new file mode 100644
index 0000000000..5b199ae3cf
--- /dev/null
+++ b/deps/v8/src/execution/external-pointer-table.cc
@@ -0,0 +1,22 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/external-pointer-table.h"
+
+namespace v8 {
+namespace internal {
+
+void ExternalPointerTable::GrowTable(ExternalPointerTable* table) {
+ // TODO(v8:10391, saelo): overflow check here and in the multiplication below
+ uint32_t new_capacity = table->capacity_ + table->capacity_ / 2;
+ table->buffer_ = reinterpret_cast<Address*>(
+ realloc(table->buffer_, new_capacity * sizeof(Address)));
+ CHECK(table->buffer_);
+ memset(&table->buffer_[table->capacity_], 0,
+ (new_capacity - table->capacity_) * sizeof(Address));
+ table->capacity_ = new_capacity;
+}
+
+} // namespace internal
+} // namespace v8
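
The TODO in GrowTable above notes the missing overflow checks. A defensive standalone sketch of the same 1.5x growth step with those checks added; plain structs here, not V8's class.

#include <cstdint>
#include <cstdlib>
#include <cstring>

struct Table {
  uintptr_t* buffer;
  uint32_t capacity;
};

// Same 1.5x growth as GrowTable above, plus the overflow checks the
// TODO asks for.
void GrowTableChecked(Table* t) {
  uint64_t new_capacity =
      static_cast<uint64_t>(t->capacity) + t->capacity / 2;
  // Guard both the capacity counter and the byte-size multiplication.
  if (new_capacity > UINT32_MAX ||
      new_capacity > SIZE_MAX / sizeof(uintptr_t)) {
    abort();
  }
  uintptr_t* grown = static_cast<uintptr_t*>(realloc(
      t->buffer, static_cast<size_t>(new_capacity) * sizeof(uintptr_t)));
  if (grown == nullptr) abort();
  // Zero the newly added tail, as the original does with memset.
  memset(grown + t->capacity, 0,
         static_cast<size_t>(new_capacity - t->capacity) *
             sizeof(uintptr_t));
  t->buffer = grown;
  t->capacity = static_cast<uint32_t>(new_capacity);
}

int main() {
  Table t{static_cast<uintptr_t*>(calloc(4, sizeof(uintptr_t))), 4};
  GrowTableChecked(&t);
  free(t.buffer);
  return t.capacity == 6 ? 0 : 1;
}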
diff --git a/deps/v8/src/execution/external-pointer-table.h b/deps/v8/src/execution/external-pointer-table.h
new file mode 100644
index 0000000000..7774a39248
--- /dev/null
+++ b/deps/v8/src/execution/external-pointer-table.h
@@ -0,0 +1,80 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_EXTERNAL_POINTER_TABLE_H_
+#define V8_EXECUTION_EXTERNAL_POINTER_TABLE_H_
+
+#include "src/common/external-pointer.h"
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class V8_EXPORT_PRIVATE ExternalPointerTable {
+ public:
+ static const int kExternalPointerTableInitialCapacity = 1024;
+
+ ExternalPointerTable()
+ : buffer_(reinterpret_cast<Address*>(
+ calloc(kExternalPointerTableInitialCapacity, sizeof(Address)))),
+ length_(1),
+ capacity_(kExternalPointerTableInitialCapacity),
+ freelist_head_(0) {
+ // Explicitly set up the invalid nullptr entry.
+ STATIC_ASSERT(kNullExternalPointer == 0);
+ buffer_[kNullExternalPointer] = kNullAddress;
+ }
+
+ ~ExternalPointerTable() { ::free(buffer_); }
+
+ Address get(uint32_t index) const {
+ CHECK_LT(index, length_);
+ return buffer_[index];
+ }
+
+ void set(uint32_t index, Address value) {
+ DCHECK_NE(kNullExternalPointer, index);
+ CHECK_LT(index, length_);
+ buffer_[index] = value;
+ }
+
+ uint32_t allocate() {
+ uint32_t index = length_++;
+ if (index >= capacity_) {
+ GrowTable(this);
+ }
+ DCHECK_NE(kNullExternalPointer, index);
+ return index;
+ }
+
+ void free(uint32_t index) {
+ // TODO(v8:10391, saelo): implement a simple free list here, i.e. set
+ // buffer_[index] to freelist_head_ and then set freelist_head_ to
+ // index.
+ DCHECK_NE(kNullExternalPointer, index);
+ }
+
+ // Returns true if the entry exists in the table and can therefore be read.
+ bool is_valid_index(uint32_t index) const {
+ // TODO(v8:10391, saelo): also check here if entry is free
+ return index < length_;
+ }
+
+ uint32_t size() const { return length_; }
+
+ static void GrowTable(ExternalPointerTable* table);
+
+ private:
+ friend class Isolate;
+
+ Address* buffer_;
+ uint32_t length_;
+ uint32_t capacity_;
+ uint32_t freelist_head_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_EXTERNAL_POINTER_TABLE_H_
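
A hypothetical usage sketch for the allocate/set/get contract this header defines, with index 0 reserved as the null entry. The Table below only mimics the interface; it is not V8's implementation.

#include <cassert>
#include <cstdint>
#include <vector>

using Address = uintptr_t;

// Slot 0 is the reserved null entry, allocate() hands out fresh
// indices, and set()/get() go through bounds checks.
class Table {
 public:
  Table() : buffer_(1, 0) {}
  uint32_t allocate() {
    buffer_.push_back(0);  // The real table grows amortized (1.5x).
    return static_cast<uint32_t>(buffer_.size() - 1);
  }
  void set(uint32_t index, Address value) {
    assert(index != 0);  // Index 0 is the null entry.
    buffer_.at(index) = value;
  }
  Address get(uint32_t index) const { return buffer_.at(index); }

 private:
  std::vector<Address> buffer_;
};

int main() {
  Table table;
  int payload = 42;
  uint32_t index = table.allocate();
  table.set(index, reinterpret_cast<Address>(&payload));
  assert(table.get(index) == reinterpret_cast<Address>(&payload));
  return 0;
}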
diff --git a/deps/v8/src/execution/frame-constants.h b/deps/v8/src/execution/frame-constants.h
index 6c037451a2..1c0a1f65f0 100644
--- a/deps/v8/src/execution/frame-constants.h
+++ b/deps/v8/src/execution/frame-constants.h
@@ -21,18 +21,15 @@ namespace internal {
// header, with slot index 2 corresponding to the current function context and 3
// corresponding to the frame marker/JSFunction.
//
-// If V8_REVERSE_JSARGS is set, then the parameters are reversed in the stack,
-// i.e., the first parameter (the receiver) is just above the return address.
-//
// slot JS frame
// +-----------------+--------------------------------
-// -n-1 | parameter 0 | ^
+// -n-1 | parameter n | ^
// |- - - - - - - - -| |
-// -n | | Caller
+// -n | parameter n-1 | Caller
// ... | ... | frame slots
-// -2 | parameter n-1 | (slot < 0)
+// -2 | parameter 1 | (slot < 0)
// |- - - - - - - - -| |
-// -1 | parameter n | v
+// -1 | parameter 0 | v
// -----+-----------------+--------------------------------
// 0 | return addr | ^ ^
// |- - - - - - - - -| | |
@@ -59,7 +56,7 @@ class CommonFrameConstants : public AllStatic {
// Fixed part of the frame consists of return address, caller fp,
// constant pool (if FLAG_enable_embedded_constant_pool), context, and
- // function. StandardFrame::IterateExpressions assumes that kLastObjectOffset
+ // function. CommonFrame::IterateExpressions assumes that kLastObjectOffset
// is the last object pointer.
static constexpr int kFixedFrameSizeAboveFp = kPCOnStackSize + kFPOnStackSize;
static constexpr int kFixedSlotCountAboveFp =
@@ -82,13 +79,13 @@ class CommonFrameConstants : public AllStatic {
//
// slot JS frame
// +-----------------+--------------------------------
-// -n-1 | parameter 0 | ^
+// -n-1 | parameter n | ^
// |- - - - - - - - -| |
-// -n | | Caller
+// -n | parameter n-1 | Caller
// ... | ... | frame slots
-// -2 | parameter n-1 | (slot < 0)
+// -2 | parameter 1 | (slot < 0)
// |- - - - - - - - -| |
-// -1 | parameter n | v
+// -1 | parameter 0 | v
// -----+-----------------+--------------------------------
// 0 | return addr | ^ ^
// |- - - - - - - - -| | |
@@ -133,13 +130,13 @@ class StandardFrameConstants : public CommonFrameConstants {
//
// slot JS frame
// +-----------------+--------------------------------
-// -n-1 | parameter 0 | ^
+// -n-1 | parameter n | ^
// |- - - - - - - - -| |
-// -n | | Caller
+// -n | parameter n-1 | Caller
// ... | ... | frame slots
-// -2 | parameter n-1 | (slot < 0)
+// -2 | parameter 1 | (slot < 0)
// |- - - - - - - - -| |
-// -1 | parameter n | v
+// -1 | parameter 0 | v
// -----+-----------------+--------------------------------
// 0 | return addr | ^ ^
// |- - - - - - - - -| | |
@@ -305,18 +302,13 @@ class InterpreterFrameConstants : public StandardFrameConstants {
STANDARD_FRAME_EXTRA_PUSHED_VALUE_OFFSET(1);
DEFINE_STANDARD_FRAME_SIZES(2);
-#ifdef V8_REVERSE_JSARGS
static constexpr int kFirstParamFromFp =
StandardFrameConstants::kCallerSPOffset;
-#else
- static constexpr int kLastParamFromFp =
- StandardFrameConstants::kCallerSPOffset;
-#endif
static constexpr int kRegisterFileFromFp =
-kFixedFrameSizeFromFp - kSystemPointerSize;
static constexpr int kExpressionsOffset = kRegisterFileFromFp;
- // Expression index for {StandardFrame::GetExpressionAddress}.
+ // Expression index for {JavaScriptFrame::GetExpressionAddress}.
static constexpr int kBytecodeArrayExpressionIndex = -2;
static constexpr int kBytecodeOffsetExpressionIndex = -1;
static constexpr int kRegisterFileExpressionIndex = 0;
diff --git a/deps/v8/src/execution/frames-inl.h b/deps/v8/src/execution/frames-inl.h
index e56db9ee4a..3cee9d5855 100644
--- a/deps/v8/src/execution/frames-inl.h
+++ b/deps/v8/src/execution/frames-inl.h
@@ -65,7 +65,6 @@ inline StackFrame::StackFrame(StackFrameIteratorBase* iterator)
: iterator_(iterator), isolate_(iterator_->isolate()) {
}
-
inline StackHandler* StackFrame::top_handler() const {
return iterator_->handler();
}
@@ -95,22 +94,29 @@ inline Address* StackFrame::ResolveReturnAddressLocation(Address* pc_address) {
}
}
-inline NativeFrame::NativeFrame(StackFrameIteratorBase* iterator)
- : StackFrame(iterator) {}
+inline TypedFrame::TypedFrame(StackFrameIteratorBase* iterator)
+ : CommonFrame(iterator) {}
-inline Address NativeFrame::GetCallerStackPointer() const {
- return fp() + CommonFrameConstants::kCallerSPOffset;
-}
+inline CommonFrameWithJSLinkage::CommonFrameWithJSLinkage(
+ StackFrameIteratorBase* iterator)
+ : CommonFrame(iterator) {}
+
+inline TypedFrameWithJSLinkage::TypedFrameWithJSLinkage(
+ StackFrameIteratorBase* iterator)
+ : CommonFrameWithJSLinkage(iterator) {}
+
+inline NativeFrame::NativeFrame(StackFrameIteratorBase* iterator)
+ : TypedFrame(iterator) {}
inline EntryFrame::EntryFrame(StackFrameIteratorBase* iterator)
- : StackFrame(iterator) {}
+ : TypedFrame(iterator) {}
inline ConstructEntryFrame::ConstructEntryFrame(
StackFrameIteratorBase* iterator)
: EntryFrame(iterator) {}
inline ExitFrame::ExitFrame(StackFrameIteratorBase* iterator)
- : StackFrame(iterator) {}
+ : TypedFrame(iterator) {}
inline BuiltinExitFrame::BuiltinExitFrame(StackFrameIteratorBase* iterator)
: ExitFrame(iterator) {}
@@ -124,17 +130,8 @@ inline Object BuiltinExitFrame::receiver_slot_object() const {
// fp[4]: argc.
// fp[5]: hole.
// ------- JS stack arguments ------
- // fp[6]: receiver, if V8_REVERSE_JSARGS.
- // fp[2 + argc - 1]: receiver, if not V8_REVERSE_JSARGS.
-#ifdef V8_REVERSE_JSARGS
+ // fp[6]: receiver
const int receiverOffset = BuiltinExitFrameConstants::kFirstArgumentOffset;
-#else
- Object argc_slot = argc_slot_object();
- DCHECK(argc_slot.IsSmi());
- int argc = Smi::ToInt(argc_slot);
- const int receiverOffset = BuiltinExitFrameConstants::kNewTargetOffset +
- (argc - 1) * kSystemPointerSize;
-#endif
return Object(base::Memory<Address>(fp() + receiverOffset));
}
@@ -153,72 +150,67 @@ inline Object BuiltinExitFrame::new_target_slot_object() const {
fp() + BuiltinExitFrameConstants::kNewTargetOffset));
}
-inline StandardFrame::StandardFrame(StackFrameIteratorBase* iterator)
- : StackFrame(iterator) {
-}
+inline CommonFrame::CommonFrame(StackFrameIteratorBase* iterator)
+ : StackFrame(iterator) {}
-inline Object StandardFrame::GetExpression(int index) const {
+inline Object CommonFrame::GetExpression(int index) const {
return Object(base::Memory<Address>(GetExpressionAddress(index)));
}
-inline void StandardFrame::SetExpression(int index, Object value) {
+inline void CommonFrame::SetExpression(int index, Object value) {
base::Memory<Address>(GetExpressionAddress(index)) = value.ptr();
}
-inline Address StandardFrame::caller_fp() const {
+inline Address CommonFrame::caller_fp() const {
return base::Memory<Address>(fp() + StandardFrameConstants::kCallerFPOffset);
}
-
-inline Address StandardFrame::caller_pc() const {
+inline Address CommonFrame::caller_pc() const {
return base::Memory<Address>(ComputePCAddress(fp()));
}
-
-inline Address StandardFrame::ComputePCAddress(Address fp) {
+inline Address CommonFrame::ComputePCAddress(Address fp) {
return fp + StandardFrameConstants::kCallerPCOffset;
}
-
-inline Address StandardFrame::ComputeConstantPoolAddress(Address fp) {
+inline Address CommonFrame::ComputeConstantPoolAddress(Address fp) {
return fp + StandardFrameConstants::kConstantPoolOffset;
}
-
-inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
+inline bool CommonFrame::IsArgumentsAdaptorFrame(Address fp) {
intptr_t frame_type =
base::Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
return frame_type == StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR);
}
-
-inline bool StandardFrame::IsConstructFrame(Address fp) {
+inline bool CommonFrameWithJSLinkage::IsConstructFrame(Address fp) {
intptr_t frame_type =
base::Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
return frame_type == StackFrame::TypeToMarker(StackFrame::CONSTRUCT);
}
inline JavaScriptFrame::JavaScriptFrame(StackFrameIteratorBase* iterator)
- : StandardFrame(iterator) {}
+ : CommonFrameWithJSLinkage(iterator) {}
-Address JavaScriptFrame::GetParameterSlot(int index) const {
+Address CommonFrameWithJSLinkage::GetParameterSlot(int index) const {
DCHECK_LE(-1, index);
#ifdef V8_NO_ARGUMENTS_ADAPTOR
DCHECK_LT(index,
- std::max(GetActualArgumentsCount(), ComputeParametersCount()));
+ std::max(GetActualArgumentCount(), ComputeParametersCount()));
#else
DCHECK(index < ComputeParametersCount() ||
ComputeParametersCount() == kDontAdaptArgumentsSentinel);
#endif
-#ifdef V8_REVERSE_JSARGS
int parameter_offset = (index + 1) * kSystemPointerSize;
-#else
- int param_count = ComputeParametersCount();
- int parameter_offset = (param_count - index - 1) * kSystemPointerSize;
-#endif
return caller_sp() + parameter_offset;
}
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+inline int CommonFrameWithJSLinkage::GetActualArgumentCount() const {
+ return 0;
+}
+#endif
+
inline void JavaScriptFrame::set_receiver(Object value) {
base::Memory<Address>(GetParameterSlot(-1)) = value.ptr();
}
@@ -233,15 +225,12 @@ inline Object JavaScriptFrame::function_slot_object() const {
}
inline StubFrame::StubFrame(StackFrameIteratorBase* iterator)
- : StandardFrame(iterator) {
-}
-
+ : TypedFrame(iterator) {}
inline OptimizedFrame::OptimizedFrame(StackFrameIteratorBase* iterator)
: JavaScriptFrame(iterator) {
}
-
inline InterpretedFrame::InterpretedFrame(StackFrameIteratorBase* iterator)
: JavaScriptFrame(iterator) {}
@@ -251,17 +240,17 @@ inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
}
inline BuiltinFrame::BuiltinFrame(StackFrameIteratorBase* iterator)
- : JavaScriptFrame(iterator) {}
+ : TypedFrameWithJSLinkage(iterator) {}
inline WasmFrame::WasmFrame(StackFrameIteratorBase* iterator)
- : StandardFrame(iterator) {}
+ : TypedFrame(iterator) {}
inline WasmExitFrame::WasmExitFrame(StackFrameIteratorBase* iterator)
: WasmFrame(iterator) {}
inline WasmDebugBreakFrame::WasmDebugBreakFrame(
StackFrameIteratorBase* iterator)
- : StandardFrame(iterator) {}
+ : TypedFrame(iterator) {}
inline WasmToJsFrame::WasmToJsFrame(StackFrameIteratorBase* iterator)
: StubFrame(iterator) {}
@@ -274,11 +263,10 @@ inline CWasmEntryFrame::CWasmEntryFrame(StackFrameIteratorBase* iterator)
inline WasmCompileLazyFrame::WasmCompileLazyFrame(
StackFrameIteratorBase* iterator)
- : StandardFrame(iterator) {}
+ : TypedFrame(iterator) {}
inline InternalFrame::InternalFrame(StackFrameIteratorBase* iterator)
- : StandardFrame(iterator) {
-}
+ : TypedFrame(iterator) {}
inline ConstructFrame::ConstructFrame(StackFrameIteratorBase* iterator)
: InternalFrame(iterator) {
@@ -290,7 +278,7 @@ inline BuiltinContinuationFrame::BuiltinContinuationFrame(
inline JavaScriptBuiltinContinuationFrame::JavaScriptBuiltinContinuationFrame(
StackFrameIteratorBase* iterator)
- : JavaScriptFrame(iterator) {}
+ : TypedFrameWithJSLinkage(iterator) {}
inline JavaScriptBuiltinContinuationWithCatchFrame::
JavaScriptBuiltinContinuationWithCatchFrame(
@@ -319,11 +307,11 @@ inline JavaScriptFrame* JavaScriptFrameIterator::frame() const {
return static_cast<JavaScriptFrame*>(frame);
}
-inline StandardFrame* StackTraceFrameIterator::frame() const {
+inline CommonFrame* StackTraceFrameIterator::frame() const {
StackFrame* frame = iterator_.frame();
DCHECK(frame->is_java_script() || frame->is_arguments_adaptor() ||
frame->is_wasm());
- return static_cast<StandardFrame*>(frame);
+ return static_cast<CommonFrame*>(frame);
}
bool StackTraceFrameIterator::is_javascript() const {
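
The GetParameterSlot change above encodes the reversed-arguments layout: with parameter 0 now closest to the return address, slot i lives (i + 1) pointer-widths above the caller's stack pointer, and index -1 addresses the receiver. A standalone sketch of that computation; the constant is illustrative.

#include <cstdint>
#include <cstdio>

constexpr int kSystemPointerSize = 8;  // Illustrative 64-bit target.

// Reversed-args layout: parameter 0 sits one slot above the caller's
// stack pointer; index -1 addresses the receiver.
uintptr_t GetParameterSlot(uintptr_t caller_sp, int index) {
  return caller_sp + (index + 1) * kSystemPointerSize;
}

int main() {
  uintptr_t sp = 0x7000;
  printf("receiver at %#lx, param0 at %#lx\n",
         (unsigned long)GetParameterSlot(sp, -1),
         (unsigned long)GetParameterSlot(sp, 0));
  return 0;
}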
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index d7aa13c3ec..3288f53c8d 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -156,6 +156,13 @@ StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type) {
// -------------------------------------------------------------------------
+void TypedFrameWithJSLinkage::Iterate(RootVisitor* v) const {
+ IterateExpressions(v);
+ IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
+}
+
+// -------------------------------------------------------------------------
+
void JavaScriptFrameIterator::Advance() {
do {
iterator_.Advance();
@@ -316,7 +323,7 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
state.fp = fp;
state.sp = sp;
state.pc_address = StackFrame::ResolveReturnAddressLocation(
- reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp)));
+ reinterpret_cast<Address*>(CommonFrame::ComputePCAddress(fp)));
// If the current PC is in a bytecode handler, the top stack frame isn't
// the bytecode handler's frame and the top of stack or link register is a
@@ -589,8 +596,9 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
return OPTIMIZED;
}
return BUILTIN;
- case CodeKind::OPTIMIZED_FUNCTION:
+ case CodeKind::TURBOFAN:
case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
+ case CodeKind::TURBOPROP:
return OPTIMIZED;
case CodeKind::JS_TO_WASM_FUNCTION:
return JS_TO_WASM;
@@ -598,9 +606,10 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
return STUB;
case CodeKind::C_WASM_ENTRY:
return C_WASM_ENTRY;
+ case CodeKind::WASM_TO_JS_FUNCTION:
+ return WASM_TO_JS;
case CodeKind::WASM_FUNCTION:
case CodeKind::WASM_TO_CAPI_FUNCTION:
- case CodeKind::WASM_TO_JS_FUNCTION:
// Never appear as on-heap {Code} objects.
UNREACHABLE();
default:
@@ -655,9 +664,9 @@ StackFrame::Type StackFrame::GetCallerState(State* state) const {
return ComputeType(iterator_, state);
}
-Address StackFrame::UnpaddedFP() const { return fp(); }
-
-Code NativeFrame::unchecked_code() const { return Code(); }
+Address CommonFrame::GetCallerStackPointer() const {
+ return fp() + CommonFrameConstants::kCallerSPOffset;
+}
void NativeFrame::ComputeCallerState(State* state) const {
state->sp = caller_sp();
@@ -692,8 +701,6 @@ Code ConstructEntryFrame::unchecked_code() const {
return isolate()->heap()->builtin(Builtins::kJSConstructEntry);
}
-Code ExitFrame::unchecked_code() const { return Code(); }
-
void ExitFrame::ComputeCallerState(State* state) const {
// Set up the caller state.
state->sp = caller_sp();
@@ -713,10 +720,6 @@ void ExitFrame::Iterate(RootVisitor* v) const {
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
-Address ExitFrame::GetCallerStackPointer() const {
- return fp() + ExitFrameConstants::kCallerSPOffset;
-}
-
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
StackFrame::Type type = ComputeFrameType(fp);
@@ -854,7 +857,7 @@ void BuiltinExitFrame::Print(StringStream* accumulator, PrintMode mode,
accumulator->Add(")\n\n");
}
-Address StandardFrame::GetExpressionAddress(int n) const {
+Address CommonFrame::GetExpressionAddress(int n) const {
const int offset = StandardFrameConstants::kExpressionsOffset;
return fp() + offset - n * kSystemPointerSize;
}
@@ -864,27 +867,17 @@ Address InterpretedFrame::GetExpressionAddress(int n) const {
return fp() + offset - n * kSystemPointerSize;
}
-Script StandardFrame::script() const {
- // This should only be called on frames which override this method.
- UNREACHABLE();
- return Script();
-}
-
-Object StandardFrame::receiver() const {
- return ReadOnlyRoots(isolate()).undefined_value();
-}
-
-Object StandardFrame::context() const {
+Object CommonFrame::context() const {
return ReadOnlyRoots(isolate()).undefined_value();
}
-int StandardFrame::position() const {
+int CommonFrame::position() const {
AbstractCode code = AbstractCode::cast(LookupCode());
int code_offset = static_cast<int>(pc() - code.InstructionStart());
return code.SourcePosition(code_offset);
}
-int StandardFrame::ComputeExpressionsCount() const {
+int CommonFrame::ComputeExpressionsCount() const {
Address base = GetExpressionAddress(0);
Address limit = sp() - kSystemPointerSize;
DCHECK(base >= limit); // stack grows downwards
@@ -892,14 +885,7 @@ int StandardFrame::ComputeExpressionsCount() const {
return static_cast<int>((base - limit) / kSystemPointerSize);
}
-Object StandardFrame::GetParameter(int index) const {
- // StandardFrame does not define any parameters.
- UNREACHABLE();
-}
-
-int StandardFrame::ComputeParametersCount() const { return 0; }
-
-void StandardFrame::ComputeCallerState(State* state) const {
+void CommonFrame::ComputeCallerState(State* state) const {
state->sp = caller_sp();
state->fp = caller_fp();
state->pc_address = ResolveReturnAddressLocation(
@@ -910,14 +896,12 @@ void StandardFrame::ComputeCallerState(State* state) const {
reinterpret_cast<Address*>(ComputeConstantPoolAddress(fp()));
}
-bool StandardFrame::IsConstructor() const { return false; }
-
-void StandardFrame::Summarize(std::vector<FrameSummary>* functions) const {
+void CommonFrame::Summarize(std::vector<FrameSummary>* functions) const {
// This should only be called on frames which override this method.
UNREACHABLE();
}
-void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
+void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
// Make sure that we're not doing "safe" stack frame iteration. We cannot
// possibly find pointers in optimized frames in that state.
DCHECK(can_access_heap_objects());
@@ -1080,16 +1064,10 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
frame_header_limit);
}
-void StubFrame::Iterate(RootVisitor* v) const { IterateCompiledFrame(v); }
-
Code StubFrame::unchecked_code() const {
return isolate()->FindCodeObject(pc());
}
-Address StubFrame::GetCallerStackPointer() const {
- return fp() + ExitFrameConstants::kCallerSPOffset;
-}
-
int StubFrame::LookupExceptionHandlerInTable() {
Code code = LookupCode();
DCHECK(code.is_turbofanned());
@@ -1120,7 +1098,9 @@ bool JavaScriptFrame::HasInlinedFrames() const {
return functions.size() > 1;
}
-Code JavaScriptFrame::unchecked_code() const { return function().code(); }
+Code CommonFrameWithJSLinkage::unchecked_code() const {
+ return function().code();
+}
int OptimizedFrame::ComputeParametersCount() const {
Code code = LookupCode();
@@ -1153,7 +1133,12 @@ void JavaScriptFrame::GetFunctions(
}
}
-void JavaScriptFrame::Summarize(std::vector<FrameSummary>* functions) const {
+bool CommonFrameWithJSLinkage::IsConstructor() const {
+ return IsConstructFrame(caller_fp());
+}
+
+void CommonFrameWithJSLinkage::Summarize(
+ std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
Code code = LookupCode();
int offset = static_cast<int>(pc() - code.InstructionStart());
@@ -1178,7 +1163,7 @@ Object JavaScriptFrame::unchecked_function() const {
return function_slot_object();
}
-Object JavaScriptFrame::receiver() const { return GetParameter(-1); }
+Object CommonFrameWithJSLinkage::receiver() const { return GetParameter(-1); }
Object JavaScriptFrame::context() const {
const int offset = StandardFrameConstants::kContextOffset;
@@ -1191,7 +1176,7 @@ Script JavaScriptFrame::script() const {
return Script::cast(function().shared().script());
}
-int JavaScriptFrame::LookupExceptionHandlerInTable(
+int CommonFrameWithJSLinkage::LookupExceptionHandlerInTable(
int* stack_depth, HandlerTable::CatchPrediction* prediction) {
DCHECK(!LookupCode().has_handler_table());
DCHECK(!LookupCode().is_optimized_code());
@@ -1286,11 +1271,11 @@ void JavaScriptFrame::CollectFunctionAndOffsetForICStats(JSFunction function,
}
}
-Object JavaScriptFrame::GetParameter(int index) const {
+Object CommonFrameWithJSLinkage::GetParameter(int index) const {
return Object(Memory<Address>(GetParameterSlot(index)));
}
-int JavaScriptFrame::ComputeParametersCount() const {
+int CommonFrameWithJSLinkage::ComputeParametersCount() const {
DCHECK(can_access_heap_objects() &&
isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
return function().shared().internal_formal_parameter_count();
@@ -1303,7 +1288,7 @@ int JavaScriptFrame::GetActualArgumentCount() const {
}
#endif
-Handle<FixedArray> JavaScriptFrame::GetParameters() const {
+Handle<FixedArray> CommonFrameWithJSLinkage::GetParameters() const {
if (V8_LIKELY(!FLAG_detailed_error_stack_trace)) {
return isolate()->factory()->empty_fixed_array();
}
@@ -1317,6 +1302,11 @@ Handle<FixedArray> JavaScriptFrame::GetParameters() const {
return parameters;
}
+JSFunction JavaScriptBuiltinContinuationFrame::function() const {
+ const int offset = BuiltinContinuationFrameConstants::kFunctionOffset;
+ return JSFunction::cast(Object(base::Memory<Address>(fp() + offset)));
+}
+
int JavaScriptBuiltinContinuationFrame::ComputeParametersCount() const {
// Assert that the first allocatable register is also the argument count
// register.
@@ -1341,16 +1331,10 @@ Object JavaScriptBuiltinContinuationFrame::context() const {
void JavaScriptBuiltinContinuationWithCatchFrame::SetException(
Object exception) {
-#ifdef V8_REVERSE_JSARGS
int argc = ComputeParametersCount();
Address exception_argument_slot =
fp() + BuiltinContinuationFrameConstants::kFixedFrameSizeAboveFp +
(argc - 1) * kSystemPointerSize;
-#else
- Address exception_argument_slot =
- fp() + BuiltinContinuationFrameConstants::kFixedFrameSizeAboveFp +
- kSystemPointerSize; // Skip over return value slot.
-#endif
// Only allow setting exception if previous value was the hole.
CHECK_EQ(ReadOnlyRoots(isolate()).the_hole_value(),
@@ -1476,25 +1460,25 @@ FrameSummary::~FrameSummary() {
#undef FRAME_SUMMARY_DESTR
}
-FrameSummary FrameSummary::GetTop(const StandardFrame* frame) {
+FrameSummary FrameSummary::GetTop(const CommonFrame* frame) {
std::vector<FrameSummary> frames;
frame->Summarize(&frames);
DCHECK_LT(0, frames.size());
return frames.back();
}
-FrameSummary FrameSummary::GetBottom(const StandardFrame* frame) {
+FrameSummary FrameSummary::GetBottom(const CommonFrame* frame) {
return Get(frame, 0);
}
-FrameSummary FrameSummary::GetSingle(const StandardFrame* frame) {
+FrameSummary FrameSummary::GetSingle(const CommonFrame* frame) {
std::vector<FrameSummary> frames;
frame->Summarize(&frames);
DCHECK_EQ(1, frames.size());
return frames.front();
}
-FrameSummary FrameSummary::Get(const StandardFrame* frame, int index) {
+FrameSummary FrameSummary::Get(const CommonFrame* frame, int index) {
DCHECK_LE(0, index);
std::vector<FrameSummary> frames;
frame->Summarize(&frames);
@@ -1652,23 +1636,6 @@ DeoptimizationData OptimizedFrame::GetDeoptimizationData(
return DeoptimizationData();
}
-#ifndef V8_REVERSE_JSARGS
-Object OptimizedFrame::receiver() const {
- Code code = LookupCode();
- if (code.kind() == CodeKind::BUILTIN) {
- intptr_t argc = static_cast<int>(
- Memory<intptr_t>(fp() + StandardFrameConstants::kArgCOffset));
- intptr_t args_size =
- (StandardFrameConstants::kFixedSlotCountAboveFp + argc) *
- kSystemPointerSize;
- Address receiver_ptr = fp() + args_size;
- return *FullObjectSlot(receiver_ptr);
- } else {
- return JavaScriptFrame::receiver();
- }
-}
-#endif
-
void OptimizedFrame::GetFunctions(
std::vector<SharedFunctionInfo>* functions) const {
DCHECK(functions->empty());
@@ -1823,21 +1790,16 @@ Code ArgumentsAdaptorFrame::unchecked_code() const {
return isolate()->builtins()->builtin(Builtins::kArgumentsAdaptorTrampoline);
}
+JSFunction BuiltinFrame::function() const {
+ const int offset = BuiltinFrameConstants::kFunctionOffset;
+ return JSFunction::cast(Object(base::Memory<Address>(fp() + offset)));
+}
+
int BuiltinFrame::ComputeParametersCount() const {
const int offset = BuiltinFrameConstants::kLengthOffset;
return Smi::ToInt(Object(base::Memory<Address>(fp() + offset)));
}
-void BuiltinFrame::PrintFrameKind(StringStream* accumulator) const {
- accumulator->Add("builtin frame: ");
-}
-
-Address InternalFrame::GetCallerStackPointer() const {
- // Internal frames have no arguments. The stack pointer of the
- // caller is at a fixed offset from the frame pointer.
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
Code InternalFrame::unchecked_code() const { return Code(); }
void WasmFrame::Print(StringStream* accumulator, PrintMode mode,
@@ -1872,12 +1834,6 @@ Code WasmFrame::unchecked_code() const {
return isolate()->FindCodeObject(pc());
}
-void WasmFrame::Iterate(RootVisitor* v) const { IterateCompiledFrame(v); }
-
-Address WasmFrame::GetCallerStackPointer() const {
- return fp() + ExitFrameConstants::kCallerSPOffset;
-}
-
wasm::WasmCode* WasmFrame::wasm_code() const {
return isolate()->wasm_engine()->code_manager()->LookupCode(pc());
}
@@ -1967,8 +1923,6 @@ void WasmDebugBreakFrame::Iterate(RootVisitor* v) const {
// Liftoff.
}
-Code WasmDebugBreakFrame::unchecked_code() const { return Code(); }
-
void WasmDebugBreakFrame::Print(StringStream* accumulator, PrintMode mode,
int index) const {
PrintIndex(accumulator, mode, index);
@@ -1976,12 +1930,6 @@ void WasmDebugBreakFrame::Print(StringStream* accumulator, PrintMode mode,
if (mode != OVERVIEW) accumulator->Add("\n");
}
-Address WasmDebugBreakFrame::GetCallerStackPointer() const {
- // WasmDebugBreak does not receive any arguments, hence the stack pointer of
- // the caller is at a fixed offset from the frame pointer.
- return fp() + WasmDebugBreakFrameConstants::kCallerSPOffset;
-}
-
void JsToWasmFrame::Iterate(RootVisitor* v) const {
Code code = GetContainingCode(isolate(), pc());
// GenericJSToWasmWrapper stack layout
@@ -2016,8 +1964,6 @@ void JsToWasmFrame::Iterate(RootVisitor* v) const {
v->VisitRootPointers(Root::kTop, nullptr, spill_slot_base, spill_slot_limit);
}
-Code WasmCompileLazyFrame::unchecked_code() const { return Code(); }
-
WasmInstanceObject WasmCompileLazyFrame::wasm_instance() const {
return WasmInstanceObject::cast(*wasm_instance_slot());
}
@@ -2035,10 +1981,6 @@ void WasmCompileLazyFrame::Iterate(RootVisitor* v) const {
v->VisitRootPointer(Root::kTop, nullptr, wasm_instance_slot());
}
-Address WasmCompileLazyFrame::GetCallerStackPointer() const {
- return fp() + WasmCompileLazyFrameConstants::kCallerSPOffset;
-}
-
namespace {
void PrintFunctionSource(StringStream* accumulator, SharedFunctionInfo shared,
@@ -2201,7 +2143,7 @@ void EntryFrame::Iterate(RootVisitor* v) const {
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
-void StandardFrame::IterateExpressions(RootVisitor* v) const {
+void CommonFrame::IterateExpressions(RootVisitor* v) const {
const int last_object_offset = StandardFrameConstants::kLastObjectOffset;
intptr_t marker =
Memory<intptr_t>(fp() + CommonFrameConstants::kContextOrFrameTypeOffset);
@@ -2304,7 +2246,7 @@ bool BuiltinContinuationModeIsWithCatch(BuiltinContinuationMode mode) {
InterpretedFrameInfo::InterpretedFrameInfo(int parameters_count_with_receiver,
int translation_height,
- bool is_topmost,
+ bool is_topmost, bool pad_arguments,
FrameInfoKind frame_info_kind) {
const int locals_count = translation_height;
@@ -2325,7 +2267,7 @@ InterpretedFrameInfo::InterpretedFrameInfo(int parameters_count_with_receiver,
// the part described by InterpreterFrameConstants. This will include
// argument padding, when needed.
const int parameter_padding_slots =
- ArgumentPaddingSlots(parameters_count_with_receiver);
+ pad_arguments ? ArgumentPaddingSlots(parameters_count_with_receiver) : 0;
const int fixed_frame_size =
InterpreterFrameConstants::kFixedFrameSize +
(parameters_count_with_receiver + parameter_padding_slots) *
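
Much of this file's churn comes from hoisting near-identical GetCallerStackPointer overrides out of ExitFrame, StubFrame, WasmFrame and friends into the new CommonFrame base. A toy sketch of the resulting shape; the names and the offset constant are simplified stand-ins, not V8's values.

#include <cstdint>
#include <cstdio>

constexpr intptr_t kCallerSPOffset = 16;  // Illustrative only.

// One shared caller-SP computation in the base, where the old code had
// duplicated overrides in several frame subclasses.
class CommonFrame {
 public:
  explicit CommonFrame(intptr_t fp) : fp_(fp) {}
  virtual ~CommonFrame() = default;
  intptr_t caller_sp() const { return fp_ + kCallerSPOffset; }

 protected:
  intptr_t fp_;
};

class TypedFrame : public CommonFrame {
 public:
  using CommonFrame::CommonFrame;
};

int main() {
  TypedFrame frame(0x1000);
  printf("%#lx\n", (unsigned long)frame.caller_sp());
  return 0;
}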
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index eb627a158a..43f9d383c2 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -11,6 +11,36 @@
#include "src/objects/code.h"
#include "src/objects/objects.h"
+//
+// Frame inheritance hierarchy (please keep in sync with frame-constants.h):
+// - CommonFrame
+//   - CommonFrameWithJSLinkage
+//     - JavaScriptFrame (aka StandardFrame)
+//       - InterpretedFrame
+//       - OptimizedFrame
+//       - ArgumentsAdaptorFrame (technically a TypedFrame)
+//     - TypedFrameWithJSLinkage
+//       - BuiltinFrame
+//       - JavaScriptBuiltinContinuationFrame
+//       - JavaScriptBuiltinContinuationWithCatchFrame
+//   - TypedFrame
+//     - NativeFrame
+//     - EntryFrame
+//       - ConstructEntryFrame
+//     - ExitFrame
+//       - BuiltinExitFrame
+//     - StubFrame
+//       - JsToWasmFrame
+//       - CWasmEntryFrame
+//     - InternalFrame
+//       - ConstructFrame
+//       - BuiltinContinuationFrame
+//     - WasmFrame
+//       - WasmExitFrame
+//     - WasmDebugBreakFrame
+//     - WasmCompileLazyFrame
+//
+
namespace v8 {
namespace internal {
namespace wasm {
@@ -196,13 +226,10 @@ class StackFrame {
}
bool is_construct() const { return type() == CONSTRUCT; }
bool is_builtin_exit() const { return type() == BUILTIN_EXIT; }
- virtual bool is_standard() const { return false; }
bool is_java_script() const {
Type type = this->type();
- return (type == OPTIMIZED) || (type == INTERPRETED) || (type == BUILTIN) ||
- (type == JAVA_SCRIPT_BUILTIN_CONTINUATION) ||
- (type == JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH);
+ return (type == OPTIMIZED) || (type == INTERPRETED);
}
bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
bool is_js_to_wasm() const { return type() == JS_TO_WASM; }
@@ -213,12 +240,6 @@ class StackFrame {
Address callee_fp() const { return state_.callee_fp; }
inline Address callee_pc() const;
Address caller_sp() const { return GetCallerStackPointer(); }
-
- // If this frame is optimized and was dynamically aligned return its old
- // unaligned frame pointer. When the frame is deoptimized its FP will shift
- // up one word and become unaligned.
- Address UnpaddedFP() const;
-
inline Address pc() const;
// Skip authentication of the PC, when using CFI. Used in the profiler, where
@@ -312,145 +333,7 @@ class StackFrame {
friend class SafeStackFrameIterator;
};
-class NativeFrame : public StackFrame {
- public:
- Type type() const override { return NATIVE; }
-
- Code unchecked_code() const override;
-
- // Garbage collection support.
- void Iterate(RootVisitor* v) const override {}
-
- protected:
- inline explicit NativeFrame(StackFrameIteratorBase* iterator);
-
- Address GetCallerStackPointer() const override;
-
- private:
- void ComputeCallerState(State* state) const override;
-
- friend class StackFrameIteratorBase;
-};
-
-// Entry frames are used to enter JavaScript execution from C.
-class EntryFrame : public StackFrame {
- public:
- Type type() const override { return ENTRY; }
-
- Code unchecked_code() const override;
-
- // Garbage collection support.
- void Iterate(RootVisitor* v) const override;
-
- static EntryFrame* cast(StackFrame* frame) {
- DCHECK(frame->is_entry());
- return static_cast<EntryFrame*>(frame);
- }
-
- protected:
- inline explicit EntryFrame(StackFrameIteratorBase* iterator);
-
- // The caller stack pointer for entry frames is always zero. The
- // real information about the caller frame is available through the
- // link to the top exit frame.
- Address GetCallerStackPointer() const override { return 0; }
-
- private:
- void ComputeCallerState(State* state) const override;
- Type GetCallerState(State* state) const override;
-
- friend class StackFrameIteratorBase;
-};
-
-class ConstructEntryFrame : public EntryFrame {
- public:
- Type type() const override { return CONSTRUCT_ENTRY; }
-
- Code unchecked_code() const override;
-
- static ConstructEntryFrame* cast(StackFrame* frame) {
- DCHECK(frame->is_construct_entry());
- return static_cast<ConstructEntryFrame*>(frame);
- }
-
- protected:
- inline explicit ConstructEntryFrame(StackFrameIteratorBase* iterator);
-
- private:
- friend class StackFrameIteratorBase;
-};
-
-// Exit frames are used to exit JavaScript execution and go to C.
-class ExitFrame : public StackFrame {
- public:
- Type type() const override { return EXIT; }
-
- Code unchecked_code() const override;
-
- // Garbage collection support.
- void Iterate(RootVisitor* v) const override;
-
- static ExitFrame* cast(StackFrame* frame) {
- DCHECK(frame->is_exit());
- return static_cast<ExitFrame*>(frame);
- }
-
- // Compute the state and type of an exit frame given a frame
- // pointer. Used when constructing the first stack frame seen by an
- // iterator and the frames following entry frames.
- static Type GetStateForFramePointer(Address fp, State* state);
- static Address ComputeStackPointer(Address fp);
- static StackFrame::Type ComputeFrameType(Address fp);
- static void FillState(Address fp, Address sp, State* state);
-
- protected:
- inline explicit ExitFrame(StackFrameIteratorBase* iterator);
-
- Address GetCallerStackPointer() const override;
-
- private:
- void ComputeCallerState(State* state) const override;
-
- friend class StackFrameIteratorBase;
-};
-
-// Builtin exit frames are a special case of exit frames, which are used
-// whenever C++ builtins (e.g., Math.acos) are called. Their main purpose is
-// to allow such builtins to appear in stack traces.
-class BuiltinExitFrame : public ExitFrame {
- public:
- Type type() const override { return BUILTIN_EXIT; }
-
- static BuiltinExitFrame* cast(StackFrame* frame) {
- DCHECK(frame->is_builtin_exit());
- return static_cast<BuiltinExitFrame*>(frame);
- }
-
- JSFunction function() const;
- Object receiver() const;
-
- bool IsConstructor() const;
-
- void Print(StringStream* accumulator, PrintMode mode,
- int index) const override;
-
- protected:
- inline explicit BuiltinExitFrame(StackFrameIteratorBase* iterator);
-
- private:
- Object GetParameter(int i) const;
- int ComputeParametersCount() const;
-
- inline Object receiver_slot_object() const;
- inline Object argc_slot_object() const;
- inline Object target_slot_object() const;
- inline Object new_target_slot_object() const;
-
- friend class StackFrameIteratorBase;
- friend class FrameArrayBuilder;
-};
-
-class StandardFrame;
+class CommonFrame;
class V8_EXPORT_PRIVATE FrameSummary {
public:
@@ -541,10 +424,10 @@ class V8_EXPORT_PRIVATE FrameSummary {
~FrameSummary();
- static FrameSummary GetTop(const StandardFrame* frame);
- static FrameSummary GetBottom(const StandardFrame* frame);
- static FrameSummary GetSingle(const StandardFrame* frame);
- static FrameSummary Get(const StandardFrame* frame, int index);
+ static FrameSummary GetTop(const CommonFrame* frame);
+ static FrameSummary GetBottom(const CommonFrame* frame);
+ static FrameSummary GetSingle(const CommonFrame* frame);
+ static FrameSummary Get(const CommonFrame* frame, int index);
void EnsureSourcePositionsAvailable();
bool AreSourcePositionsAvailable() const;
@@ -578,15 +461,11 @@ class V8_EXPORT_PRIVATE FrameSummary {
#undef FRAME_SUMMARY_FIELD
};
-class StandardFrame : public StackFrame {
+class CommonFrame : public StackFrame {
public:
- // Testers.
- bool is_standard() const override { return true; }
-
// Accessors.
- virtual Object receiver() const;
- virtual Script script() const;
- virtual Object context() const;
+ virtual Object context() const;  // TODO(victorgomes): CommonFrames don't have context.
virtual int position() const;
// Access the expressions in the stack frame including locals.
@@ -594,25 +473,20 @@ class StandardFrame : public StackFrame {
inline void SetExpression(int index, Object value);
int ComputeExpressionsCount() const;
- // Access the parameters.
- virtual Object GetParameter(int index) const;
- virtual int ComputeParametersCount() const;
-
- // Check if this frame is a constructor frame invoked through 'new'.
- virtual bool IsConstructor() const;
+ Address GetCallerStackPointer() const override;
// Build a list with summaries for this frame including all inlined frames.
// The functions are ordered bottom-to-top (i.e. summaries.last() is the
// top-most activation; caller comes before callee).
virtual void Summarize(std::vector<FrameSummary>* frames) const;
- static StandardFrame* cast(StackFrame* frame) {
- DCHECK(frame->is_standard());
- return static_cast<StandardFrame*>(frame);
+ static CommonFrame* cast(StackFrame* frame) {
+ // Casting to a CommonFrame is always safe; no type check is needed.
+ return static_cast<CommonFrame*>(frame);
}
protected:
- inline explicit StandardFrame(StackFrameIteratorBase* iterator);
+ inline explicit CommonFrame(StackFrameIteratorBase* iterator);
void ComputeCallerState(State* state) const override;
@@ -639,10 +513,6 @@ class StandardFrame : public StackFrame {
// an arguments adaptor frame.
static inline bool IsArgumentsAdaptorFrame(Address fp);
- // Determines if the standard frame for the given frame pointer is a
- // construct frame.
- static inline bool IsConstructFrame(Address fp);
-
// Used by OptimizedFrames and StubFrames.
void IterateCompiledFrame(RootVisitor* v) const;
@@ -651,29 +521,77 @@ class StandardFrame : public StackFrame {
friend class SafeStackFrameIterator;
};
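Unlike the old StandardFrame::cast, which DCHECKed is_standard(), CommonFrame::cast above is unconditional: every StackFrame is at least a CommonFrame. A minimal, illustrative sketch of the resulting pattern, assuming the internal StackFrameIterator API (not every concrete frame kind implements Summarize):

    // Sketch: walk every frame; CommonFrame::cast needs no type check.
    void SummarizeAllFrames(Isolate* isolate) {
      std::vector<FrameSummary> summaries;
      for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
        CommonFrame::cast(it.frame())->Summarize(&summaries);
      }
    }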
-class JavaScriptFrame : public StandardFrame {
+class TypedFrame : public CommonFrame {
public:
- Type type() const override = 0;
+ Code unchecked_code() const override { return Code(); }
+ void Iterate(RootVisitor* v) const override { IterateCompiledFrame(v); }
+
+ protected:
+ inline explicit TypedFrame(StackFrameIteratorBase* iterator);
+};
+class CommonFrameWithJSLinkage : public CommonFrame {
+ public:
+ // Accessors.
+ virtual JSFunction function() const = 0;
+
+ // Access the parameters.
+ virtual Object receiver() const;
+ virtual Object GetParameter(int index) const;
+ virtual int ComputeParametersCount() const;
+ Handle<FixedArray> GetParameters() const;
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ virtual int GetActualArgumentCount() const;
+#endif
+
+ // Determine the code for the frame.
+ Code unchecked_code() const override;
+
+ // Looks up the exception handler for the current {pc}; returns -1 if none
+ // is found. Also returns data associated with the handler site, specific to
+ // the frame type:
+ // - OptimizedFrame : Data is not used and will not be written.
+ // - InterpretedFrame: Data is the register index holding the context.
+ virtual int LookupExceptionHandlerInTable(
+ int* data, HandlerTable::CatchPrediction* prediction);
+
+ // Check if this frame is a constructor frame invoked through 'new'.
+ virtual bool IsConstructor() const;
+
+ // Summarizes this frame, including all inlined frames.
void Summarize(std::vector<FrameSummary>* frames) const override;
+ protected:
+ inline explicit CommonFrameWithJSLinkage(StackFrameIteratorBase* iterator);
+
+ // Determines if the standard frame for the given frame pointer is a
+ // construct frame.
+ static inline bool IsConstructFrame(Address fp);
+ inline Address GetParameterSlot(int index) const;
+};
+
+class TypedFrameWithJSLinkage : public CommonFrameWithJSLinkage {
+ public:
+ void Iterate(RootVisitor* v) const override;
+
+ protected:
+ inline explicit TypedFrameWithJSLinkage(StackFrameIteratorBase* iterator);
+};
+
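In the new hierarchy, TypedFrame covers frames identified by an on-stack marker, CommonFrameWithJSLinkage covers frames with a JavaScript calling convention, and TypedFrameWithJSLinkage combines both (used by BuiltinFrame and JavaScriptBuiltinContinuationFrame further down). A hedged sketch of dispatching on linkage rather than on a concrete class, assuming StackFrame::is_java_script() from the unchanged part of this header:

    // Sketch: parameter slots exist only on frames with JS linkage.
    int ParameterCountOrZero(StackFrame* frame) {
      if (frame->is_java_script()) {
        return JavaScriptFrame::cast(frame)->ComputeParametersCount();
      }
      return 0;  // Marker-typed frames carry no JS parameter slots.
    }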
+class JavaScriptFrame : public CommonFrameWithJSLinkage {
+ public:
+ Type type() const override = 0;
+
// Accessors.
- virtual JSFunction function() const;
+ JSFunction function() const override;
Object unchecked_function() const;
- Object receiver() const override;
+ Script script() const;
Object context() const override;
- Script script() const override;
-
- inline void set_receiver(Object value);
- // Access the parameters.
- inline Address GetParameterSlot(int index) const;
- Object GetParameter(int index) const override;
- int ComputeParametersCount() const override;
#ifdef V8_NO_ARGUMENTS_ADAPTOR
- int GetActualArgumentCount() const;
+ int GetActualArgumentCount() const override;
#endif
- Handle<FixedArray> GetParameters() const;
+
+ inline void set_receiver(Object value);
// Debugger access.
void SetParameterValue(int index, Object value) const;
@@ -697,21 +615,11 @@ class JavaScriptFrame : public StandardFrame {
void Print(StringStream* accumulator, PrintMode mode,
int index) const override;
- // Determine the code for the frame.
- Code unchecked_code() const override;
-
// Return a list with {SharedFunctionInfo} objects of this frame.
virtual void GetFunctions(std::vector<SharedFunctionInfo>* functions) const;
void GetFunctions(std::vector<Handle<SharedFunctionInfo>>* functions) const;
- // Lookup exception handler for current {pc}, returns -1 if none found. Also
- // returns data associated with the handler site specific to the frame type:
- // - OptimizedFrame : Data is not used and will not return a value.
- // - InterpretedFrame: Data is the register index holding the context.
- virtual int LookupExceptionHandlerInTable(
- int* data, HandlerTable::CatchPrediction* prediction);
-
// Architecture-specific register description.
static Register fp_register();
static Register context_register();
@@ -746,13 +654,139 @@ class JavaScriptFrame : public StandardFrame {
friend class StackFrameIteratorBase;
};
-class StubFrame : public StandardFrame {
+class NativeFrame : public TypedFrame {
public:
- Type type() const override { return STUB; }
+ Type type() const override { return NATIVE; }
- // GC support.
+ // Garbage collection support.
+ void Iterate(RootVisitor* v) const override {}
+
+ protected:
+ inline explicit NativeFrame(StackFrameIteratorBase* iterator);
+
+ private:
+ void ComputeCallerState(State* state) const override;
+
+ friend class StackFrameIteratorBase;
+};
+
+// Entry frames are used to enter JavaScript execution from C.
+class EntryFrame : public TypedFrame {
+ public:
+ Type type() const override { return ENTRY; }
+
+ Code unchecked_code() const override;
+
+ // Garbage collection support.
void Iterate(RootVisitor* v) const override;
+ static EntryFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_entry());
+ return static_cast<EntryFrame*>(frame);
+ }
+
+ protected:
+ inline explicit EntryFrame(StackFrameIteratorBase* iterator);
+
+ // The caller stack pointer for entry frames is always zero. The
+ // real information about the caller frame is available through the
+ // link to the top exit frame.
+ Address GetCallerStackPointer() const override { return 0; }
+
+ private:
+ void ComputeCallerState(State* state) const override;
+ Type GetCallerState(State* state) const override;
+
+ friend class StackFrameIteratorBase;
+};
+
+class ConstructEntryFrame : public EntryFrame {
+ public:
+ Type type() const override { return CONSTRUCT_ENTRY; }
+
+ Code unchecked_code() const override;
+
+ static ConstructEntryFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_construct_entry());
+ return static_cast<ConstructEntryFrame*>(frame);
+ }
+
+ protected:
+ inline explicit ConstructEntryFrame(StackFrameIteratorBase* iterator);
+
+ private:
+ friend class StackFrameIteratorBase;
+};
+
+// Exit frames are used to exit JavaScript execution and go to C.
+class ExitFrame : public TypedFrame {
+ public:
+ Type type() const override { return EXIT; }
+
+ // Garbage collection support.
+ void Iterate(RootVisitor* v) const override;
+
+ static ExitFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_exit());
+ return static_cast<ExitFrame*>(frame);
+ }
+
+ // Compute the state and type of an exit frame given a frame
+ // pointer. Used when constructing the first stack frame seen by an
+ // iterator and the frames following entry frames.
+ static Type GetStateForFramePointer(Address fp, State* state);
+ static Address ComputeStackPointer(Address fp);
+ static StackFrame::Type ComputeFrameType(Address fp);
+ static void FillState(Address fp, Address sp, State* state);
+
+ protected:
+ inline explicit ExitFrame(StackFrameIteratorBase* iterator);
+
+ private:
+ void ComputeCallerState(State* state) const override;
+
+ friend class StackFrameIteratorBase;
+};
+
+// Builtin exit frames are a special case of exit frames, which are used
+// whenever C++ builtins (e.g., Math.acos) are called. Their main purpose is
+// to allow such builtins to appear in stack traces.
+class BuiltinExitFrame : public ExitFrame {
+ public:
+ Type type() const override { return BUILTIN_EXIT; }
+
+ static BuiltinExitFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_builtin_exit());
+ return static_cast<BuiltinExitFrame*>(frame);
+ }
+
+ JSFunction function() const;
+ Object receiver() const;
+ bool IsConstructor() const;
+
+ void Print(StringStream* accumulator, PrintMode mode,
+ int index) const override;
+
+ protected:
+ inline explicit BuiltinExitFrame(StackFrameIteratorBase* iterator);
+
+ private:
+ Object GetParameter(int i) const;
+ int ComputeParametersCount() const;
+
+ inline Object receiver_slot_object() const;
+ inline Object argc_slot_object() const;
+ inline Object target_slot_object() const;
+ inline Object new_target_slot_object() const;
+
+ friend class StackFrameIteratorBase;
+ friend class FrameArrayBuilder;
+};
+
+class StubFrame : public TypedFrame {
+ public:
+ Type type() const override { return STUB; }
+
// Determine the code for the frame.
Code unchecked_code() const override;
@@ -763,8 +797,7 @@ class StubFrame : public StandardFrame {
protected:
inline explicit StubFrame(StackFrameIteratorBase* iterator);
- Address GetCallerStackPointer() const override;
-
+ private:
friend class StackFrameIteratorBase;
};
@@ -788,11 +821,6 @@ class OptimizedFrame : public JavaScriptFrame {
DeoptimizationData GetDeoptimizationData(int* deopt_index) const;
-#ifndef V8_REVERSE_JSARGS
- // When the arguments are reversed in the stack, receiver() is
- // inherited from JavaScriptFrame.
- Object receiver() const override;
-#endif
int ComputeParametersCount() const override;
static int StackSlotOffsetRelativeToFp(int slot_index);
@@ -857,6 +885,10 @@ class InterpretedFrame : public JavaScriptFrame {
// Arguments adaptor frames are automatically inserted below
// JavaScript frames when the actual number of parameters does not
// match the formal number of parameters.
+// NOTE: This inheritance is wrong: an ArgumentsAdaptorFrame should be a
+// TypedFrame, but FrameInspector::javascript_frame() requires it to be
+// seen as a JavaScriptFrame.
+// This frame type will be deleted soon, however.
class ArgumentsAdaptorFrame : public JavaScriptFrame {
public:
Type type() const override { return ARGUMENTS_ADAPTOR; }
@@ -884,7 +916,7 @@ class ArgumentsAdaptorFrame : public JavaScriptFrame {
// Builtin frames are built for builtins with JavaScript linkage, such as
// various standard library functions (i.e. Math.asin, Math.floor, etc.).
-class BuiltinFrame final : public JavaScriptFrame {
+class BuiltinFrame final : public TypedFrameWithJSLinkage {
public:
Type type() const final { return BUILTIN; }
@@ -892,24 +924,21 @@ class BuiltinFrame final : public JavaScriptFrame {
DCHECK(frame->is_builtin());
return static_cast<BuiltinFrame*>(frame);
}
- int ComputeParametersCount() const final;
+
+ JSFunction function() const override;
+ int ComputeParametersCount() const override;
protected:
inline explicit BuiltinFrame(StackFrameIteratorBase* iterator);
- void PrintFrameKind(StringStream* accumulator) const override;
-
private:
friend class StackFrameIteratorBase;
};
-class WasmFrame : public StandardFrame {
+class WasmFrame : public TypedFrame {
public:
Type type() const override { return WASM; }
- // GC support.
- void Iterate(RootVisitor* v) const override;
-
// Printing support.
void Print(StringStream* accumulator, PrintMode mode,
int index) const override;
@@ -925,7 +954,7 @@ class WasmFrame : public StandardFrame {
V8_EXPORT_PRIVATE wasm::NativeModule* native_module() const;
wasm::WasmCode* wasm_code() const;
uint32_t function_index() const;
- Script script() const override;
+ Script script() const;
// Byte position in the module, or asm.js source position.
int position() const override;
Object context() const override;
@@ -944,8 +973,6 @@ class WasmFrame : public StandardFrame {
protected:
inline explicit WasmFrame(StackFrameIteratorBase* iterator);
- Address GetCallerStackPointer() const override;
-
private:
friend class StackFrameIteratorBase;
WasmModuleObject module_object() const;
@@ -963,15 +990,13 @@ class WasmExitFrame : public WasmFrame {
friend class StackFrameIteratorBase;
};
-class WasmDebugBreakFrame final : public StandardFrame {
+class WasmDebugBreakFrame final : public TypedFrame {
public:
Type type() const override { return WASM_DEBUG_BREAK; }
// GC support.
void Iterate(RootVisitor* v) const override;
- Code unchecked_code() const override;
-
void Print(StringStream* accumulator, PrintMode mode,
int index) const override;
@@ -983,8 +1008,6 @@ class WasmDebugBreakFrame final : public StandardFrame {
protected:
inline explicit WasmDebugBreakFrame(StackFrameIteratorBase*);
- Address GetCallerStackPointer() const override;
-
private:
friend class StackFrameIteratorBase;
};
@@ -1025,11 +1048,10 @@ class CWasmEntryFrame : public StubFrame {
Type GetCallerState(State* state) const override;
};
-class WasmCompileLazyFrame : public StandardFrame {
+class WasmCompileLazyFrame : public TypedFrame {
public:
Type type() const override { return WASM_COMPILE_LAZY; }
- Code unchecked_code() const override;
WasmInstanceObject wasm_instance() const;
FullObjectSlot wasm_instance_slot() const;
@@ -1044,13 +1066,11 @@ class WasmCompileLazyFrame : public StandardFrame {
protected:
inline explicit WasmCompileLazyFrame(StackFrameIteratorBase* iterator);
- Address GetCallerStackPointer() const override;
-
private:
friend class StackFrameIteratorBase;
};
-class InternalFrame : public StandardFrame {
+class InternalFrame : public TypedFrame {
public:
Type type() const override { return INTERNAL; }
@@ -1068,8 +1088,6 @@ class InternalFrame : public StandardFrame {
protected:
inline explicit InternalFrame(StackFrameIteratorBase* iterator);
- Address GetCallerStackPointer() const override;
-
private:
friend class StackFrameIteratorBase;
};
@@ -1108,7 +1126,7 @@ class BuiltinContinuationFrame : public InternalFrame {
friend class StackFrameIteratorBase;
};
-class JavaScriptBuiltinContinuationFrame : public JavaScriptFrame {
+class JavaScriptBuiltinContinuationFrame : public TypedFrameWithJSLinkage {
public:
Type type() const override { return JAVA_SCRIPT_BUILTIN_CONTINUATION; }
@@ -1117,6 +1135,7 @@ class JavaScriptBuiltinContinuationFrame : public JavaScriptFrame {
return static_cast<JavaScriptBuiltinContinuationFrame*>(frame);
}
+ JSFunction function() const override;
int ComputeParametersCount() const override;
intptr_t GetSPToFPDelta() const;
@@ -1236,7 +1255,7 @@ class V8_EXPORT_PRIVATE StackTraceFrameIterator {
void AdvanceOneFrame() { iterator_.Advance(); }
int FrameFunctionCount() const;
- inline StandardFrame* frame() const;
+ inline CommonFrame* frame() const;
inline bool is_javascript() const;
inline bool is_wasm() const;
@@ -1315,14 +1334,15 @@ enum class BuiltinContinuationMode {
class InterpretedFrameInfo {
public:
static InterpretedFrameInfo Precise(int parameters_count_with_receiver,
- int translation_height, bool is_topmost) {
+ int translation_height, bool is_topmost,
+ bool pad_arguments) {
return {parameters_count_with_receiver, translation_height, is_topmost,
- FrameInfoKind::kPrecise};
+ pad_arguments, FrameInfoKind::kPrecise};
}
static InterpretedFrameInfo Conservative(int parameters_count_with_receiver,
int locals_count) {
- return {parameters_count_with_receiver, locals_count, false,
+ return {parameters_count_with_receiver, locals_count, false, true,
FrameInfoKind::kConservative};
}
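Precise() gains a pad_arguments bit while Conservative() pins it to true, so only precise frame descriptions can opt out of argument padding. A hedged usage sketch with illustrative values:

    InterpretedFrameInfo info = InterpretedFrameInfo::Precise(
        /*parameters_count_with_receiver=*/3,
        /*translation_height=*/7,
        /*is_topmost=*/true,
        /*pad_arguments=*/true);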
@@ -1337,7 +1357,7 @@ class InterpretedFrameInfo {
private:
InterpretedFrameInfo(int parameters_count_with_receiver,
int translation_height, bool is_topmost,
- FrameInfoKind frame_info_kind);
+ bool pad_arguments, FrameInfoKind frame_info_kind);
uint32_t register_stack_slot_count_;
uint32_t frame_size_in_bytes_without_fixed_;
diff --git a/deps/v8/src/execution/isolate-data.h b/deps/v8/src/execution/isolate-data.h
index 26acf4253c..c875d92f09 100644
--- a/deps/v8/src/execution/isolate-data.h
+++ b/deps/v8/src/execution/isolate-data.h
@@ -8,6 +8,7 @@
#include "src/builtins/builtins.h"
#include "src/codegen/constants-arch.h"
#include "src/codegen/external-reference-table.h"
+#include "src/execution/external-pointer-table.h"
#include "src/execution/stack-guard.h"
#include "src/execution/thread-local-top.h"
#include "src/roots/roots.h"
@@ -56,6 +57,10 @@ class IsolateData final {
static constexpr int builtin_entry_table_offset() {
return kBuiltinEntryTableOffset - kIsolateRootBias;
}
+ static constexpr int builtin_entry_slot_offset(Builtins::Name builtin_index) {
+ CONSTEXPR_DCHECK(Builtins::IsBuiltinId(builtin_index));
+ return builtin_entry_table_offset() + builtin_index * kSystemPointerSize;
+ }
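The new helper folds the table base and a pointer-sized stride into a single constexpr offset, so a code generator can address a builtin's entry slot with one root-register-relative load. A hedged sketch, where kSomeBuiltin is a hypothetical Builtins::Name value:

    // slot == builtin_entry_table_offset()
    //         + Builtins::kSomeBuiltin * kSystemPointerSize
    constexpr int slot =
        IsolateData::builtin_entry_slot_offset(Builtins::kSomeBuiltin);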
// Root-register-relative offset of the builtins table.
static constexpr int builtins_table_offset() {
@@ -131,6 +136,7 @@ class IsolateData final {
V(kThreadLocalTopOffset, ThreadLocalTop::kSizeInBytes) \
V(kBuiltinEntryTableOffset, Builtins::builtin_count* kSystemPointerSize) \
V(kBuiltinsTableOffset, Builtins::builtin_count* kSystemPointerSize) \
+ FIELDS_HEAP_SANDBOX(V) \
V(kStackIsIterableOffset, kUInt8Size) \
/* This padding aligns IsolateData size by 8 bytes. */ \
V(kPaddingOffset, \
@@ -138,6 +144,13 @@ class IsolateData final {
/* Total size. */ \
V(kSize, 0)
+#ifdef V8_HEAP_SANDBOX
+#define FIELDS_HEAP_SANDBOX(V) \
+ V(kExternalPointerTableOffset, kSystemPointerSize * 3)
+#else
+#define FIELDS_HEAP_SANDBOX(V)
+#endif // V8_HEAP_SANDBOX
+
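With V8_HEAP_SANDBOX defined, the X-macro splices one extra entry into the FIELDS list; otherwise it expands to nothing and the layout matches a non-sandbox build. Roughly, the sandboxed expansion around that point looks like this (illustrative, not literal preprocessor output):

    //   V(kBuiltinsTableOffset, Builtins::builtin_count * kSystemPointerSize)
    //   V(kExternalPointerTableOffset, kSystemPointerSize * 3)
    //   V(kStackIsIterableOffset, kUInt8Size)
    // The three pointer-sized slots cover the table's buffer_, length_ and
    // capacity_ fields, whose offsets CheckIsolateLayout() verifies below.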
DEFINE_FIELD_OFFSET_CONSTANTS(0, FIELDS)
#undef FIELDS
@@ -172,6 +185,11 @@ class IsolateData final {
// The entries in this array are tagged pointers to Code objects.
Address builtins_[Builtins::builtin_count] = {};
+ // Table containing pointers to external objects.
+#ifdef V8_HEAP_SANDBOX
+ ExternalPointerTable external_pointer_table_;
+#endif
+
// Whether the SafeStackFrameIterator can successfully iterate the current
// stack. Only valid values are 0 or 1.
uint8_t stack_is_iterable_ = 1;
@@ -215,6 +233,10 @@ void IsolateData::AssertPredictableLayout() {
STATIC_ASSERT(offsetof(IsolateData, fast_c_call_caller_pc_) ==
kFastCCallCallerPCOffset);
STATIC_ASSERT(offsetof(IsolateData, stack_guard_) == kStackGuardOffset);
+#ifdef V8_HEAP_SANDBOX
+ STATIC_ASSERT(offsetof(IsolateData, external_pointer_table_) ==
+ kExternalPointerTableOffset);
+#endif
STATIC_ASSERT(offsetof(IsolateData, stack_is_iterable_) ==
kStackIsIterableOffset);
STATIC_ASSERT(sizeof(IsolateData) == IsolateData::kSize);
diff --git a/deps/v8/src/execution/isolate-inl.h b/deps/v8/src/execution/isolate-inl.h
index b3a84d01be..42f534c23e 100644
--- a/deps/v8/src/execution/isolate-inl.h
+++ b/deps/v8/src/execution/isolate-inl.h
@@ -17,10 +17,6 @@
namespace v8 {
namespace internal {
-IsolateAllocationMode Isolate::isolate_allocation_mode() {
- return isolate_allocator_->mode();
-}
-
void Isolate::set_context(Context context) {
DCHECK(context.is_null() || context.IsContext());
thread_local_top()->context_ = context;
diff --git a/deps/v8/src/execution/isolate-utils-inl.h b/deps/v8/src/execution/isolate-utils-inl.h
index 0c739eafd9..2cc66a473c 100644
--- a/deps/v8/src/execution/isolate-utils-inl.h
+++ b/deps/v8/src/execution/isolate-utils-inl.h
@@ -13,34 +13,19 @@
namespace v8 {
namespace internal {
-inline const Isolate* GetIsolateForPtrComprFromOnHeapAddress(Address address) {
+inline constexpr IsolateRoot GetIsolateForPtrComprFromOnHeapAddress(
+ Address address) {
#ifdef V8_COMPRESS_POINTERS
- return Isolate::FromRoot(GetIsolateRoot(address));
+ return IsolateRoot(GetIsolateRootAddress(address));
#else
- return nullptr;
+ return IsolateRoot();
#endif // V8_COMPRESS_POINTERS
}
-inline const Isolate* GetIsolateForPtrCompr(HeapObject object) {
+inline IsolateRoot GetIsolateForPtrCompr(HeapObject object) {
return GetIsolateForPtrComprFromOnHeapAddress(object.ptr());
}
-inline const Isolate* GetIsolateForPtrCompr(const Isolate* isolate) {
-#ifdef V8_COMPRESS_POINTERS
- return isolate;
-#else
- return nullptr;
-#endif // V8_COMPRESS_POINTERS
-}
-
-inline const Isolate* GetIsolateForPtrCompr(const LocalIsolate* isolate) {
-#ifdef V8_COMPRESS_POINTERS
- return isolate->GetIsolateForPtrCompr();
-#else
- return nullptr;
-#endif // V8_COMPRESS_POINTERS
-}
-
V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object) {
// Avoid using the below GetIsolateFromWritableObject because we want to be
// able to get the heap, but not the isolate, for off-thread objects.
@@ -48,7 +33,8 @@ V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object) {
#if defined V8_ENABLE_THIRD_PARTY_HEAP
return Heap::GetIsolateFromWritableObject(object)->heap();
#elif defined V8_COMPRESS_POINTERS
- Isolate* isolate = Isolate::FromRoot(GetIsolateRoot(object.ptr()));
+ Isolate* isolate =
+ Isolate::FromRootAddress(GetIsolateRootAddress(object.ptr()));
DCHECK_NOT_NULL(isolate);
return isolate->heap();
#else
@@ -62,7 +48,8 @@ V8_INLINE Isolate* GetIsolateFromWritableObject(HeapObject object) {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
return Heap::GetIsolateFromWritableObject(object);
#elif defined V8_COMPRESS_POINTERS
- Isolate* isolate = Isolate::FromRoot(GetIsolateRoot(object.ptr()));
+ Isolate* isolate =
+ Isolate::FromRootAddress(GetIsolateRootAddress(object.ptr()));
DCHECK_NOT_NULL(isolate);
return isolate;
#else
diff --git a/deps/v8/src/execution/isolate-utils.h b/deps/v8/src/execution/isolate-utils.h
index 3b5505f765..2204b2cd96 100644
--- a/deps/v8/src/execution/isolate-utils.h
+++ b/deps/v8/src/execution/isolate-utils.h
@@ -14,7 +14,7 @@ namespace internal {
// value is intended to be used only as a hoisted computation of isolate root
// inside trivial accessors for optimizing value decompression.
// When pointer compression is disabled this function returns a default
// IsolateRoot.
-V8_INLINE const Isolate* GetIsolateForPtrCompr(HeapObject object);
+V8_INLINE IsolateRoot GetIsolateForPtrCompr(HeapObject object);
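The returned IsolateRoot is meant to be computed once per accessor and reused across several decompressions. A hedged sketch, where kAOffset/kBOffset are hypothetical field offsets and TaggedField::load is assumed from elsewhere in the code base:

    IsolateRoot isolate = GetIsolateForPtrCompr(holder);
    Object a = TaggedField<Object, kAOffset>::load(isolate, holder);
    Object b = TaggedField<Object, kBOffset>::load(isolate, holder);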
V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object);
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index c1c3bd1b24..1c1380061b 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -18,6 +18,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
#include "src/base/hashmap.h"
+#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/base/sys-info.h"
#include "src/base/utils/random-number-generator.h"
@@ -60,6 +61,7 @@
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/backing-store.h"
#include "src/objects/elements.h"
+#include "src/objects/feedback-vector.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
@@ -106,8 +108,8 @@
extern "C" const uint8_t* v8_Default_embedded_blob_code_;
extern "C" uint32_t v8_Default_embedded_blob_code_size_;
-extern "C" const uint8_t* v8_Default_embedded_blob_metadata_;
-extern "C" uint32_t v8_Default_embedded_blob_metadata_size_;
+extern "C" const uint8_t* v8_Default_embedded_blob_data_;
+extern "C" uint32_t v8_Default_embedded_blob_data_size_;
namespace v8 {
namespace internal {
@@ -130,18 +132,18 @@ const uint8_t* DefaultEmbeddedBlobCode() {
uint32_t DefaultEmbeddedBlobCodeSize() {
return v8_Default_embedded_blob_code_size_;
}
-const uint8_t* DefaultEmbeddedBlobMetadata() {
- return v8_Default_embedded_blob_metadata_;
+const uint8_t* DefaultEmbeddedBlobData() {
+ return v8_Default_embedded_blob_data_;
}
-uint32_t DefaultEmbeddedBlobMetadataSize() {
- return v8_Default_embedded_blob_metadata_size_;
+uint32_t DefaultEmbeddedBlobDataSize() {
+ return v8_Default_embedded_blob_data_size_;
}
#ifdef V8_MULTI_SNAPSHOTS
extern "C" const uint8_t* v8_Trusted_embedded_blob_code_;
extern "C" uint32_t v8_Trusted_embedded_blob_code_size_;
-extern "C" const uint8_t* v8_Trusted_embedded_blob_metadata_;
-extern "C" uint32_t v8_Trusted_embedded_blob_metadata_size_;
+extern "C" const uint8_t* v8_Trusted_embedded_blob_data_;
+extern "C" uint32_t v8_Trusted_embedded_blob_data_size_;
const uint8_t* TrustedEmbeddedBlobCode() {
return v8_Trusted_embedded_blob_code_;
@@ -149,11 +151,11 @@ const uint8_t* TrustedEmbeddedBlobCode() {
uint32_t TrustedEmbeddedBlobCodeSize() {
return v8_Trusted_embedded_blob_code_size_;
}
-const uint8_t* TrustedEmbeddedBlobMetadata() {
- return v8_Trusted_embedded_blob_metadata_;
+const uint8_t* TrustedEmbeddedBlobData() {
+ return v8_Trusted_embedded_blob_data_;
}
-uint32_t TrustedEmbeddedBlobMetadataSize() {
- return v8_Trusted_embedded_blob_metadata_size_;
+uint32_t TrustedEmbeddedBlobDataSize() {
+ return v8_Trusted_embedded_blob_data_size_;
}
#endif
@@ -168,8 +170,8 @@ namespace {
std::atomic<const uint8_t*> current_embedded_blob_code_(nullptr);
std::atomic<uint32_t> current_embedded_blob_code_size_(0);
-std::atomic<const uint8_t*> current_embedded_blob_metadata_(nullptr);
-std::atomic<uint32_t> current_embedded_blob_metadata_size_(0);
+std::atomic<const uint8_t*> current_embedded_blob_data_(nullptr);
+std::atomic<uint32_t> current_embedded_blob_data_size_(0);
// The various workflows around embedded snapshots are fairly complex. We need
// to support plain old snapshot builds, nosnap builds, and the requirements of
@@ -195,16 +197,16 @@ std::atomic<uint32_t> current_embedded_blob_metadata_size_(0);
// This mutex protects access to the following variables:
// - sticky_embedded_blob_code_
// - sticky_embedded_blob_code_size_
-// - sticky_embedded_blob_metadata_
-// - sticky_embedded_blob_metadata_size_
+// - sticky_embedded_blob_data_
+// - sticky_embedded_blob_data_size_
// - enable_embedded_blob_refcounting_
// - current_embedded_blob_refs_
base::LazyMutex current_embedded_blob_refcount_mutex_ = LAZY_MUTEX_INITIALIZER;
const uint8_t* sticky_embedded_blob_code_ = nullptr;
uint32_t sticky_embedded_blob_code_size_ = 0;
-const uint8_t* sticky_embedded_blob_metadata_ = nullptr;
-uint32_t sticky_embedded_blob_metadata_size_ = 0;
+const uint8_t* sticky_embedded_blob_data_ = nullptr;
+uint32_t sticky_embedded_blob_data_size_ = 0;
bool enable_embedded_blob_refcounting_ = true;
int current_embedded_blob_refs_ = 0;
@@ -213,19 +215,17 @@ const uint8_t* StickyEmbeddedBlobCode() { return sticky_embedded_blob_code_; }
uint32_t StickyEmbeddedBlobCodeSize() {
return sticky_embedded_blob_code_size_;
}
-const uint8_t* StickyEmbeddedBlobMetadata() {
- return sticky_embedded_blob_metadata_;
-}
-uint32_t StickyEmbeddedBlobMetadataSize() {
- return sticky_embedded_blob_metadata_size_;
+const uint8_t* StickyEmbeddedBlobData() { return sticky_embedded_blob_data_; }
+uint32_t StickyEmbeddedBlobDataSize() {
+ return sticky_embedded_blob_data_size_;
}
void SetStickyEmbeddedBlob(const uint8_t* code, uint32_t code_size,
- const uint8_t* metadata, uint32_t metadata_size) {
+ const uint8_t* data, uint32_t data_size) {
sticky_embedded_blob_code_ = code;
sticky_embedded_blob_code_size_ = code_size;
- sticky_embedded_blob_metadata_ = metadata;
- sticky_embedded_blob_metadata_size_ = metadata_size;
+ sticky_embedded_blob_data_ = data;
+ sticky_embedded_blob_data_size_ = data_size;
}
} // namespace
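After the metadata-to-data rename, an embedded blob is always carried as two (pointer, size) pairs: a code section and a data section. A hedged sketch of parking an externally owned blob in the sticky slot, with code_buf/data_buf as hypothetical, externally managed buffers:

    SetStickyEmbeddedBlob(code_buf, code_size, data_buf, data_size);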
@@ -242,23 +242,22 @@ void FreeCurrentEmbeddedBlob() {
if (StickyEmbeddedBlobCode() == nullptr) return;
CHECK_EQ(StickyEmbeddedBlobCode(), Isolate::CurrentEmbeddedBlobCode());
- CHECK_EQ(StickyEmbeddedBlobMetadata(),
- Isolate::CurrentEmbeddedBlobMetadata());
+ CHECK_EQ(StickyEmbeddedBlobData(), Isolate::CurrentEmbeddedBlobData());
InstructionStream::FreeOffHeapInstructionStream(
const_cast<uint8_t*>(Isolate::CurrentEmbeddedBlobCode()),
Isolate::CurrentEmbeddedBlobCodeSize(),
- const_cast<uint8_t*>(Isolate::CurrentEmbeddedBlobMetadata()),
- Isolate::CurrentEmbeddedBlobMetadataSize());
+ const_cast<uint8_t*>(Isolate::CurrentEmbeddedBlobData()),
+ Isolate::CurrentEmbeddedBlobDataSize());
current_embedded_blob_code_.store(nullptr, std::memory_order_relaxed);
current_embedded_blob_code_size_.store(0, std::memory_order_relaxed);
- current_embedded_blob_metadata_.store(nullptr, std::memory_order_relaxed);
- current_embedded_blob_metadata_size_.store(0, std::memory_order_relaxed);
+ current_embedded_blob_data_.store(nullptr, std::memory_order_relaxed);
+ current_embedded_blob_data_size_.store(0, std::memory_order_relaxed);
sticky_embedded_blob_code_ = nullptr;
sticky_embedded_blob_code_size_ = 0;
- sticky_embedded_blob_metadata_ = nullptr;
- sticky_embedded_blob_metadata_size_ = 0;
+ sticky_embedded_blob_data_ = nullptr;
+ sticky_embedded_blob_data_size_ = 0;
}
// static
@@ -278,29 +277,37 @@ bool Isolate::CurrentEmbeddedBlobIsBinaryEmbedded() {
}
void Isolate::SetEmbeddedBlob(const uint8_t* code, uint32_t code_size,
- const uint8_t* metadata, uint32_t metadata_size) {
+ const uint8_t* data, uint32_t data_size) {
CHECK_NOT_NULL(code);
- CHECK_NOT_NULL(metadata);
+ CHECK_NOT_NULL(data);
embedded_blob_code_ = code;
embedded_blob_code_size_ = code_size;
- embedded_blob_metadata_ = metadata;
- embedded_blob_metadata_size_ = metadata_size;
+ embedded_blob_data_ = data;
+ embedded_blob_data_size_ = data_size;
current_embedded_blob_code_.store(code, std::memory_order_relaxed);
current_embedded_blob_code_size_.store(code_size, std::memory_order_relaxed);
- current_embedded_blob_metadata_.store(metadata, std::memory_order_relaxed);
- current_embedded_blob_metadata_size_.store(metadata_size,
- std::memory_order_relaxed);
+ current_embedded_blob_data_.store(data, std::memory_order_relaxed);
+ current_embedded_blob_data_size_.store(data_size, std::memory_order_relaxed);
#ifdef DEBUG
// Verify that the contents of the embedded blob are unchanged from
// serialization-time, just to ensure the compiler isn't messing with us.
EmbeddedData d = EmbeddedData::FromBlob();
- if (d.EmbeddedBlobHash() != d.CreateEmbeddedBlobHash()) {
+ if (d.EmbeddedBlobDataHash() != d.CreateEmbeddedBlobDataHash()) {
FATAL(
- "Embedded blob checksum verification failed. This indicates that the "
- "embedded blob has been modified since compilation time. A common "
- "cause is a debugging breakpoint set within builtin code.");
+ "Embedded blob data section checksum verification failed. This "
+ "indicates that the embedded blob has been modified since compilation "
+ "time.");
+ }
+ if (FLAG_text_is_readable) {
+ if (d.EmbeddedBlobCodeHash() != d.CreateEmbeddedBlobCodeHash()) {
+ FATAL(
+ "Embedded blob code section checksum verification failed. This "
+ "indicates that the embedded blob has been modified since "
+ "compilation time. A common cause is a debugging breakpoint set "
+ "within builtin code.");
+ }
}
#endif // DEBUG
@@ -313,21 +320,21 @@ void Isolate::ClearEmbeddedBlob() {
CHECK(enable_embedded_blob_refcounting_);
CHECK_EQ(embedded_blob_code_, CurrentEmbeddedBlobCode());
CHECK_EQ(embedded_blob_code_, StickyEmbeddedBlobCode());
- CHECK_EQ(embedded_blob_metadata_, CurrentEmbeddedBlobMetadata());
- CHECK_EQ(embedded_blob_metadata_, StickyEmbeddedBlobMetadata());
+ CHECK_EQ(embedded_blob_data_, CurrentEmbeddedBlobData());
+ CHECK_EQ(embedded_blob_data_, StickyEmbeddedBlobData());
embedded_blob_code_ = nullptr;
embedded_blob_code_size_ = 0;
- embedded_blob_metadata_ = nullptr;
- embedded_blob_metadata_size_ = 0;
+ embedded_blob_data_ = nullptr;
+ embedded_blob_data_size_ = 0;
current_embedded_blob_code_.store(nullptr, std::memory_order_relaxed);
current_embedded_blob_code_size_.store(0, std::memory_order_relaxed);
- current_embedded_blob_metadata_.store(nullptr, std::memory_order_relaxed);
- current_embedded_blob_metadata_size_.store(0, std::memory_order_relaxed);
+ current_embedded_blob_data_.store(nullptr, std::memory_order_relaxed);
+ current_embedded_blob_data_size_.store(0, std::memory_order_relaxed);
sticky_embedded_blob_code_ = nullptr;
sticky_embedded_blob_code_size_ = 0;
- sticky_embedded_blob_metadata_ = nullptr;
- sticky_embedded_blob_metadata_size_ = 0;
+ sticky_embedded_blob_data_ = nullptr;
+ sticky_embedded_blob_data_size_ = 0;
}
const uint8_t* Isolate::embedded_blob_code() const {
@@ -336,11 +343,11 @@ const uint8_t* Isolate::embedded_blob_code() const {
uint32_t Isolate::embedded_blob_code_size() const {
return embedded_blob_code_size_;
}
-const uint8_t* Isolate::embedded_blob_metadata() const {
- return embedded_blob_metadata_;
+const uint8_t* Isolate::embedded_blob_data() const {
+ return embedded_blob_data_;
}
-uint32_t Isolate::embedded_blob_metadata_size() const {
- return embedded_blob_metadata_size_;
+uint32_t Isolate::embedded_blob_data_size() const {
+ return embedded_blob_data_size_;
}
// static
@@ -356,14 +363,14 @@ uint32_t Isolate::CurrentEmbeddedBlobCodeSize() {
}
// static
-const uint8_t* Isolate::CurrentEmbeddedBlobMetadata() {
- return current_embedded_blob_metadata_.load(
+const uint8_t* Isolate::CurrentEmbeddedBlobData() {
+ return current_embedded_blob_data_.load(
std::memory_order::memory_order_relaxed);
}
// static
-uint32_t Isolate::CurrentEmbeddedBlobMetadataSize() {
- return current_embedded_blob_metadata_size_.load(
+uint32_t Isolate::CurrentEmbeddedBlobDataSize() {
+ return current_embedded_blob_data_size_.load(
std::memory_order::memory_order_relaxed);
}
@@ -385,13 +392,14 @@ size_t Isolate::HashIsolateForEmbeddedBlob() {
reinterpret_cast<uint8_t*>(code.ptr() - kHeapObjectTag);
// These static asserts ensure we don't miss relevant fields. We don't hash
- // instruction size and flags since they change when creating the off-heap
- // trampolines. Other data fields must remain the same.
+ // instruction/metadata size and flags since they change when creating the
+ // off-heap trampolines. Other data fields must remain the same.
STATIC_ASSERT(Code::kInstructionSizeOffset == Code::kDataStart);
- STATIC_ASSERT(Code::kFlagsOffset == Code::kInstructionSizeOffsetEnd + 1);
- STATIC_ASSERT(Code::kSafepointTableOffsetOffset ==
- Code::kFlagsOffsetEnd + 1);
- static constexpr int kStartOffset = Code::kSafepointTableOffsetOffset;
+ STATIC_ASSERT(Code::kMetadataSizeOffset ==
+ Code::kInstructionSizeOffsetEnd + 1);
+ STATIC_ASSERT(Code::kFlagsOffset == Code::kMetadataSizeOffsetEnd + 1);
+ STATIC_ASSERT(Code::kBuiltinIndexOffset == Code::kFlagsOffsetEnd + 1);
+ static constexpr int kStartOffset = Code::kBuiltinIndexOffset;
for (int j = kStartOffset; j < Code::kUnalignedHeaderSize; j++) {
hash = base::hash_combine(hash, size_t{code_ptr[j]});
@@ -1020,6 +1028,9 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
CaptureStackTraceOptions options) {
DisallowJavascriptExecution no_js(isolate);
+ TRACE_EVENT_BEGIN1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
+ "CaptureStackTrace", "maxFrameCount", options.limit);
+
wasm::WasmCodeRefScope code_ref_scope;
FrameArrayBuilder builder(isolate, options.skip_mode, options.limit, caller,
options.filter_mode);
@@ -1040,7 +1051,7 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
// A standard frame may include many summarized frames (due to
// inlining).
std::vector<FrameSummary> frames;
- StandardFrame::cast(frame)->Summarize(&frames);
+ CommonFrame::cast(frame)->Summarize(&frames);
for (size_t i = frames.size(); i-- != 0 && !builder.full();) {
auto& summary = frames[i];
if (options.capture_only_frames_subject_to_debugging &&
@@ -1141,7 +1152,10 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
}
}
- return builder.GetElementsAsStackTraceFrameArray();
+ Handle<FixedArray> stack_trace = builder.GetElementsAsStackTraceFrameArray();
+ TRACE_EVENT_END1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
+ "CaptureStackTrace", "frameCount", stack_trace->length());
+ return stack_trace;
}
} // namespace
@@ -1411,7 +1425,7 @@ Object Isolate::StackOverflow() {
ErrorUtils::Construct(this, fun, fun, msg, SKIP_NONE, no_caller,
ErrorUtils::StackTraceCollection::kSimple));
- Throw(*exception, nullptr);
+ Throw(*exception);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && FLAG_stress_compaction) {
@@ -1423,7 +1437,7 @@ Object Isolate::StackOverflow() {
return ReadOnlyRoots(heap()).exception();
}
-void Isolate::ThrowAt(Handle<JSObject> exception, MessageLocation* location) {
+Object Isolate::ThrowAt(Handle<JSObject> exception, MessageLocation* location) {
Handle<Name> key_start_pos = factory()->error_start_pos_symbol();
Object::SetProperty(this, exception, key_start_pos,
handle(Smi::FromInt(location->start_pos()), this),
@@ -1444,11 +1458,11 @@ void Isolate::ThrowAt(Handle<JSObject> exception, MessageLocation* location) {
Just(ShouldThrow::kThrowOnError))
.Check();
- Throw(*exception, location);
+ return ThrowInternal(*exception, location);
}
Object Isolate::TerminateExecution() {
- return Throw(ReadOnlyRoots(this).termination_exception(), nullptr);
+ return Throw(ReadOnlyRoots(this).termination_exception());
}
void Isolate::CancelTerminateExecution() {
@@ -1578,7 +1592,7 @@ Handle<JSMessageObject> Isolate::CreateMessageOrAbort(
return message_obj;
}
-Object Isolate::Throw(Object raw_exception, MessageLocation* location) {
+Object Isolate::ThrowInternal(Object raw_exception, MessageLocation* location) {
DCHECK(!has_pending_exception());
HandleScope scope(this);
@@ -1803,7 +1817,7 @@ Object Isolate::UnwindAndFindHandler() {
code.stack_slots() * kSystemPointerSize;
// TODO(bmeurer): Turbofanned BUILTIN frames appear as OPTIMIZED,
- // but do not have a code kind of OPTIMIZED_FUNCTION.
+ // but do not have a code kind of TURBOFAN.
if (CodeKindCanDeoptimize(code.kind()) &&
code.marked_for_deoptimization()) {
// If the target code is lazy deoptimized, we jump to the original
@@ -1880,9 +1894,8 @@ Object Isolate::UnwindAndFindHandler() {
case StackFrame::BUILTIN:
// For builtin frames we are guaranteed not to find a handler.
if (catchable_by_js) {
- CHECK_EQ(-1,
- JavaScriptFrame::cast(frame)->LookupExceptionHandlerInTable(
- nullptr, nullptr));
+ CHECK_EQ(-1, BuiltinFrame::cast(frame)->LookupExceptionHandlerInTable(
+ nullptr, nullptr));
}
break;
@@ -2114,7 +2127,7 @@ void Isolate::PrintCurrentStackTrace(FILE* out) {
bool Isolate::ComputeLocation(MessageLocation* target) {
StackTraceFrameIterator it(this);
if (it.done()) return false;
- StandardFrame* frame = it.frame();
+ CommonFrame* frame = it.frame();
// Compute the location from the function and the relocation info of the
// baseline code. For optimized code this will use the deoptimization
// information to get canonical location information.
@@ -2656,6 +2669,15 @@ void Isolate::ReleaseSharedPtrs() {
}
}
+bool Isolate::IsBuiltinsTableHandleLocation(Address* handle_location) {
+ FullObjectSlot location(handle_location);
+ FullObjectSlot first_root(builtins_table());
+ FullObjectSlot last_root(builtins_table() + Builtins::builtin_count);
+ if (location >= last_root) return false;
+ if (location < first_root) return false;
+ return true;
+}
+
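The new predicate is a half-open range check over the builtins table. A hedged sketch of the intended use when classifying a handle location (the caller is hypothetical):

    // Handles that point into the builtins table are isolate-owned roots,
    // not ordinary local handles, so skip per-handle bookkeeping for them.
    if (isolate->IsBuiltinsTableHandleLocation(location)) return;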
void Isolate::RegisterManagedPtrDestructor(ManagedPtrDestructor* destructor) {
base::MutexGuard lock(&managed_ptr_destructors_mutex_);
DCHECK_NULL(destructor->prev_);
@@ -2857,18 +2879,16 @@ std::atomic<size_t> Isolate::non_disposed_isolates_;
#endif // DEBUG
// static
-Isolate* Isolate::New(IsolateAllocationMode mode) {
+Isolate* Isolate::New() {
// IsolateAllocator allocates the memory for the Isolate object.
std::unique_ptr<IsolateAllocator> isolate_allocator =
- std::make_unique<IsolateAllocator>(mode);
+ std::make_unique<IsolateAllocator>();
// Construct Isolate object in the allocated memory.
void* isolate_ptr = isolate_allocator->isolate_memory();
Isolate* isolate = new (isolate_ptr) Isolate(std::move(isolate_allocator));
-#if V8_TARGET_ARCH_64_BIT
- DCHECK_IMPLIES(
- mode == IsolateAllocationMode::kInV8Heap,
- IsAligned(isolate->isolate_root(), kPtrComprIsolateRootAlignment));
+#ifdef V8_COMPRESS_POINTERS
+ DCHECK(IsAligned(isolate->isolate_root(), kPtrComprIsolateRootAlignment));
#endif
#ifdef DEBUG
@@ -2933,6 +2953,9 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
id_(isolate_counter.fetch_add(1, std::memory_order_relaxed)),
allocator_(new TracingAccountingAllocator(this)),
builtins_(this),
+#if defined(DEBUG) || defined(VERIFY_HEAP)
+ num_active_deserializers_(0),
+#endif
rail_mode_(PERFORMANCE_ANIMATION),
code_event_dispatcher_(new CodeEventDispatcher()),
persistent_handles_list_(new PersistentHandlesList()),
@@ -2982,6 +3005,15 @@ void Isolate::CheckIsolateLayout() {
Internals::kIsolateStackGuardOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.roots_)),
Internals::kIsolateRootsOffset);
+
+#ifdef V8_HEAP_SANDBOX
+ CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, buffer_)),
+ Internals::kExternalPointerTableBufferOffset);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, length_)),
+ Internals::kExternalPointerTableLengthOffset);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, capacity_)),
+ Internals::kExternalPointerTableCapacityOffset);
+#endif
}
void Isolate::ClearSerializerData() {
@@ -3057,8 +3089,6 @@ void Isolate::Deinit() {
ReleaseSharedPtrs();
- delete deoptimizer_data_;
- deoptimizer_data_ = nullptr;
string_table_.reset();
builtins_.TearDown();
bootstrapper_->TearDown();
@@ -3267,17 +3297,16 @@ namespace {
void CreateOffHeapTrampolines(Isolate* isolate) {
DCHECK_NOT_NULL(isolate->embedded_blob_code());
DCHECK_NE(0, isolate->embedded_blob_code_size());
- DCHECK_NOT_NULL(isolate->embedded_blob_metadata());
- DCHECK_NE(0, isolate->embedded_blob_metadata_size());
+ DCHECK_NOT_NULL(isolate->embedded_blob_data());
+ DCHECK_NE(0, isolate->embedded_blob_data_size());
HandleScope scope(isolate);
Builtins* builtins = isolate->builtins();
EmbeddedData d = EmbeddedData::FromBlob();
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
for (int i = 0; i < Builtins::builtin_count; i++) {
- if (!Builtins::IsIsolateIndependent(i)) continue;
-
Address instruction_start = d.InstructionStartOfBuiltin(i);
Handle<Code> trampoline = isolate->factory()->NewOffHeapTrampolineFor(
builtins->builtin_handle(i), instruction_start);
@@ -3300,15 +3329,15 @@ bool IsolateIsCompatibleWithEmbeddedBlob(Isolate* isolate) {
void Isolate::InitializeDefaultEmbeddedBlob() {
const uint8_t* code = DefaultEmbeddedBlobCode();
uint32_t code_size = DefaultEmbeddedBlobCodeSize();
- const uint8_t* metadata = DefaultEmbeddedBlobMetadata();
- uint32_t metadata_size = DefaultEmbeddedBlobMetadataSize();
+ const uint8_t* data = DefaultEmbeddedBlobData();
+ uint32_t data_size = DefaultEmbeddedBlobDataSize();
#ifdef V8_MULTI_SNAPSHOTS
if (!FLAG_untrusted_code_mitigations) {
code = TrustedEmbeddedBlobCode();
code_size = TrustedEmbeddedBlobCodeSize();
- metadata = TrustedEmbeddedBlobMetadata();
- metadata_size = TrustedEmbeddedBlobMetadataSize();
+ data = TrustedEmbeddedBlobData();
+ data_size = TrustedEmbeddedBlobDataSize();
}
#endif
@@ -3318,8 +3347,8 @@ void Isolate::InitializeDefaultEmbeddedBlob() {
if (StickyEmbeddedBlobCode() != nullptr) {
code = StickyEmbeddedBlobCode();
code_size = StickyEmbeddedBlobCodeSize();
- metadata = StickyEmbeddedBlobMetadata();
- metadata_size = StickyEmbeddedBlobMetadataSize();
+ data = StickyEmbeddedBlobData();
+ data_size = StickyEmbeddedBlobDataSize();
current_embedded_blob_refs_++;
}
}
@@ -3327,7 +3356,7 @@ void Isolate::InitializeDefaultEmbeddedBlob() {
if (code == nullptr) {
CHECK_EQ(0, code_size);
} else {
- SetEmbeddedBlob(code, code_size, metadata, metadata_size);
+ SetEmbeddedBlob(code, code_size, data, data_size);
}
}
@@ -3341,25 +3370,25 @@ void Isolate::CreateAndSetEmbeddedBlob() {
// If a sticky blob has been set, we reuse it.
if (StickyEmbeddedBlobCode() != nullptr) {
CHECK_EQ(embedded_blob_code(), StickyEmbeddedBlobCode());
- CHECK_EQ(embedded_blob_metadata(), StickyEmbeddedBlobMetadata());
+ CHECK_EQ(embedded_blob_data(), StickyEmbeddedBlobData());
CHECK_EQ(CurrentEmbeddedBlobCode(), StickyEmbeddedBlobCode());
- CHECK_EQ(CurrentEmbeddedBlobMetadata(), StickyEmbeddedBlobMetadata());
+ CHECK_EQ(CurrentEmbeddedBlobData(), StickyEmbeddedBlobData());
} else {
// Create and set a new embedded blob.
uint8_t* code;
uint32_t code_size;
- uint8_t* metadata;
- uint32_t metadata_size;
- InstructionStream::CreateOffHeapInstructionStream(
- this, &code, &code_size, &metadata, &metadata_size);
+ uint8_t* data;
+ uint32_t data_size;
+ InstructionStream::CreateOffHeapInstructionStream(this, &code, &code_size,
+ &data, &data_size);
CHECK_EQ(0, current_embedded_blob_refs_);
const uint8_t* const_code = const_cast<const uint8_t*>(code);
- const uint8_t* const_metadata = const_cast<const uint8_t*>(metadata);
- SetEmbeddedBlob(const_code, code_size, const_metadata, metadata_size);
+ const uint8_t* const_data = const_cast<const uint8_t*>(data);
+ SetEmbeddedBlob(const_code, code_size, const_data, data_size);
current_embedded_blob_refs_++;
- SetStickyEmbeddedBlob(code, code_size, metadata, metadata_size);
+ SetStickyEmbeddedBlob(code, code_size, data, data_size);
}
CreateOffHeapTrampolines(this);
@@ -3370,9 +3399,9 @@ void Isolate::TearDownEmbeddedBlob() {
if (StickyEmbeddedBlobCode() == nullptr) return;
CHECK_EQ(embedded_blob_code(), StickyEmbeddedBlobCode());
- CHECK_EQ(embedded_blob_metadata(), StickyEmbeddedBlobMetadata());
+ CHECK_EQ(embedded_blob_data(), StickyEmbeddedBlobData());
CHECK_EQ(CurrentEmbeddedBlobCode(), StickyEmbeddedBlobCode());
- CHECK_EQ(CurrentEmbeddedBlobMetadata(), StickyEmbeddedBlobMetadata());
+ CHECK_EQ(CurrentEmbeddedBlobData(), StickyEmbeddedBlobData());
base::MutexGuard guard(current_embedded_blob_refcount_mutex_.Pointer());
current_embedded_blob_refs_--;
@@ -3380,19 +3409,19 @@ void Isolate::TearDownEmbeddedBlob() {
// We own the embedded blob and are the last holder. Free it.
InstructionStream::FreeOffHeapInstructionStream(
const_cast<uint8_t*>(embedded_blob_code()), embedded_blob_code_size(),
- const_cast<uint8_t*>(embedded_blob_metadata()),
- embedded_blob_metadata_size());
+ const_cast<uint8_t*>(embedded_blob_data()), embedded_blob_data_size());
ClearEmbeddedBlob();
}
}
-bool Isolate::InitWithoutSnapshot() { return Init(nullptr, nullptr); }
+bool Isolate::InitWithoutSnapshot() { return Init(nullptr, nullptr, false); }
-bool Isolate::InitWithSnapshot(ReadOnlyDeserializer* read_only_deserializer,
- StartupDeserializer* startup_deserializer) {
- DCHECK_NOT_NULL(read_only_deserializer);
- DCHECK_NOT_NULL(startup_deserializer);
- return Init(read_only_deserializer, startup_deserializer);
+bool Isolate::InitWithSnapshot(SnapshotData* startup_snapshot_data,
+ SnapshotData* read_only_snapshot_data,
+ bool can_rehash) {
+ DCHECK_NOT_NULL(startup_snapshot_data);
+ DCHECK_NOT_NULL(read_only_snapshot_data);
+ return Init(startup_snapshot_data, read_only_snapshot_data, can_rehash);
}
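Callers now pass raw SnapshotData plus a rehash flag, and the StartupDeserializer is constructed inside Init (see below). A hedged sketch, with startup_data/read_only_data as hypothetical SnapshotData instances:

    CHECK(isolate->InitWithSnapshot(&startup_data, &read_only_data,
                                    /*can_rehash=*/true));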
static std::string AddressToString(uintptr_t address) {
@@ -3441,12 +3470,12 @@ using MapOfLoadsAndStoresPerFunction =
MapOfLoadsAndStoresPerFunction* stack_access_count_map = nullptr;
} // namespace
-bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
- StartupDeserializer* startup_deserializer) {
+bool Isolate::Init(SnapshotData* startup_snapshot_data,
+ SnapshotData* read_only_snapshot_data, bool can_rehash) {
TRACE_ISOLATE(init);
- const bool create_heap_objects = (read_only_deserializer == nullptr);
+ const bool create_heap_objects = (read_only_snapshot_data == nullptr);
// We either have both or neither.
- DCHECK_EQ(create_heap_objects, startup_deserializer == nullptr);
+ DCHECK_EQ(create_heap_objects, startup_snapshot_data == nullptr);
base::ElapsedTimer timer;
if (create_heap_objects && FLAG_profile_deserialization) timer.Start();
@@ -3507,7 +3536,7 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
// SetUp the object heap.
DCHECK(!heap_.HasBeenSetUp());
heap_.SetUp();
- ReadOnlyHeap::SetUp(this, read_only_deserializer);
+ ReadOnlyHeap::SetUp(this, read_only_snapshot_data, can_rehash);
heap_.SetUpSpaces();
isolate_data_.external_reference_table()->Init(this);
@@ -3518,8 +3547,6 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
}
DCHECK_NOT_NULL(wasm_engine_);
- deoptimizer_data_ = new DeoptimizerData(heap());
-
if (setup_delegate_ == nullptr) {
setup_delegate_ = new SetupIsolateDelegate(create_heap_objects);
}
@@ -3598,7 +3625,9 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
heap_.read_only_space()->ClearStringPaddingIfNeeded();
read_only_heap_->OnCreateHeapObjectsComplete(this);
} else {
- startup_deserializer->DeserializeInto(this);
+ StartupDeserializer startup_deserializer(this, startup_snapshot_data,
+ can_rehash);
+ startup_deserializer.DeserializeIntoIsolate();
}
load_stub_cache_->Initialize();
store_stub_cache_->Initialize();
@@ -4572,7 +4601,7 @@ SaveContext::~SaveContext() {
isolate_->set_context(context_.is_null() ? Context() : *context_);
}
-bool SaveContext::IsBelowFrame(StandardFrame* frame) {
+bool SaveContext::IsBelowFrame(CommonFrame* frame) {
return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
}
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index 43b7e27dd4..18fb4b6417 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -23,6 +23,7 @@
#include "src/common/globals.h"
#include "src/debug/interface-types.h"
#include "src/execution/execution.h"
+#include "src/execution/external-pointer-table.h"
#include "src/execution/futex-emulation.h"
#include "src/execution/isolate-data.h"
#include "src/execution/messages.h"
@@ -70,12 +71,13 @@ class BuiltinsConstantsTableBuilder;
class CancelableTaskManager;
class CodeEventDispatcher;
class CodeTracer;
+class CommonFrame;
class CompilationCache;
class CompilationStatistics;
class CompilerDispatcher;
class Counters;
class Debug;
-class DeoptimizerData;
+class Deoptimizer;
class DescriptorLookupCache;
class EmbeddedFileWriterInterface;
class EternalHandles;
@@ -83,6 +85,7 @@ class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
class HeapProfiler;
class InnerPointerToCodeCache;
+class LocalIsolate;
class Logger;
class MaterializedObjectStore;
class Microtask;
@@ -91,14 +94,12 @@ class OptimizingCompileDispatcher;
class PersistentHandles;
class PersistentHandlesList;
class ReadOnlyArtifacts;
-class ReadOnlyDeserializer;
class RegExpStack;
class RootVisitor;
class RuntimeProfiler;
class SetupIsolateDelegate;
class Simulator;
-class StandardFrame;
-class StartupDeserializer;
+class SnapshotData;
class StringTable;
class StubCache;
class ThreadManager;
@@ -371,6 +372,18 @@ class Recorder;
} \
} while (false)
+#define WHILE_WITH_HANDLE_SCOPE(isolate, limit_check, body) \
+ do { \
+ Isolate* for_with_handle_isolate = isolate; \
+ while (limit_check) { \
+ HandleScope loop_scope(for_with_handle_isolate); \
+ for (int for_with_handle_it = 0; \
+ limit_check && for_with_handle_it < 1024; ++for_with_handle_it) { \
+ body \
+ } \
+ } \
+ } while (false)
+
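The macro re-enters a fresh HandleScope at most every 1024 iterations, so a long-running loop cannot grow the handle area without bound while still re-evaluating limit_check on every pass. A hedged usage sketch, with worklist and Process as hypothetical:

    WHILE_WITH_HANDLE_SCOPE(isolate, !worklist.empty(), {
      Handle<Object> item = worklist.pop();
      Process(item);
    });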
#define FIELD_ACCESSOR(type, name) \
inline void set_##name(type v) { name##_ = v; } \
inline type name() const { return name##_; }
@@ -410,6 +423,8 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
V(ModifyCodeGenerationFromStringsCallback, modify_code_gen_callback, \
nullptr) \
+ V(ModifyCodeGenerationFromStringsCallback2, modify_code_gen_callback2, \
+ nullptr) \
V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr) \
V(ExtensionCallback, wasm_module_callback, &NoExtension) \
V(ExtensionCallback, wasm_instance_callback, &NoExtension) \
@@ -524,8 +539,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Creates Isolate object. Must be used instead of constructing Isolate with
// new operator.
- static Isolate* New(
- IsolateAllocationMode mode = IsolateAllocationMode::kDefault);
+ static Isolate* New();
// Deletes Isolate object. Must be used instead of delete operator.
// Destroys the non-default isolates.
@@ -537,9 +551,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
ReadOnlyHeap* ro_heap);
void set_read_only_heap(ReadOnlyHeap* ro_heap) { read_only_heap_ = ro_heap; }
- // Returns allocation mode of this isolate.
- V8_INLINE IsolateAllocationMode isolate_allocation_mode();
-
// Page allocator that must be used for allocating V8 heap pages.
v8::PageAllocator* page_allocator();
@@ -573,8 +584,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
bool InitializeCounters(); // Returns false if already initialized.
bool InitWithoutSnapshot();
- bool InitWithSnapshot(ReadOnlyDeserializer* read_only_deserializer,
- StartupDeserializer* startup_deserializer);
+ bool InitWithSnapshot(SnapshotData* startup_snapshot_data,
+ SnapshotData* read_only_snapshot_data, bool can_rehash);
// True if at least one thread Enter'ed this isolate.
bool IsInUse() { return entry_stack_ != nullptr; }
@@ -615,6 +626,14 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Mutex for serializing access to break control structures.
base::RecursiveMutex* break_access() { return &break_access_; }
+ // Shared mutex for allowing concurrent read/writes to FeedbackVectors.
+ base::SharedMutex* feedback_vector_access() {
+ return &feedback_vector_access_;
+ }
+
+ // Shared mutex for allowing concurrent read/writes to Strings.
+ base::SharedMutex* string_access() { return &string_access_; }
+
// Shared mutex for allowing concurrent read/writes to TransitionArrays.
base::SharedMutex* transition_array_access() {
return &transition_array_access_;
@@ -697,6 +716,27 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
return &thread_local_top()->c_function_;
}
+#if defined(DEBUG) || defined(VERIFY_HEAP)
+ // Count the number of active deserializers, so that the heap verifier knows
+ // whether there is currently an active deserialization happening.
+ //
+ // This is needed as the verifier currently doesn't support verifying objects
+ // which are partially deserialized.
+ //
+ // TODO(leszeks): Make the verifier a bit more deserialization compatible.
+ void RegisterDeserializerStarted() { ++num_active_deserializers_; }
+ void RegisterDeserializerFinished() {
+ CHECK_GE(--num_active_deserializers_, 0);
+ }
+ bool has_active_deserializer() const {
+ return num_active_deserializers_.load(std::memory_order_acquire) > 0;
+ }
+#else
+ void RegisterDeserializerStarted() {}
+ void RegisterDeserializerFinished() {}
+ bool has_active_deserializer() const { UNREACHABLE(); }
+#endif
+
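Start/finish calls must pair up: a deserializer brackets its whole run so heap verification can tolerate half-initialized objects in between. A hedged RAII sketch a caller could wrap around a deserialization:

    class ActiveDeserializerScope {
     public:
      explicit ActiveDeserializerScope(Isolate* isolate) : isolate_(isolate) {
        isolate_->RegisterDeserializerStarted();
      }
      ~ActiveDeserializerScope() { isolate_->RegisterDeserializerFinished(); }

     private:
      Isolate* const isolate_;
    };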
// Bottom JS entry.
Address js_entry_sp() { return thread_local_top()->js_entry_sp_; }
inline Address* js_entry_sp_address() {
@@ -789,17 +829,22 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Exception throwing support. The caller should use the result
// of Throw() as its return value.
- Object Throw(Object exception, MessageLocation* location = nullptr);
+ Object Throw(Object exception) { return ThrowInternal(exception, nullptr); }
+ Object ThrowAt(Handle<JSObject> exception, MessageLocation* location);
Object ThrowIllegalOperation();
template <typename T>
- V8_WARN_UNUSED_RESULT MaybeHandle<T> Throw(
- Handle<Object> exception, MessageLocation* location = nullptr) {
- Throw(*exception, location);
+ V8_WARN_UNUSED_RESULT MaybeHandle<T> Throw(Handle<Object> exception) {
+ Throw(*exception);
return MaybeHandle<T>();
}
- void ThrowAt(Handle<JSObject> exception, MessageLocation* location);
+ template <typename T>
+ V8_WARN_UNUSED_RESULT MaybeHandle<T> ThrowAt(Handle<JSObject> exception,
+ MessageLocation* location) {
+ ThrowAt(exception, location);
+ return MaybeHandle<T>();
+ }
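With the optional location parameter gone, call sites pick the overload explicitly; both forms still produce the value to tail-return. A hedged sketch, with error and location hypothetical:

    // In a context returning Object:
    //   return isolate->Throw(*error);
    // In a context returning MaybeHandle<T>, with a known source location:
    //   return isolate->ThrowAt<JSFunction>(error, &location);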
void FatalProcessOutOfHeapMemory(const char* location) {
heap()->FatalProcessOutOfMemory(location);
@@ -958,7 +1003,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
static size_t isolate_root_bias() {
return OFFSET_OF(Isolate, isolate_data_) + IsolateData::kIsolateRootBias;
}
- static Isolate* FromRoot(Address isolate_root) {
+ static Isolate* FromRootAddress(Address isolate_root) {
return reinterpret_cast<Isolate*>(isolate_root - isolate_root_bias());
}
@@ -991,9 +1036,21 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
Address* builtin_entry_table() { return isolate_data_.builtin_entry_table(); }
V8_INLINE Address* builtins_table() { return isolate_data_.builtins(); }
+ bool IsBuiltinsTableHandleLocation(Address* handle_location);
+
StubCache* load_stub_cache() { return load_stub_cache_; }
StubCache* store_stub_cache() { return store_stub_cache_; }
- DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
+ Deoptimizer* GetAndClearCurrentDeoptimizer() {
+ Deoptimizer* result = current_deoptimizer_;
+ CHECK_NOT_NULL(result);
+ current_deoptimizer_ = nullptr;
+ return result;
+ }
+ void set_current_deoptimizer(Deoptimizer* deoptimizer) {
+ DCHECK_NULL(current_deoptimizer_);
+ DCHECK_NOT_NULL(deoptimizer);
+ current_deoptimizer_ = deoptimizer;
+ }
bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
void set_deoptimizer_lazy_throw(bool value) {
deoptimizer_lazy_throw_ = value;
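The DeoptimizerData pointer gives way to a single pending Deoptimizer; a sketch of the intended hand-off using only the accessors above (helper name hypothetical):

void HandOff(Isolate* isolate, Deoptimizer* deopt) {
  // Parks exactly one Deoptimizer (DCHECKed non-null, none pending) ...
  isolate->set_current_deoptimizer(deopt);
  // ... which is then claimed exactly once (CHECKed non-null).
  Deoptimizer* claimed = isolate->GetAndClearCurrentDeoptimizer();
  USE(claimed);
}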
@@ -1390,16 +1447,16 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
static const uint8_t* CurrentEmbeddedBlobCode();
static uint32_t CurrentEmbeddedBlobCodeSize();
- static const uint8_t* CurrentEmbeddedBlobMetadata();
- static uint32_t CurrentEmbeddedBlobMetadataSize();
+ static const uint8_t* CurrentEmbeddedBlobData();
+ static uint32_t CurrentEmbeddedBlobDataSize();
static bool CurrentEmbeddedBlobIsBinaryEmbedded();
// These always return the same result as static methods above, but don't
// access the global atomic variable (and thus *might be* slightly faster).
const uint8_t* embedded_blob_code() const;
uint32_t embedded_blob_code_size() const;
- const uint8_t* embedded_blob_metadata() const;
- uint32_t embedded_blob_metadata_size() const;
+ const uint8_t* embedded_blob_data() const;
+ uint32_t embedded_blob_data_size() const;
void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
array_buffer_allocator_ = allocator;
@@ -1560,12 +1617,26 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
MaybeLocal<v8::Context> GetContextFromRecorderContextId(
v8::metrics::Recorder::ContextId id);
+#ifdef V8_HEAP_SANDBOX
+ ExternalPointerTable& external_pointer_table() {
+ return isolate_data_.external_pointer_table_;
+ }
+
+ const ExternalPointerTable& external_pointer_table() const {
+ return isolate_data_.external_pointer_table_;
+ }
+
+ Address external_pointer_table_address() {
+ return reinterpret_cast<Address>(&isolate_data_.external_pointer_table_);
+ }
+#endif
+
private:
explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator);
~Isolate();
- bool Init(ReadOnlyDeserializer* read_only_deserializer,
- StartupDeserializer* startup_deserializer);
+ bool Init(SnapshotData* startup_snapshot_data,
+ SnapshotData* read_only_snapshot_data, bool can_rehash);
void CheckIsolateLayout();
@@ -1660,6 +1731,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void AddCrashKeysForIsolateAndHeapPointers();
+ // Returns the Exception sentinel.
+ Object ThrowInternal(Object exception, MessageLocation* location);
+
// This class contains a collection of data accessible from both C++ runtime
// and compiled code (including assembly stubs, builtins, interpreter bytecode
// handlers and optimized code).
@@ -1681,11 +1755,13 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
CompilationCache* compilation_cache_ = nullptr;
std::shared_ptr<Counters> async_counters_;
base::RecursiveMutex break_access_;
+ base::SharedMutex feedback_vector_access_;
+ base::SharedMutex string_access_;
base::SharedMutex transition_array_access_;
Logger* logger_ = nullptr;
StubCache* load_stub_cache_ = nullptr;
StubCache* store_stub_cache_ = nullptr;
- DeoptimizerData* deoptimizer_data_ = nullptr;
+ Deoptimizer* current_deoptimizer_ = nullptr;
bool deoptimizer_lazy_throw_ = false;
MaterializedObjectStore* materialized_object_store_ = nullptr;
bool capture_stack_trace_for_uncaught_exceptions_ = false;
@@ -1704,6 +1780,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
RuntimeState runtime_state_;
Builtins builtins_;
SetupIsolateDelegate* setup_delegate_ = nullptr;
+#if defined(DEBUG) || defined(VERIFY_HEAP)
+ std::atomic<int> num_active_deserializers_;
+#endif
#ifndef V8_INTL_SUPPORT
unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
@@ -1853,13 +1932,13 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void TearDownEmbeddedBlob();
void SetEmbeddedBlob(const uint8_t* code, uint32_t code_size,
- const uint8_t* metadata, uint32_t metadata_size);
+ const uint8_t* data, uint32_t data_size);
void ClearEmbeddedBlob();
const uint8_t* embedded_blob_code_ = nullptr;
uint32_t embedded_blob_code_size_ = 0;
- const uint8_t* embedded_blob_metadata_ = nullptr;
- uint32_t embedded_blob_metadata_size_ = 0;
+ const uint8_t* embedded_blob_data_ = nullptr;
+ uint32_t embedded_blob_data_size_ = 0;
v8::ArrayBuffer::Allocator* array_buffer_allocator_ = nullptr;
std::shared_ptr<v8::ArrayBuffer::Allocator> array_buffer_allocator_shared_;
@@ -1952,7 +2031,7 @@ class V8_EXPORT_PRIVATE SaveContext {
Handle<Context> context() { return context_; }
// Returns true if this save context is below a given JavaScript frame.
- bool IsBelowFrame(StandardFrame* frame);
+ bool IsBelowFrame(CommonFrame* frame);
private:
Isolate* const isolate_;
@@ -2018,6 +2097,7 @@ class StackLimitCheck {
StackGuard* stack_guard = isolate_->stack_guard();
return GetCurrentStackPosition() < stack_guard->real_climit();
}
+ static bool HasOverflowed(LocalIsolate* local_isolate);
// Use this to check for interrupt request in C++ code.
bool InterruptRequested() {
diff --git a/deps/v8/src/execution/local-isolate-inl.h b/deps/v8/src/execution/local-isolate-inl.h
index 3f61f6716c..318cc10fa4 100644
--- a/deps/v8/src/execution/local-isolate-inl.h
+++ b/deps/v8/src/execution/local-isolate-inl.h
@@ -13,11 +13,11 @@ namespace v8 {
namespace internal {
Address LocalIsolate::isolate_root() const { return isolate_->isolate_root(); }
-ReadOnlyHeap* LocalIsolate::read_only_heap() {
+ReadOnlyHeap* LocalIsolate::read_only_heap() const {
return isolate_->read_only_heap();
}
-Object LocalIsolate::root(RootIndex index) {
+Object LocalIsolate::root(RootIndex index) const {
DCHECK(RootsTable::IsImmortalImmovable(index));
return isolate_->root(index);
}
diff --git a/deps/v8/src/execution/local-isolate.cc b/deps/v8/src/execution/local-isolate.cc
index bba871c35b..77733907f8 100644
--- a/deps/v8/src/execution/local-isolate.cc
+++ b/deps/v8/src/execution/local-isolate.cc
@@ -12,12 +12,15 @@
namespace v8 {
namespace internal {
-LocalIsolate::LocalIsolate(Isolate* isolate)
+LocalIsolate::LocalIsolate(Isolate* isolate, ThreadKind kind)
: HiddenLocalFactory(isolate),
- heap_(isolate->heap()),
+ heap_(isolate->heap(), kind),
isolate_(isolate),
logger_(new LocalLogger(isolate)),
- thread_id_(ThreadId::Current()) {}
+ thread_id_(ThreadId::Current()),
+ stack_limit_(kind == ThreadKind::kMain
+ ? isolate->stack_guard()->real_climit()
+ : GetCurrentStackPosition() - FLAG_stack_size * KB) {}
LocalIsolate::~LocalIsolate() = default;
@@ -29,10 +32,15 @@ int LocalIsolate::GetNextUniqueSharedFunctionInfoId() {
}
#endif // V8_SFI_HAS_UNIQUE_ID
-bool LocalIsolate::is_collecting_type_profile() {
+bool LocalIsolate::is_collecting_type_profile() const {
// TODO(leszeks): Figure out if it makes sense to check this asynchronously.
return isolate_->is_collecting_type_profile();
}
+// static
+bool StackLimitCheck::HasOverflowed(LocalIsolate* local_isolate) {
+ return GetCurrentStackPosition() < local_isolate->stack_limit();
+}
+
} // namespace internal
} // namespace v8
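A hypothetical background-thread sketch tying the new ThreadKind parameter to the stack-limit check defined above: for ThreadKind::kBackground the limit derives from the current stack position minus FLAG_stack_size KB.

void RunOffThread(Isolate* isolate) {
  LocalIsolate local_isolate(isolate, ThreadKind::kBackground);
  if (StackLimitCheck::HasOverflowed(&local_isolate)) return;
  // ... off-thread parsing or compilation work ...
}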
diff --git a/deps/v8/src/execution/local-isolate.h b/deps/v8/src/execution/local-isolate.h
index 1420ae7311..7cfa156fb7 100644
--- a/deps/v8/src/execution/local-isolate.h
+++ b/deps/v8/src/execution/local-isolate.h
@@ -36,7 +36,7 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
public:
using HandleScopeType = LocalHandleScope;
- explicit LocalIsolate(Isolate* isolate);
+ explicit LocalIsolate(Isolate* isolate, ThreadKind kind);
~LocalIsolate();
// Kinda sketchy.
@@ -48,12 +48,10 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
LocalHeap* heap() { return &heap_; }
inline Address isolate_root() const;
- inline ReadOnlyHeap* read_only_heap();
- inline Object root(RootIndex index);
+ inline ReadOnlyHeap* read_only_heap() const;
+ inline Object root(RootIndex index) const;
- StringTable* string_table() { return isolate_->string_table(); }
-
- const Isolate* GetIsolateForPtrCompr() const { return isolate_; }
+ StringTable* string_table() const { return isolate_->string_table(); }
v8::internal::LocalFactory* factory() {
// Upcast to the privately inherited base-class using c-style casts to avoid
@@ -77,10 +75,11 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
int GetNextUniqueSharedFunctionInfoId();
#endif // V8_SFI_HAS_UNIQUE_ID
- bool is_collecting_type_profile();
+ bool is_collecting_type_profile() const;
- LocalLogger* logger() { return logger_.get(); }
- ThreadId thread_id() { return thread_id_; }
+ LocalLogger* logger() const { return logger_.get(); }
+ ThreadId thread_id() const { return thread_id_; }
+ Address stack_limit() const { return stack_limit_; }
private:
friend class v8::internal::LocalFactory;
@@ -89,10 +88,11 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
// TODO(leszeks): Extract out the fields of the Isolate we want and store
// those instead of the whole thing.
- Isolate* isolate_;
+ Isolate* const isolate_;
std::unique_ptr<LocalLogger> logger_;
- ThreadId thread_id_;
+ ThreadId const thread_id_;
+ Address const stack_limit_;
};
} // namespace internal
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index ab6c6bc392..ea31dc3374 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -514,26 +514,6 @@ int JSStackFrame::GetColumnNumber() {
return kNone;
}
-int JSStackFrame::GetEnclosingLineNumber() {
- if (HasScript()) {
- Handle<SharedFunctionInfo> shared = handle(function_->shared(), isolate_);
- return Script::GetLineNumber(GetScript(),
- shared->function_token_position()) + 1;
- } else {
- return kNone;
- }
-}
-
-int JSStackFrame::GetEnclosingColumnNumber() {
- if (HasScript()) {
- Handle<SharedFunctionInfo> shared = handle(function_->shared(), isolate_);
- return Script::GetColumnNumber(GetScript(),
- shared->function_token_position()) + 1;
- } else {
- return kNone;
- }
-}
-
int JSStackFrame::GetPromiseIndex() const {
return (is_promise_all_ || is_promise_any_) ? offset_ : kNone;
}
@@ -622,12 +602,6 @@ int WasmStackFrame::GetPosition() const {
int WasmStackFrame::GetColumnNumber() { return GetModuleOffset(); }
-int WasmStackFrame::GetEnclosingColumnNumber() {
- const int function_offset =
- GetWasmFunctionOffset(wasm_instance_->module(), wasm_func_index_);
- return function_offset;
-}
-
int WasmStackFrame::GetModuleOffset() const {
const int function_offset =
GetWasmFunctionOffset(wasm_instance_->module(), wasm_func_index_);
@@ -698,26 +672,6 @@ int AsmJsWasmStackFrame::GetColumnNumber() {
return Script::GetColumnNumber(script, GetPosition()) + 1;
}
-int AsmJsWasmStackFrame::GetEnclosingLineNumber() {
- DCHECK_LE(0, GetPosition());
- Handle<Script> script(wasm_instance_->module_object().script(), isolate_);
- DCHECK(script->IsUserJavaScript());
- int byte_offset = GetSourcePosition(wasm_instance_->module(),
- wasm_func_index_, 0,
- is_at_number_conversion_);
- return Script::GetLineNumber(script, byte_offset) + 1;
-}
-
-int AsmJsWasmStackFrame::GetEnclosingColumnNumber() {
- DCHECK_LE(0, GetPosition());
- Handle<Script> script(wasm_instance_->module_object().script(), isolate_);
- DCHECK(script->IsUserJavaScript());
- int byte_offset = GetSourcePosition(wasm_instance_->module(),
- wasm_func_index_, 0,
- is_at_number_conversion_);
- return Script::GetColumnNumber(script, byte_offset) + 1;
-}
-
FrameArrayIterator::FrameArrayIterator(Isolate* isolate,
Handle<FrameArray> array, int frame_ix)
: isolate_(isolate), array_(array), frame_ix_(frame_ix) {}
@@ -1280,7 +1234,18 @@ Handle<String> BuildDefaultCallSite(Isolate* isolate, Handle<Object> object) {
builder.AppendString(Object::TypeOf(isolate, object));
if (object->IsString()) {
builder.AppendCString(" \"");
- builder.AppendString(Handle<String>::cast(object));
+ Handle<String> string = Handle<String>::cast(object);
+ // This threshold must be sufficiently far below String::kMaxLength that
+ // the {builder}'s result can never exceed that limit.
+ constexpr int kMaxPrintedStringLength = 100;
+ if (string->length() <= kMaxPrintedStringLength) {
+ builder.AppendString(string);
+ } else {
+ string = isolate->factory()->NewProperSubString(string, 0,
+ kMaxPrintedStringLength);
+ builder.AppendString(string);
+ builder.AppendCString("<...>");
+ }
builder.AppendCString("\"");
} else if (object->IsNull(isolate)) {
builder.AppendCString(" ");
@@ -1337,13 +1302,12 @@ MessageTemplate UpdateErrorTemplate(CallPrinter::ErrorHint hint,
case CallPrinter::ErrorHint::kNone:
return default_id;
}
- return default_id;
}
} // namespace
-Handle<Object> ErrorUtils::NewIteratorError(Isolate* isolate,
- Handle<Object> source) {
+Handle<JSObject> ErrorUtils::NewIteratorError(Isolate* isolate,
+ Handle<Object> source) {
MessageLocation location;
CallPrinter::ErrorHint hint = CallPrinter::kNone;
Handle<String> callsite = RenderCallSite(isolate, source, &location, &hint);
@@ -1387,13 +1351,13 @@ Object ErrorUtils::ThrowSpreadArgError(Isolate* isolate, MessageTemplate id,
}
}
- Handle<Object> exception =
- isolate->factory()->NewTypeError(id, callsite, object);
- return isolate->Throw(*exception, &location);
+ isolate->ThrowAt(isolate->factory()->NewTypeError(id, callsite, object),
+ &location);
+ return ReadOnlyRoots(isolate).exception();
}
-Handle<Object> ErrorUtils::NewCalledNonCallableError(Isolate* isolate,
- Handle<Object> source) {
+Handle<JSObject> ErrorUtils::NewCalledNonCallableError(Isolate* isolate,
+ Handle<Object> source) {
MessageLocation location;
CallPrinter::ErrorHint hint = CallPrinter::kNone;
Handle<String> callsite = RenderCallSite(isolate, source, &location, &hint);
@@ -1402,7 +1366,7 @@ Handle<Object> ErrorUtils::NewCalledNonCallableError(Isolate* isolate,
return isolate->factory()->NewTypeError(id, callsite);
}
-Handle<Object> ErrorUtils::NewConstructedNonConstructable(
+Handle<JSObject> ErrorUtils::NewConstructedNonConstructable(
Isolate* isolate, Handle<Object> source) {
MessageLocation location;
CallPrinter::ErrorHint hint = CallPrinter::kNone;
@@ -1412,10 +1376,6 @@ Handle<Object> ErrorUtils::NewConstructedNonConstructable(
}
Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
- Handle<Object> object) {
- return ThrowLoadFromNullOrUndefined(isolate, object, MaybeHandle<Object>());
-}
-Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
Handle<Object> object,
MaybeHandle<Object> key) {
DCHECK(object->IsNullOrUndefined());
@@ -1487,7 +1447,7 @@ Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
callsite = BuildDefaultCallSite(isolate, object);
}
- Handle<Object> error;
+ Handle<JSObject> error;
Handle<String> property_name;
if (is_destructuring) {
if (maybe_property_name.ToHandle(&property_name)) {
@@ -1511,7 +1471,12 @@ Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
}
}
- return isolate->Throw(*error, location_computed ? &location : nullptr);
+ if (location_computed) {
+ isolate->ThrowAt(error, &location);
+ } else {
+ isolate->Throw(*error);
+ }
+ return ReadOnlyRoots(isolate).exception();
}
} // namespace internal
diff --git a/deps/v8/src/execution/messages.h b/deps/v8/src/execution/messages.h
index ad72d762d2..4aab728f7c 100644
--- a/deps/v8/src/execution/messages.h
+++ b/deps/v8/src/execution/messages.h
@@ -87,9 +87,6 @@ class StackFrameBase {
// Return 0-based Wasm function index. Returns -1 for non-Wasm frames.
virtual int GetWasmFunctionIndex();
- virtual int GetEnclosingColumnNumber() = 0;
- virtual int GetEnclosingLineNumber() = 0;
-
// Returns the index of the rejected promise in the Promise combinator input,
// or -1 if this frame is not a Promise combinator frame.
virtual int GetPromiseIndex() const = 0;
@@ -136,9 +133,6 @@ class JSStackFrame : public StackFrameBase {
int GetLineNumber() override;
int GetColumnNumber() override;
- int GetEnclosingColumnNumber() override;
- int GetEnclosingLineNumber() override;
-
int GetPromiseIndex() const override;
bool IsNative() override;
@@ -189,8 +183,6 @@ class WasmStackFrame : public StackFrameBase {
int GetPosition() const override;
int GetLineNumber() override { return 0; }
int GetColumnNumber() override;
- int GetEnclosingColumnNumber() override;
- int GetEnclosingLineNumber() override { return 0; }
int GetWasmFunctionIndex() override { return wasm_func_index_; }
int GetPromiseIndex() const override { return GetPosition(); }
@@ -239,9 +231,6 @@ class AsmJsWasmStackFrame : public WasmStackFrame {
int GetLineNumber() override;
int GetColumnNumber() override;
- int GetEnclosingColumnNumber() override;
- int GetEnclosingLineNumber() override;
-
private:
friend class FrameArrayIterator;
AsmJsWasmStackFrame() = default;
@@ -308,16 +297,16 @@ class ErrorUtils : public AllStatic {
Handle<JSObject> error,
Handle<Object> stack_trace);
- static Handle<Object> NewIteratorError(Isolate* isolate,
- Handle<Object> source);
- static Handle<Object> NewCalledNonCallableError(Isolate* isolate,
- Handle<Object> source);
- static Handle<Object> NewConstructedNonConstructable(Isolate* isolate,
- Handle<Object> source);
+ static Handle<JSObject> NewIteratorError(Isolate* isolate,
+ Handle<Object> source);
+ static Handle<JSObject> NewCalledNonCallableError(Isolate* isolate,
+ Handle<Object> source);
+ static Handle<JSObject> NewConstructedNonConstructable(Isolate* isolate,
+ Handle<Object> source);
+ // Returns the Exception sentinel.
static Object ThrowSpreadArgError(Isolate* isolate, MessageTemplate id,
Handle<Object> object);
- static Object ThrowLoadFromNullOrUndefined(Isolate* isolate,
- Handle<Object> object);
+ // Returns the Exception sentinel.
static Object ThrowLoadFromNullOrUndefined(Isolate* isolate,
Handle<Object> object,
MaybeHandle<Object> key);
diff --git a/deps/v8/src/execution/ppc/frame-constants-ppc.h b/deps/v8/src/execution/ppc/frame-constants-ppc.h
index 0931ffe101..d29bd8c450 100644
--- a/deps/v8/src/execution/ppc/frame-constants-ppc.h
+++ b/deps/v8/src/execution/ppc/frame-constants-ppc.h
@@ -15,7 +15,9 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
// Need to take constant pool into account.
- static constexpr int kCallerFPOffset = -4 * kSystemPointerSize;
+ static constexpr int kCallerFPOffset = FLAG_enable_embedded_constant_pool
+ ? -4 * kSystemPointerSize
+ : -3 * kSystemPointerSize;
};
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
diff --git a/deps/v8/src/execution/runtime-profiler.cc b/deps/v8/src/execution/runtime-profiler.cc
index 686fa23751..b7b8f5963c 100644
--- a/deps/v8/src/execution/runtime-profiler.cc
+++ b/deps/v8/src/execution/runtime-profiler.cc
@@ -92,8 +92,20 @@ void TraceHeuristicOptimizationDisallowed(JSFunction function) {
}
}
+// TODO(jgruber): Remove this once we include this tracing with --trace-opt.
+void TraceNCIRecompile(JSFunction function, OptimizationReason reason) {
+ if (FLAG_trace_turbo_nci) {
+ StdoutStream os;
+ os << "NCI tierup mark: " << Brief(function) << ", "
+ << OptimizationReasonToString(reason) << std::endl;
+ }
+}
+
void TraceRecompile(JSFunction function, OptimizationReason reason,
- Isolate* isolate) {
+ CodeKind code_kind, Isolate* isolate) {
+ if (code_kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT) {
+ TraceNCIRecompile(function, reason);
+ }
if (FLAG_trace_opt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[marking ");
@@ -104,22 +116,15 @@ void TraceRecompile(JSFunction function, OptimizationReason reason,
}
}
-void TraceNCIRecompile(JSFunction function, OptimizationReason reason) {
- if (FLAG_trace_turbo_nci) {
- StdoutStream os;
- os << "NCI tierup mark: " << Brief(function) << ", "
- << OptimizationReasonToString(reason) << std::endl;
- }
-}
-
} // namespace
RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
: isolate_(isolate), any_ic_changed_(false) {}
-void RuntimeProfiler::Optimize(JSFunction function, OptimizationReason reason) {
+void RuntimeProfiler::Optimize(JSFunction function, OptimizationReason reason,
+ CodeKind code_kind) {
DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
- TraceRecompile(function, reason, isolate_);
+ TraceRecompile(function, reason, code_kind, isolate_);
function.MarkForOptimization(ConcurrencyMode::kConcurrent);
}
@@ -150,43 +155,15 @@ void RuntimeProfiler::AttemptOnStackReplacement(InterpretedFrame* frame,
Min(level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker));
}
-void RuntimeProfiler::MaybeOptimizeInterpretedFrame(JSFunction function,
- InterpretedFrame* frame) {
+void RuntimeProfiler::MaybeOptimizeFrame(JSFunction function,
+ JavaScriptFrame* frame,
+ CodeKind code_kind) {
+ DCHECK(CodeKindCanTierUp(code_kind));
if (function.IsInOptimizationQueue()) {
TraceInOptimizationQueue(function);
return;
}
- if (FLAG_testing_d8_test_runner &&
- !PendingOptimizationTable::IsHeuristicOptimizationAllowed(isolate_,
- function)) {
- TraceHeuristicOptimizationDisallowed(function);
- return;
- }
-
- if (function.shared().optimization_disabled()) return;
-
- if (FLAG_always_osr) {
- AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
- // Fall through and do a normal optimized compile as well.
- } else if (MaybeOSR(function, frame)) {
- return;
- }
- OptimizationReason reason =
- ShouldOptimize(function, function.shared().GetBytecodeArray());
-
- if (reason != OptimizationReason::kDoNotOptimize) {
- Optimize(function, reason);
- }
-}
-
-void RuntimeProfiler::MaybeOptimizeNCIFrame(JSFunction function) {
- DCHECK_EQ(function.code().kind(), CodeKind::NATIVE_CONTEXT_INDEPENDENT);
-
- if (function.IsInOptimizationQueue()) {
- TraceInOptimizationQueue(function);
- return;
- }
if (FLAG_testing_d8_test_runner &&
!PendingOptimizationTable::IsHeuristicOptimizationAllowed(isolate_,
function)) {
@@ -196,15 +173,24 @@ void RuntimeProfiler::MaybeOptimizeNCIFrame(JSFunction function) {
if (function.shared().optimization_disabled()) return;
- // Note: We currently do not trigger OSR compilation from NCI code.
+ // Note: We currently do not trigger OSR compilation from NCI or TP code.
// TODO(jgruber,v8:8888): But we should.
+ if (frame->is_interpreted()) {
+ DCHECK_EQ(code_kind, CodeKind::INTERPRETED_FUNCTION);
+ if (FLAG_always_osr) {
+ AttemptOnStackReplacement(InterpretedFrame::cast(frame),
+ AbstractCode::kMaxLoopNestingMarker);
+ // Fall through and do a normal optimized compile as well.
+ } else if (MaybeOSR(function, InterpretedFrame::cast(frame))) {
+ return;
+ }
+ }
OptimizationReason reason =
ShouldOptimize(function, function.shared().GetBytecodeArray());
if (reason != OptimizationReason::kDoNotOptimize) {
- TraceNCIRecompile(function, reason);
- Optimize(function, reason);
+ Optimize(function, reason, code_kind);
}
}
@@ -224,6 +210,9 @@ bool RuntimeProfiler::MaybeOSR(JSFunction function, InterpretedFrame* frame) {
function.HasAvailableOptimizedCode()) {
// Attempt OSR if we are still running interpreted code even though
// the function has long been marked or even already been optimized.
+ // TODO(turboprop, mythria): Currently we don't tier up from Turboprop code
+ // to Turbofan OSR code. When we start supporting this, the ticks have to be
+ // scaled accordingly.
int64_t allowance =
kOSRBytecodeSizeAllowanceBase +
static_cast<int64_t>(ticks) * kOSRBytecodeSizeAllowancePerTick;
@@ -240,22 +229,31 @@ OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction function,
if (function.ActiveTierIsTurbofan()) {
return OptimizationReason::kDoNotOptimize;
}
+ if (V8_UNLIKELY(FLAG_turboprop) && function.ActiveTierIsToptierTurboprop()) {
+ return OptimizationReason::kDoNotOptimize;
+ }
int ticks = function.feedback_vector().profiler_ticks();
+ int scale_factor = function.ActiveTierIsMidtierTurboprop()
+ ? FLAG_ticks_scale_factor_for_top_tier
+ : 1;
int ticks_for_optimization =
kProfilerTicksBeforeOptimization +
(bytecode.length() / kBytecodeSizeAllowancePerTick);
+ ticks_for_optimization *= scale_factor;
if (ticks >= ticks_for_optimization) {
return OptimizationReason::kHotAndStable;
} else if (!any_ic_changed_ &&
bytecode.length() < kMaxBytecodeSizeForEarlyOpt) {
+ // TODO(turboprop, mythria): Do we need to support small function
+ // optimization for TP->TF tier up? If so, do we want to scale the bytecode
+ // size?
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
return OptimizationReason::kSmallFunction;
} else if (FLAG_trace_opt_verbose) {
PrintF("[not yet optimizing ");
function.PrintName();
- PrintF(", not enough ticks: %d/%d and ", ticks,
- kProfilerTicksBeforeOptimization);
+ PrintF(", not enough ticks: %d/%d and ", ticks, ticks_for_optimization);
if (any_ic_changed_) {
PrintF("ICs changed]\n");
} else {
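A compact restatement of the scaled threshold computed above (helper name hypothetical; the constants are V8-internal):

int ScaledTicksForOptimization(int base_ticks, int bytecode_length,
                               int bytes_per_tick, int scale_factor) {
  // scale_factor is FLAG_ticks_scale_factor_for_top_tier (default 10) for
  // mid-tier Turboprop frames and 1 otherwise.
  return (base_ticks + bytecode_length / bytes_per_tick) * scale_factor;
}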
@@ -293,7 +291,7 @@ void RuntimeProfiler::MarkCandidatesForOptimizationFromBytecode() {
if (!function.has_feedback_vector()) continue;
- MaybeOptimizeInterpretedFrame(function, InterpretedFrame::cast(frame));
+ MaybeOptimizeFrame(function, frame, CodeKind::INTERPRETED_FUNCTION);
// TODO(leszeks): Move this increment to before the maybe optimize checks,
// and update the tests to assume the increment has already happened.
@@ -311,7 +309,8 @@ void RuntimeProfiler::MarkCandidatesForOptimizationFromCode() {
if (!frame->is_optimized()) continue;
JSFunction function = frame->function();
- if (function.code().kind() != CodeKind::NATIVE_CONTEXT_INDEPENDENT) {
+ auto code_kind = function.code().kind();
+ if (!CodeKindIsOptimizedAndCanTierUp(code_kind)) {
continue;
}
@@ -320,7 +319,7 @@ void RuntimeProfiler::MarkCandidatesForOptimizationFromCode() {
function.feedback_vector().SaturatingIncrementProfilerTicks();
- MaybeOptimizeNCIFrame(function);
+ MaybeOptimizeFrame(function, frame, code_kind);
}
}
diff --git a/deps/v8/src/execution/runtime-profiler.h b/deps/v8/src/execution/runtime-profiler.h
index d7125ef73c..b4207d03f9 100644
--- a/deps/v8/src/execution/runtime-profiler.h
+++ b/deps/v8/src/execution/runtime-profiler.h
@@ -15,7 +15,9 @@ namespace internal {
class BytecodeArray;
class Isolate;
class InterpretedFrame;
+class JavaScriptFrame;
class JSFunction;
+enum class CodeKind;
enum class OptimizationReason : uint8_t;
class RuntimeProfiler {
@@ -35,15 +37,16 @@ class RuntimeProfiler {
private:
// Make the decision whether to optimize the given function, and mark it for
// optimization if the decision was 'yes'.
- void MaybeOptimizeNCIFrame(JSFunction function);
- void MaybeOptimizeInterpretedFrame(JSFunction function,
- InterpretedFrame* frame);
+ void MaybeOptimizeFrame(JSFunction function, JavaScriptFrame* frame,
+ CodeKind code_kind);
+
// Potentially attempts OSR from the given frame and returns whether no
// other optimization attempts should be made.
bool MaybeOSR(JSFunction function, InterpretedFrame* frame);
OptimizationReason ShouldOptimize(JSFunction function,
BytecodeArray bytecode_array);
- void Optimize(JSFunction function, OptimizationReason reason);
+ void Optimize(JSFunction function, OptimizationReason reason,
+ CodeKind code_kind);
void Baseline(JSFunction function, OptimizationReason reason);
class MarkCandidatesForOptimizationScope final {
diff --git a/deps/v8/src/execution/s390/simulator-s390.cc b/deps/v8/src/execution/s390/simulator-s390.cc
index 3c30c87583..a9fc318e4b 100644
--- a/deps/v8/src/execution/s390/simulator-s390.cc
+++ b/deps/v8/src/execution/s390/simulator-s390.cc
@@ -3405,9 +3405,10 @@ EVALUATE(VPKLS) {
template <class S, class D>
void VectorUnpackHigh(void* dst, void* src) {
+ constexpr size_t kItemCount = kSimd128Size / sizeof(D);
D value = 0;
- for (size_t i = 0; i < kSimd128Size / sizeof(D); i++) {
- value = *(reinterpret_cast<S*>(src) + i);
+ for (size_t i = 0; i < kItemCount; i++) {
+ value = *(reinterpret_cast<S*>(src) + i + kItemCount);
memcpy(reinterpret_cast<D*>(dst) + i, &value, sizeof(D));
}
}
@@ -3462,11 +3463,14 @@ EVALUATE(VUPLH) {
template <class S, class D>
void VectorUnpackLow(void* dst, void* src) {
- D value = 0;
- size_t count = kSimd128Size / sizeof(D);
- for (size_t i = 0; i < count; i++) {
- value = *(reinterpret_cast<S*>(src) + i + count);
- memcpy(reinterpret_cast<D*>(dst) + i, &value, sizeof(D));
+ constexpr size_t kItemCount = kSimd128Size / sizeof(D);
+ D temps[kItemCount] = {0};
+ // Buffer the source lanes in temporaries to avoid overwriting them when
+ // src and dst are the same register.
+ for (size_t i = 0; i < kItemCount; i++) {
+ temps[i] = static_cast<D>(*(reinterpret_cast<S*>(src) + i));
+ }
+ for (size_t i = 0; i < kItemCount; i++) {
+ memcpy(reinterpret_cast<D*>(dst) + i, &temps[i], sizeof(D));
}
}
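The temporaries above matter when dst aliases src: with D wider than S, writing lane i clobbers source lanes i+1 and up before they are read. A plain C++ illustration of the buffered pattern (values and sizes assumed):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  int8_t buf[16] = {1, 2, 3, 4, 5, 6, 7, 8};      // dst aliases src
  int16_t temps[8];
  for (int i = 0; i < 8; i++) temps[i] = buf[i];  // read all lanes first
  std::memcpy(buf, temps, sizeof(temps));         // then write widened lanes
  int16_t lane1;
  std::memcpy(&lane1, buf + 2, sizeof(lane1));
  std::printf("%d\n", lane1);  // prints 2: lane 1 survived the in-place widen
}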
@@ -3742,15 +3746,14 @@ EVALUATE(VPERM) {
USE(m6);
for (int i = 0; i < kSimd128Size; i++) {
int8_t lane_num = get_simd_register_by_lane<int8_t>(r4, i);
+ // Get the five least significant bits.
+ lane_num = (lane_num << 3) >> 3;
int reg = r2;
if (lane_num >= kSimd128Size) {
lane_num = lane_num - kSimd128Size;
reg = r3;
}
- int8_t result = 0;
- if (lane_num >= 0 && lane_num < kSimd128Size * 2) {
- result = get_simd_register_by_lane<int8_t>(reg, lane_num);
- }
+ int8_t result = get_simd_register_by_lane<int8_t>(reg, lane_num);
set_simd_register_by_lane<int8_t>(r1, i, result);
}
return length;
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index fd0cf91333..de288fe2f5 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -82,7 +82,7 @@ void InvokeGC(v8::Isolate* isolate, v8::Isolate::GarbageCollectionType type,
kGCCallbackFlagForced);
break;
case v8::Isolate::GarbageCollectionType::kFullGarbageCollection:
- heap->SetEmbedderStackStateForNextFinalizaton(embedder_stack_state);
+ heap->SetEmbedderStackStateForNextFinalization(embedder_stack_state);
heap->PreciseCollectAllGarbage(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting,
kGCCallbackFlagForced);
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index ab689283e9..00fcf712d6 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -242,8 +242,6 @@ DEFINE_BOOL(es_staging, false,
DEFINE_BOOL(harmony, false, "enable all completed harmony features")
DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
DEFINE_IMPLICATION(es_staging, harmony)
-// Enabling import.meta requires to also enable import()
-DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
// Enabling FinalizationRegistry#cleanupSome also enables weak refs
DEFINE_IMPLICATION(harmony_weak_refs_with_cleanup_some, harmony_weak_refs)
@@ -254,7 +252,8 @@ DEFINE_IMPLICATION(harmony_weak_refs_with_cleanup_some, harmony_weak_refs)
V(harmony_regexp_sequence, "RegExp Unicode sequence properties") \
V(harmony_weak_refs_with_cleanup_some, \
"harmony weak references with FinalizationRegistry.prototype.cleanupSome") \
- V(harmony_regexp_match_indices, "harmony regexp match indices")
+ V(harmony_regexp_match_indices, "harmony regexp match indices") \
+ V(harmony_import_assertions, "harmony import assertions")
#ifdef V8_INTL_SUPPORT
#define HARMONY_INPROGRESS(V) \
@@ -279,13 +278,8 @@ DEFINE_IMPLICATION(harmony_weak_refs_with_cleanup_some, harmony_weak_refs)
// Features that are shipping (turned on by default, but internal flag remains).
#define HARMONY_SHIPPING_BASE(V) \
- V(harmony_namespace_exports, \
- "harmony namespace exports (export * as foo from 'bar')") \
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
V(harmony_atomics, "harmony atomics") \
- V(harmony_import_meta, "harmony import.meta property") \
- V(harmony_dynamic_import, "harmony dynamic import") \
- V(harmony_promise_all_settled, "harmony Promise.allSettled") \
V(harmony_promise_any, "harmony Promise.any") \
V(harmony_private_methods, "harmony private methods in class literals") \
V(harmony_weak_refs, "harmony weak references") \
@@ -365,6 +359,10 @@ DEFINE_IMPLICATION(lite_mode, optimize_for_size)
#define V8_ENABLE_THIRD_PARTY_HEAP_BOOL false
#endif
+DEFINE_NEG_IMPLICATION(enable_third_party_heap, inline_new)
+DEFINE_NEG_IMPLICATION(enable_third_party_heap, allocation_site_pretenuring)
+DEFINE_NEG_IMPLICATION(enable_third_party_heap, turbo_allocation_folding)
+
DEFINE_BOOL_READONLY(enable_third_party_heap, V8_ENABLE_THIRD_PARTY_HEAP_BOOL,
"Use third-party heap")
@@ -425,6 +423,7 @@ DEFINE_BOOL(future, FUTURE_BOOL,
DEFINE_WEAK_IMPLICATION(future, write_protect_code_memory)
DEFINE_WEAK_IMPLICATION(future, finalize_streaming_on_background)
+DEFINE_WEAK_IMPLICATION(future, super_ic)
// Flags for jitless
DEFINE_BOOL(jitless, V8_LITE_BOOL,
@@ -449,6 +448,10 @@ DEFINE_NEG_IMPLICATION(jitless, interpreted_frames_native_stack)
DEFINE_BOOL(assert_types, false,
"generate runtime type assertions to test the typer")
+DEFINE_BOOL(trace_code_dependencies, false, "trace code dependencies")
+// Depend on --trace-deopt-verbose for reporting dependency invalidations.
+DEFINE_IMPLICATION(trace_code_dependencies, trace_deopt_verbose)
+
// Flags for experimental implementation features.
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
@@ -548,15 +551,24 @@ DEFINE_BOOL(trace_generalization, false, "trace map generalization")
// Flags for TurboProp.
DEFINE_BOOL(turboprop, false, "enable experimental turboprop mid-tier compiler")
-DEFINE_BOOL(turboprop_mid_tier_reg_alloc, false,
- "enable experimental mid-tier register allocator")
-DEFINE_NEG_IMPLICATION(turboprop, turbo_inlining)
+DEFINE_BOOL(turboprop_mid_tier_reg_alloc, true,
+ "enable mid-tier register allocator for turboprop")
+DEFINE_BOOL(turboprop_dynamic_map_checks, false,
+ "use dynamic map checks when generating code for property accesses "
+ "if all handlers in an IC are the same for turboprop")
+DEFINE_BOOL(turboprop_as_midtier, false,
+ "enable experimental turboprop mid-tier compiler")
+DEFINE_IMPLICATION(turboprop_as_midtier, turboprop)
DEFINE_IMPLICATION(turboprop, concurrent_inlining)
DEFINE_VALUE_IMPLICATION(turboprop, interrupt_budget, 15 * KB)
DEFINE_VALUE_IMPLICATION(turboprop, reuse_opt_code_count, 2)
-DEFINE_IMPLICATION(turboprop, dynamic_map_checks)
DEFINE_UINT_READONLY(max_minimorphic_map_checks, 4,
"max number of map checks to perform in minimorphic state")
+// Since Turboprop uses a much lower interrupt budget, we need to wait for a
+// higher number of ticks before tiering up to Turbofan so that the overall
+// behavior roughly matches the default. The default of 10 is approximately
+// the ratio of the TP to TF interrupt budgets.
+DEFINE_INT(ticks_scale_factor_for_top_tier, 10,
+ "scale factor for profiler ticks when tiering up from midtier")
// Flags for concurrent recompilation.
DEFINE_BOOL(concurrent_recompilation, true,
@@ -723,9 +735,6 @@ DEFINE_BOOL(
DEFINE_BOOL(turbo_fast_api_calls, false, "enable fast API calls from TurboFan")
DEFINE_INT(reuse_opt_code_count, 0,
"don't discard optimized code for the specified number of deopts.")
-DEFINE_BOOL(dynamic_map_checks, false,
- "use dynamic map checks when generating code for property accesses "
- "if all handlers in an IC are the same")
// Native context independent (NCI) code.
DEFINE_BOOL(turbo_nci, false,
@@ -739,6 +748,18 @@ DEFINE_BOOL(print_nci_code, false, "print native context independent code.")
DEFINE_BOOL(trace_turbo_nci, false, "trace native context independent code.")
DEFINE_BOOL(turbo_collect_feedback_in_generic_lowering, true,
"enable experimental feedback collection in generic lowering.")
+// TODO(jgruber,v8:8888): Remove this flag once we've settled on a codegen
+// strategy.
+DEFINE_BOOL(turbo_nci_delayed_codegen, true,
+ "delay NCI codegen to reduce useless compilation work.")
+// TODO(jgruber,v8:8888): Remove this flag once we've settled on an ageing
+// strategy.
+DEFINE_BOOL(turbo_nci_cache_ageing, false,
+ "enable ageing of the NCI code cache.")
+// TODO(jgruber,v8:8888): Remove this flag once we've settled on an ageing
+// strategy.
+DEFINE_BOOL(isolate_script_cache_ageing, true,
+ "enable ageing of the isolate script cache.")
// Favor memory over execution speed.
DEFINE_BOOL(optimize_for_size, false,
@@ -793,7 +814,7 @@ DEFINE_INT(trace_wasm_ast_start, 0,
DEFINE_INT(trace_wasm_ast_end, 0, "end function for wasm AST trace (exclusive)")
DEFINE_BOOL(liftoff, true,
"enable Liftoff, the baseline compiler for WebAssembly")
-DEFINE_BOOL(liftoff_extern_ref, false,
+DEFINE_BOOL(experimental_liftoff_extern_ref, false,
"enable support for externref in Liftoff")
// We can't tier up (from Liftoff to TurboFan) in single-threaded mode, hence
// disable Liftoff in that configuration for now. The alternative is disabling
@@ -846,7 +867,7 @@ DEFINE_BOOL(wasm_staging, false, "enable staged wasm features")
FOREACH_WASM_STAGING_FEATURE_FLAG(WASM_STAGING_IMPLICATION)
#undef WASM_STAGING_IMPLICATION
-DEFINE_BOOL(wasm_opt, false, "enable wasm optimization")
+DEFINE_BOOL(wasm_opt, true, "enable wasm optimization")
DEFINE_BOOL(
wasm_bounds_checks, true,
"enable bounds checks (disable for performance testing only)")
@@ -861,8 +882,10 @@ DEFINE_BOOL(wasm_trap_handler, true,
DEFINE_BOOL(wasm_fuzzer_gen_test, false,
"generate a test case when running a wasm fuzzer")
DEFINE_IMPLICATION(wasm_fuzzer_gen_test, single_threaded)
-DEFINE_BOOL(print_wasm_code, false, "Print WebAssembly code")
-DEFINE_BOOL(print_wasm_stub_code, false, "Print WebAssembly stub code")
+DEFINE_BOOL(print_wasm_code, false, "print WebAssembly code")
+DEFINE_INT(print_wasm_code_function_index, -1,
+ "print WebAssembly code for function at index")
+DEFINE_BOOL(print_wasm_stub_code, false, "print WebAssembly stub code")
DEFINE_BOOL(asm_wasm_lazy_compilation, false,
"enable lazy compilation for asm-wasm modules")
DEFINE_IMPLICATION(validate_asm, asm_wasm_lazy_compilation)
@@ -873,10 +896,6 @@ DEFINE_DEBUG_BOOL(trace_wasm_lazy_compilation, false,
DEFINE_BOOL(wasm_lazy_validation, false,
"enable lazy validation for lazily compiled wasm functions")
-// Flags for wasm prototyping that are not strictly features i.e., part of
-// an existing proposal that may be conditionally enabled.
-DEFINE_BOOL(wasm_atomics_on_non_shared_memory, true,
- "allow atomic operations on non-shared WebAssembly memory")
DEFINE_BOOL(wasm_grow_shared_memory, true,
"allow growing shared WebAssembly memory objects")
DEFINE_BOOL(wasm_simd_post_mvp, false,
@@ -990,7 +1009,7 @@ DEFINE_BOOL(scavenge_separate_stack_scanning, false,
"use a separate phase for stack scanning in scavenge")
DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
DEFINE_BOOL(write_protect_code_memory, true, "write protect code memory")
-#ifdef V8_CONCURRENT_MARKING
+#if defined(V8_ATOMIC_MARKING_STATE) && defined(V8_ATOMIC_OBJECT_FIELD_WRITES)
#define V8_CONCURRENT_MARKING_BOOL true
#else
#define V8_CONCURRENT_MARKING_BOOL false
@@ -1388,6 +1407,13 @@ DEFINE_BOOL(log_colour, ENABLE_LOG_COLOUR,
DEFINE_BOOL(trace_sim_messages, false,
"Trace simulator debug messages. Implied by --trace-sim.")
+#if defined V8_TARGET_ARCH_ARM64
+// pointer-auth-arm64.cc
+DEFINE_DEBUG_BOOL(sim_abort_on_bad_auth, false,
+ "Stop execution when a pointer authentication fails in the "
+ "ARM64 simulator.")
+#endif
+
// isolate.cc
DEFINE_BOOL(async_stack_traces, true,
"include async stack traces in Error.stack")
@@ -1443,13 +1469,6 @@ DEFINE_BOOL(profile_deserialization, false,
"Print the time it takes to deserialize the snapshot.")
DEFINE_BOOL(serialization_statistics, false,
"Collect statistics on serialized objects.")
-#ifdef V8_ENABLE_THIRD_PARTY_HEAP
-DEFINE_UINT_READONLY(serialization_chunk_size, 1,
- "Custom size for serialization chunks")
-#else
-DEFINE_UINT(serialization_chunk_size, 4096,
- "Custom size for serialization chunks")
-#endif
// Regexp
DEFINE_BOOL(regexp_optimization, true, "generate optimized regexp code")
DEFINE_BOOL(regexp_mode_modifiers, false, "enable inline flags in regexp.")
@@ -1476,10 +1495,22 @@ DEFINE_BOOL(trace_regexp_parser, false, "trace regexp parsing")
DEFINE_BOOL(trace_regexp_tier_up, false, "trace regexp tiering up execution")
DEFINE_BOOL(enable_experimental_regexp_engine, false,
- "enable experimental linear time regexp engine")
+ "recognize regexps with 'l' flag, run them on experimental engine")
+DEFINE_BOOL(default_to_experimental_regexp_engine, false,
+ "run regexps with the experimental engine where possible")
+DEFINE_IMPLICATION(default_to_experimental_regexp_engine,
+ enable_experimental_regexp_engine)
DEFINE_BOOL(trace_experimental_regexp_engine, false,
"trace execution of experimental regexp engine")
+DEFINE_BOOL(enable_experimental_regexp_engine_on_excessive_backtracks, false,
+ "fall back to a breadth-first regexp engine on excessive "
+ "backtracking")
+DEFINE_UINT(regexp_backtracks_before_fallback, 50000,
+ "number of backtracks during regexp execution before fall back "
+ "to experimental engine if "
+ "enable_experimental_regexp_engine_on_excessive_backtracks is set")
+
// Testing flags test/cctest/test-{flags,api,serialization}.cc
DEFINE_BOOL(testing_bool_flag, true, "testing_bool_flag")
DEFINE_MAYBE_BOOL(testing_maybe_bool_flag, "testing_maybe_bool_flag")
@@ -1520,6 +1551,11 @@ DEFINE_STRING(turbo_profiling_log_file, nullptr,
"Path of the input file containing basic block counters for "
"builtins. (mksnapshot only)")
+// On some platforms, the .text section only has execute permissions.
+DEFINE_BOOL(text_is_readable, true,
+ "Whether the .text section of binary can be read")
+DEFINE_NEG_NEG_IMPLICATION(text_is_readable, partial_constant_pool)
+
//
// Minor mark compact collector flags.
//
diff --git a/deps/v8/src/handles/DIR_METADATA b/deps/v8/src/handles/DIR_METADATA
new file mode 100644
index 0000000000..ff55846b31
--- /dev/null
+++ b/deps/v8/src/handles/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>GC"
+}
\ No newline at end of file
diff --git a/deps/v8/src/handles/OWNERS b/deps/v8/src/handles/OWNERS
index 75a534ce98..4df0a2548a 100644
--- a/deps/v8/src/handles/OWNERS
+++ b/deps/v8/src/handles/OWNERS
@@ -3,5 +3,3 @@ ishell@chromium.org
jkummerow@chromium.org
mlippautz@chromium.org
ulan@chromium.org
-
-# COMPONENT: Blink>JavaScript>GC
diff --git a/deps/v8/src/handles/global-handles.cc b/deps/v8/src/handles/global-handles.cc
index 7a91116ac1..1782514d6e 100644
--- a/deps/v8/src/handles/global-handles.cc
+++ b/deps/v8/src/handles/global-handles.cc
@@ -5,6 +5,7 @@
#include "src/handles/global-handles.h"
#include <algorithm>
+#include <cstdint>
#include <map>
#include "src/api/api-inl.h"
@@ -79,9 +80,8 @@ class GlobalHandles::NodeBlock final {
template <class NodeType>
const GlobalHandles::NodeBlock<NodeType>*
GlobalHandles::NodeBlock<NodeType>::From(const NodeType* node) {
- uintptr_t ptr = reinterpret_cast<const uintptr_t>(node) -
- sizeof(NodeType) * node->index();
- const BlockType* block = reinterpret_cast<const BlockType*>(ptr);
+ const NodeType* firstNode = node - node->index();
+ const BlockType* block = reinterpret_cast<const BlockType*>(firstNode);
DCHECK_EQ(node, block->at(node->index()));
return block;
}
@@ -89,9 +89,8 @@ GlobalHandles::NodeBlock<NodeType>::From(const NodeType* node) {
template <class NodeType>
GlobalHandles::NodeBlock<NodeType>* GlobalHandles::NodeBlock<NodeType>::From(
NodeType* node) {
- uintptr_t ptr =
- reinterpret_cast<uintptr_t>(node) - sizeof(NodeType) * node->index();
- BlockType* block = reinterpret_cast<BlockType*>(ptr);
+ NodeType* firstNode = node - node->index();
+ BlockType* block = reinterpret_cast<BlockType*>(firstNode);
DCHECK_EQ(node, block->at(node->index()));
return block;
}
@@ -381,7 +380,7 @@ namespace {
void ExtractInternalFields(JSObject jsobject, void** embedder_fields, int len) {
int field_count = jsobject.GetEmbedderFieldCount();
- const Isolate* isolate = GetIsolateForPtrCompr(jsobject);
+ IsolateRoot isolate = GetIsolateForPtrCompr(jsobject);
for (int i = 0; i < len; ++i) {
if (field_count == i) break;
void* pointer;
@@ -748,14 +747,10 @@ class GlobalHandles::OnStackTracedNodeSpace final {
void SetStackStart(void* stack_start) {
CHECK(on_stack_nodes_.empty());
- stack_start_ =
- GetStackAddressForSlot(reinterpret_cast<uintptr_t>(stack_start));
+ stack_start_ = base::Stack::GetRealStackAddressForSlot(stack_start);
}
- bool IsOnStack(uintptr_t slot) const {
- const uintptr_t address = GetStackAddressForSlot(slot);
- return stack_start_ >= address && address > GetCurrentStackPosition();
- }
+ V8_INLINE bool IsOnStack(uintptr_t slot) const;
void Iterate(RootVisitor* v);
TracedNode* Acquire(Object value, uintptr_t address);
@@ -772,32 +767,36 @@ class GlobalHandles::OnStackTracedNodeSpace final {
GlobalHandles* global_handles;
};
- uintptr_t GetStackAddressForSlot(uintptr_t slot) const;
-
- // Keeps track of registered handles and their stack address. The data
- // structure is cleaned on iteration and when adding new references using the
- // current stack address.
+ // Keeps track of registered handles. The data structure is cleaned on
+ // iteration and when adding new references, using the current stack address.
+ // Cleaning is based on the current stack address and on the map key, which
+ // differs slightly for ASAN configs -- see below.
+#ifdef V8_USE_ADDRESS_SANITIZER
+ // Mapping from stack slots or real stack frames to the corresponding nodes.
+ // In case a reference is part of a fake frame, we map it to the real stack
+ // frame base instead of the actual stack slot. The list keeps all nodes for
+ // a particular real frame.
+ std::map<uintptr_t, std::list<NodeEntry>> on_stack_nodes_;
+#else // !V8_USE_ADDRESS_SANITIZER
+ // Mapping from stack slots to the corresponding nodes. We don't expect
+ // aliasing with overlapping lifetimes of nodes.
std::map<uintptr_t, NodeEntry> on_stack_nodes_;
+#endif // !V8_USE_ADDRESS_SANITIZER
+
uintptr_t stack_start_ = 0;
GlobalHandles* global_handles_ = nullptr;
size_t acquire_count_ = 0;
};
-uintptr_t GlobalHandles::OnStackTracedNodeSpace::GetStackAddressForSlot(
- uintptr_t slot) const {
+bool GlobalHandles::OnStackTracedNodeSpace::IsOnStack(uintptr_t slot) const {
#ifdef V8_USE_ADDRESS_SANITIZER
- void* fake_stack = __asan_get_current_fake_stack();
- if (fake_stack) {
- void* fake_frame_start;
- void* real_frame = __asan_addr_is_in_fake_stack(
- fake_stack, reinterpret_cast<void*>(slot), &fake_frame_start, nullptr);
- if (real_frame) {
- return reinterpret_cast<uintptr_t>(real_frame) +
- (slot - reinterpret_cast<uintptr_t>(fake_frame_start));
- }
+ if (__asan_addr_is_in_fake_stack(__asan_get_current_fake_stack(),
+ reinterpret_cast<void*>(slot), nullptr,
+ nullptr)) {
+ return true;
}
#endif // V8_USE_ADDRESS_SANITIZER
- return slot;
+ return stack_start_ >= slot && slot > base::Stack::GetCurrentStackPosition();
}
void GlobalHandles::OnStackTracedNodeSpace::NotifyEmptyEmbedderStack() {
@@ -805,6 +804,17 @@ void GlobalHandles::OnStackTracedNodeSpace::NotifyEmptyEmbedderStack() {
}
void GlobalHandles::OnStackTracedNodeSpace::Iterate(RootVisitor* v) {
+#ifdef V8_USE_ADDRESS_SANITIZER
+ for (auto& pair : on_stack_nodes_) {
+ for (auto& node_entry : pair.second) {
+ TracedNode& node = node_entry.node;
+ if (node.IsRetainer()) {
+ v->VisitRootPointer(Root::kGlobalHandles, "on-stack TracedReference",
+ node.location());
+ }
+ }
+ }
+#else // !V8_USE_ADDRESS_SANITIZER
// Handles have been cleaned from the GC entry point which is higher up the
// stack.
for (auto& pair : on_stack_nodes_) {
@@ -814,6 +824,7 @@ void GlobalHandles::OnStackTracedNodeSpace::Iterate(RootVisitor* v) {
node.location());
}
}
+#endif // !V8_USE_ADDRESS_SANITIZER
}
GlobalHandles::TracedNode* GlobalHandles::OnStackTracedNodeSpace::Acquire(
@@ -828,8 +839,14 @@ GlobalHandles::TracedNode* GlobalHandles::OnStackTracedNodeSpace::Acquire(
NodeEntry entry;
entry.node.Free(nullptr);
entry.global_handles = global_handles_;
- auto pair =
- on_stack_nodes_.insert({GetStackAddressForSlot(slot), std::move(entry)});
+#ifdef V8_USE_ADDRESS_SANITIZER
+ auto pair = on_stack_nodes_.insert(
+ {base::Stack::GetRealStackAddressForSlot(slot), {}});
+ pair.first->second.push_back(std::move(entry));
+ TracedNode* result = &(pair.first->second.back().node);
+#else // !V8_USE_ADDRESS_SANITIZER
+ auto pair = on_stack_nodes_.insert(
+ {base::Stack::GetRealStackAddressForSlot(slot), std::move(entry)});
if (!pair.second) {
// Insertion failed because there already was an entry present for that
// stack address. This can happen because cleanup is conservative in which
@@ -838,6 +855,7 @@ GlobalHandles::TracedNode* GlobalHandles::OnStackTracedNodeSpace::Acquire(
pair.first->second.node.Free(nullptr);
}
TracedNode* result = &(pair.first->second.node);
+#endif // !V8_USE_ADDRESS_SANITIZER
result->Acquire(value);
result->set_is_on_stack(true);
return result;
@@ -845,7 +863,8 @@ GlobalHandles::TracedNode* GlobalHandles::OnStackTracedNodeSpace::Acquire(
void GlobalHandles::OnStackTracedNodeSpace::CleanupBelowCurrentStackPosition() {
if (on_stack_nodes_.empty()) return;
- const auto it = on_stack_nodes_.upper_bound(GetCurrentStackPosition());
+ const auto it =
+ on_stack_nodes_.upper_bound(base::Stack::GetCurrentStackPosition());
on_stack_nodes_.erase(on_stack_nodes_.begin(), it);
}
@@ -1073,7 +1092,7 @@ void GlobalHandles::MoveTracedGlobal(Address** from, Address** to) {
}
}
DestroyTraced(*from);
- *from = nullptr;
+ SetSlotThreadSafe(from, nullptr);
} else {
// Pure heap move.
DestroyTraced(*to);
@@ -1086,7 +1105,7 @@ void GlobalHandles::MoveTracedGlobal(Address** from, Address** to) {
if (to_node->has_destructor()) {
to_node->set_parameter(to);
}
- *from = nullptr;
+ SetSlotThreadSafe(from, nullptr);
}
TracedNode::Verify(global_handles, to);
}
diff --git a/deps/v8/src/handles/global-handles.h b/deps/v8/src/handles/global-handles.h
index ccd3e4ceda..bcca8627d1 100644
--- a/deps/v8/src/handles/global-handles.h
+++ b/deps/v8/src/handles/global-handles.h
@@ -10,12 +10,12 @@
#include <utility>
#include <vector>
-#include "include/v8.h"
#include "include/v8-profiler.h"
-
-#include "src/utils/utils.h"
+#include "include/v8.h"
#include "src/handles/handles.h"
+#include "src/heap/heap.h"
#include "src/objects/objects.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -323,6 +323,52 @@ class EternalHandles final {
DISALLOW_COPY_AND_ASSIGN(EternalHandles);
};
+// A vector of global Handles which automatically manages the backing of those
+// Handles as a vector of strong-rooted addresses. Handles returned by the
+// vector are valid as long as they are present in the vector.
+template <typename T>
+class GlobalHandleVector {
+ public:
+ class Iterator {
+ public:
+ explicit Iterator(
+ std::vector<Address, StrongRootBlockAllocator>::iterator it)
+ : it_(it) {}
+ Iterator& operator++() {
+ ++it_;
+ return *this;
+ }
+ Handle<T> operator*() { return Handle<T>(&*it_); }
+ bool operator!=(Iterator& that) { return it_ != that.it_; }
+
+ private:
+ std::vector<Address, StrongRootBlockAllocator>::iterator it_;
+ };
+
+ explicit GlobalHandleVector(Heap* heap)
+ : locations_(StrongRootBlockAllocator(heap)) {}
+
+ Handle<T> operator[](size_t i) { return Handle<T>(&locations_[i]); }
+
+ size_t size() const { return locations_.size(); }
+ bool empty() const { return locations_.empty(); }
+
+ void Push(T val) { locations_.push_back(val.ptr()); }
+ // Handles into the GlobalHandleVector become invalid when they are removed,
+ // so "pop" returns a raw object rather than a handle.
+ T Pop() {
+ T obj = T::cast(Object(locations_.back()));
+ locations_.pop_back();
+ return obj;
+ }
+
+ Iterator begin() { return Iterator(locations_.begin()); }
+ Iterator end() { return Iterator(locations_.end()); }
+
+ private:
+ std::vector<Address, StrongRootBlockAllocator> locations_;
+};
+
} // namespace internal
} // namespace v8
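A hypothetical usage sketch for GlobalHandleVector (Script is an arbitrary element type; validity rules per the comments above):

void Example(Isolate* isolate, Script script) {
  GlobalHandleVector<Script> scripts(isolate->heap());
  scripts.Push(script);        // stored as a strong-rooted address
  for (Handle<Script> h : scripts) {
    // h stays valid while it remains in |scripts|.
  }
  Script raw = scripts.Pop();  // raw object: a handle would dangle here
  USE(raw);
}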
diff --git a/deps/v8/src/handles/handles-inl.h b/deps/v8/src/handles/handles-inl.h
index b263187ecd..0215d13ddb 100644
--- a/deps/v8/src/handles/handles-inl.h
+++ b/deps/v8/src/handles/handles-inl.h
@@ -9,6 +9,7 @@
#include "src/execution/local-isolate.h"
#include "src/handles/handles.h"
#include "src/handles/local-handles-inl.h"
+#include "src/objects/objects.h"
#include "src/sanitizer/msan.h"
namespace v8 {
@@ -25,8 +26,15 @@ HandleBase::HandleBase(Address object, LocalIsolate* isolate)
HandleBase::HandleBase(Address object, LocalHeap* local_heap)
: location_(LocalHandleScope::GetHandle(local_heap, object)) {}
-// Allocate a new handle for the object, do not canonicalize.
+bool HandleBase::is_identical_to(const HandleBase that) const {
+ SLOW_DCHECK((this->location_ == nullptr || this->IsDereferenceAllowed()) &&
+ (that.location_ == nullptr || that.IsDereferenceAllowed()));
+ if (this->location_ == that.location_) return true;
+ if (this->location_ == nullptr || that.location_ == nullptr) return false;
+ return Object(*this->location_) == Object(*that.location_);
+}
+// Allocate a new handle for the object, do not canonicalize.
template <typename T>
Handle<T> Handle<T>::New(T object, Isolate* isolate) {
return Handle(HandleScope::CreateHandle(isolate, object.ptr()));
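Out-of-lining is_identical_to also switches the comparison to tagged Object values (see handles.h below); an illustrative consequence, sketch only:

// Two handles created independently for the same object compare equal even
// though their slot addresses differ; a null handle only equals null.
bool SameObject(Handle<Object> a, Handle<Object> b) {
  return a.is_identical_to(b);
}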
diff --git a/deps/v8/src/handles/handles.cc b/deps/v8/src/handles/handles.cc
index 85072a375a..aee0e27f20 100644
--- a/deps/v8/src/handles/handles.cc
+++ b/deps/v8/src/handles/handles.cc
@@ -41,6 +41,7 @@ bool HandleBase::IsDereferenceAllowed() const {
RootsTable::IsImmortalImmovable(root_index)) {
return true;
}
+ if (isolate->IsBuiltinsTableHandleLocation(location_)) return true;
LocalHeap* local_heap = LocalHeap::Current();
if (FLAG_local_heaps && local_heap) {
@@ -175,12 +176,12 @@ Address* CanonicalHandleScope::Lookup(Address object) {
return isolate_->root_handle(root_index).location();
}
}
- Address** entry = identity_map_->Get(Object(object));
- if (*entry == nullptr) {
+ auto find_result = identity_map_->FindOrInsert(Object(object));
+ if (!find_result.already_exists) {
// Allocate new handle location.
- *entry = HandleScope::CreateHandle(isolate_, object);
+ *find_result.entry = HandleScope::CreateHandle(isolate_, object);
}
- return *entry;
+ return *find_result.entry;
}
std::unique_ptr<CanonicalHandlesMap>
diff --git a/deps/v8/src/handles/handles.h b/deps/v8/src/handles/handles.h
index 6f45da8483..62f06ce232 100644
--- a/deps/v8/src/handles/handles.h
+++ b/deps/v8/src/handles/handles.h
@@ -44,14 +44,7 @@ class HandleBase {
V8_INLINE explicit HandleBase(Address object, LocalHeap* local_heap);
// Check if this handle refers to the exact same object as the other handle.
- V8_INLINE bool is_identical_to(const HandleBase that) const {
- SLOW_DCHECK((this->location_ == nullptr || this->IsDereferenceAllowed()) &&
- (that.location_ == nullptr || that.IsDereferenceAllowed()));
- if (this->location_ == that.location_) return true;
- if (this->location_ == nullptr || that.location_ == nullptr) return false;
- return *this->location_ == *that.location_;
- }
-
+ V8_INLINE bool is_identical_to(const HandleBase that) const;
V8_INLINE bool is_null() const { return location_ == nullptr; }
// Returns the raw address where this handle is stored. This should only be
diff --git a/deps/v8/src/handles/maybe-handles-inl.h b/deps/v8/src/handles/maybe-handles-inl.h
index d4989d9456..62c00dde34 100644
--- a/deps/v8/src/handles/maybe-handles-inl.h
+++ b/deps/v8/src/handles/maybe-handles-inl.h
@@ -17,6 +17,10 @@ template <typename T>
MaybeHandle<T>::MaybeHandle(T object, Isolate* isolate)
: MaybeHandle(handle(object, isolate)) {}
+template <typename T>
+MaybeHandle<T>::MaybeHandle(T object, LocalHeap* local_heap)
+ : MaybeHandle(handle(object, local_heap)) {}
+
MaybeObjectHandle::MaybeObjectHandle(MaybeObject object, Isolate* isolate) {
HeapObject heap_object;
DCHECK(!object->IsCleared());
@@ -29,6 +33,19 @@ MaybeObjectHandle::MaybeObjectHandle(MaybeObject object, Isolate* isolate) {
}
}
+MaybeObjectHandle::MaybeObjectHandle(MaybeObject object,
+ LocalHeap* local_heap) {
+ HeapObject heap_object;
+ DCHECK(!object->IsCleared());
+ if (object->GetHeapObjectIfWeak(&heap_object)) {
+ handle_ = handle(heap_object, local_heap);
+ reference_type_ = HeapObjectReferenceType::WEAK;
+ } else {
+ handle_ = handle(object->cast<Object>(), local_heap);
+ reference_type_ = HeapObjectReferenceType::STRONG;
+ }
+}
+
MaybeObjectHandle::MaybeObjectHandle(Handle<Object> object)
: reference_type_(HeapObjectReferenceType::STRONG), handle_(object) {}
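A hedged off-thread sketch for the LocalHeap overloads added above: weak/strong classification matches the Isolate overload, but the backing handle lives in the thread's LocalHeap (wrapper name hypothetical).

MaybeObjectHandle FromBackground(MaybeObject object, LocalHeap* local_heap) {
  return MaybeObjectHandle(object, local_heap);
}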
@@ -36,6 +53,10 @@ MaybeObjectHandle::MaybeObjectHandle(Object object, Isolate* isolate)
: reference_type_(HeapObjectReferenceType::STRONG),
handle_(object, isolate) {}
+MaybeObjectHandle::MaybeObjectHandle(Object object, LocalHeap* local_heap)
+ : reference_type_(HeapObjectReferenceType::STRONG),
+ handle_(object, local_heap) {}
+
MaybeObjectHandle::MaybeObjectHandle(Object object,
HeapObjectReferenceType reference_type,
Isolate* isolate)
@@ -53,6 +74,15 @@ MaybeObjectHandle MaybeObjectHandle::Weak(Object object, Isolate* isolate) {
return MaybeObjectHandle(object, HeapObjectReferenceType::WEAK, isolate);
}
+bool MaybeObjectHandle::is_identical_to(const MaybeObjectHandle& other) const {
+ Handle<Object> this_handle;
+ Handle<Object> other_handle;
+ return reference_type_ == other.reference_type_ &&
+ handle_.ToHandle(&this_handle) ==
+ other.handle_.ToHandle(&other_handle) &&
+ this_handle.is_identical_to(other_handle);
+}
+
MaybeObject MaybeObjectHandle::operator*() const {
if (reference_type_ == HeapObjectReferenceType::WEAK) {
return HeapObjectReference::Weak(*handle_.ToHandleChecked());
@@ -77,6 +107,10 @@ inline MaybeObjectHandle handle(MaybeObject object, Isolate* isolate) {
return MaybeObjectHandle(object, isolate);
}
+inline MaybeObjectHandle handle(MaybeObject object, LocalHeap* local_heap) {
+ return MaybeObjectHandle(object, local_heap);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/handles/maybe-handles.h b/deps/v8/src/handles/maybe-handles.h
index d804374088..15397ef0df 100644
--- a/deps/v8/src/handles/maybe-handles.h
+++ b/deps/v8/src/handles/maybe-handles.h
@@ -45,6 +45,7 @@ class MaybeHandle final {
: location_(maybe_handle.location_) {}
V8_INLINE MaybeHandle(T object, Isolate* isolate);
+ V8_INLINE MaybeHandle(T object, LocalHeap* local_heap);
V8_INLINE void Assert() const { DCHECK_NOT_NULL(location_); }
V8_INLINE void Check() const { CHECK_NOT_NULL(location_); }
@@ -91,6 +92,8 @@ class MaybeObjectHandle {
: reference_type_(HeapObjectReferenceType::STRONG) {}
inline MaybeObjectHandle(MaybeObject object, Isolate* isolate);
inline MaybeObjectHandle(Object object, Isolate* isolate);
+ inline MaybeObjectHandle(MaybeObject object, LocalHeap* local_heap);
+ inline MaybeObjectHandle(Object object, LocalHeap* local_heap);
inline explicit MaybeObjectHandle(Handle<Object> object);
static inline MaybeObjectHandle Weak(Object object, Isolate* isolate);
@@ -100,15 +103,7 @@ class MaybeObjectHandle {
inline MaybeObject operator->() const;
inline Handle<Object> object() const;
- bool is_identical_to(const MaybeObjectHandle& other) const {
- Handle<Object> this_handle;
- Handle<Object> other_handle;
- return reference_type_ == other.reference_type_ &&
- handle_.ToHandle(&this_handle) ==
- other.handle_.ToHandle(&other_handle) &&
- this_handle.is_identical_to(other_handle);
- }
-
+ inline bool is_identical_to(const MaybeObjectHandle& other) const;
bool is_null() const { return handle_.is_null(); }
private:
diff --git a/deps/v8/src/heap/DIR_METADATA b/deps/v8/src/heap/DIR_METADATA
new file mode 100644
index 0000000000..ff55846b31
--- /dev/null
+++ b/deps/v8/src/heap/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>GC"
+} \ No newline at end of file
diff --git a/deps/v8/src/heap/OWNERS b/deps/v8/src/heap/OWNERS
index 51a6b41416..95beec5ca2 100644
--- a/deps/v8/src/heap/OWNERS
+++ b/deps/v8/src/heap/OWNERS
@@ -7,5 +7,3 @@ ulan@chromium.org
per-file *factory*=leszeks@chromium.org
per-file read-only-*=delphick@chromium.org
-
-# COMPONENT: Blink>JavaScript>GC
diff --git a/deps/v8/src/heap/array-buffer-sweeper.cc b/deps/v8/src/heap/array-buffer-sweeper.cc
index ab75e65166..5bc8fcb720 100644
--- a/deps/v8/src/heap/array-buffer-sweeper.cc
+++ b/deps/v8/src/heap/array-buffer-sweeper.cc
@@ -3,6 +3,9 @@
// found in the LICENSE file.
#include "src/heap/array-buffer-sweeper.h"
+
+#include <atomic>
+
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/objects/js-array-buffer.h"
@@ -69,27 +72,25 @@ void ArrayBufferSweeper::EnsureFinished() {
if (!sweeping_in_progress_) return;
TryAbortResult abort_result =
- heap_->isolate()->cancelable_task_manager()->TryAbort(job_.id);
+ heap_->isolate()->cancelable_task_manager()->TryAbort(job_->id_);
switch (abort_result) {
case TryAbortResult::kTaskAborted: {
- Sweep();
+ job_->Sweep();
Merge();
break;
}
case TryAbortResult::kTaskRemoved: {
- CHECK_NE(job_.state, SweepingState::Uninitialized);
- if (job_.state == SweepingState::Prepared) Sweep();
- Merge();
+ if (job_->state_ == SweepingState::kInProgress) job_->Sweep();
+ if (job_->state_ == SweepingState::kDone) Merge();
break;
}
case TryAbortResult::kTaskRunning: {
base::MutexGuard guard(&sweeping_mutex_);
- CHECK_NE(job_.state, SweepingState::Uninitialized);
// Wait until task is finished with its work.
- while (job_.state != SweepingState::Swept) {
+ while (job_->state_ != SweepingState::kDone) {
job_finished_.Wait(&sweeping_mutex_);
}
Merge();
@@ -104,27 +105,34 @@ void ArrayBufferSweeper::EnsureFinished() {
sweeping_in_progress_ = false;
}
-void ArrayBufferSweeper::DecrementExternalMemoryCounters() {
- size_t bytes = freed_bytes_.load(std::memory_order_relaxed);
- if (bytes == 0) return;
-
- while (!freed_bytes_.compare_exchange_weak(bytes, 0)) {
- // empty body
+void ArrayBufferSweeper::AdjustCountersAndMergeIfPossible() {
+ if (sweeping_in_progress_) {
+ DCHECK(job_.has_value());
+ if (job_->state_ == SweepingState::kDone) {
+ Merge();
+ sweeping_in_progress_ = false;
+ } else {
+ DecrementExternalMemoryCounters();
+ }
}
+}
- if (bytes == 0) return;
+void ArrayBufferSweeper::DecrementExternalMemoryCounters() {
+ size_t freed_bytes = freed_bytes_.exchange(0, std::memory_order_relaxed);
- heap_->DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kArrayBuffer, bytes);
- heap_->update_external_memory(-static_cast<int64_t>(bytes));
+ if (freed_bytes > 0) {
+ heap_->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kArrayBuffer, freed_bytes);
+ heap_->update_external_memory(-static_cast<int64_t>(freed_bytes));
+ }
}
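// Illustrative sketch, not part of this patch: the rewrite above drains the
// counter with a single atomic exchange instead of a compare-exchange retry
// loop; both read and zero the value atomically, but exchange() cannot fail
// spuriously. Standalone version with hypothetical names:
#include <atomic>
#include <cstddef>

std::atomic<size_t> freed_bytes{0};

size_t DrainFreedBytes() {
  // Atomically returns the current value and resets it to zero. Relaxed
  // ordering suffices because the counter guards no other data.
  return freed_bytes.exchange(0, std::memory_order_relaxed);
}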
void ArrayBufferSweeper::RequestSweepYoung() {
- RequestSweep(SweepingScope::Young);
+ RequestSweep(SweepingScope::kYoung);
}
void ArrayBufferSweeper::RequestSweepFull() {
- RequestSweep(SweepingScope::Full);
+ RequestSweep(SweepingScope::kFull);
}
size_t ArrayBufferSweeper::YoungBytes() { return young_bytes_; }
@@ -134,7 +142,7 @@ size_t ArrayBufferSweeper::OldBytes() { return old_bytes_; }
void ArrayBufferSweeper::RequestSweep(SweepingScope scope) {
DCHECK(!sweeping_in_progress_);
- if (young_.IsEmpty() && (old_.IsEmpty() || scope == SweepingScope::Young))
+ if (young_.IsEmpty() && (old_.IsEmpty() || scope == SweepingScope::kYoung))
return;
if (!heap_->IsTearingDown() && !heap_->ShouldReduceMemory() &&
@@ -146,42 +154,45 @@ void ArrayBufferSweeper::RequestSweep(SweepingScope scope) {
heap_->tracer(),
GCTracer::BackgroundScope::BACKGROUND_ARRAY_BUFFER_SWEEP);
base::MutexGuard guard(&sweeping_mutex_);
- Sweep();
+ job_->Sweep();
job_finished_.NotifyAll();
});
- job_.id = task->id();
+ job_->id_ = task->id();
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
sweeping_in_progress_ = true;
} else {
Prepare(scope);
- Sweep();
+ job_->Sweep();
Merge();
DecrementExternalMemoryCounters();
}
}
void ArrayBufferSweeper::Prepare(SweepingScope scope) {
- CHECK_EQ(job_.state, SweepingState::Uninitialized);
+ DCHECK(!job_.has_value());
- if (scope == SweepingScope::Young) {
- job_ =
- SweepingJob::Prepare(young_, ArrayBufferList(), SweepingScope::Young);
+ if (scope == SweepingScope::kYoung) {
+ job_.emplace(this, young_, ArrayBufferList(), SweepingScope::kYoung);
young_.Reset();
+ young_bytes_ = 0;
} else {
- CHECK_EQ(scope, SweepingScope::Full);
- job_ = SweepingJob::Prepare(young_, old_, SweepingScope::Full);
+ CHECK_EQ(scope, SweepingScope::kFull);
+ job_.emplace(this, young_, old_, SweepingScope::kFull);
young_.Reset();
old_.Reset();
+ young_bytes_ = old_bytes_ = 0;
}
}
void ArrayBufferSweeper::Merge() {
- CHECK_EQ(job_.state, SweepingState::Swept);
- young_.Append(&job_.young);
- old_.Append(&job_.old);
+ DCHECK(job_.has_value());
+ CHECK_EQ(job_->state_, SweepingState::kDone);
+ young_.Append(&job_->young_);
+ old_.Append(&job_->old_);
young_bytes_ = young_.Bytes();
old_bytes_ = old_.Bytes();
- job_.state = SweepingState::Uninitialized;
+
+ job_.reset();
}
void ArrayBufferSweeper::ReleaseAll() {
@@ -215,6 +226,7 @@ void ArrayBufferSweeper::Append(JSArrayBuffer object,
old_bytes_ += bytes;
}
+ AdjustCountersAndMergeIfPossible();
DecrementExternalMemoryCounters();
IncrementExternalMemoryCounters(bytes);
}
@@ -226,42 +238,34 @@ void ArrayBufferSweeper::IncrementExternalMemoryCounters(size_t bytes) {
->AdjustAmountOfExternalAllocatedMemory(static_cast<int64_t>(bytes));
}
-ArrayBufferSweeper::SweepingJob::SweepingJob()
- : state(SweepingState::Uninitialized) {}
-
-ArrayBufferSweeper::SweepingJob ArrayBufferSweeper::SweepingJob::Prepare(
- ArrayBufferList young, ArrayBufferList old, SweepingScope scope) {
- SweepingJob job;
- job.young = young;
- job.old = old;
- job.scope = scope;
- job.id = 0;
- job.state = SweepingState::Prepared;
- return job;
+void ArrayBufferSweeper::IncrementFreedBytes(size_t bytes) {
+ if (bytes == 0) return;
+ freed_bytes_.fetch_add(bytes, std::memory_order_relaxed);
}
-void ArrayBufferSweeper::Sweep() {
- CHECK_EQ(job_.state, SweepingState::Prepared);
+void ArrayBufferSweeper::SweepingJob::Sweep() {
+ CHECK_EQ(state_, SweepingState::kInProgress);
- if (job_.scope == SweepingScope::Young) {
+ if (scope_ == SweepingScope::kYoung) {
SweepYoung();
} else {
- CHECK_EQ(job_.scope, SweepingScope::Full);
+ CHECK_EQ(scope_, SweepingScope::kFull);
SweepFull();
}
- job_.state = SweepingState::Swept;
+ state_ = SweepingState::kDone;
}
-void ArrayBufferSweeper::SweepFull() {
- CHECK_EQ(job_.scope, SweepingScope::Full);
- ArrayBufferList promoted = SweepListFull(&job_.young);
- ArrayBufferList survived = SweepListFull(&job_.old);
+void ArrayBufferSweeper::SweepingJob::SweepFull() {
+ CHECK_EQ(scope_, SweepingScope::kFull);
+ ArrayBufferList promoted = SweepListFull(&young_);
+ ArrayBufferList survived = SweepListFull(&old_);
- job_.old = promoted;
- job_.old.Append(&survived);
+ old_ = promoted;
+ old_.Append(&survived);
}
-ArrayBufferList ArrayBufferSweeper::SweepListFull(ArrayBufferList* list) {
+ArrayBufferList ArrayBufferSweeper::SweepingJob::SweepListFull(
+ ArrayBufferList* list) {
ArrayBufferExtension* current = list->head_;
ArrayBufferList survivor_list;
@@ -271,7 +275,7 @@ ArrayBufferList ArrayBufferSweeper::SweepListFull(ArrayBufferList* list) {
if (!current->IsMarked()) {
size_t bytes = current->accounting_length();
delete current;
- IncrementFreedBytes(bytes);
+ sweeper_->IncrementFreedBytes(bytes);
} else {
current->Unmark();
survivor_list.Append(current);
@@ -284,9 +288,9 @@ ArrayBufferList ArrayBufferSweeper::SweepListFull(ArrayBufferList* list) {
return survivor_list;
}
-void ArrayBufferSweeper::SweepYoung() {
- CHECK_EQ(job_.scope, SweepingScope::Young);
- ArrayBufferExtension* current = job_.young.head_;
+void ArrayBufferSweeper::SweepingJob::SweepYoung() {
+ CHECK_EQ(scope_, SweepingScope::kYoung);
+ ArrayBufferExtension* current = young_.head_;
ArrayBufferList new_young;
ArrayBufferList new_old;
@@ -297,7 +301,7 @@ void ArrayBufferSweeper::SweepYoung() {
if (!current->IsYoungMarked()) {
size_t bytes = current->accounting_length();
delete current;
- IncrementFreedBytes(bytes);
+ sweeper_->IncrementFreedBytes(bytes);
} else if (current->IsYoungPromoted()) {
current->YoungUnmark();
new_old.Append(current);
@@ -309,13 +313,8 @@ void ArrayBufferSweeper::SweepYoung() {
current = next;
}
- job_.old = new_old;
- job_.young = new_young;
-}
-
-void ArrayBufferSweeper::IncrementFreedBytes(size_t bytes) {
- if (bytes == 0) return;
- freed_bytes_.fetch_add(bytes);
+ old_ = new_old;
+ young_ = new_young;
}
} // namespace internal
diff --git a/deps/v8/src/heap/array-buffer-sweeper.h b/deps/v8/src/heap/array-buffer-sweeper.h
index 5cedb2b8f8..963682d82f 100644
--- a/deps/v8/src/heap/array-buffer-sweeper.h
+++ b/deps/v8/src/heap/array-buffer-sweeper.h
@@ -68,37 +68,46 @@ class ArrayBufferSweeper {
size_t OldBytes();
private:
- enum class SweepingScope { Young, Full };
+ enum class SweepingScope { kYoung, kFull };
- enum class SweepingState { Uninitialized, Prepared, Swept };
+ enum class SweepingState { kInProgress, kDone };
struct SweepingJob {
- CancelableTaskManager::Id id;
- SweepingState state;
- ArrayBufferList young;
- ArrayBufferList old;
- SweepingScope scope;
-
- SweepingJob();
-
- static SweepingJob Prepare(ArrayBufferList young, ArrayBufferList old,
- SweepingScope scope);
- } job_;
+ ArrayBufferSweeper* sweeper_;
+ CancelableTaskManager::Id id_;
+ std::atomic<SweepingState> state_;
+ ArrayBufferList young_;
+ ArrayBufferList old_;
+ SweepingScope scope_;
+
+ SweepingJob(ArrayBufferSweeper* sweeper, ArrayBufferList young,
+ ArrayBufferList old, SweepingScope scope)
+ : sweeper_(sweeper),
+ id_(0),
+ state_(SweepingState::kInProgress),
+ young_(young),
+ old_(old),
+ scope_(scope) {}
+
+ void Sweep();
+ void SweepYoung();
+ void SweepFull();
+ ArrayBufferList SweepListFull(ArrayBufferList* list);
+ };
+
+ base::Optional<SweepingJob> job_;
void Merge();
+ void AdjustCountersAndMergeIfPossible();
void DecrementExternalMemoryCounters();
void IncrementExternalMemoryCounters(size_t bytes);
void IncrementFreedBytes(size_t bytes);
+ void IncrementFreedYoungBytes(size_t bytes);
void RequestSweep(SweepingScope sweeping_task);
void Prepare(SweepingScope sweeping_task);
- void Sweep();
- void SweepYoung();
- void SweepFull();
- ArrayBufferList SweepListFull(ArrayBufferList* list);
-
ArrayBufferList SweepYoungGen();
void SweepOldGen(ArrayBufferExtension* extension);
diff --git a/deps/v8/src/heap/base/stack.cc b/deps/v8/src/heap/base/stack.cc
index cd28444474..939487ca77 100644
--- a/deps/v8/src/heap/base/stack.cc
+++ b/deps/v8/src/heap/base/stack.cc
@@ -20,9 +20,19 @@ extern "C" void PushAllRegistersAndIterateStack(const Stack*, StackVisitor*,
Stack::Stack(const void* stack_start) : stack_start_(stack_start) {}
bool Stack::IsOnStack(void* slot) const {
- void* raw_slot = v8::base::Stack::GetStackSlot(slot);
- return v8::base::Stack::GetCurrentStackPosition() <= raw_slot &&
- raw_slot <= stack_start_;
+#ifdef V8_USE_ADDRESS_SANITIZER
+ // If the slot is part of a fake frame, then it is definitely on the stack.
+ void* real_frame = __asan_addr_is_in_fake_stack(
+ __asan_get_current_fake_stack(), reinterpret_cast<void*>(slot), nullptr,
+ nullptr);
+ if (real_frame) {
+ return true;
+ }
+ // Fall through as there is still a regular stack present even when running
+ // with ASAN fake stacks.
+#endif // V8_USE_ADDRESS_SANITIZER
+ return v8::base::Stack::GetCurrentStackPosition() <= slot &&
+ slot <= stack_start_;
}
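// Illustrative sketch, not part of this patch: IsOnStack() above first asks
// ASan whether the slot lives in a fake frame (stack-use-after-return mode
// moves frames off the real stack), then falls back to the plain range check.
// On a downward-growing stack a slot is on-stack iff
// current_sp <= slot <= stack_start; the range check alone, with
// hypothetical names:
#include <cstdint>

bool IsOnStackRange(const void* slot, const void* stack_start,
                    const void* current_sp) {
  auto addr = reinterpret_cast<std::uintptr_t>(slot);
  return reinterpret_cast<std::uintptr_t>(current_sp) <= addr &&
         addr <= reinterpret_cast<std::uintptr_t>(stack_start);
}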
namespace {
diff --git a/deps/v8/src/heap/base/worklist.h b/deps/v8/src/heap/base/worklist.h
index be2ecf158b..e2d33616ad 100644
--- a/deps/v8/src/heap/base/worklist.h
+++ b/deps/v8/src/heap/base/worklist.h
@@ -285,6 +285,9 @@ class Worklist<EntryType, SegmentSize>::Local {
void Publish();
void Merge(Worklist<EntryType, SegmentSize>::Local* other);
+ bool IsEmpty() const;
+ void Clear();
+
size_t PushSegmentSize() const { return push_segment_->Size(); }
private:
@@ -445,6 +448,17 @@ bool Worklist<EntryType, SegmentSize>::Local::StealPopSegment() {
return false;
}
+template <typename EntryType, uint16_t SegmentSize>
+bool Worklist<EntryType, SegmentSize>::Local::IsEmpty() const {
+ return push_segment_->IsEmpty() && pop_segment_->IsEmpty();
+}
+
+template <typename EntryType, uint16_t SegmentSize>
+void Worklist<EntryType, SegmentSize>::Local::Clear() {
+ push_segment_->Clear();
+ pop_segment_->Clear();
+}
+
} // namespace base
} // namespace heap
diff --git a/deps/v8/src/heap/basic-memory-chunk.h b/deps/v8/src/heap/basic-memory-chunk.h
index d6c3e5f29a..e102349fa9 100644
--- a/deps/v8/src/heap/basic-memory-chunk.h
+++ b/deps/v8/src/heap/basic-memory-chunk.h
@@ -283,13 +283,11 @@ class BasicMemoryChunk {
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static BasicMemoryChunk* FromAddress(Address a) {
- DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(a));
}
// Only works if the object is in the first kPageSize of the MemoryChunk.
static BasicMemoryChunk* FromHeapObject(HeapObject o) {
- DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(o.ptr()));
}
diff --git a/deps/v8/src/heap/code-object-registry.cc b/deps/v8/src/heap/code-object-registry.cc
index ebaa29fbae..f8ec349409 100644
--- a/deps/v8/src/heap/code-object-registry.cc
+++ b/deps/v8/src/heap/code-object-registry.cc
@@ -12,63 +12,57 @@ namespace v8 {
namespace internal {
void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
- auto result = code_object_registry_newly_allocated_.insert(code);
- USE(result);
- DCHECK(result.second);
+ if (is_sorted_) {
+ is_sorted_ =
+ (code_object_registry_.empty() || code_object_registry_.back() < code);
+ }
+ code_object_registry_.push_back(code);
}
void CodeObjectRegistry::RegisterAlreadyExistingCodeObject(Address code) {
- code_object_registry_already_existing_.push_back(code);
+ DCHECK(is_sorted_);
+ DCHECK(code_object_registry_.empty() || code_object_registry_.back() < code);
+ code_object_registry_.push_back(code);
}
void CodeObjectRegistry::Clear() {
- code_object_registry_already_existing_.clear();
- code_object_registry_newly_allocated_.clear();
+ code_object_registry_.clear();
+ is_sorted_ = true;
}
void CodeObjectRegistry::Finalize() {
- code_object_registry_already_existing_.shrink_to_fit();
+ DCHECK(is_sorted_);
+ code_object_registry_.shrink_to_fit();
}
bool CodeObjectRegistry::Contains(Address object) const {
- return (code_object_registry_newly_allocated_.find(object) !=
- code_object_registry_newly_allocated_.end()) ||
- (std::binary_search(code_object_registry_already_existing_.begin(),
- code_object_registry_already_existing_.end(),
- object));
+ if (!is_sorted_) {
+ std::sort(code_object_registry_.begin(), code_object_registry_.end());
+ is_sorted_ = true;
+ }
+ return (std::binary_search(code_object_registry_.begin(),
+ code_object_registry_.end(), object));
}
Address CodeObjectRegistry::GetCodeObjectStartFromInnerAddress(
Address address) const {
- // Let's first find the object which comes right before address in the vector
- // of already existing code objects.
- Address already_existing_set_ = 0;
- Address newly_allocated_set_ = 0;
- if (!code_object_registry_already_existing_.empty()) {
- auto it =
- std::upper_bound(code_object_registry_already_existing_.begin(),
- code_object_registry_already_existing_.end(), address);
- if (it != code_object_registry_already_existing_.begin()) {
- already_existing_set_ = *(--it);
- }
- }
-
- // Next, let's find the object which comes right before address in the set
- // of newly allocated code objects.
- if (!code_object_registry_newly_allocated_.empty()) {
- auto it = code_object_registry_newly_allocated_.upper_bound(address);
- if (it != code_object_registry_newly_allocated_.begin()) {
- newly_allocated_set_ = *(--it);
- }
+ if (!is_sorted_) {
+ std::sort(code_object_registry_.begin(), code_object_registry_.end());
+ is_sorted_ = true;
}
- // The code objects which contains address has to be in one of the two
- // data structures.
- DCHECK(already_existing_set_ != 0 || newly_allocated_set_ != 0);
+ // The code registry can't be empty, otherwise the code object couldn't exist.
+ DCHECK(!code_object_registry_.empty());
- // The address which is closest to the given address is the code object.
- return already_existing_set_ > newly_allocated_set_ ? already_existing_set_
- : newly_allocated_set_;
+ // std::upper_bound returns the first code object strictly greater than
+ // address, so the code object containing the address has to be the previous
+ // one.
+ auto it = std::upper_bound(code_object_registry_.begin(),
+ code_object_registry_.end(), address);
+ // The address has to be contained in a code object, so it can't be smaller
+ // than the start of the first code object.
+ DCHECK_NE(it, code_object_registry_.begin());
+ return *(--it);
}
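// Illustrative sketch, not part of this patch: the registry now keeps a
// single vector that is sorted lazily. Appends may mark it unsorted; const
// lookups sort on demand and then binary-search. std::upper_bound yields the
// first start address strictly greater than the query, so the element before
// it is the containing code object. Standalone version, hypothetical names:
#include <algorithm>
#include <cstdint>
#include <vector>

using Address = std::uintptr_t;

Address StartOfContainingObject(std::vector<Address>& starts, bool& sorted,
                                Address inner) {
  if (!sorted) {
    std::sort(starts.begin(), starts.end());
    sorted = true;
  }
  auto it = std::upper_bound(starts.begin(), starts.end(), inner);
  // `inner` must lie inside some object, so `it` cannot be begin().
  return *(--it);
}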
} // namespace internal
diff --git a/deps/v8/src/heap/code-object-registry.h b/deps/v8/src/heap/code-object-registry.h
index beab176625..ae5199903b 100644
--- a/deps/v8/src/heap/code-object-registry.h
+++ b/deps/v8/src/heap/code-object-registry.h
@@ -28,8 +28,10 @@ class V8_EXPORT_PRIVATE CodeObjectRegistry {
Address GetCodeObjectStartFromInnerAddress(Address address) const;
private:
- std::vector<Address> code_object_registry_already_existing_;
- std::set<Address> code_object_registry_newly_allocated_;
+ // A vector of addresses, which may be sorted. It is marked 'mutable' so
+ // that it can be sorted lazily in the const lookup methods.
+ mutable std::vector<Address> code_object_registry_;
+ mutable bool is_sorted_ = true;
};
} // namespace internal
diff --git a/deps/v8/src/heap/code-stats.cc b/deps/v8/src/heap/code-stats.cc
index bf8b9a64ce..abca2c75f9 100644
--- a/deps/v8/src/heap/code-stats.cc
+++ b/deps/v8/src/heap/code-stats.cc
@@ -194,12 +194,13 @@ void CodeStatistics::CollectCommentStatistics(Isolate* isolate,
EnterComment(isolate, comment_txt, flat_delta);
}
-// Collects code comment statistics
+// Collects code comment statistics.
void CodeStatistics::CollectCodeCommentStatistics(HeapObject obj,
Isolate* isolate) {
// Bytecode objects do not contain RelocInfo. Only process code objects
// for code comment statistics.
if (!obj.IsCode()) {
+ DCHECK(obj.IsBytecodeArray());
return;
}
@@ -214,8 +215,8 @@ void CodeStatistics::CollectCodeCommentStatistics(HeapObject obj,
cit.Next();
}
- DCHECK(0 <= prev_pc_offset && prev_pc_offset <= code.raw_instruction_size());
- delta += static_cast<int>(code.raw_instruction_size() - prev_pc_offset);
+ DCHECK(0 <= prev_pc_offset && prev_pc_offset <= code.InstructionSize());
+ delta += static_cast<int>(code.InstructionSize() - prev_pc_offset);
EnterComment(isolate, "NoComment", delta);
}
#endif
diff --git a/deps/v8/src/heap/collection-barrier.cc b/deps/v8/src/heap/collection-barrier.cc
new file mode 100644
index 0000000000..47a9db882b
--- /dev/null
+++ b/deps/v8/src/heap/collection-barrier.cc
@@ -0,0 +1,100 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/collection-barrier.h"
+
+#include "src/base/platform/time.h"
+#include "src/heap/gc-tracer.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+
+namespace v8 {
+namespace internal {
+
+void CollectionBarrier::ResumeThreadsAwaitingCollection() {
+ base::MutexGuard guard(&mutex_);
+ ClearCollectionRequested();
+ cond_.NotifyAll();
+}
+
+void CollectionBarrier::ShutdownRequested() {
+ base::MutexGuard guard(&mutex_);
+ if (timer_.IsStarted()) timer_.Stop();
+ state_.store(RequestState::kShutdown);
+ cond_.NotifyAll();
+}
+
+class BackgroundCollectionInterruptTask : public CancelableTask {
+ public:
+ explicit BackgroundCollectionInterruptTask(Heap* heap)
+ : CancelableTask(heap->isolate()), heap_(heap) {}
+
+ ~BackgroundCollectionInterruptTask() override = default;
+
+ private:
+ // v8::internal::CancelableTask overrides.
+ void RunInternal() override { heap_->CheckCollectionRequested(); }
+
+ Heap* heap_;
+ DISALLOW_COPY_AND_ASSIGN(BackgroundCollectionInterruptTask);
+};
+
+void CollectionBarrier::AwaitCollectionBackground() {
+ bool first;
+
+ {
+ base::MutexGuard guard(&mutex_);
+ first = FirstCollectionRequest();
+ if (first) timer_.Start();
+ }
+
+ if (first) {
+ // This is the first background thread requesting a collection; ask the
+ // main thread to perform the GC.
+ ActivateStackGuardAndPostTask();
+ }
+
+ BlockUntilCollected();
+}
+
+void CollectionBarrier::StopTimeToCollectionTimer() {
+ base::MutexGuard guard(&mutex_);
+ RequestState old_state = state_.exchange(RequestState::kCollectionStarted,
+ std::memory_order_relaxed);
+ if (old_state == RequestState::kCollectionRequested) {
+ DCHECK(timer_.IsStarted());
+ base::TimeDelta delta = timer_.Elapsed();
+ TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "V8.TimeToCollection", TRACE_EVENT_SCOPE_THREAD,
+ "duration", delta.InMillisecondsF());
+ heap_->isolate()->counters()->time_to_collection()->AddTimedSample(delta);
+ timer_.Stop();
+ } else {
+ DCHECK_EQ(old_state, RequestState::kDefault);
+ DCHECK(!timer_.IsStarted());
+ }
+}
+
+void CollectionBarrier::ActivateStackGuardAndPostTask() {
+ Isolate* isolate = heap_->isolate();
+ ExecutionAccess access(isolate);
+ isolate->stack_guard()->RequestGC();
+ auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
+ reinterpret_cast<v8::Isolate*>(isolate));
+ taskrunner->PostTask(
+ std::make_unique<BackgroundCollectionInterruptTask>(heap_));
+}
+
+void CollectionBarrier::BlockUntilCollected() {
+ TRACE_BACKGROUND_GC(heap_->tracer(),
+ GCTracer::BackgroundScope::BACKGROUND_COLLECTION);
+ base::MutexGuard guard(&mutex_);
+
+ while (CollectionRequested()) {
+ cond_.Wait(&mutex_);
+ }
+}
+
+} // namespace internal
+} // namespace v8
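// Illustrative sketch, not part of this patch: BlockUntilCollected() above
// is the classic wait-until-predicate loop. The predicate is re-tested after
// every wakeup (condition variables may wake spuriously) while holding the
// mutex. The same shape with the standard library, hypothetical names:
#include <condition_variable>
#include <mutex>

std::mutex mutex;
std::condition_variable cond;
bool collection_requested = false;

void BlockUntilCollected() {
  std::unique_lock<std::mutex> lock(mutex);
  // Returns only once the request has been cleared by the collector.
  cond.wait(lock, [] { return !collection_requested; });
}

void ResumeWaiters() {
  {
    std::lock_guard<std::mutex> lock(mutex);
    collection_requested = false;
  }
  cond.notify_all();
}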
diff --git a/deps/v8/src/heap/collection-barrier.h b/deps/v8/src/heap/collection-barrier.h
new file mode 100644
index 0000000000..418f93ce04
--- /dev/null
+++ b/deps/v8/src/heap/collection-barrier.h
@@ -0,0 +1,93 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_COLLECTION_BARRIER_H_
+#define V8_HEAP_COLLECTION_BARRIER_H_
+
+#include <atomic>
+
+#include "src/base/optional.h"
+#include "src/base/platform/elapsed-timer.h"
+#include "src/base/platform/mutex.h"
+#include "src/logging/counters.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+// This class stops and resumes all background threads waiting for GC.
+class CollectionBarrier {
+ Heap* heap_;
+ base::Mutex mutex_;
+ base::ConditionVariable cond_;
+ base::ElapsedTimer timer_;
+
+ enum class RequestState {
+ // Default state: no collection requested and tear-down wasn't initiated
+ // yet.
+ kDefault,
+
+ // Collection was already requested
+ kCollectionRequested,
+
+ // Collection was already started
+ kCollectionStarted,
+
+ // This state is reached after the isolate starts to shut down. The main
+ // thread can't perform any GCs anymore, so all allocations need to be
+ // allowed from here on until the background threads finish.
+ kShutdown,
+ };
+
+ // The current state.
+ std::atomic<RequestState> state_;
+
+ // Request GC by activating stack guards and posting a task to perform the
+ // GC.
+ void ActivateStackGuardAndPostTask();
+
+ // Returns true when the state was successfully updated from kDefault to
+ // kCollectionRequested.
+ bool FirstCollectionRequest() {
+ RequestState expected = RequestState::kDefault;
+ return state_.compare_exchange_strong(expected,
+ RequestState::kCollectionRequested);
+ }
+
+ // Sets state back to kDefault - invoked at end of GC.
+ void ClearCollectionRequested() {
+ RequestState old_state =
+ state_.exchange(RequestState::kDefault, std::memory_order_relaxed);
+ USE(old_state);
+ DCHECK_EQ(old_state, RequestState::kCollectionStarted);
+ }
+
+ public:
+ explicit CollectionBarrier(Heap* heap)
+ : heap_(heap), state_(RequestState::kDefault) {}
+
+ // Checks whether any background thread requested GC.
+ bool CollectionRequested() {
+ return state_.load(std::memory_order_relaxed) ==
+ RequestState::kCollectionRequested;
+ }
+
+ void StopTimeToCollectionTimer();
+ void BlockUntilCollected();
+
+ // Resumes threads waiting for collection.
+ void ResumeThreadsAwaitingCollection();
+
+ // Sets current state to kShutdown.
+ void ShutdownRequested();
+
+ // This is the method used by background threads to request and wait for GC.
+ void AwaitCollectionBackground();
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_COLLECTION_BARRIER_H_
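// Illustrative sketch, not part of this patch: FirstCollectionRequest()
// above elects exactly one "first" requester. compare_exchange_strong
// succeeds only for the single thread that observes the kDefault ->
// kCollectionRequested transition; all later callers see the updated state
// and get false. Standalone version with hypothetical names:
#include <atomic>

enum class State { kIdle, kRequested };
std::atomic<State> state{State::kIdle};

bool TryBecomeFirstRequester() {
  State expected = State::kIdle;
  // Exactly one caller wins the transition and returns true.
  return state.compare_exchange_strong(expected, State::kRequested);
}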
diff --git a/deps/v8/src/heap/concurrent-allocator.cc b/deps/v8/src/heap/concurrent-allocator.cc
index 5db9159f14..82975fa339 100644
--- a/deps/v8/src/heap/concurrent-allocator.cc
+++ b/deps/v8/src/heap/concurrent-allocator.cc
@@ -9,6 +9,7 @@
#include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator-inl.h"
#include "src/heap/local-heap-inl.h"
+#include "src/heap/local-heap.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk.h"
@@ -17,7 +18,8 @@ namespace internal {
void StressConcurrentAllocatorTask::RunInternal() {
Heap* heap = isolate_->heap();
- LocalHeap local_heap(heap);
+ LocalHeap local_heap(heap, ThreadKind::kBackground);
+ UnparkedScope unparked_scope(&local_heap);
const int kNumIterations = 2000;
const int kSmallObjectSize = 10 * kTaggedSize;
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index b0c3e50951..f2dfad057b 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -347,27 +347,37 @@ FixedArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
return FixedArray::unchecked_cast(object);
}
-class ConcurrentMarking::Task : public CancelableTask {
+// The Deserializer changes the map from StrongDescriptorArray to
+// DescriptorArray
+template <>
+StrongDescriptorArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
+ return StrongDescriptorArray::unchecked_cast(DescriptorArray::cast(object));
+}
+
+class ConcurrentMarking::JobTask : public v8::JobTask {
public:
- Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
- TaskState* task_state, int task_id)
- : CancelableTask(isolate),
- concurrent_marking_(concurrent_marking),
- task_state_(task_state),
- task_id_(task_id) {}
+ JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch,
+ bool is_forced_gc)
+ : concurrent_marking_(concurrent_marking),
+ mark_compact_epoch_(mark_compact_epoch),
+ is_forced_gc_(is_forced_gc) {}
- ~Task() override = default;
+ ~JobTask() override = default;
- private:
- // v8::internal::CancelableTask overrides.
- void RunInternal() override {
- concurrent_marking_->Run(task_id_, task_state_);
+ // v8::JobTask overrides.
+ void Run(JobDelegate* delegate) override {
+ concurrent_marking_->Run(delegate, mark_compact_epoch_, is_forced_gc_);
}
+ size_t GetMaxConcurrency(size_t worker_count) const override {
+ return concurrent_marking_->GetMaxConcurrency(worker_count);
+ }
+
+ private:
ConcurrentMarking* concurrent_marking_;
- TaskState* task_state_;
- int task_id_;
- DISALLOW_COPY_AND_ASSIGN(Task);
+ const unsigned mark_compact_epoch_;
+ const bool is_forced_gc_;
+ DISALLOW_COPY_AND_ASSIGN(JobTask);
};
ConcurrentMarking::ConcurrentMarking(Heap* heap,
@@ -376,22 +386,29 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap,
: heap_(heap),
marking_worklists_(marking_worklists),
weak_objects_(weak_objects) {
-// The runtime flag should be set only if the compile time flag was set.
-#ifndef V8_CONCURRENT_MARKING
+#ifndef V8_ATOMIC_MARKING_STATE
+ // Concurrent and parallel marking require atomic marking state.
CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
#endif
+#ifndef V8_ATOMIC_OBJECT_FIELD_WRITES
+ // Concurrent marking requires atomic object field writes.
+ CHECK(!FLAG_concurrent_marking);
+#endif
}
-void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
+void ConcurrentMarking::Run(JobDelegate* delegate, unsigned mark_compact_epoch,
+ bool is_forced_gc) {
TRACE_BACKGROUND_GC(heap_->tracer(),
GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
+ uint8_t task_id = delegate->GetTaskId() + 1;
+ TaskState* task_state = &task_state_[task_id];
MarkingWorklists::Local local_marking_worklists(marking_worklists_);
ConcurrentMarkingVisitor visitor(
task_id, &local_marking_worklists, weak_objects_, heap_,
- task_state->mark_compact_epoch, Heap::GetBytecodeFlushMode(),
- heap_->local_embedder_heap_tracer()->InUse(), task_state->is_forced_gc,
+ mark_compact_epoch, Heap::GetBytecodeFlushMode(),
+ heap_->local_embedder_heap_tracer()->InUse(), is_forced_gc,
&task_state->memory_chunk_data);
NativeContextInferrer& native_context_inferrer =
task_state->native_context_inferrer;
@@ -457,7 +474,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
marked_bytes += current_marked_bytes;
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
marked_bytes);
- if (task_state->preemption_request) {
+ if (delegate->ShouldYield()) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"ConcurrentMarking::Run Preempted");
break;
@@ -492,13 +509,6 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
if (ephemeron_marked) {
set_ephemeron_marked(true);
}
-
- {
- base::MutexGuard guard(&pending_lock_);
- is_pending_[task_id] = false;
- --pending_task_count_;
- pending_condition_.NotifyAll();
- }
}
if (FLAG_trace_concurrent_marking) {
heap_->isolate()->PrintWithTimestamp(
@@ -507,109 +517,71 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
}
-void ConcurrentMarking::ScheduleTasks() {
+size_t ConcurrentMarking::GetMaxConcurrency(size_t worker_count) {
+ size_t marking_items = marking_worklists_->shared()->Size();
+ for (auto& worklist : marking_worklists_->context_worklists())
+ marking_items += worklist.worklist->Size();
+ return std::min<size_t>(
+ kMaxTasks,
+ worker_count + std::max<size_t>(
+ {marking_items,
+ weak_objects_->discovered_ephemerons.GlobalPoolSize(),
+ weak_objects_->current_ephemerons.GlobalPoolSize()}));
+}
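// Illustrative sketch, not part of this patch: GetMaxConcurrency() above
// caps the answer at kMaxTasks and otherwise requests `worker_count`
// (workers already running must stay accounted for) plus the pending work.
// Shape of the heuristic, hypothetical names:
#include <algorithm>
#include <cstddef>

size_t MaxConcurrency(size_t worker_count, size_t marking_items,
                      size_t ephemeron_items, size_t max_tasks) {
  // E.g. 3 running workers, 5 marking items, 2 ephemerons, cap 7:
  // min(7, 3 + max(5, 2)) == 7.
  return std::min(max_tasks,
                  worker_count + std::max(marking_items, ephemeron_items));
}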
+
+void ConcurrentMarking::ScheduleJob(TaskPriority priority) {
DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
DCHECK(!heap_->IsTearingDown());
- base::MutexGuard guard(&pending_lock_);
- if (total_task_count_ == 0) {
- static const int num_cores =
- V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
-#if defined(V8_OS_MACOSX)
- // Mac OSX 10.11 and prior seems to have trouble when doing concurrent
- // marking on competing hyper-threads (regresses Octane/Splay). As such,
- // only use num_cores/2, leaving one of those for the main thread.
- // TODO(ulan): Use all cores on Mac 10.12+.
- total_task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
-#else // defined(V8_OS_MACOSX)
- // On other platforms use all logical cores, leaving one for the main
- // thread.
- total_task_count_ = Max(1, Min(kMaxTasks, num_cores - 2));
-#endif // defined(V8_OS_MACOSX)
- if (FLAG_gc_experiment_reduce_concurrent_marking_tasks) {
- // Use at most half of the cores in the experiment.
- total_task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
- }
- DCHECK_LE(total_task_count_, kMaxTasks);
- }
- // Task id 0 is for the main thread.
- for (int i = 1; i <= total_task_count_; i++) {
- if (!is_pending_[i]) {
- if (FLAG_trace_concurrent_marking) {
- heap_->isolate()->PrintWithTimestamp(
- "Scheduling concurrent marking task %d\n", i);
- }
- task_state_[i].preemption_request = false;
- task_state_[i].mark_compact_epoch =
- heap_->mark_compact_collector()->epoch();
- task_state_[i].is_forced_gc = heap_->is_current_gc_forced();
- is_pending_[i] = true;
- ++pending_task_count_;
- auto task =
- std::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
- cancelable_id_[i] = task->id();
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
- }
- }
- DCHECK_EQ(total_task_count_, pending_task_count_);
+ DCHECK(!job_handle_ || !job_handle_->IsValid());
+
+ job_handle_ = V8::GetCurrentPlatform()->PostJob(
+ priority,
+ std::make_unique<JobTask>(this, heap_->mark_compact_collector()->epoch(),
+ heap_->is_current_gc_forced()));
+ DCHECK(job_handle_->IsValid());
}
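// Illustrative sketch, not part of this patch: the migration above replaces
// hand-rolled task bookkeeping (pending counts, condition variables,
// cancelable task IDs) with the v8::JobTask API used in this diff. The
// platform calls Run() from up to GetMaxConcurrency() workers and the
// returned JobHandle supports Join(), Cancel(), UpdatePriority() and
// NotifyConcurrencyIncrease(). A minimal job, assuming the v8-platform.h
// interfaces shown above:
#include <algorithm>
#include <atomic>
#include <cstddef>

#include "include/v8-platform.h"

class DrainCounterJob final : public v8::JobTask {
 public:
  explicit DrainCounterJob(std::atomic<size_t>* items) : items_(items) {}

  void Run(v8::JobDelegate* delegate) override {
    // Process one item at a time, yielding when the scheduler asks.
    while (items_->load(std::memory_order_relaxed) > 0) {
      items_->fetch_sub(1, std::memory_order_relaxed);  // "Do work".
      if (delegate->ShouldYield()) return;
    }
  }

  size_t GetMaxConcurrency(size_t worker_count) const override {
    return std::min<size_t>(
        8, worker_count + items_->load(std::memory_order_relaxed));
  }

 private:
  std::atomic<size_t>* const items_;
};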
-void ConcurrentMarking::RescheduleTasksIfNeeded() {
+void ConcurrentMarking::RescheduleJobIfNeeded(TaskPriority priority) {
DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
if (heap_->IsTearingDown()) return;
- {
- base::MutexGuard guard(&pending_lock_);
- // The total task count is initialized in ScheduleTasks from
- // NumberOfWorkerThreads of the platform.
- if (total_task_count_ > 0 && pending_task_count_ == total_task_count_) {
- return;
- }
+
+ if (marking_worklists_->shared()->IsEmpty() &&
+ weak_objects_->current_ephemerons.IsGlobalPoolEmpty() &&
+ weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
+ return;
}
- if (!marking_worklists_->shared()->IsEmpty() ||
- !weak_objects_->current_ephemerons.IsGlobalPoolEmpty() ||
- !weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
- ScheduleTasks();
+ if (!job_handle_ || !job_handle_->IsValid()) {
+ ScheduleJob(priority);
+ } else {
+ if (priority != TaskPriority::kUserVisible)
+ job_handle_->UpdatePriority(priority);
+ job_handle_->NotifyConcurrencyIncrease();
}
}
-bool ConcurrentMarking::Stop(StopRequest stop_request) {
+void ConcurrentMarking::Join() {
DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
- base::MutexGuard guard(&pending_lock_);
-
- if (pending_task_count_ == 0) return false;
-
- if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
- CancelableTaskManager* task_manager =
- heap_->isolate()->cancelable_task_manager();
- for (int i = 1; i <= total_task_count_; i++) {
- if (is_pending_[i]) {
- if (task_manager->TryAbort(cancelable_id_[i]) ==
- TryAbortResult::kTaskAborted) {
- is_pending_[i] = false;
- --pending_task_count_;
- } else if (stop_request == StopRequest::PREEMPT_TASKS) {
- task_state_[i].preemption_request = true;
- }
- }
- }
- }
- while (pending_task_count_ > 0) {
- pending_condition_.Wait(&pending_lock_);
- }
- for (int i = 1; i <= total_task_count_; i++) {
- DCHECK(!is_pending_[i]);
- }
+ if (!job_handle_ || !job_handle_->IsValid()) return;
+ job_handle_->Join();
+}
+
+bool ConcurrentMarking::Pause() {
+ DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
+ if (!job_handle_ || !job_handle_->IsValid()) return false;
+
+ job_handle_->Cancel();
return true;
}
bool ConcurrentMarking::IsStopped() {
if (!FLAG_concurrent_marking) return true;
- base::MutexGuard guard(&pending_lock_);
- return pending_task_count_ == 0;
+ return !job_handle_ || !job_handle_->IsValid();
}
void ConcurrentMarking::FlushNativeContexts(NativeContextStats* main_stats) {
- for (int i = 1; i <= total_task_count_; i++) {
+ DCHECK(!job_handle_ || !job_handle_->IsValid());
+ for (int i = 1; i <= kMaxTasks; i++) {
main_stats->Merge(task_state_[i].native_context_stats);
task_state_[i].native_context_stats.Clear();
}
@@ -617,8 +589,8 @@ void ConcurrentMarking::FlushNativeContexts(NativeContextStats* main_stats) {
void ConcurrentMarking::FlushMemoryChunkData(
MajorNonAtomicMarkingState* marking_state) {
- DCHECK_EQ(pending_task_count_, 0);
- for (int i = 1; i <= total_task_count_; i++) {
+ DCHECK(!job_handle_ || !job_handle_->IsValid());
+ for (int i = 1; i <= kMaxTasks; i++) {
MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
for (auto& pair : memory_chunk_data) {
// ClearLiveness sets the live bytes to zero.
@@ -640,7 +612,8 @@ void ConcurrentMarking::FlushMemoryChunkData(
}
void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
- for (int i = 1; i <= total_task_count_; i++) {
+ DCHECK(!job_handle_ || !job_handle_->IsValid());
+ for (int i = 1; i <= kMaxTasks; i++) {
auto it = task_state_[i].memory_chunk_data.find(chunk);
if (it != task_state_[i].memory_chunk_data.end()) {
it->second.live_bytes = 0;
@@ -651,7 +624,7 @@ void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
size_t ConcurrentMarking::TotalMarkedBytes() {
size_t result = 0;
- for (int i = 1; i <= total_task_count_; i++) {
+ for (int i = 1; i <= kMaxTasks; i++) {
result +=
base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
}
@@ -661,14 +634,12 @@ size_t ConcurrentMarking::TotalMarkedBytes() {
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
: concurrent_marking_(concurrent_marking),
- resume_on_exit_(FLAG_concurrent_marking &&
- concurrent_marking_->Stop(
- ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
+ resume_on_exit_(FLAG_concurrent_marking && concurrent_marking_->Pause()) {
DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
}
ConcurrentMarking::PauseScope::~PauseScope() {
- if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
+ if (resume_on_exit_) concurrent_marking_->RescheduleJobIfNeeded();
}
} // namespace internal
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index 6ed671fb1b..4d2dda08c1 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -29,7 +29,7 @@ class Heap;
class Isolate;
class MajorNonAtomicMarkingState;
class MemoryChunk;
-struct WeakObjects;
+class WeakObjects;
struct MemoryChunkData {
intptr_t live_bytes;
@@ -54,17 +54,6 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
const bool resume_on_exit_;
};
- enum class StopRequest {
- // Preempt ongoing tasks ASAP (and cancel unstarted tasks).
- PREEMPT_TASKS,
- // Wait for ongoing tasks to complete (and cancels unstarted tasks).
- COMPLETE_ONGOING_TASKS,
- // Wait for all scheduled tasks to complete (only use this in tests that
- // control the full stack -- otherwise tasks cancelled by the platform can
- // make this call hang).
- COMPLETE_TASKS_FOR_TESTING,
- };
-
// TODO(gab): The only thing that prevents this being above 7 is
// Worklist::kMaxNumTasks being maxed at 8 (concurrent marking doesn't use
// task 0, reserved for the main thread).
@@ -73,16 +62,22 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
ConcurrentMarking(Heap* heap, MarkingWorklists* marking_worklists,
WeakObjects* weak_objects);
- // Schedules asynchronous tasks to perform concurrent marking. Objects in the
- // heap should not be moved while these are active (can be stopped safely via
- // Stop() or PauseScope).
- void ScheduleTasks();
-
- // Stops concurrent marking per |stop_request|'s semantics. Returns true
- // if concurrent marking was in progress, false otherwise.
- bool Stop(StopRequest stop_request);
-
- void RescheduleTasksIfNeeded();
+ // Schedules asynchronous job to perform concurrent marking at |priority|.
+ // Objects in the heap should not be moved while these are active (can be
+ // stopped safely via Stop() or PauseScope).
+ void ScheduleJob(TaskPriority priority = TaskPriority::kUserVisible);
+
+ // Waits for scheduled job to complete.
+ void Join();
+ // Preempts ongoing job ASAP. Returns true if concurrent marking was in
+ // progress, false otherwise.
+ bool Pause();
+
+ // Schedules an asynchronous job to perform concurrent marking at |priority|
+ // if not already running; otherwise adjusts the number of workers running
+ // the job and the priority if different from the default kUserVisible.
+ void RescheduleJobIfNeeded(
+ TaskPriority priority = TaskPriority::kUserVisible);
// Flushes native context sizes to the given table of the main thread.
void FlushNativeContexts(NativeContextStats* main_stats);
// Flushes memory chunk data using the given marking state.
@@ -103,31 +98,24 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
private:
struct TaskState {
- // The main thread sets this flag to true when it wants the concurrent
- // marker to give up the worker thread.
- std::atomic<bool> preemption_request;
size_t marked_bytes = 0;
- unsigned mark_compact_epoch;
- bool is_forced_gc;
MemoryChunkDataMap memory_chunk_data;
NativeContextInferrer native_context_inferrer;
NativeContextStats native_context_stats;
char cache_line_padding[64];
};
- class Task;
- void Run(int task_id, TaskState* task_state);
+ class JobTask;
+ void Run(JobDelegate* delegate, unsigned mark_compact_epoch,
+ bool is_forced_gc);
+ size_t GetMaxConcurrency(size_t worker_count);
+
+ std::unique_ptr<JobHandle> job_handle_;
Heap* const heap_;
MarkingWorklists* const marking_worklists_;
WeakObjects* const weak_objects_;
TaskState task_state_[kMaxTasks + 1];
std::atomic<size_t> total_marked_bytes_{0};
std::atomic<bool> ephemeron_marked_{false};
- base::Mutex pending_lock_;
- base::ConditionVariable pending_condition_;
- int pending_task_count_ = 0;
- bool is_pending_[kMaxTasks + 1] = {};
- CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1] = {};
- int total_task_count_ = 0;
};
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.cc b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
index 6c9f99272a..712be09f2c 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -12,8 +12,11 @@
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
#include "src/heap/base/stack.h"
+#include "src/heap/cppgc-js/cpp-snapshot.h"
#include "src/heap/cppgc-js/unified-heap-marking-state.h"
+#include "src/heap/cppgc-js/unified-heap-marking-verifier.h"
#include "src/heap/cppgc-js/unified-heap-marking-visitor.h"
+#include "src/heap/cppgc/concurrent-marker.h"
#include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-object-header.h"
@@ -27,6 +30,7 @@
#include "src/heap/marking-worklist.h"
#include "src/heap/sweeper.h"
#include "src/init/v8.h"
+#include "src/profiler/heap-profiler.h"
namespace v8 {
namespace internal {
@@ -63,6 +67,33 @@ class CppgcPlatformAdapter final : public cppgc::Platform {
v8::Isolate* isolate_;
};
+class UnifiedHeapConcurrentMarker
+ : public cppgc::internal::ConcurrentMarkerBase {
+ public:
+ UnifiedHeapConcurrentMarker(
+ cppgc::internal::HeapBase& heap,
+ cppgc::internal::MarkingWorklists& marking_worklists,
+ cppgc::internal::IncrementalMarkingSchedule& incremental_marking_schedule,
+ cppgc::Platform* platform,
+ UnifiedHeapMarkingState& unified_heap_marking_state)
+ : cppgc::internal::ConcurrentMarkerBase(
+ heap, marking_worklists, incremental_marking_schedule, platform),
+ unified_heap_marking_state_(unified_heap_marking_state) {}
+
+ std::unique_ptr<cppgc::Visitor> CreateConcurrentMarkingVisitor(
+ ConcurrentMarkingState&) const final;
+
+ private:
+ UnifiedHeapMarkingState& unified_heap_marking_state_;
+};
+
+std::unique_ptr<cppgc::Visitor>
+UnifiedHeapConcurrentMarker::CreateConcurrentMarkingVisitor(
+ ConcurrentMarkingState& marking_state) const {
+ return std::make_unique<ConcurrentUnifiedHeapMarkingVisitor>(
+ heap(), marking_state, unified_heap_marking_state_);
+}
+
class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
public:
UnifiedHeapMarker(Key, Heap& v8_heap, cppgc::internal::HeapBase& cpp_heap,
@@ -82,8 +113,8 @@ class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
}
private:
- UnifiedHeapMarkingState unified_heap_mutator_marking_state_;
- UnifiedHeapMarkingVisitor marking_visitor_;
+ UnifiedHeapMarkingState unified_heap_marking_state_;
+ MutatorUnifiedHeapMarkingVisitor marking_visitor_;
cppgc::internal::ConservativeMarkingVisitor conservative_marking_visitor_;
};
@@ -92,11 +123,15 @@ UnifiedHeapMarker::UnifiedHeapMarker(Key key, Heap& v8_heap,
cppgc::Platform* platform,
MarkingConfig config)
: cppgc::internal::MarkerBase(key, heap, platform, config),
- unified_heap_mutator_marking_state_(v8_heap),
+ unified_heap_marking_state_(v8_heap),
marking_visitor_(heap, mutator_marking_state_,
- unified_heap_mutator_marking_state_),
+ unified_heap_marking_state_),
conservative_marking_visitor_(heap, mutator_marking_state_,
- marking_visitor_) {}
+ marking_visitor_) {
+ concurrent_marker_ = std::make_unique<UnifiedHeapConcurrentMarker>(
+ heap_, marking_worklists_, schedule_, platform_,
+ unified_heap_marking_state_);
+}
void UnifiedHeapMarker::AddObject(void* object) {
mutator_marking_state_.MarkAndPush(
@@ -105,13 +140,22 @@ void UnifiedHeapMarker::AddObject(void* object) {
} // namespace
-CppHeap::CppHeap(v8::Isolate* isolate, size_t custom_spaces)
+CppHeap::CppHeap(
+ v8::Isolate* isolate,
+ const std::vector<std::unique_ptr<cppgc::CustomSpaceBase>>& custom_spaces)
: cppgc::internal::HeapBase(std::make_shared<CppgcPlatformAdapter>(isolate),
custom_spaces,
cppgc::internal::HeapBase::StackSupport::
kSupportsConservativeStackScan),
isolate_(*reinterpret_cast<Isolate*>(isolate)) {
CHECK(!FLAG_incremental_marking_wrappers);
+ isolate_.heap_profiler()->AddBuildEmbedderGraphCallback(&CppGraphBuilder::Run,
+ this);
+}
+
+CppHeap::~CppHeap() {
+ isolate_.heap_profiler()->RemoveBuildEmbedderGraphCallback(
+ &CppGraphBuilder::Run, this);
}
void CppHeap::RegisterV8References(
@@ -126,10 +170,19 @@ void CppHeap::RegisterV8References(
}
void CppHeap::TracePrologue(TraceFlags flags) {
+ // Finish sweeping in case it is still running.
+ sweeper_.FinishIfRunning();
+
const UnifiedHeapMarker::MarkingConfig marking_config{
UnifiedHeapMarker::MarkingConfig::CollectionType::kMajor,
cppgc::Heap::StackState::kNoHeapPointers,
- UnifiedHeapMarker::MarkingConfig::MarkingType::kIncremental};
+ UnifiedHeapMarker::MarkingConfig::MarkingType::kIncrementalAndConcurrent};
+ if ((flags == TraceFlags::kReduceMemory) || (flags == TraceFlags::kForced)) {
+ // Only enable compaction when in a memory reduction garbage collection as
+ // it may significantly increase the final garbage collection pause.
+ compactor_.InitializeIfShouldCompact(marking_config.marking_type,
+ marking_config.stack_state);
+ }
marker_ =
cppgc::internal::MarkerFactory::CreateAndStartMarking<UnifiedHeapMarker>(
*isolate_.heap(), AsBase(), platform_.get(), marking_config);
@@ -147,27 +200,39 @@ bool CppHeap::AdvanceTracing(double deadline_in_ms) {
bool CppHeap::IsTracingDone() { return marking_done_; }
void CppHeap::EnterFinalPause(EmbedderStackState stack_state) {
- marker_->EnterAtomicPause(cppgc::Heap::StackState::kNoHeapPointers);
+ marker_->EnterAtomicPause(stack_state);
+ if (compactor_.CancelIfShouldNotCompact(
+ UnifiedHeapMarker::MarkingConfig::MarkingType::kAtomic,
+ stack_state)) {
+ marker_->NotifyCompactionCancelled();
+ }
}
void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
CHECK(marking_done_);
- marker_->LeaveAtomicPause();
{
- // Pre finalizers are forbidden from allocating objects
+ // Weakness callbacks and pre-finalizers are forbidden from allocating
+ // objects.
cppgc::internal::ObjectAllocator::NoAllocationScope no_allocation_scope_(
object_allocator_);
- marker()->ProcessWeakness();
+ marker_->LeaveAtomicPause();
prefinalizer_handler()->InvokePreFinalizers();
}
marker_.reset();
// TODO(chromium:1056170): replace build flag with dedicated flag.
#if DEBUG
- VerifyMarking(cppgc::Heap::StackState::kNoHeapPointers);
+ UnifiedHeapMarkingVerifier verifier(*this);
+ verifier.Run(cppgc::Heap::StackState::kNoHeapPointers);
#endif
+ cppgc::internal::Sweeper::SweepingConfig::CompactableSpaceHandling
+ compactable_space_handling = compactor_.CompactSpacesIfEnabled();
{
NoGCScope no_gc(*this);
- sweeper().Start(cppgc::internal::Sweeper::Config::kAtomic);
+ const cppgc::internal::Sweeper::SweepingConfig sweeping_config{
+ cppgc::internal::Sweeper::SweepingConfig::SweepingType::
+ kIncrementalAndConcurrent,
+ compactable_space_handling};
+ sweeper().Start(sweeping_config);
}
}
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.h b/deps/v8/src/heap/cppgc-js/cpp-heap.h
index f3bbab8b16..b2bfc6f8a7 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.h
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.h
@@ -19,7 +19,13 @@ namespace internal {
class V8_EXPORT_PRIVATE CppHeap final : public cppgc::internal::HeapBase,
public v8::EmbedderHeapTracer {
public:
- CppHeap(v8::Isolate* isolate, size_t custom_spaces);
+ CppHeap(v8::Isolate* isolate,
+ const std::vector<std::unique_ptr<cppgc::CustomSpaceBase>>&
+ custom_spaces);
+ ~CppHeap() final;
+
+ CppHeap(const CppHeap&) = delete;
+ CppHeap& operator=(const CppHeap&) = delete;
HeapBase& AsBase() { return *this; }
const HeapBase& AsBase() const { return *this; }
diff --git a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
new file mode 100644
index 0000000000..b1784baa9f
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
@@ -0,0 +1,713 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc-js/cpp-snapshot.h"
+
+#include <memory>
+
+#include "include/cppgc/trace-trait.h"
+#include "include/v8-cppgc.h"
+#include "include/v8-profiler.h"
+#include "src/api/api-inl.h"
+#include "src/base/logging.h"
+#include "src/execution/isolate.h"
+#include "src/heap/cppgc-js/cpp-heap.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-visitor.h"
+#include "src/heap/embedder-tracing.h"
+#include "src/heap/mark-compact.h"
+#include "src/objects/js-objects.h"
+#include "src/profiler/heap-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+class CppGraphBuilderImpl;
+class StateStorage;
+class State;
+
+using cppgc::internal::GCInfo;
+using cppgc::internal::GlobalGCInfoTable;
+using cppgc::internal::HeapObjectHeader;
+
+// Node representing a C++ object on the heap.
+class EmbedderNode : public v8::EmbedderGraph::Node {
+ public:
+ explicit EmbedderNode(const char* name) : name_(name) {}
+ ~EmbedderNode() override = default;
+
+ const char* Name() final { return name_; }
+ size_t SizeInBytes() override { return 0; }
+
+ void SetWrapperNode(v8::EmbedderGraph::Node* wrapper_node) {
+ wrapper_node_ = wrapper_node;
+ }
+ Node* WrapperNode() final { return wrapper_node_; }
+
+ void SetDetachedness(Detachedness detachedness) {
+ detachedness_ = detachedness;
+ }
+ Detachedness GetDetachedness() final { return detachedness_; }
+
+ private:
+ const char* name_;
+ Node* wrapper_node_ = nullptr;
+ Detachedness detachedness_ = Detachedness::kUnknown;
+};
+
+// Node representing an artificial root group, e.g., set of Persistent handles.
+class EmbedderRootNode final : public EmbedderNode {
+ public:
+ explicit EmbedderRootNode(const char* name) : EmbedderNode(name) {}
+ ~EmbedderRootNode() final = default;
+
+ bool IsRootNode() final { return true; }
+ size_t SizeInBytes() final { return 0; }
+};
+
+// Canonical state representing real and artificial (e.g. root) objects.
+class StateBase {
+ public:
+ // Objects can either be hidden/visible, or depend on some other nodes while
+ // traversing the same SCC.
+ enum class Visibility {
+ kHidden,
+ kDependentVisibility,
+ kVisible,
+ };
+
+ StateBase(const void* key, size_t state_count, Visibility visibility,
+ EmbedderNode* node, bool visited)
+ : key_(key),
+ state_count_(state_count),
+ visibility_(visibility),
+ node_(node),
+ visited_(visited) {
+ DCHECK_NE(Visibility::kDependentVisibility, visibility);
+ }
+
+ // Visited objects have already been processed or are currently being
+ // processed; see also IsPending() below.
+ bool IsVisited() const { return visited_; }
+
+ // Pending objects are currently being processed as part of the same SCC.
+ bool IsPending() const { return pending_; }
+
+ bool IsVisibleNotDependent() {
+ auto v = GetVisibility();
+ CHECK_NE(Visibility::kDependentVisibility, v);
+ return v == Visibility::kVisible;
+ }
+
+ void set_node(EmbedderNode* node) {
+ CHECK_EQ(Visibility::kVisible, GetVisibility());
+ node_ = node;
+ }
+
+ EmbedderNode* get_node() {
+ CHECK_EQ(Visibility::kVisible, GetVisibility());
+ return node_;
+ }
+
+ protected:
+ const void* key_;
+ // State count keeps track of node processing order. It is used to create
+ // dependencies only on ancestors in the subgraph, which ensures that there
+ // will be no cycles in dependencies.
+ const size_t state_count_;
+
+ Visibility visibility_;
+ StateBase* visibility_dependency_ = nullptr;
+ EmbedderNode* node_;
+ bool visited_;
+ bool pending_ = false;
+
+ Visibility GetVisibility() {
+ FollowDependencies();
+ return visibility_;
+ }
+
+ StateBase* FollowDependencies() {
+ if (visibility_ != Visibility::kDependentVisibility) {
+ CHECK_NULL(visibility_dependency_);
+ return this;
+ }
+ StateBase* current = this;
+ std::vector<StateBase*> dependencies;
+ while (current->visibility_dependency_ &&
+ current->visibility_dependency_ != current) {
+ DCHECK_EQ(Visibility::kDependentVisibility, current->visibility_);
+ dependencies.push_back(current);
+ current = current->visibility_dependency_;
+ }
+ auto new_visibility = Visibility::kDependentVisibility;
+ auto* new_visibility_dependency = current;
+ if (current->visibility_ == Visibility::kVisible) {
+ new_visibility = Visibility::kVisible;
+ new_visibility_dependency = nullptr;
+ } else if (!IsPending()) {
+ DCHECK(IsVisited());
+ // The object was not visible (above case). Having a dependency on itself
+ // or null means no visible object was found.
+ new_visibility = Visibility::kHidden;
+ new_visibility_dependency = nullptr;
+ }
+ current->visibility_ = new_visibility;
+ current->visibility_dependency_ = new_visibility_dependency;
+ for (auto* state : dependencies) {
+ state->visibility_ = new_visibility;
+ state->visibility_dependency_ = new_visibility_dependency;
+ }
+ return current;
+ }
+
+ friend class State;
+};
+
+class State final : public StateBase {
+ public:
+ State(const HeapObjectHeader& header, size_t state_count)
+ : StateBase(&header, state_count, Visibility::kHidden, nullptr, false) {}
+
+ const HeapObjectHeader* header() const {
+ return static_cast<const HeapObjectHeader*>(key_);
+ }
+
+ void MarkVisited() { visited_ = true; }
+
+ void MarkPending() { pending_ = true; }
+ void UnmarkPending() { pending_ = false; }
+
+ void MarkVisible() {
+ visibility_ = Visibility::kVisible;
+ visibility_dependency_ = nullptr;
+ }
+
+ void MarkDependentVisibility(StateBase* dependency) {
+ // Follow and update dependencies as much as possible.
+ dependency = dependency->FollowDependencies();
+ DCHECK(dependency->IsVisited());
+ if (visibility_ == StateBase::Visibility::kVisible) {
+ // Already visible, no dependency needed.
+ DCHECK_NULL(visibility_dependency_);
+ return;
+ }
+ if (dependency->visibility_ == Visibility::kVisible) {
+ // Simple case: Dependency is visible.
+ visibility_ = Visibility::kVisible;
+ visibility_dependency_ = nullptr;
+ return;
+ }
+ if ((visibility_dependency_ &&
+ (visibility_dependency_->state_count_ > dependency->state_count_)) ||
+ (!visibility_dependency_ &&
+ (state_count_ > dependency->state_count_))) {
+ // Only update when new state_count_ < original state_count_. This
+ // ensures that we pick an ancestor as dependency and not a child,
+ // which guarantees that the resolution converges to an answer.
+ //
+ // Dependency is now
+ // a) either pending with unknown visibility (same call chain), or
+ // b) not pending and has defined visibility.
+ //
+ // It's not possible to point to a state that is not pending but has
+ // dependent visibility because dependencies are updated to the top-most
+ // dependency at the beginning of the method.
+ if (dependency->IsPending()) {
+ visibility_ = Visibility::kDependentVisibility;
+ visibility_dependency_ = dependency;
+ } else {
+ CHECK_NE(Visibility::kDependentVisibility, dependency->visibility_);
+ if (dependency->visibility_ == Visibility::kVisible) {
+ visibility_ = Visibility::kVisible;
+ visibility_dependency_ = nullptr;
+ }
+ }
+ }
+ }
+};
+
+// Root states are similar to regular states with the difference that they can
+// have named edges (source location of the root) that aid debugging.
+class RootState final : public StateBase {
+ public:
+ RootState(EmbedderRootNode* node, size_t state_count)
+ // Root states are always visited, visible, and have a node attached.
+ : StateBase(node, state_count, Visibility::kVisible, node, true) {}
+
+ void AddNamedEdge(std::unique_ptr<const char[]> edge_name) {
+ named_edges_.push_back(std::move(edge_name));
+ }
+
+ private:
+ // Edge names are passed to V8 but are required to be held alive from the
+ // embedder until the snapshot is compiled.
+ std::vector<std::unique_ptr<const char[]>> named_edges_;
+};
+
+// Abstraction for storing states. Storage allows for creation and lookup of
+// different state objects.
+class StateStorage final {
+ public:
+ bool StateExists(const void* key) const {
+ return states_.find(key) != states_.end();
+ }
+
+ StateBase& GetExistingState(const void* key) const {
+ CHECK(StateExists(key));
+ return *states_.at(key).get();
+ }
+
+ State& GetExistingState(const HeapObjectHeader& header) const {
+ return static_cast<State&>(GetExistingState(&header));
+ }
+
+ State& GetOrCreateState(const HeapObjectHeader& header) {
+ if (!StateExists(&header)) {
+ auto it = states_.insert(std::make_pair(
+ &header, std::make_unique<State>(header, ++state_count_)));
+ DCHECK(it.second);
+ USE(it);
+ }
+ return GetExistingState(header);
+ }
+
+ RootState& CreateRootState(EmbedderRootNode* root_node) {
+ CHECK(!StateExists(root_node));
+ auto it = states_.insert(std::make_pair(
+ root_node, std::make_unique<RootState>(root_node, ++state_count_)));
+ DCHECK(it.second);
+ USE(it);
+ return static_cast<RootState&>(*it.first->second.get());
+ }
+
+ template <typename Callback>
+ void ForAllVisibleStates(Callback callback) {
+ for (auto& state : states_) {
+ if (state.second->IsVisibleNotDependent()) {
+ callback(state.second.get());
+ }
+ }
+ }
+
+ private:
+ std::unordered_map<const void*, std::unique_ptr<StateBase>> states_;
+ size_t state_count_ = 0;
+};
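+
+// A minimal sketch of the intended protocol, assuming a HeapObjectHeader
+// `header`; the first pass creates and marks states, the second pass consumes
+// the visible ones:
+//
+//   StateStorage states;
+//   State& state = states.GetOrCreateState(header);
+//   state.MarkVisited();
+//   state.MarkVisible();
+//   states.ForAllVisibleStates(
+//       [](StateBase* state) { /* create nodes and edges */ });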
+
+bool HasEmbedderDataBackref(Isolate* isolate, v8::Local<v8::Value> v8_value,
+ void* expected_backref) {
+ // See LocalEmbedderHeapTracer::VerboseWrapperInfo for details on how
+ // wrapper objects are set up.
+ if (!v8_value->IsObject()) return false;
+
+ Handle<Object> v8_object = Utils::OpenHandle(*v8_value);
+ if (!v8_object->IsJSObject() || !JSObject::cast(*v8_object).IsApiWrapper())
+ return false;
+
+ JSObject js_object = JSObject::cast(*v8_object);
+ return js_object.GetEmbedderFieldCount() >= 2 &&
+ LocalEmbedderHeapTracer::VerboseWrapperInfo(
+ LocalEmbedderHeapTracer::ExtractWrapperInfo(isolate, js_object))
+ .instance() == expected_backref;
+}
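+
+// Sketch of the wrapper layout assumed above: API wrapper objects carry at
+// least two embedder fields, where the first one describes the wrappable
+// type and the second one ("instance") points back to the C++ object. The
+// check succeeds when that back pointer equals |expected_backref|.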
+
+// The following implements a snapshotting algorithm for C++ objects that also
+// filters strongly-connected components (SCCs) of only "hidden" objects that
+// are not (transitively) referencing any non-hidden objects.
+//
+// C++ objects come in two versions.
+// a. Named objects that have been assigned a name through NameProvider.
+// b. Unnamed objects, that are potentially hidden if the build configuration
+// requires Oilpan to hide such names. Hidden objects have their name
+// set to NameProvider::kHiddenName.
+//
+// The main challenge for the algorithm is to avoid blowing up the final object
+// graph with hidden nodes that do not carry information. For that reason, the
+// algorithm filters SCCs of only hidden objects, e.g.:
+// ... -> (object) -> (object) -> (hidden) -> (hidden)
+// In this case the (hidden) objects are filtered from the graph. The trickiest
+// part is maintaining visibility state for objects referencing other objects
+// that are currently being processed.
+//
+// Main algorithm idea (two passes):
+// 1. First pass marks all non-hidden objects and those that transitively reach
+// non-hidden objects as visible. Details:
+// - Iterate over all objects.
+// - If an object is non-hidden, mark it as visible and also mark its parent
+// as visible if needed.
+// - If an object is hidden, traverse its children as DFS to find non-hidden
+// objects. Process the objects in post order and mark as visible those
+// objects whose children are themselves visible.
+// - Maintain an epoch counter (StateStorage::state_count_) to allow
+// deferring the visibility decision to other objects in the same SCC. This
+// is similar to the "lowlink" value in Tarjan's algorithm for SCC.
+// - After the first pass it is guaranteed that all deferred visibility
+// decisions can be resolved.
+// 2. Second pass adds nodes and edges for all visible objects.
+// - Upon first checking the visibility state of an object, all deferred
+// visibility states are resolved.
+//
+// For practical reasons, the recursion is transformed into an iteration. We
+// do not use plain Tarjan's algorithm to avoid another pass over all nodes to
+// create SCCs.
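+//
+// Worked example, assuming the object graph
+//   (hidden A) -> (hidden B) -> (named C):
+// A is traced first; it is hidden, so it is queued and its visibility
+// decision is deferred. Tracing then reaches hidden B (queued likewise) and
+// finally C. C is named, so it is marked visible and its parent B is eagerly
+// marked visible as well. When B's post-order item runs,
+// MarkDependentVisibility() sees that B is already visible and marks A
+// visible too.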
+class CppGraphBuilderImpl final {
+ public:
+ CppGraphBuilderImpl(CppHeap& cpp_heap, v8::EmbedderGraph& graph)
+ : cpp_heap_(cpp_heap), graph_(graph) {}
+
+ void Run();
+
+ void VisitForVisibility(State* parent, const HeapObjectHeader&);
+ void VisitForVisibility(State& parent, const TracedReferenceBase&);
+ void VisitRootForGraphBuilding(RootState&, const HeapObjectHeader&,
+ const cppgc::SourceLocation&);
+ void ProcessPendingObjects();
+
+ EmbedderRootNode* AddRootNode(const char* name) {
+ return static_cast<EmbedderRootNode*>(graph_.AddNode(
+ std::unique_ptr<v8::EmbedderGraph::Node>{new EmbedderRootNode(name)}));
+ }
+
+ EmbedderNode* AddNode(const HeapObjectHeader& header) {
+ return static_cast<EmbedderNode*>(
+ graph_.AddNode(std::unique_ptr<v8::EmbedderGraph::Node>{
+ new EmbedderNode(header.GetName().value)}));
+ }
+
+ void AddEdge(State& parent, const HeapObjectHeader& header) {
+ DCHECK(parent.IsVisibleNotDependent());
+ auto& current = states_.GetExistingState(header);
+ if (!current.IsVisibleNotDependent()) return;
+
+ // Both states are visible. Create nodes in case this is the first edge
+ // created for any of them.
+ if (!parent.get_node()) {
+ parent.set_node(AddNode(*parent.header()));
+ }
+ if (!current.get_node()) {
+ current.set_node(AddNode(header));
+ }
+ graph_.AddEdge(parent.get_node(), current.get_node());
+ }
+
+ void AddEdge(State& parent, const TracedReferenceBase& ref) {
+ DCHECK(parent.IsVisibleNotDependent());
+ v8::Local<v8::Value> v8_value = ref.Get(cpp_heap_.isolate());
+ if (!v8_value.IsEmpty()) {
+ if (!parent.get_node()) {
+ parent.set_node(AddNode(*parent.header()));
+ }
+ auto* v8_node = graph_.V8Node(v8_value);
+ graph_.AddEdge(parent.get_node(), v8_node);
+
+ // References that have a class id set may have their internal fields
+ // pointing back to the object. Set up a wrapper node for the graph so
+ // that the snapshot generator can merge the nodes appropriately.
+ if (!ref.WrapperClassId()) return;
+
+ if (HasEmbedderDataBackref(
+ reinterpret_cast<v8::internal::Isolate*>(cpp_heap_.isolate()),
+ v8_value, parent.header()->Payload())) {
+ parent.get_node()->SetWrapperNode(v8_node);
+
+ auto* profiler =
+ reinterpret_cast<Isolate*>(cpp_heap_.isolate())->heap_profiler();
+ if (profiler->HasGetDetachednessCallback()) {
+ parent.get_node()->SetDetachedness(
+ profiler->GetDetachedness(v8_value, ref.WrapperClassId()));
+ }
+ }
+ }
+ }
+
+ void AddRootEdge(RootState& root, State& child, std::string edge_name) {
+ DCHECK(root.IsVisibleNotDependent());
+ if (!child.IsVisibleNotDependent()) return;
+
+ // Root states always have a node set.
+ DCHECK_NOT_NULL(root.get_node());
+ if (!child.get_node()) {
+ child.set_node(AddNode(*child.header()));
+ }
+
+ if (!edge_name.empty()) {
+ // V8's API is based on raw C strings. Allocate and temporarily keep the
+ // edge name alive from the corresponding node.
+ const size_t len = edge_name.length();
+ char* raw_location_string = new char[len + 1];
+ strncpy(raw_location_string, edge_name.c_str(), len);
+ raw_location_string[len] = 0;
+ std::unique_ptr<const char[]> holder(raw_location_string);
+ graph_.AddEdge(root.get_node(), child.get_node(), holder.get());
+ root.AddNamedEdge(std::move(holder));
+ return;
+ }
+ graph_.AddEdge(root.get_node(), child.get_node());
+ }
+
+ private:
+ class WorkstackItemBase;
+ class VisitationItem;
+ class VisitationDoneItem;
+
+ CppHeap& cpp_heap_;
+ v8::EmbedderGraph& graph_;
+ StateStorage states_;
+ std::vector<std::unique_ptr<WorkstackItemBase>> workstack_;
+};
+
+// Iterating live objects to mark them as visible if needed.
+class LiveObjectsForVisibilityIterator final
+ : public cppgc::internal::HeapVisitor<LiveObjectsForVisibilityIterator> {
+ friend class cppgc::internal::HeapVisitor<LiveObjectsForVisibilityIterator>;
+
+ public:
+ explicit LiveObjectsForVisibilityIterator(CppGraphBuilderImpl& graph_builder)
+ : graph_builder_(graph_builder) {}
+
+ private:
+ bool VisitHeapObjectHeader(HeapObjectHeader* header) {
+ if (header->IsFree()) return true;
+ graph_builder_.VisitForVisibility(nullptr, *header);
+ graph_builder_.ProcessPendingObjects();
+ return true;
+ }
+
+ CppGraphBuilderImpl& graph_builder_;
+};
+
+class ParentScope final {
+ public:
+ explicit ParentScope(StateBase& parent) : parent_(parent) {}
+
+ RootState& ParentAsRootState() const {
+ return static_cast<RootState&>(parent_);
+ }
+ State& ParentAsRegularState() const { return static_cast<State&>(parent_); }
+
+ private:
+ StateBase& parent_;
+};
+
+class VisibilityVisitor final : public JSVisitor {
+ public:
+ explicit VisibilityVisitor(CppGraphBuilderImpl& graph_builder,
+ const ParentScope& parent_scope)
+ : JSVisitor(cppgc::internal::VisitorFactory::CreateKey()),
+ graph_builder_(graph_builder),
+ parent_scope_(parent_scope) {}
+
+ // C++ handling.
+ void Visit(const void*, cppgc::TraceDescriptor desc) final {
+ graph_builder_.VisitForVisibility(
+ &parent_scope_.ParentAsRegularState(),
+ HeapObjectHeader::FromPayload(desc.base_object_payload));
+ }
+ void VisitRoot(const void*, cppgc::TraceDescriptor,
+ const cppgc::SourceLocation&) final {}
+ void VisitWeakRoot(const void*, cppgc::TraceDescriptor, cppgc::WeakCallback,
+ const void*, const cppgc::SourceLocation&) final {}
+ void VisitWeakContainer(const void* object,
+ cppgc::TraceDescriptor strong_desc,
+ cppgc::TraceDescriptor weak_desc, cppgc::WeakCallback,
+ const void*) final {
+ if (!weak_desc.callback) {
+ // Weak container does not contribute to liveness.
+ return;
+ }
+ // Heap snapshot is always run after a GC so we know there are no dead
+ // entries in the backing store, thus it is safe to trace it strongly.
+ if (object) {
+ Visit(object, strong_desc);
+ }
+ }
+
+ // JS handling.
+ void Visit(const TracedReferenceBase& ref) final {
+ graph_builder_.VisitForVisibility(parent_scope_.ParentAsRegularState(),
+ ref);
+ }
+
+ private:
+ CppGraphBuilderImpl& graph_builder_;
+ const ParentScope& parent_scope_;
+};
+
+class GraphBuildingVisitor final : public JSVisitor {
+ public:
+ GraphBuildingVisitor(CppGraphBuilderImpl& graph_builder,
+ const ParentScope& parent_scope)
+ : JSVisitor(cppgc::internal::VisitorFactory::CreateKey()),
+ graph_builder_(graph_builder),
+ parent_scope_(parent_scope) {}
+
+ // C++ handling.
+ void Visit(const void*, cppgc::TraceDescriptor desc) final {
+ graph_builder_.AddEdge(
+ parent_scope_.ParentAsRegularState(),
+ HeapObjectHeader::FromPayload(desc.base_object_payload));
+ }
+ void VisitRoot(const void*, cppgc::TraceDescriptor desc,
+ const cppgc::SourceLocation& loc) final {
+ graph_builder_.VisitRootForGraphBuilding(
+ parent_scope_.ParentAsRootState(),
+ HeapObjectHeader::FromPayload(desc.base_object_payload), loc);
+ }
+ void VisitWeakRoot(const void*, cppgc::TraceDescriptor, cppgc::WeakCallback,
+ const void*, const cppgc::SourceLocation&) final {}
+ // JS handling.
+ void Visit(const TracedReferenceBase& ref) final {
+ graph_builder_.AddEdge(parent_scope_.ParentAsRegularState(), ref);
+ }
+
+ private:
+ CppGraphBuilderImpl& graph_builder_;
+ const ParentScope& parent_scope_;
+};
+
+// Base class for transforming recursion into iteration. Items are processed
+// in stack fashion.
+class CppGraphBuilderImpl::WorkstackItemBase {
+ public:
+ WorkstackItemBase(State* parent, State& current)
+ : parent_(parent), current_(current) {}
+
+ virtual ~WorkstackItemBase() = default;
+ virtual void Process(CppGraphBuilderImpl&) = 0;
+
+ protected:
+ State* parent_;
+ State& current_;
+};
+
+void CppGraphBuilderImpl::ProcessPendingObjects() {
+ while (!workstack_.empty()) {
+ std::unique_ptr<WorkstackItemBase> item = std::move(workstack_.back());
+ workstack_.pop_back();
+ item->Process(*this);
+ }
+}
+
+// Post-order processing of an object. It's guaranteed that all children have
+// been processed first.
+class CppGraphBuilderImpl::VisitationDoneItem final : public WorkstackItemBase {
+ public:
+ VisitationDoneItem(State* parent, State& current)
+ : WorkstackItemBase(parent, current) {}
+
+ void Process(CppGraphBuilderImpl& graph_builder) final {
+ CHECK(parent_);
+ parent_->MarkDependentVisibility(&current_);
+ current_.UnmarkPending();
+ }
+};
+
+class CppGraphBuilderImpl::VisitationItem final : public WorkstackItemBase {
+ public:
+ VisitationItem(State* parent, State& current)
+ : WorkstackItemBase(parent, current) {}
+
+ void Process(CppGraphBuilderImpl& graph_builder) final {
+ if (parent_) {
+ // Re-add the same object for post-order processing. This must happen
+ // lazily, as the parent's visibility depends on its children.
+ graph_builder.workstack_.push_back(std::unique_ptr<WorkstackItemBase>{
+ new VisitationDoneItem(parent_, current_)});
+ }
+ ParentScope parent_scope(current_);
+ VisibilityVisitor object_visitor(graph_builder, parent_scope);
+ current_.header()->Trace(&object_visitor);
+ if (!parent_) {
+ current_.UnmarkPending();
+ }
+ }
+};
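+
+// Note on ordering: the workstack is LIFO, so Process() above first pushes
+// the VisitationDoneItem and only then, while tracing, pushes one
+// VisitationItem per unvisited hidden child. All children therefore complete
+// before the done item runs, which yields the post-order processing that the
+// visibility resolution relies on.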
+
+void CppGraphBuilderImpl::VisitForVisibility(State* parent,
+ const HeapObjectHeader& header) {
+ auto& current = states_.GetOrCreateState(header);
+
+ if (current.IsVisited()) {
+ // Avoid traversing into already visited subgraphs and just update the state
+ // based on a previous result.
+ if (parent) {
+ parent->MarkDependentVisibility(&current);
+ }
+ return;
+ }
+
+ current.MarkVisited();
+ if (header.GetName().name_was_hidden) {
+ current.MarkPending();
+ workstack_.push_back(std::unique_ptr<WorkstackItemBase>{
+ new VisitationItem(parent, current)});
+ } else {
+ // No need to mark/unmark pending as the node is immediately processed.
+ current.MarkVisible();
+ if (parent) {
+ // Eagerly update a parent object as its visibility state is now fixed.
+ parent->MarkVisible();
+ }
+ }
+}
+
+void CppGraphBuilderImpl::VisitForVisibility(State& parent,
+ const TracedReferenceBase& ref) {
+ v8::Local<v8::Value> v8_value = ref.Get(cpp_heap_.isolate());
+ if (!v8_value.IsEmpty()) {
+ parent.MarkVisible();
+ }
+}
+
+void CppGraphBuilderImpl::VisitRootForGraphBuilding(
+ RootState& root, const HeapObjectHeader& header,
+ const cppgc::SourceLocation& loc) {
+ State& current = states_.GetExistingState(header);
+ if (!current.IsVisibleNotDependent()) return;
+
+ AddRootEdge(root, current, loc.ToString());
+}
+
+void CppGraphBuilderImpl::Run() {
+ // Sweeping from a previous GC might still be running, in which case not all
+ // pages have been returned to spaces yet.
+ cpp_heap_.sweeper().FinishIfRunning();
+ // First pass: Figure out which objects should be included in the graph -- see
+ // class-level comment on CppGraphBuilderImpl.
+ LiveObjectsForVisibilityIterator visitor(*this);
+ visitor.Traverse(&cpp_heap_.raw_heap());
+ // Second pass: Add graph nodes for objects that must be shown.
+ states_.ForAllVisibleStates([this](StateBase* state) {
+ ParentScope parent_scope(*state);
+ GraphBuildingVisitor object_visitor(*this, parent_scope);
+ // No roots have been created so far, so all StateBase objects are State.
+ static_cast<State*>(state)->header()->Trace(&object_visitor);
+ });
+ // Add roots.
+ {
+ ParentScope parent_scope(states_.CreateRootState(AddRootNode("C++ roots")));
+ GraphBuildingVisitor object_visitor(*this, parent_scope);
+ cpp_heap_.GetStrongPersistentRegion().Trace(&object_visitor);
+ }
+ {
+ ParentScope parent_scope(
+ states_.CreateRootState(AddRootNode("C++ cross-thread roots")));
+ GraphBuildingVisitor object_visitor(*this, parent_scope);
+ cpp_heap_.GetStrongCrossThreadPersistentRegion().Trace(&object_visitor);
+ }
+}
+
+// static
+void CppGraphBuilder::Run(v8::Isolate* isolate, v8::EmbedderGraph* graph,
+ void* data) {
+ CppHeap* cpp_heap = static_cast<CppHeap*>(data);
+ CHECK_NOT_NULL(cpp_heap);
+ CHECK_NOT_NULL(graph);
+ CppGraphBuilderImpl graph_builder(*cpp_heap, *graph);
+ graph_builder.Run();
+}
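+
+// A minimal registration sketch, assuming an embedder that owns a CppHeap
+// `cpp_heap` attached to `isolate`; AddBuildEmbedderGraphCallback() is the
+// public v8::HeapProfiler hook whose callback signature Run() matches:
+//
+//   isolate->GetHeapProfiler()->AddBuildEmbedderGraphCallback(
+//       &CppGraphBuilder::Run, cpp_heap);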
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/cppgc-js/cpp-snapshot.h b/deps/v8/src/heap/cppgc-js/cpp-snapshot.h
new file mode 100644
index 0000000000..89d1026017
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/cpp-snapshot.h
@@ -0,0 +1,29 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_JS_CPP_SNAPSHOT_H_
+#define V8_HEAP_CPPGC_JS_CPP_SNAPSHOT_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+
+class Isolate;
+class EmbedderGraph;
+
+namespace internal {
+
+class V8_EXPORT_PRIVATE CppGraphBuilder final {
+ public:
+ // Add the C++ snapshot to the existing |graph|. See CppGraphBuilderImpl for
+ // algorithm internals.
+ static void Run(v8::Isolate* isolate, v8::EmbedderGraph* graph, void* data);
+
+ CppGraphBuilder() = delete;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CPPGC_JS_CPP_SNAPSHOT_H_
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
index 3ef36de504..1a1da3f278 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
@@ -6,17 +6,16 @@
#define V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_H_
#include "include/v8-cppgc.h"
+#include "include/v8.h"
#include "src/heap/heap.h"
namespace v8 {
-class JSMemberBase;
-
namespace internal {
-class JSMemberBaseExtractor {
+class BasicTracedReferenceExtractor {
public:
- static Address* ObjectReference(const JSMemberBase& ref) {
+ static Address* ObjectReference(const TracedReferenceBase& ref) {
return reinterpret_cast<Address*>(ref.val_);
}
};
@@ -28,15 +27,15 @@ class UnifiedHeapMarkingState {
UnifiedHeapMarkingState(const UnifiedHeapMarkingState&) = delete;
UnifiedHeapMarkingState& operator=(const UnifiedHeapMarkingState&) = delete;
- inline void MarkAndPush(const JSMemberBase&);
+ inline void MarkAndPush(const TracedReferenceBase&);
private:
Heap& heap_;
};
-void UnifiedHeapMarkingState::MarkAndPush(const JSMemberBase& ref) {
+void UnifiedHeapMarkingState::MarkAndPush(const TracedReferenceBase& ref) {
heap_.RegisterExternallyReferencedObject(
- JSMemberBaseExtractor::ObjectReference(ref));
+ BasicTracedReferenceExtractor::ObjectReference(ref));
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc b/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc
new file mode 100644
index 0000000000..ea14b52048
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc
@@ -0,0 +1,68 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc-js/unified-heap-marking-verifier.h"
+
+#include "include/v8-cppgc.h"
+#include "src/heap/cppgc/marking-verifier.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+class UnifiedHeapVerificationVisitor final : public JSVisitor {
+ public:
+ explicit UnifiedHeapVerificationVisitor(
+ cppgc::internal::VerificationState& state)
+ : JSVisitor(cppgc::internal::VisitorFactory::CreateKey()),
+ state_(state) {}
+
+ void Visit(const void*, cppgc::TraceDescriptor desc) final {
+ state_.VerifyMarked(desc.base_object_payload);
+ }
+
+ void VisitWeak(const void*, cppgc::TraceDescriptor desc, cppgc::WeakCallback,
+ const void*) final {
+ // Weak objects should have been cleared at this point. As a consequence,
+ // all objects found through weak references have to point to live objects.
+ state_.VerifyMarked(desc.base_object_payload);
+ }
+
+ void VisitWeakContainer(const void* object, cppgc::TraceDescriptor,
+ cppgc::TraceDescriptor weak_desc, cppgc::WeakCallback,
+ const void*) final {
+ if (!object) return;
+
+ // Contents of weak containers are themselves found through page iteration
+ // and are treated strongly, similar to how they are treated when
+ // found through stack scanning. The verification here only makes sure that
+ // the container itself is properly marked.
+ state_.VerifyMarked(weak_desc.base_object_payload);
+ }
+
+ void Visit(const TracedReferenceBase& ref) final {
+ // TODO(chromium:1056170): Verify V8 object is indeed marked.
+ }
+
+ private:
+ cppgc::internal::VerificationState& state_;
+};
+
+} // namespace
+
+UnifiedHeapMarkingVerifier::UnifiedHeapMarkingVerifier(
+ cppgc::internal::HeapBase& heap_base)
+ : MarkingVerifierBase(
+ heap_base, std::make_unique<UnifiedHeapVerificationVisitor>(state_)) {
+}
+
+void UnifiedHeapMarkingVerifier::SetCurrentParent(
+ const cppgc::internal::HeapObjectHeader* parent) {
+ state_.SetCurrentParent(parent);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h
new file mode 100644
index 0000000000..3a54b4dd32
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_VERIFIER_H_
+#define V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_VERIFIER_H_
+
+#include "src/heap/cppgc/marking-verifier.h"
+
+namespace v8 {
+namespace internal {
+
+class V8_EXPORT_PRIVATE UnifiedHeapMarkingVerifier final
+ : public cppgc::internal::MarkingVerifierBase {
+ public:
+ explicit UnifiedHeapMarkingVerifier(cppgc::internal::HeapBase&);
+ ~UnifiedHeapMarkingVerifier() final = default;
+
+ void SetCurrentParent(const cppgc::internal::HeapObjectHeader*) final;
+
+ private:
+ // TODO(chromium:1056170): Use a verification state that can handle JS
+ // references.
+ cppgc::internal::VerificationState state_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_VERIFIER_H_
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
index fc39a7a3dc..e235f8885d 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
@@ -4,6 +4,7 @@
#include "src/heap/cppgc-js/unified-heap-marking-visitor.h"
+#include "include/v8.h"
#include "src/heap/cppgc-js/unified-heap-marking-state.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marking-state.h"
@@ -12,46 +13,95 @@
namespace v8 {
namespace internal {
-UnifiedHeapMarkingVisitor::UnifiedHeapMarkingVisitor(
- HeapBase& heap, MarkingState& marking_state,
+UnifiedHeapMarkingVisitorBase::UnifiedHeapMarkingVisitorBase(
+ HeapBase& heap, MarkingStateBase& marking_state,
UnifiedHeapMarkingState& unified_heap_marking_state)
: JSVisitor(cppgc::internal::VisitorFactory::CreateKey()),
marking_state_(marking_state),
unified_heap_marking_state_(unified_heap_marking_state) {}
-void UnifiedHeapMarkingVisitor::Visit(const void* object,
- TraceDescriptor desc) {
+void UnifiedHeapMarkingVisitorBase::Visit(const void* object,
+ TraceDescriptor desc) {
marking_state_.MarkAndPush(object, desc);
}
-void UnifiedHeapMarkingVisitor::VisitWeak(const void* object,
- TraceDescriptor desc,
- WeakCallback weak_callback,
- const void* weak_member) {
+void UnifiedHeapMarkingVisitorBase::VisitWeak(const void* object,
+ TraceDescriptor desc,
+ WeakCallback weak_callback,
+ const void* weak_member) {
marking_state_.RegisterWeakReferenceIfNeeded(object, desc, weak_callback,
weak_member);
}
-void UnifiedHeapMarkingVisitor::VisitRoot(const void* object,
- TraceDescriptor desc) {
- Visit(object, desc);
+void UnifiedHeapMarkingVisitorBase::VisitEphemeron(const void* key,
+ TraceDescriptor value_desc) {
+ marking_state_.ProcessEphemeron(key, value_desc);
}
-void UnifiedHeapMarkingVisitor::VisitWeakRoot(const void* object,
- TraceDescriptor desc,
- WeakCallback weak_callback,
- const void* weak_root) {
- marking_state_.InvokeWeakRootsCallbackIfNeeded(object, desc, weak_callback,
- weak_root);
+void UnifiedHeapMarkingVisitorBase::VisitWeakContainer(
+ const void* self, TraceDescriptor strong_desc, TraceDescriptor weak_desc,
+ WeakCallback callback, const void* data) {
+ marking_state_.ProcessWeakContainer(self, weak_desc, callback, data);
}
-void UnifiedHeapMarkingVisitor::RegisterWeakCallback(WeakCallback callback,
- const void* object) {
+void UnifiedHeapMarkingVisitorBase::RegisterWeakCallback(WeakCallback callback,
+ const void* object) {
marking_state_.RegisterWeakCallback(callback, object);
}
-void UnifiedHeapMarkingVisitor::Visit(const internal::JSMemberBase& ref) {
- unified_heap_marking_state_.MarkAndPush(ref);
+void UnifiedHeapMarkingVisitorBase::HandleMovableReference(const void** slot) {
+ marking_state_.RegisterMovableReference(slot);
+}
+
+namespace {
+void DeferredTraceTracedReference(cppgc::Visitor* visitor, const void* ref) {
+ static_cast<JSVisitor*>(visitor)->Trace(
+ *static_cast<const TracedReferenceBase*>(ref));
+}
+} // namespace
+
+void UnifiedHeapMarkingVisitorBase::Visit(const TracedReferenceBase& ref) {
+ bool should_defer_tracing = DeferTraceToMutatorThreadIfConcurrent(
+ &ref, DeferredTraceTracedReference, 0);
+
+ if (!should_defer_tracing) unified_heap_marking_state_.MarkAndPush(ref);
+}
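+
+// DeferTraceToMutatorThreadIfConcurrent() returns false for mutator visitors
+// (the cppgc::Visitor default), so they mark directly. The concurrent
+// visitor (see below) instead pushes {&ref, DeferredTraceTracedReference, 0}
+// onto the bailout worklist and returns true; the deferred callback later
+// re-dispatches to this Visit() on the mutator thread.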
+
+MutatorUnifiedHeapMarkingVisitor::MutatorUnifiedHeapMarkingVisitor(
+ HeapBase& heap, MutatorMarkingState& marking_state,
+ UnifiedHeapMarkingState& unified_heap_marking_state)
+ : UnifiedHeapMarkingVisitorBase(heap, marking_state,
+ unified_heap_marking_state) {}
+
+void MutatorUnifiedHeapMarkingVisitor::VisitRoot(const void* object,
+ TraceDescriptor desc,
+ const SourceLocation&) {
+ this->Visit(object, desc);
+}
+
+void MutatorUnifiedHeapMarkingVisitor::VisitWeakRoot(const void* object,
+ TraceDescriptor desc,
+ WeakCallback weak_callback,
+ const void* weak_root,
+ const SourceLocation&) {
+ static_cast<MutatorMarkingState&>(marking_state_)
+ .InvokeWeakRootsCallbackIfNeeded(object, desc, weak_callback, weak_root);
+}
+
+ConcurrentUnifiedHeapMarkingVisitor::ConcurrentUnifiedHeapMarkingVisitor(
+ HeapBase& heap, ConcurrentMarkingState& marking_state,
+ UnifiedHeapMarkingState& unified_heap_marking_state)
+ : UnifiedHeapMarkingVisitorBase(heap, marking_state,
+ unified_heap_marking_state) {}
+
+bool ConcurrentUnifiedHeapMarkingVisitor::DeferTraceToMutatorThreadIfConcurrent(
+ const void* parameter, cppgc::TraceCallback callback,
+ size_t deferred_size) {
+ marking_state_.concurrent_marking_bailout_worklist().Push(
+ {parameter, callback, deferred_size});
+ static_cast<ConcurrentMarkingState&>(marking_state_)
+ .AccountDeferredMarkedBytes(deferred_size);
+ return true;
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
index f80b86c1be..05e3affaa8 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
@@ -12,40 +12,84 @@
#include "src/heap/cppgc/marking-visitor.h"
namespace cppgc {
+
+class SourceLocation;
+
namespace internal {
-class MarkingState;
+class ConcurrentMarkingState;
+class MarkingStateBase;
+class MutatorMarkingState;
} // namespace internal
} // namespace cppgc
namespace v8 {
namespace internal {
+using cppgc::SourceLocation;
using cppgc::TraceDescriptor;
using cppgc::WeakCallback;
+using cppgc::internal::ConcurrentMarkingState;
using cppgc::internal::HeapBase;
-using cppgc::internal::MarkingState;
+using cppgc::internal::MarkingStateBase;
+using cppgc::internal::MutatorMarkingState;
-class V8_EXPORT_PRIVATE UnifiedHeapMarkingVisitor : public JSVisitor {
+class V8_EXPORT_PRIVATE UnifiedHeapMarkingVisitorBase : public JSVisitor {
public:
- UnifiedHeapMarkingVisitor(HeapBase&, MarkingState&, UnifiedHeapMarkingState&);
- ~UnifiedHeapMarkingVisitor() override = default;
+ UnifiedHeapMarkingVisitorBase(HeapBase&, MarkingStateBase&,
+ UnifiedHeapMarkingState&);
+ ~UnifiedHeapMarkingVisitorBase() override = default;
- private:
+ protected:
// C++ handling.
void Visit(const void*, TraceDescriptor) final;
void VisitWeak(const void*, TraceDescriptor, WeakCallback, const void*) final;
- void VisitRoot(const void*, TraceDescriptor) final;
- void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
- const void*) final;
+ void VisitEphemeron(const void*, TraceDescriptor) final;
+ void VisitWeakContainer(const void* self, TraceDescriptor strong_desc,
+ TraceDescriptor weak_desc, WeakCallback callback,
+ const void* data) final;
void RegisterWeakCallback(WeakCallback, const void*) final;
+ void HandleMovableReference(const void**) final;
// JS handling.
- void Visit(const internal::JSMemberBase& ref) final;
+ void Visit(const TracedReferenceBase& ref) final;
- MarkingState& marking_state_;
+ MarkingStateBase& marking_state_;
UnifiedHeapMarkingState& unified_heap_marking_state_;
};
+class V8_EXPORT_PRIVATE MutatorUnifiedHeapMarkingVisitor final
+ : public UnifiedHeapMarkingVisitorBase {
+ public:
+ MutatorUnifiedHeapMarkingVisitor(HeapBase&, MutatorMarkingState&,
+ UnifiedHeapMarkingState&);
+ ~MutatorUnifiedHeapMarkingVisitor() override = default;
+
+ protected:
+ void VisitRoot(const void*, TraceDescriptor, const SourceLocation&) final;
+ void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback, const void*,
+ const SourceLocation&) final;
+};
+
+class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor final
+ : public UnifiedHeapMarkingVisitorBase {
+ public:
+ ConcurrentUnifiedHeapMarkingVisitor(HeapBase&, ConcurrentMarkingState&,
+ UnifiedHeapMarkingState&);
+ ~ConcurrentUnifiedHeapMarkingVisitor() override = default;
+
+ protected:
+ void VisitRoot(const void*, TraceDescriptor, const SourceLocation&) final {
+ UNREACHABLE();
+ }
+ void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback, const void*,
+ const SourceLocation&) final {
+ UNREACHABLE();
+ }
+
+ bool DeferTraceToMutatorThreadIfConcurrent(const void*, cppgc::TraceCallback,
+ size_t) final;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/cppgc/compaction-worklists.cc b/deps/v8/src/heap/cppgc/compaction-worklists.cc
new file mode 100644
index 0000000000..bb182a58c8
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/compaction-worklists.cc
@@ -0,0 +1,13 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/compaction-worklists.h"
+
+namespace cppgc {
+namespace internal {
+
+void CompactionWorklists::ClearForTesting() { movable_slots_worklist_.Clear(); }
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/compaction-worklists.h b/deps/v8/src/heap/cppgc/compaction-worklists.h
new file mode 100644
index 0000000000..6222bd9a92
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/compaction-worklists.h
@@ -0,0 +1,35 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_COMPACTION_WORKLISTS_H_
+#define V8_HEAP_CPPGC_COMPACTION_WORKLISTS_H_
+
+#include <unordered_set>
+
+#include "src/heap/base/worklist.h"
+
+namespace cppgc {
+namespace internal {
+
+class CompactionWorklists {
+ public:
+ using MovableReference = const void*;
+
+ using MovableReferencesWorklist =
+ heap::base::Worklist<MovableReference*, 256 /* local entries */>;
+
+ MovableReferencesWorklist* movable_slots_worklist() {
+ return &movable_slots_worklist_;
+ }
+
+ void ClearForTesting();
+
+ private:
+ MovableReferencesWorklist movable_slots_worklist_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_COMPACTION_WORKLISTS_H_
diff --git a/deps/v8/src/heap/cppgc/compactor.cc b/deps/v8/src/heap/cppgc/compactor.cc
new file mode 100644
index 0000000000..23869d2f14
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/compactor.cc
@@ -0,0 +1,505 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/compactor.h"
+
+#include <map>
+#include <numeric>
+#include <unordered_map>
+#include <unordered_set>
+
+#include "include/cppgc/macros.h"
+#include "src/heap/cppgc/compaction-worklists.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-base.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/raw-heap.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+// Freelist size threshold that must be exceeded before compaction
+// should be considered.
+static constexpr size_t kFreeListSizeThreshold = 512 * kKB;
+
+// The real worker behind heap compaction, recording references to movable
+// objects ("slots".) When the objects end up being compacted and moved,
+// relocate() will adjust the slots to point to the new location of the
+// object along with handling references for interior pointers.
+//
+// The MovableReferences object is created and maintained for the lifetime
+// of one heap compaction-enhanced GC.
+class MovableReferences final {
+ using MovableReference = CompactionWorklists::MovableReference;
+
+ public:
+ explicit MovableReferences(HeapBase& heap) : heap_(heap) {}
+
+ // Adds a slot for compaction. Filters slots in dead objects.
+ void AddOrFilter(MovableReference*);
+
+ // Relocates a backing store |from| -> |to|.
+ void Relocate(Address from, Address to);
+
+ // Relocates interior slots in a backing store that is moved |from| -> |to|.
+ void RelocateInteriorReferences(Address from, Address to, size_t size);
+
+ // Updates the collection of callbacks from the items pushed to the worklist
+ // by marking visitors.
+ void UpdateCallbacks();
+
+ private:
+ HeapBase& heap_;
+
+ // Map from movable reference (value) to its slot. Upon moving an object,
+ // the slot pointing to it requires updating. Each value should currently
+ // have only a single movable reference to it registered.
+ std::unordered_map<MovableReference, MovableReference*> movable_references_;
+
+ // Map of interior slots to their final location. Needs to be an ordered map
+ // as it is used to walk through slots starting at a given memory address.
+ // Requires log(n) lookup to make the early bailout reasonably fast.
+ //
+ // - The initial value for a given key is nullptr.
+ // - Upon moving an object this value is adjusted accordingly.
+ std::map<MovableReference*, Address> interior_movable_references_;
+
+#if DEBUG
+ // The following two collections are used to allow referring back from a
+ // slot to an already moved object.
+ std::unordered_set<const void*> moved_objects_;
+ std::unordered_map<MovableReference*, MovableReference>
+ interior_slot_to_object_;
+#endif // DEBUG
+};
+
+void MovableReferences::AddOrFilter(MovableReference* slot) {
+ const BasePage* slot_page = BasePage::FromInnerAddress(&heap_, slot);
+ CHECK_NOT_NULL(slot_page);
+
+ const void* value = *slot;
+ if (!value) return;
+
+ // All slots and values are part of Oilpan's heap.
+ // - Slots may be contained within dead objects if e.g. the write barrier
+ // registered the slot while the backing store itself has not been marked
+ // live in time. Slots in dead objects are filtered below.
+ // - Values may only be contained in or point to live objects.
+
+ const HeapObjectHeader& slot_header =
+ slot_page->ObjectHeaderFromInnerAddress(slot);
+ // Filter the slot since the object that contains the slot is dead.
+ if (!slot_header.IsMarked()) return;
+
+ const BasePage* value_page = BasePage::FromInnerAddress(&heap_, value);
+ CHECK_NOT_NULL(value_page);
+
+ // The following cases are not compacted and do not require recording:
+ // - Compactable object on large pages.
+ // - Compactable object on non-compactable spaces.
+ if (value_page->is_large() || !value_page->space()->is_compactable()) return;
+
+ // Slots must reside in and values must point to live objects at this
+ // point. |value| usually points to a separate object but can also point
+ // to an interior pointer in the same object storage, which is why the
+ // dynamic header lookup is required.
+ const HeapObjectHeader& value_header =
+ value_page->ObjectHeaderFromInnerAddress(value);
+ CHECK(value_header.IsMarked());
+
+ // Slots may have been recorded already but must point to the same value.
+ auto reference_it = movable_references_.find(value);
+ if (V8_UNLIKELY(reference_it != movable_references_.end())) {
+ CHECK_EQ(slot, reference_it->second);
+ return;
+ }
+
+ // Add regular movable reference.
+ movable_references_.emplace(value, slot);
+
+ // Check whether the slot itself resides on a page that is compacted.
+ if (V8_LIKELY(!slot_page->space()->is_compactable())) return;
+
+ CHECK_EQ(interior_movable_references_.end(),
+ interior_movable_references_.find(slot));
+ interior_movable_references_.emplace(slot, nullptr);
+#if DEBUG
+ interior_slot_to_object_.emplace(slot, slot_header.Payload());
+#endif // DEBUG
+}
+
+void MovableReferences::Relocate(Address from, Address to) {
+#if DEBUG
+ moved_objects_.insert(from);
+#endif // DEBUG
+
+ // Interior slots always need to be processed for moved objects.
+ // Consider an object A with slot A.x pointing to value B where A is
+ // allocated on a movable page itself. When B is finally moved, it needs to
+ // find the corresponding slot A.x. Object A may already have been moved and
+ // its memory freed, which would result in a crash.
+ if (!interior_movable_references_.empty()) {
+ const HeapObjectHeader& header = HeapObjectHeader::FromPayload(to);
+ const size_t size = header.GetSize() - sizeof(HeapObjectHeader);
+ RelocateInteriorReferences(from, to, size);
+ }
+
+ auto it = movable_references_.find(from);
+ // This means that there is no corresponding slot for a live object.
+ // This may happen when a mutator changes the slot to point to a different
+ // object, e.g., because incremental marking marked an object as live that
+ // was later replaced.
+ if (it == movable_references_.end()) {
+ return;
+ }
+
+ // If the object is referenced by a slot that is contained on a compacted
+ // area itself, check whether it can be updated already.
+ MovableReference* slot = it->second;
+ auto interior_it = interior_movable_references_.find(slot);
+ if (interior_it != interior_movable_references_.end()) {
+ MovableReference* slot_location =
+ reinterpret_cast<MovableReference*>(interior_it->second);
+ if (!slot_location) {
+ interior_it->second = to;
+#if DEBUG
+ // Check that the containing object has not been moved yet.
+ auto reverse_it = interior_slot_to_object_.find(slot);
+ DCHECK_NE(interior_slot_to_object_.end(), reverse_it);
+ DCHECK_EQ(moved_objects_.end(), moved_objects_.find(reverse_it->second));
+#endif // DEBUG
+ } else {
+ slot = slot_location;
+ }
+ }
+
+ // Compaction is atomic so slot should not be updated during compaction.
+ DCHECK_EQ(from, *slot);
+
+ // Update the slot's new value.
+ *slot = to;
+}
+
+void MovableReferences::RelocateInteriorReferences(Address from, Address to,
+ size_t size) {
+ // |from| is a valid address for a slot.
+ auto interior_it = interior_movable_references_.lower_bound(
+ reinterpret_cast<MovableReference*>(from));
+ if (interior_it == interior_movable_references_.end()) return;
+ DCHECK_GE(reinterpret_cast<Address>(interior_it->first), from);
+
+ size_t offset = reinterpret_cast<Address>(interior_it->first) - from;
+ while (offset < size) {
+ if (!interior_it->second) {
+ // Update the interior reference value, so that when the object the slot
+ // is pointing to is moved, it can re-use this value.
+ Address reference = to + offset;
+ interior_it->second = reference;
+
+ // If the |slot|'s content is pointing into the region [from, from +
+ // size) we are dealing with an interior pointer that does not point to
+ // a valid HeapObjectHeader. Such references need to be fixed up
+ // immediately.
+ Address& reference_contents = *reinterpret_cast<Address*>(reference);
+ if (reference_contents > from && reference_contents < (from + size)) {
+ reference_contents = reference_contents - from + to;
+ }
+ }
+
+ interior_it++;
+ if (interior_it == interior_movable_references_.end()) return;
+ offset = reinterpret_cast<Address>(interior_it->first) - from;
+ }
+}
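+
+// Worked example for the in-region fixup above, assuming from = 0x1000,
+// to = 0x2000, size = 0x100: an interior slot at offset 0x10 is recorded as
+// now living at 0x2010. If its contents point to 0x1080, i.e. into the moved
+// region itself, they are rebased immediately:
+//   0x1080 - 0x1000 + 0x2000 = 0x2080.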
+
+class CompactionState final {
+ CPPGC_STACK_ALLOCATED();
+ using Pages = std::vector<NormalPage*>;
+
+ public:
+ CompactionState(NormalPageSpace* space, MovableReferences& movable_references)
+ : space_(space), movable_references_(movable_references) {}
+
+ void AddPage(NormalPage* page) {
+ DCHECK_EQ(space_, page->space());
+ // If not the first page, add |page| onto the available pages chain.
+ if (!current_page_)
+ current_page_ = page;
+ else
+ available_pages_.push_back(page);
+ }
+
+ void RelocateObject(const NormalPage* page, const Address header,
+ size_t size) {
+ // Allocate and copy over the live object.
+ Address compact_frontier =
+ current_page_->PayloadStart() + used_bytes_in_current_page_;
+ if (compact_frontier + size > current_page_->PayloadEnd()) {
+ // Can't fit on current page. Add remaining onto the freelist and advance
+ // to next available page.
+ ReturnCurrentPageToSpace();
+
+ current_page_ = available_pages_.back();
+ available_pages_.pop_back();
+ used_bytes_in_current_page_ = 0;
+ compact_frontier = current_page_->PayloadStart();
+ }
+ if (V8_LIKELY(compact_frontier != header)) {
+ // Use a non-overlapping copy, if possible.
+ if (current_page_ == page)
+ memmove(compact_frontier, header, size);
+ else
+ memcpy(compact_frontier, header, size);
+ movable_references_.Relocate(header + sizeof(HeapObjectHeader),
+ compact_frontier + sizeof(HeapObjectHeader));
+ }
+ current_page_->object_start_bitmap().SetBit(compact_frontier);
+ used_bytes_in_current_page_ += size;
+ DCHECK_LE(used_bytes_in_current_page_, current_page_->PayloadSize());
+ }
+
+ void FinishCompactingSpace() {
+ // If the current page hasn't been allocated into, add it to the available
+ // list, for subsequent release below.
+ if (used_bytes_in_current_page_ == 0) {
+ available_pages_.push_back(current_page_);
+ } else {
+ ReturnCurrentPageToSpace();
+ }
+
+ // Return remaining available pages to the free page pool, decommitting
+ // them from the pagefile.
+ for (NormalPage* page : available_pages_) {
+ SET_MEMORY_INACCESSIBLE(page->PayloadStart(), page->PayloadSize());
+ NormalPage::Destroy(page);
+ }
+ }
+
+ void FinishCompactingPage(NormalPage* page) {
+#if DEBUG || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
+ defined(MEMORY_SANITIZER)
+ // Zap the unused portion, until it is either compacted into or freed.
+ if (current_page_ != page) {
+ ZapMemory(page->PayloadStart(), page->PayloadSize());
+ } else {
+ ZapMemory(page->PayloadStart() + used_bytes_in_current_page_,
+ page->PayloadSize() - used_bytes_in_current_page_);
+ }
+#endif
+ }
+
+ private:
+ void ReturnCurrentPageToSpace() {
+ DCHECK_EQ(space_, current_page_->space());
+ space_->AddPage(current_page_);
+ if (used_bytes_in_current_page_ != current_page_->PayloadSize()) {
+ // Put the remainder of the page onto the free list.
+ size_t freed_size =
+ current_page_->PayloadSize() - used_bytes_in_current_page_;
+ Address payload = current_page_->PayloadStart();
+ Address free_start = payload + used_bytes_in_current_page_;
+ SET_MEMORY_INACCESSIBLE(free_start, freed_size);
+ space_->free_list().Add({free_start, freed_size});
+ current_page_->object_start_bitmap().SetBit(free_start);
+ }
+ }
+
+ NormalPageSpace* space_;
+ MovableReferences& movable_references_;
+ // Page into which compacted objects are written.
+ NormalPage* current_page_ = nullptr;
+ // Offset into |current_page_| to the next free address.
+ size_t used_bytes_in_current_page_ = 0;
+ // Additional pages in the current space that can be used as compaction
+ // targets. Pages that remain available when compaction finishes can be
+ // released.
+ Pages available_pages_;
+};
+
+void CompactPage(NormalPage* page, CompactionState& compaction_state) {
+ compaction_state.AddPage(page);
+
+ page->object_start_bitmap().Clear();
+
+ for (Address header_address = page->PayloadStart();
+ header_address < page->PayloadEnd();) {
+ HeapObjectHeader* header =
+ reinterpret_cast<HeapObjectHeader*>(header_address);
+ size_t size = header->GetSize();
+ DCHECK_GT(size, 0u);
+ DCHECK_LT(size, kPageSize);
+
+ if (header->IsFree()) {
+ // Unpoison the freelist entry so that we can compact into it as wanted.
+ ASAN_UNPOISON_MEMORY_REGION(header_address, size);
+ header_address += size;
+ continue;
+ }
+
+ if (!header->IsMarked()) {
+ // Compaction is currently launched only from AtomicPhaseEpilogue, so it's
+ // guaranteed to be on the mutator thread - no need to postpone
+ // finalization.
+ header->Finalize();
+
+ // As compaction is under way, leave the freed memory accessible
+ // while compacting the rest of the page. We just zap the payload
+ // to catch out other finalizers trying to access it.
+#if DEBUG || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
+ defined(MEMORY_SANITIZER)
+ ZapMemory(header, size);
+#endif
+ header_address += size;
+ continue;
+ }
+
+ // Object is marked.
+#if !defined(CPPGC_YOUNG_GENERATION)
+ header->Unmark();
+#endif
+ compaction_state.RelocateObject(page, header_address, size);
+ header_address += size;
+ }
+
+ compaction_state.FinishCompactingPage(page);
+}
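+
+// Example pass over a page laid out as [live A][free][dead B][live C],
+// assuming compaction starts on this page: A stays put (the copy is skipped
+// when the compaction frontier equals the header), the freelist entry is
+// unpoisoned and skipped, B is finalized and zapped, and C slides down to
+// the frontier right after A, with MovableReferences::Relocate() patching
+// any recorded slots to C.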
+
+void CompactSpace(NormalPageSpace* space,
+ MovableReferences& movable_references) {
+ using Pages = NormalPageSpace::Pages;
+
+ DCHECK(space->is_compactable());
+
+ space->free_list().Clear();
+
+ // Compaction generally follows Jonker's algorithm for fast garbage
+ // compaction. Compaction is performed in-place, sliding objects down over
+ // unused holes for a smaller heap page footprint and improved locality. A
+ // "compaction pointer" is consequently kept, pointing to the next available
+ // address to move objects down to. It will belong to one of the already
+ // compacted pages for this space, but as compaction proceeds, it will not
+ // belong to the same page as the one being currently compacted.
+ //
+ // The compaction pointer is represented by the
+ // |(current_page_, used_bytes_in_current_page_)| pair, with
+ // |used_bytes_in_current_page_| being the offset into |current_page_|, making
+ // up the next available location. When the compaction of an arena page causes
+ // the compaction pointer to exhaust the current page it is compacting into,
+ // page compaction will advance the current page of the compaction
+ // pointer, as well as the allocation point.
+ //
+ // By construction, the page compaction can be performed without having
+ // to allocate any new pages. So to arrange for the page compaction's
+ // supply of freed, available pages, we chain them together after each
+ // has been "compacted from". The page compaction will then reuse those
+ // as needed, and once finished, the chained, available pages can be
+ // released back to the OS.
+ //
+ // To ease the passing of the compaction state when iterating over an
+ // arena's pages, package it up into a |CompactionState|.
+
+ Pages pages = space->RemoveAllPages();
+ if (pages.empty()) return;
+
+ CompactionState compaction_state(space, movable_references);
+ for (BasePage* page : pages) {
+ // Large objects do not belong to this arena.
+ CompactPage(NormalPage::From(page), compaction_state);
+ }
+
+ compaction_state.FinishCompactingSpace();
+ // Sweeping will verify object start bitmap of compacted space.
+}
+
+size_t UpdateHeapResidency(const std::vector<NormalPageSpace*>& spaces) {
+ return std::accumulate(spaces.cbegin(), spaces.cend(), 0u,
+ [](size_t acc, const NormalPageSpace* space) {
+ DCHECK(space->is_compactable());
+ if (!space->size()) return acc;
+ return acc + space->free_list().Size();
+ });
+}
+
+} // namespace
+
+Compactor::Compactor(RawHeap& heap) : heap_(heap) {
+ for (auto& space : heap_) {
+ if (!space->is_compactable()) continue;
+ DCHECK_EQ(&heap, space->raw_heap());
+ compactable_spaces_.push_back(static_cast<NormalPageSpace*>(space.get()));
+ }
+}
+
+bool Compactor::ShouldCompact(
+ GarbageCollector::Config::MarkingType marking_type,
+ GarbageCollector::Config::StackState stack_state) {
+ if (compactable_spaces_.empty() ||
+ (marking_type == GarbageCollector::Config::MarkingType::kAtomic &&
+ stack_state ==
+ GarbageCollector::Config::StackState::kMayContainHeapPointers)) {
+ // The following check ensures that tests that want to test compaction are
+ // not interrupted by garbage collections that cannot use compaction.
+ DCHECK(!enable_for_next_gc_for_testing_);
+ return false;
+ }
+
+ if (enable_for_next_gc_for_testing_) {
+ return true;
+ }
+
+ size_t free_list_size = UpdateHeapResidency(compactable_spaces_);
+
+ return free_list_size > kFreeListSizeThreshold;
+}
+
+void Compactor::InitializeIfShouldCompact(
+ GarbageCollector::Config::MarkingType marking_type,
+ GarbageCollector::Config::StackState stack_state) {
+ DCHECK(!is_enabled_);
+
+ if (!ShouldCompact(marking_type, stack_state)) return;
+
+ compaction_worklists_ = std::make_unique<CompactionWorklists>();
+
+ is_enabled_ = true;
+ enable_for_next_gc_for_testing_ = false;
+}
+
+bool Compactor::CancelIfShouldNotCompact(
+ GarbageCollector::Config::MarkingType marking_type,
+ GarbageCollector::Config::StackState stack_state) {
+ if (!is_enabled_ || ShouldCompact(marking_type, stack_state)) return false;
+
+ DCHECK_NOT_NULL(compaction_worklists_);
+ compaction_worklists_->movable_slots_worklist()->Clear();
+ compaction_worklists_.reset();
+
+ is_enabled_ = false;
+ return true;
+}
+
+Compactor::CompactableSpaceHandling Compactor::CompactSpacesIfEnabled() {
+ if (!is_enabled_) return CompactableSpaceHandling::kSweep;
+
+ MovableReferences movable_references(*heap_.heap());
+
+ CompactionWorklists::MovableReferencesWorklist::Local local(
+ compaction_worklists_->movable_slots_worklist());
+ CompactionWorklists::MovableReference* slot;
+ while (local.Pop(&slot)) {
+ movable_references.AddOrFilter(slot);
+ }
+ compaction_worklists_.reset();
+
+ for (NormalPageSpace* space : compactable_spaces_) {
+ CompactSpace(space, movable_references);
+ }
+
+ is_enabled_ = false;
+ return CompactableSpaceHandling::kIgnore;
+}
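+
+// A minimal sketch of how a GC cycle is expected to drive the compactor
+// (method names from this file; the surrounding heap glue is assumed):
+//
+//   compactor.InitializeIfShouldCompact(marking_type, stack_state);
+//   // Marking records movable slots via Visitor::HandleMovableReference(),
+//   // which feeds compaction_worklists()->movable_slots_worklist().
+//   auto handling = compactor.CompactSpacesIfEnabled();
+//   // |handling| tells the sweeper whether compacted spaces still need
+//   // sweeping (kSweep) or can be skipped (kIgnore).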
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/compactor.h b/deps/v8/src/heap/cppgc/compactor.h
new file mode 100644
index 0000000000..d354274a33
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/compactor.h
@@ -0,0 +1,56 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_COMPACTOR_H_
+#define V8_HEAP_CPPGC_COMPACTOR_H_
+
+#include "src/heap/cppgc/compaction-worklists.h"
+#include "src/heap/cppgc/garbage-collector.h"
+#include "src/heap/cppgc/raw-heap.h"
+
+namespace cppgc {
+namespace internal {
+
+class V8_EXPORT_PRIVATE Compactor final {
+ using CompactableSpaceHandling =
+ Sweeper::SweepingConfig::CompactableSpaceHandling;
+
+ public:
+ explicit Compactor(RawHeap&);
+ ~Compactor() { DCHECK(!is_enabled_); }
+
+ void InitializeIfShouldCompact(GarbageCollector::Config::MarkingType,
+ GarbageCollector::Config::StackState);
+ // Returns true if compaction was cancelled.
+ bool CancelIfShouldNotCompact(GarbageCollector::Config::MarkingType,
+ GarbageCollector::Config::StackState);
+ CompactableSpaceHandling CompactSpacesIfEnabled();
+
+ CompactionWorklists* compaction_worklists() {
+ return compaction_worklists_.get();
+ }
+
+ void EnableForNextGCForTesting() { enable_for_next_gc_for_testing_ = true; }
+
+ bool IsEnabledForTesting() const { return is_enabled_; }
+
+ private:
+ bool ShouldCompact(GarbageCollector::Config::MarkingType,
+ GarbageCollector::Config::StackState);
+
+ RawHeap& heap_;
+ // Compactor does not own the compactable spaces. The heap owns all spaces.
+ std::vector<NormalPageSpace*> compactable_spaces_;
+
+ std::unique_ptr<CompactionWorklists> compaction_worklists_;
+
+ bool is_enabled_ = false;
+
+ bool enable_for_next_gc_for_testing_ = false;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_COMPACTOR_H_
diff --git a/deps/v8/src/heap/cppgc/concurrent-marker.cc b/deps/v8/src/heap/cppgc/concurrent-marker.cc
new file mode 100644
index 0000000000..5df422fa5c
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/concurrent-marker.cc
@@ -0,0 +1,246 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/concurrent-marker.h"
+
+#include "include/cppgc/platform.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/liveness-broker.h"
+#include "src/heap/cppgc/marking-state.h"
+#include "src/heap/cppgc/marking-visitor.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+static constexpr double kMarkingScheduleRatioBeforeConcurrentPriorityIncrease =
+ 0.5;
+
+static constexpr size_t kDefaultDeadlineCheckInterval = 750u;
+
+template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
+ typename WorklistLocal, typename Callback>
+bool DrainWorklistWithYielding(
+ JobDelegate* job_delegate, ConcurrentMarkingState& marking_state,
+ IncrementalMarkingSchedule& incremental_marking_schedule,
+ WorklistLocal& worklist_local, Callback callback) {
+ return DrainWorklistWithPredicate<kDeadlineCheckInterval>(
+ [&incremental_marking_schedule, &marking_state, job_delegate]() {
+ incremental_marking_schedule.AddConcurrentlyMarkedBytes(
+ marking_state.RecentlyMarkedBytes());
+ return job_delegate->ShouldYield();
+ },
+ worklist_local, callback);
+}
+
+size_t WorkSizeForConcurrentMarking(MarkingWorklists& marking_worklists) {
+ return marking_worklists.marking_worklist()->Size() +
+ marking_worklists.write_barrier_worklist()->Size() +
+ marking_worklists.previously_not_fully_constructed_worklist()->Size();
+}
+
+// Checks whether worklists' global pools hold any segment a concurrent marker
+// can steal. This is called before the concurrent marker holds any Locals, so
+// no need to check local segments.
+bool HasWorkForConcurrentMarking(MarkingWorklists& marking_worklists) {
+ return !marking_worklists.marking_worklist()->IsEmpty() ||
+ !marking_worklists.write_barrier_worklist()->IsEmpty() ||
+ !marking_worklists.previously_not_fully_constructed_worklist()
+ ->IsEmpty();
+}
+
+class ConcurrentMarkingTask final : public v8::JobTask {
+ public:
+ explicit ConcurrentMarkingTask(ConcurrentMarkerBase&);
+
+ void Run(JobDelegate* delegate) final;
+
+ size_t GetMaxConcurrency(size_t) const final;
+
+ private:
+ void ProcessWorklists(JobDelegate*, ConcurrentMarkingState&, Visitor&);
+
+ const ConcurrentMarkerBase& concurrent_marker_;
+};
+
+ConcurrentMarkingTask::ConcurrentMarkingTask(
+ ConcurrentMarkerBase& concurrent_marker)
+ : concurrent_marker_(concurrent_marker) {}
+
+void ConcurrentMarkingTask::Run(JobDelegate* job_delegate) {
+ if (!HasWorkForConcurrentMarking(concurrent_marker_.marking_worklists()))
+ return;
+ ConcurrentMarkingState concurrent_marking_state(
+ concurrent_marker_.heap(), concurrent_marker_.marking_worklists(),
+ concurrent_marker_.heap().compactor().compaction_worklists());
+ std::unique_ptr<Visitor> concurrent_marking_visitor =
+ concurrent_marker_.CreateConcurrentMarkingVisitor(
+ concurrent_marking_state);
+ ProcessWorklists(job_delegate, concurrent_marking_state,
+ *concurrent_marking_visitor.get());
+ concurrent_marker_.incremental_marking_schedule().AddConcurrentlyMarkedBytes(
+ concurrent_marking_state.RecentlyMarkedBytes());
+ concurrent_marking_state.Publish();
+}
+
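+// Includes current_worker_count so that workers that are already running
+// (and may hold items in thread-local worklist segments not visible to
+// WorkSizeForConcurrentMarking) are not treated as having no work left.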
+size_t ConcurrentMarkingTask::GetMaxConcurrency(
+ size_t current_worker_count) const {
+ return WorkSizeForConcurrentMarking(concurrent_marker_.marking_worklists()) +
+ current_worker_count;
+}
+
+void ConcurrentMarkingTask::ProcessWorklists(
+ JobDelegate* job_delegate, ConcurrentMarkingState& concurrent_marking_state,
+ Visitor& concurrent_marking_visitor) {
+ do {
+ if (!DrainWorklistWithYielding(
+ job_delegate, concurrent_marking_state,
+ concurrent_marker_.incremental_marking_schedule(),
+ concurrent_marking_state
+ .previously_not_fully_constructed_worklist(),
+ [&concurrent_marking_state,
+ &concurrent_marking_visitor](HeapObjectHeader* header) {
+ BasePage::FromPayload(header)->SynchronizedLoad();
+ concurrent_marking_state.AccountMarkedBytes(*header);
+ DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
+ concurrent_marking_visitor, *header);
+ })) {
+ return;
+ }
+
+ if (!DrainWorklistWithYielding(
+ job_delegate, concurrent_marking_state,
+ concurrent_marker_.incremental_marking_schedule(),
+ concurrent_marking_state.marking_worklist(),
+ [&concurrent_marking_state, &concurrent_marking_visitor](
+ const MarkingWorklists::MarkingItem& item) {
+ BasePage::FromPayload(item.base_object_payload)
+ ->SynchronizedLoad();
+ const HeapObjectHeader& header =
+ HeapObjectHeader::FromPayload(item.base_object_payload);
+ DCHECK(!header.IsInConstruction<AccessMode::kAtomic>());
+ DCHECK(header.IsMarked<AccessMode::kAtomic>());
+ concurrent_marking_state.AccountMarkedBytes(header);
+ item.callback(&concurrent_marking_visitor,
+ item.base_object_payload);
+ })) {
+ return;
+ }
+
+ if (!DrainWorklistWithYielding(
+ job_delegate, concurrent_marking_state,
+ concurrent_marker_.incremental_marking_schedule(),
+ concurrent_marking_state.write_barrier_worklist(),
+ [&concurrent_marking_state,
+ &concurrent_marking_visitor](HeapObjectHeader* header) {
+ BasePage::FromPayload(header)->SynchronizedLoad();
+ concurrent_marking_state.AccountMarkedBytes(*header);
+ DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
+ concurrent_marking_visitor, *header);
+ })) {
+ return;
+ }
+
+ if (!DrainWorklistWithYielding(
+ job_delegate, concurrent_marking_state,
+ concurrent_marker_.incremental_marking_schedule(),
+ concurrent_marking_state.ephemeron_pairs_for_processing_worklist(),
+ [&concurrent_marking_state](
+ const MarkingWorklists::EphemeronPairItem& item) {
+ concurrent_marking_state.ProcessEphemeron(item.key,
+ item.value_desc);
+ })) {
+ return;
+ }
+ } while (
+ !concurrent_marking_state.marking_worklist().IsLocalAndGlobalEmpty());
+}
+
+} // namespace
+
+ConcurrentMarkerBase::ConcurrentMarkerBase(
+ HeapBase& heap, MarkingWorklists& marking_worklists,
+ IncrementalMarkingSchedule& incremental_marking_schedule,
+ cppgc::Platform* platform)
+ : heap_(heap),
+ marking_worklists_(marking_worklists),
+ incremental_marking_schedule_(incremental_marking_schedule),
+ platform_(platform) {}
+
+void ConcurrentMarkerBase::Start() {
+ DCHECK(platform_);
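+ // Marking jobs start at kUserVisible priority;
+ // IncreaseMarkingPriorityIfNeeded() may later raise this to kUserBlocking
+ // if concurrent progress stalls.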
+ concurrent_marking_handle_ =
+ platform_->PostJob(v8::TaskPriority::kUserVisible,
+ std::make_unique<ConcurrentMarkingTask>(*this));
+}
+
+void ConcurrentMarkerBase::Cancel() {
+ if (concurrent_marking_handle_ && concurrent_marking_handle_->IsValid())
+ concurrent_marking_handle_->Cancel();
+}
+
+void ConcurrentMarkerBase::JoinForTesting() {
+ if (concurrent_marking_handle_ && concurrent_marking_handle_->IsValid())
+ concurrent_marking_handle_->Join();
+}
+
+bool ConcurrentMarkerBase::IsActive() const {
+ return concurrent_marking_handle_ && concurrent_marking_handle_->IsRunning();
+}
+
+ConcurrentMarkerBase::~ConcurrentMarkerBase() {
+ CHECK_IMPLIES(concurrent_marking_handle_,
+ !concurrent_marking_handle_->IsValid());
+}
+
+bool ConcurrentMarkerBase::NotifyIncrementalMutatorStepCompleted() {
+ DCHECK(concurrent_marking_handle_);
+ if (HasWorkForConcurrentMarking(marking_worklists_)) {
+ // Notifies the scheduler that max concurrency might have increased.
+ // This will adjust the number of markers if necessary.
+ IncreaseMarkingPriorityIfNeeded();
+ concurrent_marking_handle_->NotifyConcurrencyIncrease();
+ return false;
+ }
+ return !concurrent_marking_handle_->IsActive();
+}
+
+void ConcurrentMarkerBase::IncreaseMarkingPriorityIfNeeded() {
+ if (!concurrent_marking_handle_->UpdatePriorityEnabled()) return;
+ if (concurrent_marking_priority_increased_) return;
+ // If concurrent tasks aren't executed, GC finalization may be delayed. As
+ // long as the GC is active, so is the write barrier, which incurs a
+ // performance cost. Marking is estimated to take
+ // |IncrementalMarkingSchedule::kEstimatedMarkingTimeMs| overall. If
+ // concurrent marking tasks have not reported any progress (i.e. the
+ // concurrently-marked-bytes count has not changed) for more than
+ // |kMarkingScheduleRatioBeforeConcurrentPriorityIncrease| of that expected
+ // duration, we increase the concurrent task priority for the duration of
+ // the current GC. This is meant to prevent the GC from exceeding its
+ // expected end time.
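+ // With the current constants (kEstimatedMarkingTimeMs = 500,
+ // kMarkingScheduleRatioBeforeConcurrentPriorityIncrease = 0.5), that is
+ // roughly 250ms without reported progress.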
+ const size_t current_concurrently_marked_bytes =
+ incremental_marking_schedule_.GetConcurrentlyMarkedBytes();
+ if (current_concurrently_marked_bytes > last_concurrently_marked_bytes_) {
+ last_concurrently_marked_bytes_ = current_concurrently_marked_bytes;
+ last_concurrently_marked_bytes_update_ = v8::base::TimeTicks::Now();
+ } else if ((v8::base::TimeTicks::Now() -
+ last_concurrently_marked_bytes_update_)
+ .InMilliseconds() >
+ kMarkingScheduleRatioBeforeConcurrentPriorityIncrease *
+ IncrementalMarkingSchedule::kEstimatedMarkingTimeMs) {
+ concurrent_marking_handle_->UpdatePriority(
+ cppgc::TaskPriority::kUserBlocking);
+ concurrent_marking_priority_increased_ = true;
+ }
+}
+
+std::unique_ptr<Visitor> ConcurrentMarker::CreateConcurrentMarkingVisitor(
+ ConcurrentMarkingState& marking_state) const {
+ return std::make_unique<ConcurrentMarkingVisitor>(heap(), marking_state);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/concurrent-marker.h b/deps/v8/src/heap/cppgc/concurrent-marker.h
new file mode 100644
index 0000000000..4f0ec849d1
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/concurrent-marker.h
@@ -0,0 +1,76 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_CONCURRENT_MARKER_H_
+#define V8_HEAP_CPPGC_CONCURRENT_MARKER_H_
+
+#include "include/cppgc/platform.h"
+#include "src/heap/cppgc/incremental-marking-schedule.h"
+#include "src/heap/cppgc/marking-state.h"
+#include "src/heap/cppgc/marking-visitor.h"
+#include "src/heap/cppgc/marking-worklists.h"
+
+namespace cppgc {
+namespace internal {
+
+class V8_EXPORT_PRIVATE ConcurrentMarkerBase {
+ public:
+ ConcurrentMarkerBase(HeapBase&, MarkingWorklists&,
+ IncrementalMarkingSchedule&, cppgc::Platform*);
+ virtual ~ConcurrentMarkerBase();
+
+ ConcurrentMarkerBase(const ConcurrentMarkerBase&) = delete;
+ ConcurrentMarkerBase& operator=(const ConcurrentMarkerBase&) = delete;
+
+ void Start();
+ void Cancel();
+
+ void JoinForTesting();
+
+ bool NotifyIncrementalMutatorStepCompleted();
+
+ bool IsActive() const;
+
+ HeapBase& heap() const { return heap_; }
+ MarkingWorklists& marking_worklists() const { return marking_worklists_; }
+ IncrementalMarkingSchedule& incremental_marking_schedule() const {
+ return incremental_marking_schedule_;
+ }
+
+ virtual std::unique_ptr<Visitor> CreateConcurrentMarkingVisitor(
+ ConcurrentMarkingState&) const = 0;
+
+ protected:
+ void IncreaseMarkingPriorityIfNeeded();
+
+ private:
+ HeapBase& heap_;
+ MarkingWorklists& marking_worklists_;
+ IncrementalMarkingSchedule& incremental_marking_schedule_;
+ cppgc::Platform* const platform_;
+
+ // The job handle doubles as a flag denoting whether concurrent marking
+ // was started.
+ std::unique_ptr<JobHandle> concurrent_marking_handle_{nullptr};
+
+ size_t last_concurrently_marked_bytes_ = 0;
+ v8::base::TimeTicks last_concurrently_marked_bytes_update_;
+ bool concurrent_marking_priority_increased_{false};
+};
+
+class V8_EXPORT_PRIVATE ConcurrentMarker : public ConcurrentMarkerBase {
+ public:
+ ConcurrentMarker(HeapBase& heap, MarkingWorklists& marking_worklists,
+ IncrementalMarkingSchedule& incremental_marking_schedule,
+ cppgc::Platform* platform)
+ : ConcurrentMarkerBase(heap, marking_worklists,
+ incremental_marking_schedule, platform) {}
+
+ std::unique_ptr<Visitor> CreateConcurrentMarkingVisitor(
+ ConcurrentMarkingState&) const final;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_CONCURRENT_MARKER_H_
diff --git a/deps/v8/src/heap/cppgc/default-job.h b/deps/v8/src/heap/cppgc/default-job.h
deleted file mode 100644
index 9ef6f3fb58..0000000000
--- a/deps/v8/src/heap/cppgc/default-job.h
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_CPPGC_DEFAULT_JOB_H_
-#define V8_HEAP_CPPGC_DEFAULT_JOB_H_
-
-#include <atomic>
-#include <map>
-#include <memory>
-#include <unordered_set>
-#include <vector>
-
-#include "include/cppgc/platform.h"
-#include "src/base/logging.h"
-#include "src/base/platform/mutex.h"
-
-namespace cppgc {
-namespace internal {
-
-template <typename Job>
-class DefaultJobFactory {
- public:
- static std::shared_ptr<Job> Create(std::unique_ptr<cppgc::JobTask> job_task) {
- std::shared_ptr<Job> job =
- std::make_shared<Job>(typename Job::Key(), std::move(job_task));
- job->NotifyConcurrencyIncrease();
- return job;
- }
-};
-
-template <typename Thread>
-class DefaultJobImpl {
- public:
- class JobDelegate;
- class JobHandle;
-
- class Key {
- private:
- Key() {}
-
- template <typename Job>
- friend class DefaultJobFactory;
- };
-
- DefaultJobImpl(Key, std::unique_ptr<cppgc::JobTask> job_task)
- : job_task_(std::move(job_task)) {}
-
- ~DefaultJobImpl() {
- Cancel();
- DCHECK_EQ(0, active_threads_.load(std::memory_order_relaxed));
- }
-
- void NotifyConcurrencyIncrease();
-
- void Join() {
- for (std::shared_ptr<Thread>& thread : job_threads_) thread->Join();
- job_threads_.clear();
- can_run_.store(false, std::memory_order_relaxed);
- }
-
- void Cancel() {
- can_run_.store(false, std::memory_order_relaxed);
- Join();
- }
-
- bool IsCompleted() const { return !IsRunning(); }
- bool IsRunning() const {
- uint8_t active_threads = active_threads_.load(std::memory_order_relaxed);
- return (active_threads + job_task_->GetMaxConcurrency(active_threads)) > 0;
- }
-
- bool CanRun() const { return can_run_.load(std::memory_order_relaxed); }
-
- void RunJobTask() {
- DCHECK_NOT_NULL(job_task_);
- NotifyJobThreadStart();
- JobDelegate delegate(this);
- job_task_->Run(&delegate);
- NotifyJobThreadEnd();
- }
-
- protected:
- virtual std::shared_ptr<Thread> CreateThread(DefaultJobImpl*) = 0;
-
- void NotifyJobThreadStart() {
- active_threads_.fetch_add(1, std::memory_order_relaxed);
- }
- void NotifyJobThreadEnd() {
- active_threads_.fetch_sub(1, std::memory_order_relaxed);
- }
-
- void GuaranteeAvailableIds(uint8_t max_threads) {
- if (max_threads <= highest_thread_count_) return;
- v8::base::MutexGuard guard(&ids_lock_);
- while (highest_thread_count_ < max_threads) {
- available_ids_.push_back(++highest_thread_count_);
- }
- }
-
- std::unique_ptr<cppgc::JobTask> job_task_;
- std::vector<std::shared_ptr<Thread>> job_threads_;
- std::atomic_bool can_run_{true};
- std::atomic<uint8_t> active_threads_{0};
-
- // Task id management.
- v8::base::Mutex ids_lock_;
- std::vector<uint8_t> available_ids_;
- uint8_t highest_thread_count_ = -1;
-};
-
-template <typename Thread>
-class DefaultJobImpl<Thread>::JobDelegate final : public cppgc::JobDelegate {
- public:
- explicit JobDelegate(DefaultJobImpl* job) : job_(job) {}
- ~JobDelegate() { ReleaseTaskId(); }
- bool ShouldYield() override { return !job_->CanRun(); }
- void NotifyConcurrencyIncrease() override {
- job_->NotifyConcurrencyIncrease();
- }
- uint8_t GetTaskId() override {
- AcquireTaskId();
- return job_thread_id_;
- }
-
- private:
- void AcquireTaskId() {
- if (job_thread_id_ != kInvalidTaskId) return;
- v8::base::MutexGuard guard(&job_->ids_lock_);
- job_thread_id_ = job_->available_ids_.back();
- DCHECK_NE(kInvalidTaskId, job_thread_id_);
- job_->available_ids_.pop_back();
- }
- void ReleaseTaskId() {
- if (job_thread_id_ == kInvalidTaskId) return;
- v8::base::MutexGuard guard(&job_->ids_lock_);
- job_->available_ids_.push_back(job_thread_id_);
- }
-
- DefaultJobImpl* const job_;
- static constexpr uint8_t kInvalidTaskId = std::numeric_limits<uint8_t>::max();
- uint8_t job_thread_id_ = kInvalidTaskId;
-};
-
-template <typename Thread>
-void DefaultJobImpl<Thread>::NotifyConcurrencyIncrease() {
- DCHECK(CanRun());
- static const size_t kMaxThreads = Thread::GetMaxSupportedConcurrency();
- uint8_t current_active_threads =
- active_threads_.load(std::memory_order_relaxed);
- size_t max_threads = std::min(
- kMaxThreads, job_task_->GetMaxConcurrency(current_active_threads));
- if (current_active_threads >= max_threads) return;
- DCHECK_LT(max_threads, std::numeric_limits<uint8_t>::max());
- GuaranteeAvailableIds(max_threads);
- for (uint8_t new_threads = max_threads - current_active_threads;
- new_threads > 0; --new_threads) {
- std::shared_ptr<Thread> thread = CreateThread(this);
- job_threads_.push_back(thread);
- }
-}
-
-template <typename Thread>
-class DefaultJobImpl<Thread>::JobHandle final : public cppgc::JobHandle {
- public:
- explicit JobHandle(std::shared_ptr<DefaultJobImpl> job)
- : job_(std::move(job)) {
- DCHECK_NOT_NULL(job_);
- }
-
- void NotifyConcurrencyIncrease() override {
- job_->NotifyConcurrencyIncrease();
- }
- void Join() override { job_->Join(); }
- void Cancel() override { job_->Cancel(); }
- bool IsCompleted() override { return job_->IsCompleted(); }
- bool IsRunning() override { return job_->IsRunning(); }
-
- private:
- std::shared_ptr<DefaultJobImpl> job_;
-};
-
-} // namespace internal
-} // namespace cppgc
-
-#endif // V8_HEAP_CPPGC_DEFAULT_JOB_H_
diff --git a/deps/v8/src/heap/cppgc/default-platform.cc b/deps/v8/src/heap/cppgc/default-platform.cc
deleted file mode 100644
index 0ac5440f7e..0000000000
--- a/deps/v8/src/heap/cppgc/default-platform.cc
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "include/cppgc/default-platform.h"
-
-#include <chrono> // NOLINT(build/c++11)
-#include <thread> // NOLINT(build/c++11)
-
-#include "src/base/logging.h"
-#include "src/base/page-allocator.h"
-#include "src/base/sys-info.h"
-#include "src/heap/cppgc/default-job.h"
-
-namespace cppgc {
-
-namespace internal {
-
-// Default implementation of Jobs based on std::thread.
-namespace {
-class DefaultJobThread final : private std::thread {
- public:
- template <typename Function>
- explicit DefaultJobThread(Function function)
- : std::thread(std::move(function)) {}
- ~DefaultJobThread() { DCHECK(!joinable()); }
-
- void Join() { join(); }
-
- static size_t GetMaxSupportedConcurrency() {
- return v8::base::SysInfo::NumberOfProcessors() - 1;
- }
-};
-} // namespace
-
-class DefaultJob final : public DefaultJobImpl<DefaultJobThread> {
- public:
- DefaultJob(Key key, std::unique_ptr<cppgc::JobTask> job_task)
- : DefaultJobImpl(key, std::move(job_task)) {}
-
- std::shared_ptr<DefaultJobThread> CreateThread(DefaultJobImpl* job) final {
- return std::make_shared<DefaultJobThread>([job = this] {
- DCHECK_NOT_NULL(job);
- job->RunJobTask();
- });
- }
-};
-
-} // namespace internal
-
-void DefaultTaskRunner::PostTask(std::unique_ptr<cppgc::Task> task) {
- tasks_.push_back(std::move(task));
-}
-
-void DefaultTaskRunner::PostDelayedTask(std::unique_ptr<cppgc::Task> task,
- double) {
- PostTask(std::move(task));
-}
-
-void DefaultTaskRunner::PostNonNestableTask(std::unique_ptr<cppgc::Task>) {
- UNREACHABLE();
-}
-
-void DefaultTaskRunner::PostNonNestableDelayedTask(std::unique_ptr<cppgc::Task>,
- double) {
- UNREACHABLE();
-}
-
-void DefaultTaskRunner::PostIdleTask(std::unique_ptr<cppgc::IdleTask> task) {
- idle_tasks_.push_back(std::move(task));
-}
-
-bool DefaultTaskRunner::RunSingleTask() {
- if (!tasks_.size()) return false;
-
- tasks_.back()->Run();
- tasks_.pop_back();
-
- return true;
-}
-
-bool DefaultTaskRunner::RunSingleIdleTask(double deadline_in_seconds) {
- if (!idle_tasks_.size()) return false;
-
- idle_tasks_.back()->Run(deadline_in_seconds);
- idle_tasks_.pop_back();
-
- return true;
-}
-
-void DefaultTaskRunner::RunUntilIdle() {
- for (auto& task : tasks_) {
- task->Run();
- }
- tasks_.clear();
-
- for (auto& task : idle_tasks_) {
- task->Run(std::numeric_limits<double>::infinity());
- }
- idle_tasks_.clear();
-}
-
-DefaultPlatform::DefaultPlatform()
- : page_allocator_(std::make_unique<v8::base::PageAllocator>()),
- foreground_task_runner_(std::make_shared<DefaultTaskRunner>()) {}
-
-DefaultPlatform::~DefaultPlatform() noexcept { WaitAllBackgroundTasks(); }
-
-cppgc::PageAllocator* DefaultPlatform::GetPageAllocator() {
- return page_allocator_.get();
-}
-
-double DefaultPlatform::MonotonicallyIncreasingTime() {
- return std::chrono::duration<double>(
- std::chrono::high_resolution_clock::now().time_since_epoch())
- .count();
-}
-
-std::shared_ptr<cppgc::TaskRunner> DefaultPlatform::GetForegroundTaskRunner() {
- return foreground_task_runner_;
-}
-
-std::unique_ptr<cppgc::JobHandle> DefaultPlatform::PostJob(
- cppgc::TaskPriority priority, std::unique_ptr<cppgc::JobTask> job_task) {
- std::shared_ptr<internal::DefaultJob> job =
- internal::DefaultJobFactory<internal::DefaultJob>::Create(
- std::move(job_task));
- jobs_.push_back(job);
- return std::make_unique<internal::DefaultJob::JobHandle>(std::move(job));
-}
-
-void DefaultPlatform::WaitAllForegroundTasks() {
- foreground_task_runner_->RunUntilIdle();
-}
-
-void DefaultPlatform::WaitAllBackgroundTasks() {
- for (auto& job : jobs_) {
- job->Join();
- }
- jobs_.clear();
-}
-
-} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/garbage-collector.h b/deps/v8/src/heap/cppgc/garbage-collector.h
index 1fc7ed925d..e5f6641bdf 100644
--- a/deps/v8/src/heap/cppgc/garbage-collector.h
+++ b/deps/v8/src/heap/cppgc/garbage-collector.h
@@ -19,7 +19,7 @@ class GarbageCollector {
using CollectionType = Marker::MarkingConfig::CollectionType;
using StackState = cppgc::Heap::StackState;
using MarkingType = Marker::MarkingConfig::MarkingType;
- using SweepingType = Sweeper::Config;
+ using SweepingType = Sweeper::SweepingConfig::SweepingType;
static constexpr Config ConservativeAtomicConfig() {
return {CollectionType::kMajor, StackState::kMayContainHeapPointers,
diff --git a/deps/v8/src/heap/cppgc/gc-info-table.h b/deps/v8/src/heap/cppgc/gc-info-table.h
index 749f30b258..c8ed97ad7d 100644
--- a/deps/v8/src/heap/cppgc/gc-info-table.h
+++ b/deps/v8/src/heap/cppgc/gc-info-table.h
@@ -23,9 +23,8 @@ namespace internal {
struct GCInfo final {
FinalizationCallback finalize;
TraceCallback trace;
+ NameCallback name;
bool has_v_table;
- // Keep sizeof(GCInfo) a power of 2.
- size_t padding = 0;
};
class V8_EXPORT GCInfoTable final {
diff --git a/deps/v8/src/heap/cppgc/gc-info.cc b/deps/v8/src/heap/cppgc/gc-info.cc
index 70970139b1..57d49fb322 100644
--- a/deps/v8/src/heap/cppgc/gc-info.cc
+++ b/deps/v8/src/heap/cppgc/gc-info.cc
@@ -11,9 +11,10 @@ namespace internal {
RegisteredGCInfoIndex::RegisteredGCInfoIndex(
FinalizationCallback finalization_callback, TraceCallback trace_callback,
- bool has_v_table)
+ NameCallback name_callback, bool has_v_table)
: index_(GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
- {finalization_callback, trace_callback, has_v_table})) {}
+ {finalization_callback, trace_callback, name_callback,
+ has_v_table})) {}
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/globals.h b/deps/v8/src/heap/cppgc/globals.h
index d286a7fa42..747b194fea 100644
--- a/deps/v8/src/heap/cppgc/globals.h
+++ b/deps/v8/src/heap/cppgc/globals.h
@@ -20,6 +20,9 @@ constexpr size_t kKB = 1024;
constexpr size_t kMB = kKB * 1024;
constexpr size_t kGB = kMB * 1024;
+// AccessMode used for choosing between atomic and non-atomic accesses.
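+// Used as a template argument on accessors, e.g.
+// header.IsMarked<AccessMode::kAtomic>() when the mark bit may be read
+// concurrently by the marker.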
+enum class AccessMode : uint8_t { kNonAtomic, kAtomic };
+
// See 6.7.6 (http://eel.is/c++draft/basic.align) for alignment restrictions. We
// do not fully support all alignment restrictions (following
// alignof(std::max_align_t)) but limit to alignof(double).
diff --git a/deps/v8/src/heap/cppgc/heap-base.cc b/deps/v8/src/heap/cppgc/heap-base.cc
index 5a92c4f159..50edce4b4e 100644
--- a/deps/v8/src/heap/cppgc/heap-base.cc
+++ b/deps/v8/src/heap/cppgc/heap-base.cc
@@ -53,8 +53,10 @@ class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
} // namespace
-HeapBase::HeapBase(std::shared_ptr<cppgc::Platform> platform,
- size_t custom_spaces, StackSupport stack_support)
+HeapBase::HeapBase(
+ std::shared_ptr<cppgc::Platform> platform,
+ const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
+ StackSupport stack_support)
: raw_heap_(this, custom_spaces),
platform_(std::move(platform)),
#if defined(CPPGC_CAGED_HEAP)
@@ -68,6 +70,7 @@ HeapBase::HeapBase(std::shared_ptr<cppgc::Platform> platform,
stack_(std::make_unique<heap::base::Stack>(
v8::base::Stack::GetStackStart())),
prefinalizer_handler_(std::make_unique<PreFinalizerHandler>()),
+ compactor_(raw_heap_),
object_allocator_(&raw_heap_, page_backend_.get(),
stats_collector_.get()),
sweeper_(&raw_heap_, platform_.get(), stats_collector_.get()),
@@ -86,10 +89,6 @@ HeapBase::NoGCScope::NoGCScope(HeapBase& heap) : heap_(heap) {
HeapBase::NoGCScope::~NoGCScope() { heap_.no_gc_scope_--; }
-void HeapBase::VerifyMarking(cppgc::Heap::StackState stack_state) {
- MarkingVerifier verifier(*this, stack_state);
-}
-
void HeapBase::AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded() {
if (marker_) marker_->AdvanceMarkingOnAllocation();
}
diff --git a/deps/v8/src/heap/cppgc/heap-base.h b/deps/v8/src/heap/cppgc/heap-base.h
index efc4dbd40d..f685d94ea5 100644
--- a/deps/v8/src/heap/cppgc/heap-base.h
+++ b/deps/v8/src/heap/cppgc/heap-base.h
@@ -12,6 +12,7 @@
#include "include/cppgc/internal/persistent-node.h"
#include "include/cppgc/macros.h"
#include "src/base/macros.h"
+#include "src/heap/cppgc/compactor.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/object-allocator.h"
#include "src/heap/cppgc/raw-heap.h"
@@ -62,7 +63,8 @@ class V8_EXPORT_PRIVATE HeapBase {
HeapBase& heap_;
};
- HeapBase(std::shared_ptr<cppgc::Platform> platform, size_t custom_spaces,
+ HeapBase(std::shared_ptr<cppgc::Platform> platform,
+ const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
StackSupport stack_support);
virtual ~HeapBase();
@@ -96,6 +98,8 @@ class V8_EXPORT_PRIVATE HeapBase {
MarkerBase* marker() const { return marker_.get(); }
+ Compactor& compactor() { return compactor_; }
+
ObjectAllocator& object_allocator() { return object_allocator_; }
Sweeper& sweeper() { return sweeper_; }
@@ -112,6 +116,18 @@ class V8_EXPORT_PRIVATE HeapBase {
const PersistentRegion& GetWeakPersistentRegion() const {
return weak_persistent_region_;
}
+ PersistentRegion& GetStrongCrossThreadPersistentRegion() {
+ return strong_cross_thread_persistent_region_;
+ }
+ const PersistentRegion& GetStrongCrossThreadPersistentRegion() const {
+ return strong_cross_thread_persistent_region_;
+ }
+ PersistentRegion& GetWeakCrossThreadPersistentRegion() {
+ return weak_cross_thread_persistent_region_;
+ }
+ const PersistentRegion& GetWeakCrossThreadPersistentRegion() const {
+ return weak_cross_thread_persistent_region_;
+ }
#if defined(CPPGC_YOUNG_GENERATION)
std::set<void*>& remembered_slots() { return remembered_slots_; }
@@ -124,8 +140,6 @@ class V8_EXPORT_PRIVATE HeapBase {
void AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
protected:
- void VerifyMarking(cppgc::Heap::StackState);
-
virtual void FinalizeIncrementalGarbageCollectionIfNeeded(
cppgc::Heap::StackState) = 0;
@@ -143,11 +157,14 @@ class V8_EXPORT_PRIVATE HeapBase {
std::unique_ptr<PreFinalizerHandler> prefinalizer_handler_;
std::unique_ptr<MarkerBase> marker_;
+ Compactor compactor_;
ObjectAllocator object_allocator_;
Sweeper sweeper_;
PersistentRegion strong_persistent_region_;
PersistentRegion weak_persistent_region_;
+ PersistentRegion strong_cross_thread_persistent_region_;
+ PersistentRegion weak_cross_thread_persistent_region_;
#if defined(CPPGC_YOUNG_GENERATION)
std::set<void*> remembered_slots_;
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.cc b/deps/v8/src/heap/cppgc/heap-object-header.cc
index ad6c570081..4ed2cf73ba 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.cc
+++ b/deps/v8/src/heap/cppgc/heap-object-header.cc
@@ -27,5 +27,15 @@ void HeapObjectHeader::Finalize() {
}
}
+HeapObjectName HeapObjectHeader::GetName() const {
+ const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
+ return gc_info.name(Payload());
+}
+
+void HeapObjectHeader::Trace(Visitor* visitor) const {
+ const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
+ return gc_info.trace(visitor, Payload());
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.h b/deps/v8/src/heap/cppgc/heap-object-header.h
index 1a49701f08..ce850453b6 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.h
+++ b/deps/v8/src/heap/cppgc/heap-object-header.h
@@ -11,6 +11,7 @@
#include "include/cppgc/allocation.h"
#include "include/cppgc/internal/gc-info.h"
+#include "include/cppgc/internal/name-trait.h"
#include "src/base/atomic-utils.h"
#include "src/base/bit-field.h"
#include "src/base/logging.h"
@@ -19,6 +20,9 @@
#include "src/heap/cppgc/globals.h"
namespace cppgc {
+
+class Visitor;
+
namespace internal {
// HeapObjectHeader contains meta data per object and is prepended to each
@@ -49,8 +53,6 @@ namespace internal {
// to allow potentially accessing them non-atomically.
class HeapObjectHeader {
public:
- enum class AccessMode : uint8_t { kNonAtomic, kAtomic };
-
static constexpr size_t kSizeLog2 = 17;
static constexpr size_t kMaxSize = (size_t{1} << kSizeLog2) - 1;
static constexpr uint16_t kLargeObjectSizeInHeader = 0;
@@ -93,6 +95,10 @@ class HeapObjectHeader {
inline bool IsFinalizable() const;
void Finalize();
+ V8_EXPORT_PRIVATE HeapObjectName GetName() const;
+
+ V8_EXPORT_PRIVATE void Trace(Visitor*) const;
+
private:
enum class EncodedHalf : uint8_t { kLow, kHigh };
@@ -152,8 +158,16 @@ HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
DCHECK_EQ(0u, size & (sizeof(HeapObjectHeader) - 1));
DCHECK_GE(kMaxSize, size);
- encoded_high_ = GCInfoIndexField::encode(gc_info_index);
encoded_low_ = EncodeSize(size);
+ // Objects may get published to the marker without any other synchronization
+ // (e.g., write barrier) in which case the in-construction bit is read
+ // concurrently which requires reading encoded_high_ atomically. It is ok if
+ // this write is not observed by the marker, since the sweeper sets the
+ // in-construction bit to 0 and we can rely on that to guarantee a correct
+ // answer when checking if objects are in-construction.
+ v8::base::AsAtomicPtr(&encoded_high_)
+ ->store(GCInfoIndexField::encode(gc_info_index),
+ std::memory_order_relaxed);
DCHECK(IsInConstruction());
#ifdef DEBUG
CheckApiConstants();
@@ -165,14 +179,14 @@ Address HeapObjectHeader::Payload() const {
sizeof(HeapObjectHeader);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
GCInfoIndex HeapObjectHeader::GetGCInfoIndex() const {
const uint16_t encoded =
LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
return GCInfoIndexField::decode(encoded);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
size_t HeapObjectHeader::GetSize() const {
// Size is immutable after construction while either marking or sweeping
// is running so relaxed load (if mode == kAtomic) is enough.
@@ -187,12 +201,12 @@ void HeapObjectHeader::SetSize(size_t size) {
encoded_low_ |= EncodeSize(size);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
bool HeapObjectHeader::IsLargeObject() const {
return GetSize<mode>() == kLargeObjectSizeInHeader;
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
bool HeapObjectHeader::IsInConstruction() const {
const uint16_t encoded =
LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
@@ -203,14 +217,14 @@ void HeapObjectHeader::MarkAsFullyConstructed() {
MakeGarbageCollectedTraitInternal::MarkObjectAsFullyConstructed(Payload());
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
bool HeapObjectHeader::IsMarked() const {
const uint16_t encoded =
LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
return MarkBitField::decode(encoded);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
void HeapObjectHeader::Unmark() {
DCHECK(IsMarked<mode>());
StoreEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>(
@@ -228,14 +242,14 @@ bool HeapObjectHeader::TryMarkAtomic() {
std::memory_order_relaxed);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
bool HeapObjectHeader::IsYoung() const {
return !IsMarked<mode>();
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
bool HeapObjectHeader::IsFree() const {
- return GetGCInfoIndex() == kFreeListGCInfoIndex;
+ return GetGCInfoIndex<mode>() == kFreeListGCInfoIndex;
}
bool HeapObjectHeader::IsFinalizable() const {
@@ -243,7 +257,7 @@ bool HeapObjectHeader::IsFinalizable() const {
return gc_info.finalize;
}
-template <HeapObjectHeader::AccessMode mode, HeapObjectHeader::EncodedHalf part,
+template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
std::memory_order memory_order>
uint16_t HeapObjectHeader::LoadEncoded() const {
const uint16_t& half =
@@ -252,7 +266,7 @@ uint16_t HeapObjectHeader::LoadEncoded() const {
return v8::base::AsAtomicPtr(&half)->load(memory_order);
}
-template <HeapObjectHeader::AccessMode mode, HeapObjectHeader::EncodedHalf part,
+template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
std::memory_order memory_order>
void HeapObjectHeader::StoreEncoded(uint16_t bits, uint16_t mask) {
// Caveat: Not all changes to HeapObjectHeader's bitfields go through
diff --git a/deps/v8/src/heap/cppgc/heap-page.cc b/deps/v8/src/heap/cppgc/heap-page.cc
index 1ac7fe7fee..b2b3d83182 100644
--- a/deps/v8/src/heap/cppgc/heap-page.cc
+++ b/deps/v8/src/heap/cppgc/heap-page.cc
@@ -112,6 +112,7 @@ NormalPage* NormalPage::Create(PageBackend* page_backend,
DCHECK_NOT_NULL(space);
void* memory = page_backend->AllocateNormalPageMemory(space->index());
auto* normal_page = new (memory) NormalPage(space->raw_heap()->heap(), space);
+ normal_page->SynchronizedStore();
return normal_page;
}
@@ -189,6 +190,7 @@ LargePage* LargePage::Create(PageBackend* page_backend, LargePageSpace* space,
auto* heap = space->raw_heap()->heap();
void* memory = page_backend->AllocateLargePageMemory(allocation_size);
LargePage* page = new (memory) LargePage(heap, space, size);
+ page->SynchronizedStore();
return page;
}
diff --git a/deps/v8/src/heap/cppgc/heap-page.h b/deps/v8/src/heap/cppgc/heap-page.h
index fbf6059ad7..bc3762b4ae 100644
--- a/deps/v8/src/heap/cppgc/heap-page.h
+++ b/deps/v8/src/heap/cppgc/heap-page.h
@@ -48,11 +48,9 @@ class V8_EXPORT_PRIVATE BasePage {
ConstAddress PayloadEnd() const;
// |address| must refer to real object.
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
HeapObjectHeader& ObjectHeaderFromInnerAddress(void* address) const;
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
const HeapObjectHeader& ObjectHeaderFromInnerAddress(
const void* address) const;
@@ -63,8 +61,24 @@ class V8_EXPORT_PRIVATE BasePage {
const HeapObjectHeader* TryObjectHeaderFromInnerAddress(
const void* address) const;
+ // SynchronizedLoad and SynchronizedStore are used to sync pages after they
+ // are allocated. std::atomic_thread_fence is sufficient in practice but is
+ // not recognized by tsan. Atomic load and store of the |type_| field are
+ // added for tsan builds.
+ void SynchronizedLoad() const {
+#if defined(THREAD_SANITIZER)
+ v8::base::AsAtomicPtr(&type_)->load(std::memory_order_acquire);
+#endif
+ }
+ void SynchronizedStore() {
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+#if defined(THREAD_SANITIZER)
+ v8::base::AsAtomicPtr(&type_)->store(type_, std::memory_order_release);
+#endif
+ }
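+ // Typical pairing: the allocating thread calls SynchronizedStore() right
+ // after page creation (see NormalPage::Create and LargePage::Create), while
+ // concurrent markers call SynchronizedLoad() on BasePage::FromPayload(...)
+ // before touching object headers.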
+
protected:
- enum class PageType { kNormal, kLarge };
+ enum class PageType : uint8_t { kNormal, kLarge };
BasePage(HeapBase*, BaseSpace*, PageType);
private:
@@ -221,8 +235,7 @@ const BasePage* BasePage::FromPayload(const void* payload) {
kGuardPageSize);
}
-template <HeapObjectHeader::AccessMode mode =
- HeapObjectHeader::AccessMode::kNonAtomic>
+template <AccessMode mode = AccessMode::kNonAtomic>
const HeapObjectHeader* ObjectHeaderFromInnerAddressImpl(const BasePage* page,
const void* address) {
if (page->is_large()) {
@@ -232,21 +245,27 @@ const HeapObjectHeader* ObjectHeaderFromInnerAddressImpl(const BasePage* page,
NormalPage::From(page)->object_start_bitmap();
const HeapObjectHeader* header =
bitmap.FindHeader<mode>(static_cast<ConstAddress>(address));
- DCHECK_LT(address,
- reinterpret_cast<ConstAddress>(header) +
- header->GetSize<HeapObjectHeader::AccessMode::kAtomic>());
+ DCHECK_LT(address, reinterpret_cast<ConstAddress>(header) +
+ header->GetSize<AccessMode::kAtomic>());
return header;
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(void* address) const {
return const_cast<HeapObjectHeader&>(
ObjectHeaderFromInnerAddress<mode>(const_cast<const void*>(address)));
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
const HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(
const void* address) const {
+ // This method might be called for |address| found via a Trace method of
+ // another object. If |address| is on a newly allocated page, there will
+ // be no synchronization between the page allocation and a concurrent
+ // marking thread, resulting in a race with page initialization
+ // (specifically with writing the page's |type_| field). This can occur
+ // when tracing a Member holding a reference to a mixin type.
+ SynchronizedLoad();
const HeapObjectHeader* header =
ObjectHeaderFromInnerAddressImpl<mode>(this, address);
DCHECK_NE(kFreeListGCInfoIndex, header->GetGCInfoIndex());
diff --git a/deps/v8/src/heap/cppgc/heap-space.cc b/deps/v8/src/heap/cppgc/heap-space.cc
index 7b15ba2254..9a78b44433 100644
--- a/deps/v8/src/heap/cppgc/heap-space.cc
+++ b/deps/v8/src/heap/cppgc/heap-space.cc
@@ -14,8 +14,11 @@
namespace cppgc {
namespace internal {
-BaseSpace::BaseSpace(RawHeap* heap, size_t index, PageType type)
- : heap_(heap), index_(index), type_(type) {}
+BaseSpace::BaseSpace(RawHeap* heap, size_t index, PageType type,
+ bool is_compactable)
+ : heap_(heap), index_(index), type_(type), is_compactable_(is_compactable) {
+ USE(is_compactable_);
+}
void BaseSpace::AddPage(BasePage* page) {
v8::base::LockGuard<v8::base::Mutex> lock(&pages_mutex_);
@@ -36,11 +39,12 @@ BaseSpace::Pages BaseSpace::RemoveAllPages() {
return pages;
}
-NormalPageSpace::NormalPageSpace(RawHeap* heap, size_t index)
- : BaseSpace(heap, index, PageType::kNormal) {}
+NormalPageSpace::NormalPageSpace(RawHeap* heap, size_t index,
+ bool is_compactable)
+ : BaseSpace(heap, index, PageType::kNormal, is_compactable) {}
LargePageSpace::LargePageSpace(RawHeap* heap, size_t index)
- : BaseSpace(heap, index, PageType::kLarge) {}
+ : BaseSpace(heap, index, PageType::kLarge, false /* is_compactable */) {}
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/heap-space.h b/deps/v8/src/heap/cppgc/heap-space.h
index a7e50d4f48..ac6dbba65c 100644
--- a/deps/v8/src/heap/cppgc/heap-space.h
+++ b/deps/v8/src/heap/cppgc/heap-space.h
@@ -47,9 +47,12 @@ class V8_EXPORT_PRIVATE BaseSpace {
void RemovePage(BasePage*);
Pages RemoveAllPages();
+ bool is_compactable() const { return is_compactable_; }
+
protected:
enum class PageType { kNormal, kLarge };
- explicit BaseSpace(RawHeap* heap, size_t index, PageType type);
+ explicit BaseSpace(RawHeap* heap, size_t index, PageType type,
+ bool is_compactable);
private:
RawHeap* heap_;
@@ -57,6 +60,7 @@ class V8_EXPORT_PRIVATE BaseSpace {
v8::base::Mutex pages_mutex_;
const size_t index_;
const PageType type_;
+ const bool is_compactable_;
};
class V8_EXPORT_PRIVATE NormalPageSpace final : public BaseSpace {
@@ -92,7 +96,7 @@ class V8_EXPORT_PRIVATE NormalPageSpace final : public BaseSpace {
return From(const_cast<BaseSpace*>(space));
}
- NormalPageSpace(RawHeap* heap, size_t index);
+ NormalPageSpace(RawHeap* heap, size_t index, bool is_compactable);
LinearAllocationBuffer& linear_allocation_buffer() { return current_lab_; }
const LinearAllocationBuffer& linear_allocation_buffer() const {
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index 0db04fb537..3da59fd1ee 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -10,6 +10,7 @@
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/marking-verifier.h"
#include "src/heap/cppgc/prefinalizer-handler.h"
namespace cppgc {
@@ -77,7 +78,7 @@ void CheckConfig(Heap::Config config) {
Heap::Heap(std::shared_ptr<cppgc::Platform> platform,
cppgc::Heap::HeapOptions options)
- : HeapBase(platform, options.custom_spaces.size(), options.stack_support),
+ : HeapBase(platform, options.custom_spaces, options.stack_support),
gc_invoker_(this, platform_.get(), options.stack_support),
growing_(&gc_invoker_, stats_collector_.get(),
options.resource_constraints) {}
@@ -151,21 +152,26 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
DCHECK(!in_no_gc_scope());
config_.stack_state = stack_state;
DCHECK(marker_);
- marker_->FinishMarking(stack_state);
{
- // Pre finalizers are forbidden from allocating objects.
+ // Prefinalizers are forbidden from allocating objects. Note that this
+ // also guards atomic pause marking below, meaning that no internal
+ // methods or external callbacks are allowed to allocate new objects.
ObjectAllocator::NoAllocationScope no_allocation_scope_(object_allocator_);
- marker_->ProcessWeakness();
+ marker_->FinishMarking(stack_state);
prefinalizer_handler_->InvokePreFinalizers();
}
marker_.reset();
// TODO(chromium:1056170): replace build flag with dedicated flag.
#if DEBUG
- VerifyMarking(stack_state);
+ MarkingVerifier verifier(*this);
+ verifier.Run(stack_state);
#endif
{
NoGCScope no_gc(*this);
- sweeper_.Start(config_.sweeping_type);
+ const Sweeper::SweepingConfig sweeping_config{
+ config_.sweeping_type,
+ Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep};
+ sweeper_.Start(sweeping_config);
}
gc_in_progress_ = false;
}
diff --git a/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc b/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc
index 7e1ff951ab..cef34b1efe 100644
--- a/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc
+++ b/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc
@@ -11,6 +11,9 @@
namespace cppgc {
namespace internal {
+// static
+constexpr size_t IncrementalMarkingSchedule::kInvalidLastEstimatedLiveBytes;
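+// (The out-of-line definition above is needed because the constant is
+// odr-used, e.g. by DCHECK_NE; static constexpr data members are not
+// implicitly inline before C++17.)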
+
const double IncrementalMarkingSchedule::kEstimatedMarkingTimeMs = 500.0;
const size_t IncrementalMarkingSchedule::kMinimumMarkedBytesPerIncrementalStep =
64 * kKB;
@@ -32,9 +35,12 @@ void IncrementalMarkingSchedule::AddConcurrentlyMarkedBytes(
concurrently_marked_bytes_.fetch_add(marked_bytes, std::memory_order_relaxed);
}
-size_t IncrementalMarkingSchedule::GetOverallMarkedBytes() {
- return incrementally_marked_bytes_ +
- concurrently_marked_bytes_.load(std::memory_order_relaxed);
+size_t IncrementalMarkingSchedule::GetOverallMarkedBytes() const {
+ return incrementally_marked_bytes_ + GetConcurrentlyMarkedBytes();
+}
+
+size_t IncrementalMarkingSchedule::GetConcurrentlyMarkedBytes() const {
+ return concurrently_marked_bytes_.load(std::memory_order_relaxed);
}
double IncrementalMarkingSchedule::GetElapsedTimeInMs(
@@ -49,6 +55,7 @@ double IncrementalMarkingSchedule::GetElapsedTimeInMs(
size_t IncrementalMarkingSchedule::GetNextIncrementalStepDuration(
size_t estimated_live_bytes) {
+ last_estimated_live_bytes_ = estimated_live_bytes;
DCHECK(!incremental_marking_start_time_.IsNull());
double elapsed_time_in_ms =
GetElapsedTimeInMs(incremental_marking_start_time_);
@@ -70,5 +77,17 @@ size_t IncrementalMarkingSchedule::GetNextIncrementalStepDuration(
expected_marked_bytes - actual_marked_bytes);
}
+constexpr double
+ IncrementalMarkingSchedule::kEphemeronPairsFlushingRatioIncrements;
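+// With an initial target of 0.25 and increments of 0.25, ephemeron pairs are
+// flushed at roughly 25%, 50%, 75%, and 100% of the estimated live bytes.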
+bool IncrementalMarkingSchedule::ShouldFlushEphemeronPairs() {
+ DCHECK_NE(kInvalidLastEstimatedLiveBytes, last_estimated_live_bytes_);
+ if (GetOverallMarkedBytes() <
+ (ephemeron_pairs_flushing_ratio_target_ * last_estimated_live_bytes_))
+ return false;
+ ephemeron_pairs_flushing_ratio_target_ +=
+ kEphemeronPairsFlushingRatioIncrements;
+ return true;
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/incremental-marking-schedule.h b/deps/v8/src/heap/cppgc/incremental-marking-schedule.h
index 3c8a9e1a01..a9a0f7d840 100644
--- a/deps/v8/src/heap/cppgc/incremental-marking-schedule.h
+++ b/deps/v8/src/heap/cppgc/incremental-marking-schedule.h
@@ -26,7 +26,8 @@ class V8_EXPORT_PRIVATE IncrementalMarkingSchedule {
void UpdateIncrementalMarkedBytes(size_t);
void AddConcurrentlyMarkedBytes(size_t);
- size_t GetOverallMarkedBytes();
+ size_t GetOverallMarkedBytes() const;
+ size_t GetConcurrentlyMarkedBytes() const;
size_t GetNextIncrementalStepDuration(size_t);
@@ -34,6 +35,8 @@ class V8_EXPORT_PRIVATE IncrementalMarkingSchedule {
elapsed_time_for_testing_ = elapsed_time;
}
+ bool ShouldFlushEphemeronPairs();
+
private:
double GetElapsedTimeInMs(v8::base::TimeTicks);
@@ -45,6 +48,11 @@ class V8_EXPORT_PRIVATE IncrementalMarkingSchedule {
// Using -1 as a sentinel to denote that no elapsed time was set for testing.
static constexpr double kNoSetElapsedTimeForTesting = -1;
double elapsed_time_for_testing_ = kNoSetElapsedTimeForTesting;
+
+ static constexpr size_t kInvalidLastEstimatedLiveBytes = -1;
+ size_t last_estimated_live_bytes_ = kInvalidLastEstimatedLiveBytes;
+ double ephemeron_pairs_flushing_ratio_target_ = 0.25;
+ static constexpr double kEphemeronPairsFlushingRatioIncrements = 0.25;
};
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index 0d044588b6..236bc12af4 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -15,6 +15,7 @@
#include "src/heap/cppgc/liveness-broker.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"
+#include "src/heap/cppgc/process-heap.h"
#include "src/heap/cppgc/stats-collector.h"
#if defined(CPPGC_CAGED_HEAP)
@@ -55,7 +56,8 @@ bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
}
// Visit remembered set that was recorded in the generational barrier.
-void VisitRememberedSlots(HeapBase& heap, MarkingState& marking_state) {
+void VisitRememberedSlots(HeapBase& heap,
+ MutatorMarkingState& mutator_marking_state) {
#if defined(CPPGC_YOUNG_GENERATION)
for (void* slot : heap.remembered_slots()) {
auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
@@ -65,11 +67,10 @@ void VisitRememberedSlots(HeapBase& heap, MarkingState& marking_state) {
// top level (with the guarantee that no objects are currently being in
// construction). This can be ensured by running young GCs from safe points
// or by reintroducing nested allocation scopes that avoid finalization.
- DCHECK(
- !header.IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>());
+ DCHECK(!header.IsInConstruction<AccessMode::kNonAtomic>());
void* value = *reinterpret_cast<void**>(slot);
- marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
+ mutator_marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
}
#endif
}
@@ -86,32 +87,13 @@ void ResetRememberedSet(HeapBase& heap) {
static constexpr size_t kDefaultDeadlineCheckInterval = 150u;
template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
- typename WorklistLocal, typename Callback, typename Predicate>
-bool DrainWorklistWithDeadline(Predicate should_yield,
- WorklistLocal& worklist_local,
- Callback callback) {
- size_t processed_callback_count = 0;
- typename WorklistLocal::ItemType item;
- while (worklist_local.Pop(&item)) {
- callback(item);
- if (processed_callback_count-- == 0) {
- if (should_yield()) {
- return false;
- }
- processed_callback_count = kDeadlineCheckInterval;
- }
- }
- return true;
-}
-
-template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
typename WorklistLocal, typename Callback>
-bool DrainWorklistWithBytesAndTimeDeadline(MarkingState& marking_state,
+bool DrainWorklistWithBytesAndTimeDeadline(MarkingStateBase& marking_state,
size_t marked_bytes_deadline,
v8::base::TimeTicks time_deadline,
WorklistLocal& worklist_local,
Callback callback) {
- return DrainWorklistWithDeadline(
+ return DrainWorklistWithPredicate<kDeadlineCheckInterval>(
[&marking_state, marked_bytes_deadline, time_deadline]() {
return (marked_bytes_deadline <= marking_state.marked_bytes()) ||
(time_deadline <= v8::base::TimeTicks::Now());
@@ -119,15 +101,6 @@ bool DrainWorklistWithBytesAndTimeDeadline(MarkingState& marking_state,
worklist_local, callback);
}
-void TraceMarkedObject(Visitor* visitor, const HeapObjectHeader* header) {
- DCHECK(header);
- DCHECK(!header->IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>());
- DCHECK(header->IsMarked<HeapObjectHeader::AccessMode::kNonAtomic>());
- const GCInfo& gcinfo =
- GlobalGCInfoTable::GCInfoFromIndex(header->GetGCInfoIndex());
- gcinfo.trace(visitor, header->Payload());
-}
-
size_t GetNextIncrementalStepDuration(IncrementalMarkingSchedule& schedule,
HeapBase& heap) {
return schedule.GetNextIncrementalStepDuration(
@@ -150,7 +123,7 @@ MarkerBase::IncrementalMarkingTask::Post(cppgc::TaskRunner* runner,
MarkerBase* marker) {
// Incremental GC is possible only via the GCInvoker, so getting here
// guarantees that either non-nestable tasks or conservative stack
- // scannnig are supported. This is required so that the incremental
+ // scanning is supported. This is required so that the incremental
// task can safely finalize GC if needed.
DCHECK_IMPLIES(marker->heap().stack_support() !=
HeapBase::StackSupport::kSupportsConservativeStackScan,
@@ -185,7 +158,8 @@ MarkerBase::MarkerBase(Key, HeapBase& heap, cppgc::Platform* platform,
config_(config),
platform_(platform),
foreground_task_runner_(platform_->GetForegroundTaskRunner()),
- mutator_marking_state_(heap, marking_worklists_) {}
+ mutator_marking_state_(heap, marking_worklists_,
+ heap.compactor().compaction_worklists()) {}
MarkerBase::~MarkerBase() {
// The fixed point iteration may have found not-fully-constructed objects.
@@ -194,19 +168,33 @@ MarkerBase::~MarkerBase() {
if (!marking_worklists_.not_fully_constructed_worklist()->IsEmpty()) {
#if DEBUG
DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state);
- HeapObjectHeader* header;
- MarkingWorklists::NotFullyConstructedWorklist::Local& local =
- mutator_marking_state_.not_fully_constructed_worklist();
- while (local.Pop(&header)) {
- DCHECK(header->IsMarked());
- }
+ std::unordered_set<HeapObjectHeader*> objects =
+ mutator_marking_state_.not_fully_constructed_worklist().Extract();
+ for (HeapObjectHeader* object : objects) DCHECK(object->IsMarked());
#else
marking_worklists_.not_fully_constructed_worklist()->Clear();
#endif
}
+
+ // |discovered_ephemeron_pairs_worklist_| may still hold ephemeron pairs with
+ // dead keys.
+ if (!marking_worklists_.discovered_ephemeron_pairs_worklist()->IsEmpty()) {
+#if DEBUG
+ MarkingWorklists::EphemeronPairItem item;
+ while (mutator_marking_state_.discovered_ephemeron_pairs_worklist().Pop(
+ &item)) {
+ DCHECK(!HeapObjectHeader::FromPayload(item.key).IsMarked());
+ }
+#else
+ marking_worklists_.discovered_ephemeron_pairs_worklist()->Clear();
+#endif
+ }
+
+ marking_worklists_.weak_containers_worklist()->Clear();
}
void MarkerBase::StartMarking() {
+ DCHECK(!is_marking_started_);
heap().stats_collector()->NotifyMarkingStarted();
is_marking_started_ = true;
@@ -216,21 +204,34 @@ void MarkerBase::StartMarking() {
// Scanning the stack is expensive so we only do it at the atomic pause.
VisitRoots(MarkingConfig::StackState::kNoHeapPointers);
ScheduleIncrementalMarkingTask();
+ if (config_.marking_type ==
+ MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+ mutator_marking_state_.Publish();
+ concurrent_marker_->Start();
+ }
}
}
void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
- // Cancel remaining incremental tasks.
- if (incremental_marking_handle_) incremental_marking_handle_.Cancel();
+ // Cancel remaining concurrent/incremental tasks.
+ concurrent_marker_->Cancel();
+ incremental_marking_handle_.Cancel();
}
config_.stack_state = stack_state;
config_.marking_type = MarkingConfig::MarkingType::kAtomic;
+ // The lock guards against changes to {Weak}CrossThreadPersistent handles
+ // that may conflict with marking. E.g., a WeakCrossThreadPersistent may be
+ // converted into a CrossThreadPersistent, which requires that the handle
+ // is either cleared or the object is retained.
+ g_process_mutex.Pointer()->Lock();
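+ // The matching Unlock() happens at the end of LeaveAtomicPause().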
+
// VisitRoots also resets the LABs.
VisitRoots(config_.stack_state);
if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
mutator_marking_state_.FlushNotFullyConstructedObjects();
+ DCHECK(marking_worklists_.not_fully_constructed_worklist()->IsEmpty());
} else {
MarkNotFullyConstructedObjects();
}
@@ -242,20 +243,26 @@ void MarkerBase::LeaveAtomicPause() {
heap().stats_collector()->NotifyMarkingCompleted(
// GetOverallMarkedBytes also includes concurrently marked bytes.
schedule_.GetOverallMarkedBytes());
+ is_marking_started_ = false;
+ ProcessWeakness();
+ g_process_mutex.Pointer()->Unlock();
}
void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
DCHECK(is_marking_started_);
EnterAtomicPause(stack_state);
- ProcessWorklistsWithDeadline(std::numeric_limits<size_t>::max(),
- v8::base::TimeTicks::Max());
+ CHECK(ProcessWorklistsWithDeadline(std::numeric_limits<size_t>::max(),
+ v8::base::TimeTicks::Max()));
mutator_marking_state_.Publish();
LeaveAtomicPause();
- is_marking_started_ = false;
}
void MarkerBase::ProcessWeakness() {
+ DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
heap().GetWeakPersistentRegion().Trace(&visitor());
+ // Processing cross-thread handles requires taking the process lock.
+ g_process_mutex.Get().AssertHeld();
+ heap().GetWeakCrossThreadPersistentRegion().Trace(&visitor());
// Call weak callbacks on objects that may now be pointing to dead objects.
MarkingWorklists::WeakCallbackItem item;
@@ -275,6 +282,10 @@ void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
heap().object_allocator().ResetLinearAllocationBuffers();
heap().GetStrongPersistentRegion().Trace(&visitor());
+ if (config_.marking_type == MarkingConfig::MarkingType::kAtomic) {
+ g_process_mutex.Get().AssertHeld();
+ heap().GetStrongCrossThreadPersistentRegion().Trace(&visitor());
+ }
if (stack_state != MarkingConfig::StackState::kNoHeapPointers) {
heap().stack()->IteratePointers(&stack_visitor());
}
@@ -284,8 +295,8 @@ void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
}
void MarkerBase::ScheduleIncrementalMarkingTask() {
- if (!platform_ || !foreground_task_runner_ || incremental_marking_handle_)
- return;
+ DCHECK(platform_);
+ if (!foreground_task_runner_ || incremental_marking_handle_) return;
incremental_marking_handle_ =
IncrementalMarkingTask::Post(foreground_task_runner_.get(), this);
}
@@ -304,13 +315,11 @@ bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state) {
return AdvanceMarkingWithDeadline();
}
-bool MarkerBase::AdvanceMarkingOnAllocation() {
- bool is_done = AdvanceMarkingWithDeadline();
- if (is_done) {
+void MarkerBase::AdvanceMarkingOnAllocation() {
+ if (AdvanceMarkingWithDeadline()) {
// Schedule another incremental task for finalizing without a stack.
ScheduleIncrementalMarkingTask();
}
- return is_done;
}
bool MarkerBase::AdvanceMarkingWithMaxDuration(
@@ -326,29 +335,51 @@ bool MarkerBase::AdvanceMarkingWithDeadline(v8::base::TimeDelta max_duration) {
is_done = ProcessWorklistsWithDeadline(
mutator_marking_state_.marked_bytes() + step_size_in_bytes,
v8::base::TimeTicks::Now() + max_duration);
+ schedule_.UpdateIncrementalMarkedBytes(
+ mutator_marking_state_.marked_bytes());
}
- schedule_.UpdateIncrementalMarkedBytes(mutator_marking_state_.marked_bytes());
+ mutator_marking_state_.Publish();
if (!is_done) {
// If marking is atomic, |is_done| should always be true.
DCHECK_NE(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
ScheduleIncrementalMarkingTask();
+ if (config_.marking_type ==
+ MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+ concurrent_marker_->NotifyIncrementalMutatorStepCompleted();
+ }
}
- mutator_marking_state_.Publish();
return is_done;
}
bool MarkerBase::ProcessWorklistsWithDeadline(
size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
do {
- // Convert |previously_not_fully_constructed_worklist_| to
- // |marking_worklist_|. This merely re-adds items with the proper
- // callbacks.
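+ // Discovered ephemeron pairs are flushed to the processing worklist in
+ // batches: unconditionally in the atomic pause, otherwise whenever the
+ // schedule reports enough overall marking progress.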
+ if ((config_.marking_type == MarkingConfig::MarkingType::kAtomic) ||
+ schedule_.ShouldFlushEphemeronPairs()) {
+ mutator_marking_state_.FlushDiscoveredEphemeronPairs();
+ }
+
+ // Bailout objects may be complicated to trace and thus might take longer
+ // than other objects. Therefore we reduce the interval between deadline
+ // checks to guarantee the deadline is not exceeded.
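+ // With kDefaultDeadlineCheckInterval = 150 this re-checks the deadline
+ // every 30 bailout objects instead of every 150.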
+ if (!DrainWorklistWithBytesAndTimeDeadline<kDefaultDeadlineCheckInterval /
+ 5>(
+ mutator_marking_state_, marked_bytes_deadline, time_deadline,
+ mutator_marking_state_.concurrent_marking_bailout_worklist(),
+ [this](const MarkingWorklists::ConcurrentMarkingBailoutItem& item) {
+ mutator_marking_state_.AccountMarkedBytes(item.bailedout_size);
+ item.callback(&visitor(), item.parameter);
+ })) {
+ return false;
+ }
+
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.previously_not_fully_constructed_worklist(),
[this](HeapObjectHeader* header) {
- TraceMarkedObject(&visitor(), header);
mutator_marking_state_.AccountMarkedBytes(*header);
+ DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
+ *header);
})) {
return false;
}
@@ -359,12 +390,10 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
[this](const MarkingWorklists::MarkingItem& item) {
const HeapObjectHeader& header =
HeapObjectHeader::FromPayload(item.base_object_payload);
- DCHECK(!header.IsInConstruction<
- HeapObjectHeader::AccessMode::kNonAtomic>());
- DCHECK(
- header.IsMarked<HeapObjectHeader::AccessMode::kNonAtomic>());
- item.callback(&visitor(), item.base_object_payload);
+ DCHECK(!header.IsInConstruction<AccessMode::kNonAtomic>());
+ DCHECK(header.IsMarked<AccessMode::kNonAtomic>());
mutator_marking_state_.AccountMarkedBytes(header);
+ item.callback(&visitor(), item.base_object_payload);
})) {
return false;
}
@@ -373,8 +402,19 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.write_barrier_worklist(),
[this](HeapObjectHeader* header) {
- TraceMarkedObject(&visitor(), header);
mutator_marking_state_.AccountMarkedBytes(*header);
+ DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
+ *header);
+ })) {
+ return false;
+ }
+
+ if (!DrainWorklistWithBytesAndTimeDeadline(
+ mutator_marking_state_, marked_bytes_deadline, time_deadline,
+ mutator_marking_state_.ephemeron_pairs_for_processing_worklist(),
+ [this](const MarkingWorklists::EphemeronPairItem& item) {
+ mutator_marking_state_.ProcessEphemeron(item.key,
+ item.value_desc);
})) {
return false;
}
@@ -383,32 +423,47 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
}
void MarkerBase::MarkNotFullyConstructedObjects() {
- HeapObjectHeader* header;
- MarkingWorklists::NotFullyConstructedWorklist::Local& local =
- mutator_marking_state_.not_fully_constructed_worklist();
- while (local.Pop(&header)) {
- DCHECK(header);
- DCHECK(header->IsMarked<HeapObjectHeader::AccessMode::kNonAtomic>());
+ std::unordered_set<HeapObjectHeader*> objects =
+ mutator_marking_state_.not_fully_constructed_worklist().Extract();
+ for (HeapObjectHeader* object : objects) {
+ DCHECK(object);
+ if (!mutator_marking_state_.MarkNoPush(*object)) continue;
// TraceConservativelyIfNeeded will either push to a worklist
// or trace conservatively and call AccountMarkedBytes.
- conservative_visitor().TraceConservativelyIfNeeded(*header);
+ conservative_visitor().TraceConservativelyIfNeeded(*object);
}
}
void MarkerBase::ClearAllWorklistsForTesting() {
marking_worklists_.ClearForTesting();
+ auto* compaction_worklists = heap_.compactor().compaction_worklists();
+ if (compaction_worklists) compaction_worklists->ClearForTesting();
}
void MarkerBase::DisableIncrementalMarkingForTesting() {
incremental_marking_disabled_for_testing_ = true;
}
+void MarkerBase::WaitForConcurrentMarkingForTesting() {
+ concurrent_marker_->JoinForTesting();
+}
+
+void MarkerBase::NotifyCompactionCancelled() {
+ // Compaction cannot be cancelled while concurrent marking is active.
+ DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
+ DCHECK_IMPLIES(concurrent_marker_, !concurrent_marker_->IsActive());
+ mutator_marking_state_.NotifyCompactionCancelled();
+}
+
Marker::Marker(Key key, HeapBase& heap, cppgc::Platform* platform,
MarkingConfig config)
: MarkerBase(key, heap, platform, config),
marking_visitor_(heap, mutator_marking_state_),
conservative_marking_visitor_(heap, mutator_marking_state_,
- marking_visitor_) {}
+ marking_visitor_) {
+ concurrent_marker_ = std::make_unique<ConcurrentMarker>(
+ heap_, marking_worklists_, schedule_, platform_);
+}
} // namespace internal
} // namespace cppgc
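The hunks above make AdvanceMarkingWithDeadline drain several worklists under a combined byte-count and wall-clock budget, checking the deadline only every N items to keep the hot loop cheap. A minimal, self-contained sketch of that pattern; all names here are illustrative, not the V8 API:

#include <chrono>
#include <cstddef>
#include <deque>
#include <functional>

using Clock = std::chrono::steady_clock;

// Stand-in for a marking worklist entry: the bytes it accounts for plus the
// tracing work to run for it.
struct Item {
  size_t size;
  std::function<void()> trace;
};

// Pops items until the list is empty, the byte budget is spent, or the time
// deadline passes. Returns true iff the worklist was fully drained.
bool DrainWithDeadline(std::deque<Item>& worklist, size_t& marked_bytes,
                       size_t marked_bytes_deadline,
                       Clock::time_point time_deadline) {
  constexpr size_t kDeadlineCheckInterval = 1250;
  size_t until_check = kDeadlineCheckInterval;
  while (!worklist.empty()) {
    Item item = std::move(worklist.front());
    worklist.pop_front();
    marked_bytes += item.size;
    item.trace();
    if (--until_check == 0) {
      // Deadline checks are amortized over kDeadlineCheckInterval items;
      // bailout items would use a smaller interval, as the diff notes.
      if (marked_bytes >= marked_bytes_deadline ||
          Clock::now() >= time_deadline) {
        return false;  // yield; an incremental task resumes later
      }
      until_check = kDeadlineCheckInterval;
    }
  }
  return true;
}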
diff --git a/deps/v8/src/heap/cppgc/marker.h b/deps/v8/src/heap/cppgc/marker.h
index 47ce9998b4..85a8027206 100644
--- a/deps/v8/src/heap/cppgc/marker.h
+++ b/deps/v8/src/heap/cppgc/marker.h
@@ -12,6 +12,7 @@
#include "src/base/macros.h"
#include "src/base/platform/time.h"
#include "src/heap/base/worklist.h"
+#include "src/heap/cppgc/concurrent-marker.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/incremental-marking-schedule.h"
#include "src/heap/cppgc/marking-state.h"
@@ -73,7 +74,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
bool AdvanceMarkingWithMaxDuration(v8::base::TimeDelta);
// Makes marking progress when allocating a new LAB (linear allocation buffer).
- bool AdvanceMarkingOnAllocation();
+ void AdvanceMarkingOnAllocation();
// Signals leaving the atomic marking pause. This method expects no more
// objects to be marked and merely updates marking states if needed.
@@ -82,6 +83,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
// Combines:
// - EnterAtomicPause()
// - AdvanceMarkingWithDeadline()
+ // - ProcessWeakness()
// - LeaveAtomicPause()
void FinishMarking(MarkingConfig::StackState);
@@ -93,7 +95,9 @@ class V8_EXPORT_PRIVATE MarkerBase {
HeapBase& heap() { return heap_; }
MarkingWorklists& MarkingWorklistsForTesting() { return marking_worklists_; }
- MarkingState& MarkingStateForTesting() { return mutator_marking_state_; }
+ MutatorMarkingState& MutatorMarkingStateForTesting() {
+ return mutator_marking_state_;
+ }
cppgc::Visitor& VisitorForTesting() { return visitor(); }
void ClearAllWorklistsForTesting();
@@ -118,6 +122,10 @@ class V8_EXPORT_PRIVATE MarkerBase {
void DisableIncrementalMarkingForTesting();
+ void WaitForConcurrentMarkingForTesting();
+
+ void NotifyCompactionCancelled();
+
protected:
static constexpr v8::base::TimeDelta kMaximumIncrementalStepDuration =
v8::base::TimeDelta::FromMilliseconds(2);
@@ -162,11 +170,13 @@ class V8_EXPORT_PRIVATE MarkerBase {
IncrementalMarkingTask::Handle incremental_marking_handle_;
MarkingWorklists marking_worklists_;
- MarkingState mutator_marking_state_;
- bool is_marking_started_ = false;
+ MutatorMarkingState mutator_marking_state_;
+ bool is_marking_started_{false};
IncrementalMarkingSchedule schedule_;
+ std::unique_ptr<ConcurrentMarkerBase> concurrent_marker_{nullptr};
+
bool incremental_marking_disabled_for_testing_{false};
friend class MarkerFactory;
@@ -200,12 +210,13 @@ class V8_EXPORT_PRIVATE Marker final : public MarkerBase {
}
private:
- MarkingVisitor marking_visitor_;
+ MutatorMarkingVisitor marking_visitor_;
ConservativeMarkingVisitor conservative_marking_visitor_;
};
void MarkerBase::WriteBarrierForInConstructionObject(HeapObjectHeader& header) {
- mutator_marking_state_.not_fully_constructed_worklist().Push(&header);
+ mutator_marking_state_.not_fully_constructed_worklist()
+ .Push<AccessMode::kAtomic>(&header);
}
void MarkerBase::WriteBarrierForObject(HeapObjectHeader& header) {
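The inline write barriers above split objects by construction state: in-construction objects go into a mutex-guarded set (pushed with AccessMode::kAtomic, since concurrent markers may touch it), while fully constructed ones take the regular mark-and-push path. A rough sketch of that dispatch under hypothetical types:

#include <mutex>
#include <unordered_set>
#include <vector>

struct Header {
  bool in_construction = false;
  bool marked = false;
};

struct Worklists {
  // In-construction objects cannot yet be traced by type; they are parked in
  // a guarded set and revisited when marking finishes.
  std::mutex not_fully_constructed_mutex;
  std::unordered_set<Header*> not_fully_constructed;
  std::vector<Header*> write_barrier;  // traced by type later
};

void WriteBarrier(Worklists& worklists, Header& header) {
  if (header.in_construction) {
    std::lock_guard<std::mutex> guard(worklists.not_fully_constructed_mutex);
    worklists.not_fully_constructed.insert(&header);
    return;
  }
  if (!header.marked) {  // mark-and-push at most once per object
    header.marked = true;
    worklists.write_barrier.push_back(&header);
  }
}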
diff --git a/deps/v8/src/heap/cppgc/marking-state.cc b/deps/v8/src/heap/cppgc/marking-state.cc
index 0cc160bd0d..1796e67cbe 100644
--- a/deps/v8/src/heap/cppgc/marking-state.cc
+++ b/deps/v8/src/heap/cppgc/marking-state.cc
@@ -4,16 +4,26 @@
#include "src/heap/cppgc/marking-state.h"
+#include <unordered_set>
+
namespace cppgc {
namespace internal {
-void MarkingState::FlushNotFullyConstructedObjects() {
- not_fully_constructed_worklist().Publish();
- if (!not_fully_constructed_worklist_.IsGlobalEmpty()) {
- previously_not_fully_constructed_worklist_.Merge(
- &not_fully_constructed_worklist_);
+void MutatorMarkingState::FlushNotFullyConstructedObjects() {
+ std::unordered_set<HeapObjectHeader*> objects =
+ not_fully_constructed_worklist_.Extract<AccessMode::kAtomic>();
+ for (HeapObjectHeader* object : objects) {
+ if (MarkNoPush(*object))
+ previously_not_fully_constructed_worklist_.Push(object);
+ }
+}
+
+void MutatorMarkingState::FlushDiscoveredEphemeronPairs() {
+ discovered_ephemeron_pairs_worklist_.Publish();
+ if (!discovered_ephemeron_pairs_worklist_.IsGlobalEmpty()) {
+ ephemeron_pairs_for_processing_worklist_.Merge(
+ &discovered_ephemeron_pairs_worklist_);
}
- DCHECK(not_fully_constructed_worklist_.IsGlobalEmpty());
}
} // namespace internal
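FlushDiscoveredEphemeronPairs above follows a publish-then-merge shape: the thread-local buffer is published to its global segment and, if anything was discovered, merged into the list scheduled for processing. Reduced to plain containers (hypothetical names, no concurrency):

#include <iterator>
#include <vector>

template <typename T>
struct TwoLevelWorklist {
  std::vector<T> local;   // thread-local segment, cheap to push to
  std::vector<T> global;  // shared segment, visible to all markers

  void Publish() {  // move everything local into the global segment
    global.insert(global.end(), std::make_move_iterator(local.begin()),
                  std::make_move_iterator(local.end()));
    local.clear();
  }
  bool IsGlobalEmpty() const { return global.empty(); }
  void MergeInto(TwoLevelWorklist& other) {
    other.global.insert(other.global.end(),
                        std::make_move_iterator(global.begin()),
                        std::make_move_iterator(global.end()));
    global.clear();
  }
};

template <typename T>
void FlushDiscovered(TwoLevelWorklist<T>& discovered,
                     TwoLevelWorklist<T>& for_processing) {
  discovered.Publish();
  if (!discovered.IsGlobalEmpty()) discovered.MergeInto(for_processing);
}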
diff --git a/deps/v8/src/heap/cppgc/marking-state.h b/deps/v8/src/heap/cppgc/marking-state.h
index 526633d455..777ee08e35 100644
--- a/deps/v8/src/heap/cppgc/marking-state.h
+++ b/deps/v8/src/heap/cppgc/marking-state.h
@@ -6,6 +6,7 @@
#define V8_HEAP_CPPGC_MARKING_STATE_H_
#include "include/cppgc/trace-trait.h"
+#include "src/heap/cppgc/compaction-worklists.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
@@ -16,52 +17,60 @@ namespace cppgc {
namespace internal {
// C++ marking implementation.
-class MarkingState {
+class MarkingStateBase {
public:
- inline MarkingState(HeapBase& heap, MarkingWorklists&);
+ inline MarkingStateBase(HeapBase& heap, MarkingWorklists&,
+ CompactionWorklists*);
- MarkingState(const MarkingState&) = delete;
- MarkingState& operator=(const MarkingState&) = delete;
+ MarkingStateBase(const MarkingStateBase&) = delete;
+ MarkingStateBase& operator=(const MarkingStateBase&) = delete;
inline void MarkAndPush(const void*, TraceDescriptor);
- inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
inline void MarkAndPush(HeapObjectHeader&);
- inline bool MarkNoPush(HeapObjectHeader&);
-
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
- inline void DynamicallyMarkAddress(ConstAddress);
+ inline void PushMarked(HeapObjectHeader&, TraceDescriptor desc);
inline void RegisterWeakReferenceIfNeeded(const void*, TraceDescriptor,
WeakCallback, const void*);
inline void RegisterWeakCallback(WeakCallback, const void*);
- inline void InvokeWeakRootsCallbackIfNeeded(const void*, TraceDescriptor,
- WeakCallback, const void*);
+
+ void RegisterMovableReference(const void** slot) {
+ if (!movable_slots_worklist_) return;
+ movable_slots_worklist_->Push(slot);
+ }
+
+ // Weak containers are special in that they may require re-tracing if
+ // reachable through stack, even if the container was already traced before.
+ // ProcessWeakContainer records which weak containers were already marked so
+ // that conservative stack scanning knows to retrace them.
+ inline void ProcessWeakContainer(const void*, TraceDescriptor, WeakCallback,
+ const void*);
+
+ inline void ProcessEphemeron(const void*, TraceDescriptor);
inline void AccountMarkedBytes(const HeapObjectHeader&);
+ inline void AccountMarkedBytes(size_t);
size_t marked_bytes() const { return marked_bytes_; }
void Publish() {
marking_worklist_.Publish();
- not_fully_constructed_worklist_.Publish();
previously_not_fully_constructed_worklist_.Publish();
weak_callback_worklist_.Publish();
write_barrier_worklist_.Publish();
+ concurrent_marking_bailout_worklist_.Publish();
+ discovered_ephemeron_pairs_worklist_.Publish();
+ ephemeron_pairs_for_processing_worklist_.Publish();
+ if (IsCompactionEnabled()) movable_slots_worklist_->Publish();
}
- // Moves objects in not_fully_constructed_worklist_ to
- // previously_not_full_constructed_worklists_.
- void FlushNotFullyConstructedObjects();
-
MarkingWorklists::MarkingWorklist::Local& marking_worklist() {
return marking_worklist_;
}
- MarkingWorklists::NotFullyConstructedWorklist::Local&
+ MarkingWorklists::NotFullyConstructedWorklist&
not_fully_constructed_worklist() {
return not_fully_constructed_worklist_;
}
- MarkingWorklists::NotFullyConstructedWorklist::Local&
+ MarkingWorklists::PreviouslyNotFullyConstructedWorklist::Local&
previously_not_fully_constructed_worklist() {
return previously_not_fully_constructed_worklist_;
}
@@ -71,103 +80,269 @@ class MarkingState {
MarkingWorklists::WriteBarrierWorklist::Local& write_barrier_worklist() {
return write_barrier_worklist_;
}
+ MarkingWorklists::ConcurrentMarkingBailoutWorklist::Local&
+ concurrent_marking_bailout_worklist() {
+ return concurrent_marking_bailout_worklist_;
+ }
+ MarkingWorklists::EphemeronPairsWorklist::Local&
+ discovered_ephemeron_pairs_worklist() {
+ return discovered_ephemeron_pairs_worklist_;
+ }
+ MarkingWorklists::EphemeronPairsWorklist::Local&
+ ephemeron_pairs_for_processing_worklist() {
+ return ephemeron_pairs_for_processing_worklist_;
+ }
+ MarkingWorklists::WeakContainersWorklist& weak_containers_worklist() {
+ return weak_containers_worklist_;
+ }
+
+ CompactionWorklists::MovableReferencesWorklist::Local*
+ movable_slots_worklist() {
+ return movable_slots_worklist_.get();
+ }
+
+ void NotifyCompactionCancelled() {
+ DCHECK(IsCompactionEnabled());
+ movable_slots_worklist_->Clear();
+ movable_slots_worklist_.reset();
+ }
+
+ protected:
+ inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
+
+ inline bool MarkNoPush(HeapObjectHeader&);
+
+ inline void RegisterWeakContainer(HeapObjectHeader&);
+
+ inline bool IsCompactionEnabled() const {
+ return movable_slots_worklist_.get();
+ }
- private:
#ifdef DEBUG
HeapBase& heap_;
#endif // DEBUG
MarkingWorklists::MarkingWorklist::Local marking_worklist_;
- MarkingWorklists::NotFullyConstructedWorklist::Local
+ MarkingWorklists::NotFullyConstructedWorklist&
not_fully_constructed_worklist_;
- MarkingWorklists::NotFullyConstructedWorklist::Local
+ MarkingWorklists::PreviouslyNotFullyConstructedWorklist::Local
previously_not_fully_constructed_worklist_;
MarkingWorklists::WeakCallbackWorklist::Local weak_callback_worklist_;
MarkingWorklists::WriteBarrierWorklist::Local write_barrier_worklist_;
+ MarkingWorklists::ConcurrentMarkingBailoutWorklist::Local
+ concurrent_marking_bailout_worklist_;
+ MarkingWorklists::EphemeronPairsWorklist::Local
+ discovered_ephemeron_pairs_worklist_;
+ MarkingWorklists::EphemeronPairsWorklist::Local
+ ephemeron_pairs_for_processing_worklist_;
+ MarkingWorklists::WeakContainersWorklist& weak_containers_worklist_;
+ // Existence of the worklist (|movable_slots_worklist_| != nullptr) denotes
+ // that compaction is currently enabled and slots must be recorded.
+ std::unique_ptr<CompactionWorklists::MovableReferencesWorklist::Local>
+ movable_slots_worklist_;
size_t marked_bytes_ = 0;
};
-MarkingState::MarkingState(HeapBase& heap, MarkingWorklists& marking_worklists)
+MarkingStateBase::MarkingStateBase(HeapBase& heap,
+ MarkingWorklists& marking_worklists,
+ CompactionWorklists* compaction_worklists)
:
#ifdef DEBUG
heap_(heap),
#endif // DEBUG
marking_worklist_(marking_worklists.marking_worklist()),
not_fully_constructed_worklist_(
- marking_worklists.not_fully_constructed_worklist()),
+ *marking_worklists.not_fully_constructed_worklist()),
previously_not_fully_constructed_worklist_(
marking_worklists.previously_not_fully_constructed_worklist()),
weak_callback_worklist_(marking_worklists.weak_callback_worklist()),
- write_barrier_worklist_(marking_worklists.write_barrier_worklist()) {
+ write_barrier_worklist_(marking_worklists.write_barrier_worklist()),
+ concurrent_marking_bailout_worklist_(
+ marking_worklists.concurrent_marking_bailout_worklist()),
+ discovered_ephemeron_pairs_worklist_(
+ marking_worklists.discovered_ephemeron_pairs_worklist()),
+ ephemeron_pairs_for_processing_worklist_(
+ marking_worklists.ephemeron_pairs_for_processing_worklist()),
+ weak_containers_worklist_(*marking_worklists.weak_containers_worklist()) {
+ if (compaction_worklists) {
+ movable_slots_worklist_ =
+ std::make_unique<CompactionWorklists::MovableReferencesWorklist::Local>(
+ compaction_worklists->movable_slots_worklist());
+ }
}
-void MarkingState::MarkAndPush(const void* object, TraceDescriptor desc) {
+void MarkingStateBase::MarkAndPush(const void* object, TraceDescriptor desc) {
DCHECK_NOT_NULL(object);
MarkAndPush(HeapObjectHeader::FromPayload(
const_cast<void*>(desc.base_object_payload)),
desc);
}
-void MarkingState::MarkAndPush(HeapObjectHeader& header, TraceDescriptor desc) {
+void MarkingStateBase::MarkAndPush(HeapObjectHeader& header,
+ TraceDescriptor desc) {
DCHECK_NOT_NULL(desc.callback);
- if (!MarkNoPush(header)) return;
-
- if (header.IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>()) {
- not_fully_constructed_worklist_.Push(&header);
- } else {
- marking_worklist_.Push(desc);
+ if (header.IsInConstruction<AccessMode::kAtomic>()) {
+ not_fully_constructed_worklist_.Push<AccessMode::kAtomic>(&header);
+ } else if (MarkNoPush(header)) {
+ PushMarked(header, desc);
}
}
-bool MarkingState::MarkNoPush(HeapObjectHeader& header) {
+bool MarkingStateBase::MarkNoPush(HeapObjectHeader& header) {
// A GC should only mark the objects that belong in its heap.
DCHECK_EQ(&heap_, BasePage::FromPayload(&header)->heap());
// Never mark free-space objects. Doing so would, e.g., amount to marking a
// promptly freed backing store.
- DCHECK(!header.IsFree());
+ DCHECK(!header.IsFree<AccessMode::kAtomic>());
return header.TryMarkAtomic();
}
-template <HeapObjectHeader::AccessMode mode>
-void MarkingState::DynamicallyMarkAddress(ConstAddress address) {
- HeapObjectHeader& header =
- BasePage::FromPayload(address)->ObjectHeaderFromInnerAddress<mode>(
- const_cast<Address>(address));
- DCHECK(!header.IsInConstruction<mode>());
- if (MarkNoPush(header)) {
- marking_worklist_.Push(
- {reinterpret_cast<void*>(header.Payload()),
- GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex<mode>())
- .trace});
- }
-}
-
-void MarkingState::MarkAndPush(HeapObjectHeader& header) {
+void MarkingStateBase::MarkAndPush(HeapObjectHeader& header) {
MarkAndPush(
header,
{header.Payload(),
GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
}
-void MarkingState::RegisterWeakReferenceIfNeeded(const void* object,
- TraceDescriptor desc,
- WeakCallback weak_callback,
- const void* parameter) {
+void MarkingStateBase::PushMarked(HeapObjectHeader& header,
+ TraceDescriptor desc) {
+ DCHECK(header.IsMarked<AccessMode::kAtomic>());
+ DCHECK(!header.IsInConstruction<AccessMode::kAtomic>());
+ DCHECK_NOT_NULL(desc.callback);
+
+ marking_worklist_.Push(desc);
+}
+
+void MarkingStateBase::RegisterWeakReferenceIfNeeded(const void* object,
+ TraceDescriptor desc,
+ WeakCallback weak_callback,
+ const void* parameter) {
// Filter out already marked values. The write barrier for WeakMember
// ensures that any newly set value after this point is kept alive and does
// not require the callback.
if (HeapObjectHeader::FromPayload(desc.base_object_payload)
- .IsMarked<HeapObjectHeader::AccessMode::kAtomic>())
+ .IsMarked<AccessMode::kAtomic>())
return;
RegisterWeakCallback(weak_callback, parameter);
}
-void MarkingState::InvokeWeakRootsCallbackIfNeeded(const void* object,
- TraceDescriptor desc,
- WeakCallback weak_callback,
- const void* parameter) {
+void MarkingStateBase::RegisterWeakCallback(WeakCallback callback,
+ const void* object) {
+ DCHECK_NOT_NULL(callback);
+ weak_callback_worklist_.Push({callback, object});
+}
+
+void MarkingStateBase::RegisterWeakContainer(HeapObjectHeader& header) {
+ weak_containers_worklist_.Push<AccessMode::kAtomic>(&header);
+}
+
+void MarkingStateBase::ProcessWeakContainer(const void* object,
+ TraceDescriptor desc,
+ WeakCallback callback,
+ const void* data) {
+ DCHECK_NOT_NULL(object);
+
+ HeapObjectHeader& header =
+ HeapObjectHeader::FromPayload(const_cast<void*>(object));
+
+ if (header.IsInConstruction<AccessMode::kAtomic>()) {
+ not_fully_constructed_worklist_.Push<AccessMode::kAtomic>(&header);
+ return;
+ }
+
+ // Only mark the container initially. Its buckets will be processed after
+ // marking.
+ if (!MarkNoPush(header)) return;
+ RegisterWeakContainer(header);
+
+ // Register final weak processing of the backing store.
+ RegisterWeakCallback(callback, data);
+
+ // Weak containers might not require tracing. In such cases the callback in
+ // the TraceDescriptor will be nullptr. For ephemerons the callback will be
+ // non-nullptr so that the container is traced and the ephemeron pairs are
+ // processed.
+ if (desc.callback) PushMarked(header, desc);
+}
+
+void MarkingStateBase::ProcessEphemeron(const void* key,
+ TraceDescriptor value_desc) {
+ // Filter out already marked keys. The write barrier for WeakMember
+ // ensures that any newly set value after this point is kept alive and does
+ // not require the callback.
+ if (HeapObjectHeader::FromPayload(key).IsMarked<AccessMode::kAtomic>()) {
+ MarkAndPush(value_desc.base_object_payload, value_desc);
+ return;
+ }
+ discovered_ephemeron_pairs_worklist_.Push({key, value_desc});
+}
+
+void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
+ AccountMarkedBytes(
+ header.IsLargeObject<AccessMode::kAtomic>()
+ ? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
+ ->PayloadSize()
+ : header.GetSize<AccessMode::kAtomic>());
+}
+
+void MarkingStateBase::AccountMarkedBytes(size_t marked_bytes) {
+ marked_bytes_ += marked_bytes;
+}
+
+class MutatorMarkingState : public MarkingStateBase {
+ public:
+ MutatorMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists,
+ CompactionWorklists* compaction_worklists)
+ : MarkingStateBase(heap, marking_worklists, compaction_worklists) {}
+
+ inline bool MarkNoPush(HeapObjectHeader& header) {
+ return MutatorMarkingState::MarkingStateBase::MarkNoPush(header);
+ }
+
+ inline void PushMarkedWeakContainer(HeapObjectHeader&);
+
+ inline void DynamicallyMarkAddress(ConstAddress);
+
+ // Moves objects in not_fully_constructed_worklist_ to
+ // previously_not_fully_constructed_worklist_.
+ void FlushNotFullyConstructedObjects();
+
+ // Moves ephemeron pairs in discovered_ephemeron_pairs_worklist_ to
+ // ephemeron_pairs_for_processing_worklist_.
+ void FlushDiscoveredEphemeronPairs();
+
+ inline void InvokeWeakRootsCallbackIfNeeded(const void*, TraceDescriptor,
+ WeakCallback, const void*);
+
+ inline bool IsMarkedWeakContainer(HeapObjectHeader&);
+};
+
+void MutatorMarkingState::PushMarkedWeakContainer(HeapObjectHeader& header) {
+ DCHECK(weak_containers_worklist_.Contains(&header));
+ weak_containers_worklist_.Erase(&header);
+ PushMarked(
+ header,
+ {header.Payload(),
+ GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
+}
+
+void MutatorMarkingState::DynamicallyMarkAddress(ConstAddress address) {
+ HeapObjectHeader& header =
+ BasePage::FromPayload(address)->ObjectHeaderFromInnerAddress(
+ const_cast<Address>(address));
+ DCHECK(!header.IsInConstruction());
+ if (MarkNoPush(header)) {
+ marking_worklist_.Push(
+ {reinterpret_cast<void*>(header.Payload()),
+ GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
+ }
+}
+
+void MutatorMarkingState::InvokeWeakRootsCallbackIfNeeded(
+ const void* object, TraceDescriptor desc, WeakCallback weak_callback,
+ const void* parameter) {
// Since weak roots are only traced at the end of marking, we can execute
// the callback instead of registering it.
#if DEBUG
@@ -178,17 +353,65 @@ void MarkingState::InvokeWeakRootsCallbackIfNeeded(const void* object,
weak_callback(LivenessBrokerFactory::Create(), parameter);
}
-void MarkingState::RegisterWeakCallback(WeakCallback callback,
- const void* object) {
- weak_callback_worklist_.Push({callback, object});
+bool MutatorMarkingState::IsMarkedWeakContainer(HeapObjectHeader& header) {
+ const bool result = weak_containers_worklist_.Contains(&header);
+ DCHECK_IMPLIES(result, header.IsMarked());
+ return result;
}
-void MarkingState::AccountMarkedBytes(const HeapObjectHeader& header) {
- marked_bytes_ +=
- header.IsLargeObject()
- ? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
- ->PayloadSize()
- : header.GetSize();
+class ConcurrentMarkingState : public MarkingStateBase {
+ public:
+ ConcurrentMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists,
+ CompactionWorklists* compaction_worklists)
+ : MarkingStateBase(heap, marking_worklists, compaction_worklists) {}
+
+ ~ConcurrentMarkingState() { DCHECK_EQ(last_marked_bytes_, marked_bytes_); }
+
+ size_t RecentlyMarkedBytes() {
+ return marked_bytes_ - std::exchange(last_marked_bytes_, marked_bytes_);
+ }
+
+ inline void AccountDeferredMarkedBytes(size_t deferred_bytes) {
+ // AccountDeferredMarkedBytes is called from Trace methods, which are always
+ // called after AccountMarkedBytes, so there should be no underflow here.
+ DCHECK_LE(deferred_bytes, marked_bytes_);
+ marked_bytes_ -= deferred_bytes;
+ }
+
+ private:
+ size_t last_marked_bytes_ = 0;
+};
+
+template <size_t deadline_check_interval, typename WorklistLocal,
+ typename Callback, typename Predicate>
+bool DrainWorklistWithPredicate(Predicate should_yield,
+ WorklistLocal& worklist_local,
+ Callback callback) {
+ if (worklist_local.IsLocalAndGlobalEmpty()) return true;
+ // For concurrent markers, should_yield also reports marked bytes.
+ if (should_yield()) return false;
+ size_t processed_callback_count = deadline_check_interval;
+ typename WorklistLocal::ItemType item;
+ while (worklist_local.Pop(&item)) {
+ callback(item);
+ if (--processed_callback_count == 0) {
+ if (should_yield()) {
+ return false;
+ }
+ processed_callback_count = deadline_check_interval;
+ }
+ }
+ return true;
+}
+
+template <AccessMode mode>
+void DynamicallyTraceMarkedObject(Visitor& visitor,
+ const HeapObjectHeader& header) {
+ DCHECK(!header.IsInConstruction<mode>());
+ DCHECK(header.IsMarked<mode>());
+ const GCInfo& gcinfo =
+ GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex<mode>());
+ gcinfo.trace(&visitor, header.Payload());
}
} // namespace internal
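ProcessEphemeron above encodes the ephemeron invariant: a value may only be kept alive by a live key, so pairs whose key is not yet marked are parked and retried, and marking iterates them to a fixed point. A toy version of that loop, without worklists (illustrative only):

#include <utility>
#include <vector>

struct Obj {
  bool marked = false;
};

struct EphemeronPair {
  Obj* key;
  Obj* value;
};

// Rescans pending pairs until a full pass makes no progress. Pairs that
// remain unresolved have dead keys, so their values stay unmarked.
void ProcessEphemerons(std::vector<EphemeronPair>& pending) {
  bool progress = true;
  while (progress) {
    progress = false;
    std::vector<EphemeronPair> still_pending;
    for (const EphemeronPair& pair : pending) {
      if (pair.key->marked) {
        if (!pair.value->marked) {
          pair.value->marked = true;  // real GC: MarkAndPush(value_desc)
          progress = true;
        }
      } else {
        still_pending.push_back(pair);  // key may get marked later
      }
    }
    pending = std::move(still_pending);
  }
}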
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.cc b/deps/v8/src/heap/cppgc/marking-verifier.cc
index 4238709ae1..009228a8ff 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.cc
+++ b/deps/v8/src/heap/cppgc/marking-verifier.cc
@@ -6,62 +6,63 @@
#include "src/base/logging.h"
#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/marking-visitor.h"
namespace cppgc {
namespace internal {
-MarkingVerifier::MarkingVerifier(HeapBase& heap,
- Heap::Config::StackState stack_state)
- : cppgc::Visitor(VisitorFactory::CreateKey()),
- ConservativeTracingVisitor(heap, *heap.page_backend(), *this) {
- Traverse(&heap.raw_heap());
+MarkingVerifierBase::MarkingVerifierBase(
+ HeapBase& heap, std::unique_ptr<cppgc::Visitor> visitor)
+ : ConservativeTracingVisitor(heap, *heap.page_backend(), *visitor.get()),
+ visitor_(std::move(visitor)) {}
+
+void MarkingVerifierBase::Run(Heap::Config::StackState stack_state) {
+ Traverse(&heap_.raw_heap());
if (stack_state == Heap::Config::StackState::kMayContainHeapPointers) {
in_construction_objects_ = &in_construction_objects_stack_;
- heap.stack()->IteratePointers(this);
+ heap_.stack()->IteratePointers(this);
CHECK_EQ(in_construction_objects_stack_, in_construction_objects_heap_);
}
}
-void MarkingVerifier::Visit(const void* object, TraceDescriptor desc) {
- VerifyChild(desc.base_object_payload);
-}
-
-void MarkingVerifier::VisitWeak(const void* object, TraceDescriptor desc,
- WeakCallback, const void*) {
- // Weak objects should have been cleared at this point. As a consequence, all
- // objects found through weak references have to point to live objects at this
- // point.
- VerifyChild(desc.base_object_payload);
-}
-
-void MarkingVerifier::VerifyChild(const void* base_object_payload) {
+void VerificationState::VerifyMarked(const void* base_object_payload) const {
const HeapObjectHeader& child_header =
HeapObjectHeader::FromPayload(base_object_payload);
- CHECK(child_header.IsMarked());
+ if (!child_header.IsMarked()) {
+ FATAL(
+ "MarkingVerifier: Encountered unmarked object.\n"
+ "#\n"
+ "# Hint:\n"
+ "# %s\n"
+ "# \\-> %s",
+ parent_->GetName().value, child_header.GetName().value);
+ }
}
-void MarkingVerifier::VisitConservatively(
+void MarkingVerifierBase::VisitInConstructionConservatively(
HeapObjectHeader& header, TraceConservativelyCallback callback) {
CHECK(header.IsMarked());
in_construction_objects_->insert(&header);
callback(this, header);
}
-void MarkingVerifier::VisitPointer(const void* address) {
+void MarkingVerifierBase::VisitPointer(const void* address) {
TraceConservativelyIfNeeded(address);
}
-bool MarkingVerifier::VisitHeapObjectHeader(HeapObjectHeader* header) {
+bool MarkingVerifierBase::VisitHeapObjectHeader(HeapObjectHeader* header) {
// Verify only non-free marked objects.
if (!header->IsMarked()) return true;
DCHECK(!header->IsFree());
+ SetCurrentParent(header);
+
if (!header->IsInConstruction()) {
- GlobalGCInfoTable::GCInfoFromIndex(header->GetGCInfoIndex())
- .trace(this, header->Payload());
+ header->Trace(visitor_.get());
} else {
// Dispatches to conservative tracing implementation.
TraceConservativelyIfNeeded(*header);
@@ -70,5 +71,50 @@ bool MarkingVerifier::VisitHeapObjectHeader(HeapObjectHeader* header) {
return true;
}
+namespace {
+
+class VerificationVisitor final : public cppgc::Visitor {
+ public:
+ explicit VerificationVisitor(VerificationState& state)
+ : cppgc::Visitor(VisitorFactory::CreateKey()), state_(state) {}
+
+ void Visit(const void*, TraceDescriptor desc) final {
+ state_.VerifyMarked(desc.base_object_payload);
+ }
+
+ void VisitWeak(const void*, TraceDescriptor desc, WeakCallback,
+ const void*) final {
+ // Weak objects should have been cleared at this point. As a consequence,
+ // all objects found through weak references have to point to live objects
+ // at this point.
+ state_.VerifyMarked(desc.base_object_payload);
+ }
+
+ void VisitWeakContainer(const void* object, TraceDescriptor,
+ TraceDescriptor weak_desc, WeakCallback,
+ const void*) {
+ if (!object) return;
+
+ // Contents of weak containers are found themselves through page iteration
+ // and are treated strongly, similar to how they are treated strongly when
+ // found through stack scanning. The verification here only makes sure that
+ // the container itself is properly marked.
+ state_.VerifyMarked(weak_desc.base_object_payload);
+ }
+
+ private:
+ VerificationState& state_;
+};
+
+} // namespace
+
+MarkingVerifier::MarkingVerifier(HeapBase& heap_base)
+ : MarkingVerifierBase(heap_base,
+ std::make_unique<VerificationVisitor>(state_)) {}
+
+void MarkingVerifier::SetCurrentParent(const HeapObjectHeader* parent) {
+ state_.SetCurrentParent(parent);
+}
+
} // namespace internal
} // namespace cppgc
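The rewritten verifier re-traces every marked object after marking finishes and aborts with a parent-to-child hint when it reaches an unmarked child, using the names supplied by NameProvider. The essence of that check, in plain C++ with illustrative types:

#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>

struct Node {
  std::string name;
  bool marked = false;
  std::vector<Node*> children;
};

// Any child reachable from a marked parent must itself be marked; otherwise
// the marking phase lost an edge and the heap snapshot is inconsistent.
void VerifyMarking(const std::vector<Node*>& heap) {
  for (const Node* parent : heap) {
    if (!parent->marked) continue;
    for (const Node* child : parent->children) {
      if (!child->marked) {
        std::fprintf(stderr,
                     "MarkingVerifier: unmarked object\n# %s\n# \\-> %s\n",
                     parent->name.c_str(), child->name.c_str());
        std::abort();
      }
    }
  }
}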
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.h b/deps/v8/src/heap/cppgc/marking-verifier.h
index 45661bd465..eeced68449 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.h
+++ b/deps/v8/src/heap/cppgc/marking-verifier.h
@@ -8,6 +8,7 @@
#include <unordered_set>
#include "src/heap/base/stack.h"
+#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/visitor.h"
@@ -15,34 +16,60 @@
namespace cppgc {
namespace internal {
-class V8_EXPORT_PRIVATE MarkingVerifier final
- : private HeapVisitor<MarkingVerifier>,
- public cppgc::Visitor,
+class VerificationState {
+ public:
+ void VerifyMarked(const void*) const;
+ void SetCurrentParent(const HeapObjectHeader* header) { parent_ = header; }
+
+ private:
+ const HeapObjectHeader* parent_ = nullptr;
+};
+
+class V8_EXPORT_PRIVATE MarkingVerifierBase
+ : private HeapVisitor<MarkingVerifierBase>,
public ConservativeTracingVisitor,
public heap::base::StackVisitor {
- friend class HeapVisitor<MarkingVerifier>;
+ friend class HeapVisitor<MarkingVerifierBase>;
public:
- explicit MarkingVerifier(HeapBase&, Heap::Config::StackState);
+ ~MarkingVerifierBase() override = default;
- void Visit(const void*, TraceDescriptor) final;
- void VisitWeak(const void*, TraceDescriptor, WeakCallback, const void*) final;
+ MarkingVerifierBase(const MarkingVerifierBase&) = delete;
+ MarkingVerifierBase& operator=(const MarkingVerifierBase&) = delete;
- private:
- void VerifyChild(const void*);
+ void Run(Heap::Config::StackState);
+
+ protected:
+ MarkingVerifierBase(HeapBase&, std::unique_ptr<cppgc::Visitor>);
+
+ virtual void SetCurrentParent(const HeapObjectHeader*) = 0;
- void VisitConservatively(HeapObjectHeader&,
- TraceConservativelyCallback) final;
+ private:
+ void VisitInConstructionConservatively(HeapObjectHeader&,
+ TraceConservativelyCallback) final;
void VisitPointer(const void*) final;
bool VisitHeapObjectHeader(HeapObjectHeader*);
+ std::unique_ptr<cppgc::Visitor> visitor_;
+
std::unordered_set<const HeapObjectHeader*> in_construction_objects_heap_;
std::unordered_set<const HeapObjectHeader*> in_construction_objects_stack_;
std::unordered_set<const HeapObjectHeader*>* in_construction_objects_ =
&in_construction_objects_heap_;
};
+class V8_EXPORT_PRIVATE MarkingVerifier final : public MarkingVerifierBase {
+ public:
+ explicit MarkingVerifier(HeapBase&);
+ ~MarkingVerifier() final = default;
+
+ void SetCurrentParent(const HeapObjectHeader*) final;
+
+ private:
+ VerificationState state_;
+};
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.cc b/deps/v8/src/heap/cppgc/marking-visitor.cc
index b08379eb7e..896b12fc6c 100644
--- a/deps/v8/src/heap/cppgc/marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc/marking-visitor.cc
@@ -10,51 +10,100 @@
namespace cppgc {
namespace internal {
-MarkingVisitor::MarkingVisitor(HeapBase& heap, MarkingState& marking_state)
+MarkingVisitorBase::MarkingVisitorBase(HeapBase& heap,
+ MarkingStateBase& marking_state)
: marking_state_(marking_state) {}
-void MarkingVisitor::Visit(const void* object, TraceDescriptor desc) {
+void MarkingVisitorBase::Visit(const void* object, TraceDescriptor desc) {
marking_state_.MarkAndPush(object, desc);
}
-void MarkingVisitor::VisitWeak(const void* object, TraceDescriptor desc,
- WeakCallback weak_callback,
- const void* weak_member) {
+void MarkingVisitorBase::VisitWeak(const void* object, TraceDescriptor desc,
+ WeakCallback weak_callback,
+ const void* weak_member) {
marking_state_.RegisterWeakReferenceIfNeeded(object, desc, weak_callback,
weak_member);
}
-void MarkingVisitor::VisitRoot(const void* object, TraceDescriptor desc) {
- Visit(object, desc);
+void MarkingVisitorBase::VisitEphemeron(const void* key,
+ TraceDescriptor value_desc) {
+ marking_state_.ProcessEphemeron(key, value_desc);
}
-void MarkingVisitor::VisitWeakRoot(const void* object, TraceDescriptor desc,
- WeakCallback weak_callback,
- const void* weak_root) {
- marking_state_.InvokeWeakRootsCallbackIfNeeded(object, desc, weak_callback,
- weak_root);
+void MarkingVisitorBase::VisitWeakContainer(const void* object,
+ TraceDescriptor strong_desc,
+ TraceDescriptor weak_desc,
+ WeakCallback callback,
+ const void* data) {
+ marking_state_.ProcessWeakContainer(object, weak_desc, callback, data);
}
-void MarkingVisitor::RegisterWeakCallback(WeakCallback callback,
- const void* object) {
+void MarkingVisitorBase::RegisterWeakCallback(WeakCallback callback,
+ const void* object) {
marking_state_.RegisterWeakCallback(callback, object);
}
+void MarkingVisitorBase::HandleMovableReference(const void** slot) {
+ marking_state_.RegisterMovableReference(slot);
+}
+
ConservativeMarkingVisitor::ConservativeMarkingVisitor(
- HeapBase& heap, MarkingState& marking_state, cppgc::Visitor& visitor)
+ HeapBase& heap, MutatorMarkingState& marking_state, cppgc::Visitor& visitor)
: ConservativeTracingVisitor(heap, *heap.page_backend(), visitor),
marking_state_(marking_state) {}
-void ConservativeMarkingVisitor::VisitConservatively(
+void ConservativeMarkingVisitor::VisitFullyConstructedConservatively(
+ HeapObjectHeader& header) {
+ if (header.IsMarked()) {
+ if (marking_state_.IsMarkedWeakContainer(header))
+ marking_state_.PushMarkedWeakContainer(header);
+ return;
+ }
+ ConservativeTracingVisitor::VisitFullyConstructedConservatively(header);
+}
+
+void ConservativeMarkingVisitor::VisitInConstructionConservatively(
HeapObjectHeader& header, TraceConservativelyCallback callback) {
+ DCHECK(!marking_state_.IsMarkedWeakContainer(header));
marking_state_.MarkNoPush(header);
- callback(this, header);
marking_state_.AccountMarkedBytes(header);
+ callback(this, header);
}
+MutatorMarkingVisitor::MutatorMarkingVisitor(HeapBase& heap,
+ MutatorMarkingState& marking_state)
+ : MarkingVisitorBase(heap, marking_state) {}
+
+void MutatorMarkingVisitor::VisitRoot(const void* object, TraceDescriptor desc,
+ const SourceLocation&) {
+ Visit(object, desc);
+}
+
+void MutatorMarkingVisitor::VisitWeakRoot(const void* object,
+ TraceDescriptor desc,
+ WeakCallback weak_callback,
+ const void* weak_root,
+ const SourceLocation&) {
+ static_cast<MutatorMarkingState&>(marking_state_)
+ .InvokeWeakRootsCallbackIfNeeded(object, desc, weak_callback, weak_root);
+}
+
+ConcurrentMarkingVisitor::ConcurrentMarkingVisitor(
+ HeapBase& heap, ConcurrentMarkingState& marking_state)
+ : MarkingVisitorBase(heap, marking_state) {}
+
void ConservativeMarkingVisitor::VisitPointer(const void* address) {
TraceConservativelyIfNeeded(address);
}
+bool ConcurrentMarkingVisitor::DeferTraceToMutatorThreadIfConcurrent(
+ const void* parameter, TraceCallback callback, size_t deferred_size) {
+ marking_state_.concurrent_marking_bailout_worklist().Push(
+ {parameter, callback, deferred_size});
+ static_cast<ConcurrentMarkingState&>(marking_state_)
+ .AccountDeferredMarkedBytes(deferred_size);
+ return true;
+}
+
} // namespace internal
} // namespace cppgc
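DeferTraceToMutatorThreadIfConcurrent implements the bailout protocol: a concurrent marker that cannot safely trace an object pushes the trace callback onto a shared worklist and subtracts the bytes it had optimistically accounted; the mutator later re-accounts and traces them. Both sides in a compressed sketch (synchronization of the shared queue is elided; types are illustrative):

#include <cstddef>
#include <deque>

struct Visitor;  // tracing context the callbacks need
using TraceCallback = void (*)(Visitor*, const void*);

struct BailoutItem {
  const void* parameter;
  TraceCallback callback;
  size_t size;
};

struct ConcurrentSide {
  std::deque<BailoutItem>* bailout;  // shared with the mutator
  size_t marked_bytes = 0;

  // Give up on this object and hand it to the mutator thread.
  bool DeferToMutator(const void* parameter, TraceCallback callback,
                      size_t size) {
    bailout->push_back({parameter, callback, size});
    marked_bytes -= size;  // undo the optimistic byte accounting
    return true;
  }
};

// Mutator side: drain bailed-out objects, re-accounting their size, as the
// marker.cc hunk above does with a tighter deadline-check interval.
void DrainBailout(std::deque<BailoutItem>& bailout, Visitor* visitor,
                  size_t& marked_bytes) {
  while (!bailout.empty()) {
    BailoutItem item = bailout.front();
    bailout.pop_front();
    marked_bytes += item.size;
    item.callback(visitor, item.parameter);
  }
}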
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.h b/deps/v8/src/heap/cppgc/marking-visitor.h
index 408fa2514c..91cca87dd9 100644
--- a/deps/v8/src/heap/cppgc/marking-visitor.h
+++ b/deps/v8/src/heap/cppgc/marking-visitor.h
@@ -16,36 +16,71 @@ namespace internal {
class HeapBase;
class HeapObjectHeader;
class Marker;
-class MarkingState;
+class MarkingStateBase;
+class MutatorMarkingState;
+class ConcurrentMarkingState;
-class V8_EXPORT_PRIVATE MarkingVisitor : public VisitorBase {
+class V8_EXPORT_PRIVATE MarkingVisitorBase : public VisitorBase {
public:
- MarkingVisitor(HeapBase&, MarkingState&);
- ~MarkingVisitor() override = default;
+ MarkingVisitorBase(HeapBase&, MarkingStateBase&);
+ ~MarkingVisitorBase() override = default;
protected:
void Visit(const void*, TraceDescriptor) final;
void VisitWeak(const void*, TraceDescriptor, WeakCallback, const void*) final;
- void VisitRoot(const void*, TraceDescriptor) final;
- void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
- const void*) final;
+ void VisitEphemeron(const void*, TraceDescriptor) final;
+ void VisitWeakContainer(const void* self, TraceDescriptor strong_desc,
+ TraceDescriptor weak_desc, WeakCallback callback,
+ const void* data) final;
void RegisterWeakCallback(WeakCallback, const void*) final;
+ void HandleMovableReference(const void**) final;
- MarkingState& marking_state_;
+ MarkingStateBase& marking_state_;
+};
+
+class V8_EXPORT_PRIVATE MutatorMarkingVisitor : public MarkingVisitorBase {
+ public:
+ MutatorMarkingVisitor(HeapBase&, MutatorMarkingState&);
+ ~MutatorMarkingVisitor() override = default;
+
+ protected:
+ void VisitRoot(const void*, TraceDescriptor, const SourceLocation&) final;
+ void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback, const void*,
+ const SourceLocation&) final;
+};
+
+class V8_EXPORT_PRIVATE ConcurrentMarkingVisitor final
+ : public MarkingVisitorBase {
+ public:
+ ConcurrentMarkingVisitor(HeapBase&, ConcurrentMarkingState&);
+ ~ConcurrentMarkingVisitor() override = default;
+
+ protected:
+ void VisitRoot(const void*, TraceDescriptor, const SourceLocation&) final {
+ UNREACHABLE();
+ }
+ void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback, const void*,
+ const SourceLocation&) final {
+ UNREACHABLE();
+ }
+
+ bool DeferTraceToMutatorThreadIfConcurrent(const void*, TraceCallback,
+ size_t) final;
};
class ConservativeMarkingVisitor : public ConservativeTracingVisitor,
public heap::base::StackVisitor {
public:
- ConservativeMarkingVisitor(HeapBase&, MarkingState&, cppgc::Visitor&);
+ ConservativeMarkingVisitor(HeapBase&, MutatorMarkingState&, cppgc::Visitor&);
~ConservativeMarkingVisitor() override = default;
private:
- void VisitConservatively(HeapObjectHeader&,
- TraceConservativelyCallback) final;
+ void VisitFullyConstructedConservatively(HeapObjectHeader&) final;
+ void VisitInConstructionConservatively(HeapObjectHeader&,
+ TraceConservativelyCallback) final;
void VisitPointer(const void*) final;
- MarkingState& marking_state_;
+ MutatorMarkingState& marking_state_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/marking-worklists.cc b/deps/v8/src/heap/cppgc/marking-worklists.cc
index 15d78fd4cf..993b5e069d 100644
--- a/deps/v8/src/heap/cppgc/marking-worklists.cc
+++ b/deps/v8/src/heap/cppgc/marking-worklists.cc
@@ -16,6 +16,13 @@ void MarkingWorklists::ClearForTesting() {
previously_not_fully_constructed_worklist_.Clear();
write_barrier_worklist_.Clear();
weak_callback_worklist_.Clear();
+ concurrent_marking_bailout_worklist_.Clear();
+ discovered_ephemeron_pairs_worklist_.Clear();
+ ephemeron_pairs_for_processing_worklist_.Clear();
+}
+
+MarkingWorklists::ExternalMarkingWorklist::~ExternalMarkingWorklist() {
+ DCHECK(IsEmpty());
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/marking-worklists.h b/deps/v8/src/heap/cppgc/marking-worklists.h
index 96d11eef53..c769a14fb7 100644
--- a/deps/v8/src/heap/cppgc/marking-worklists.h
+++ b/deps/v8/src/heap/cppgc/marking-worklists.h
@@ -5,40 +5,93 @@
#ifndef V8_HEAP_CPPGC_MARKING_WORKLISTS_H_
#define V8_HEAP_CPPGC_MARKING_WORKLISTS_H_
+#include <unordered_set>
+
#include "include/cppgc/visitor.h"
+#include "src/base/platform/mutex.h"
#include "src/heap/base/worklist.h"
+#include "src/heap/cppgc/heap-object-header.h"
namespace cppgc {
namespace internal {
-class HeapObjectHeader;
-
class MarkingWorklists {
+ private:
+ class V8_EXPORT_PRIVATE ExternalMarkingWorklist {
+ public:
+ template <AccessMode = AccessMode::kNonAtomic>
+ void Push(HeapObjectHeader*);
+ template <AccessMode = AccessMode::kNonAtomic>
+ void Erase(HeapObjectHeader*);
+ template <AccessMode = AccessMode::kNonAtomic>
+ bool Contains(HeapObjectHeader*);
+ template <AccessMode = AccessMode::kNonAtomic>
+ std::unordered_set<HeapObjectHeader*> Extract();
+ template <AccessMode = AccessMode::kNonAtomic>
+ void Clear();
+ template <AccessMode = AccessMode::kNonAtomic>
+ bool IsEmpty();
+
+ ~ExternalMarkingWorklist();
+
+ private:
+ template <AccessMode>
+ struct ConditionalMutexGuard;
+
+ void* operator new(size_t) = delete;
+ void* operator new[](size_t) = delete;
+ void operator delete(void*) = delete;
+ void operator delete[](void*) = delete;
+
+ v8::base::Mutex lock_;
+ std::unordered_set<HeapObjectHeader*> objects_;
+ };
+
public:
static constexpr int kMutatorThreadId = 0;
using MarkingItem = cppgc::TraceDescriptor;
+
struct WeakCallbackItem {
cppgc::WeakCallback callback;
const void* parameter;
};
+ struct ConcurrentMarkingBailoutItem {
+ const void* parameter;
+ TraceCallback callback;
+ size_t bailedout_size;
+ };
+
+ struct EphemeronPairItem {
+ const void* key;
+ TraceDescriptor value_desc;
+ };
+
// A segment size of 512 entries is necessary to avoid throughput regressions.
// Since the worklist is currently a temporary object, this is not a problem.
using MarkingWorklist =
heap::base::Worklist<MarkingItem, 512 /* local entries */>;
- using NotFullyConstructedWorklist =
+ using NotFullyConstructedWorklist = ExternalMarkingWorklist;
+ using PreviouslyNotFullyConstructedWorklist =
heap::base::Worklist<HeapObjectHeader*, 16 /* local entries */>;
using WeakCallbackWorklist =
heap::base::Worklist<WeakCallbackItem, 64 /* local entries */>;
using WriteBarrierWorklist =
heap::base::Worklist<HeapObjectHeader*, 64 /* local entries */>;
+ using ConcurrentMarkingBailoutWorklist =
+ heap::base::Worklist<ConcurrentMarkingBailoutItem,
+ 64 /* local entries */>;
+ using EphemeronPairsWorklist =
+ heap::base::Worklist<EphemeronPairItem, 64 /* local entries */>;
+ using WeakContainersWorklist = ExternalMarkingWorklist;
MarkingWorklist* marking_worklist() { return &marking_worklist_; }
NotFullyConstructedWorklist* not_fully_constructed_worklist() {
return &not_fully_constructed_worklist_;
}
- NotFullyConstructedWorklist* previously_not_fully_constructed_worklist() {
+ PreviouslyNotFullyConstructedWorklist*
+ previously_not_fully_constructed_worklist() {
return &previously_not_fully_constructed_worklist_;
}
WriteBarrierWorklist* write_barrier_worklist() {
@@ -47,17 +100,93 @@ class MarkingWorklists {
WeakCallbackWorklist* weak_callback_worklist() {
return &weak_callback_worklist_;
}
+ ConcurrentMarkingBailoutWorklist* concurrent_marking_bailout_worklist() {
+ return &concurrent_marking_bailout_worklist_;
+ }
+ EphemeronPairsWorklist* discovered_ephemeron_pairs_worklist() {
+ return &discovered_ephemeron_pairs_worklist_;
+ }
+ EphemeronPairsWorklist* ephemeron_pairs_for_processing_worklist() {
+ return &ephemeron_pairs_for_processing_worklist_;
+ }
+ WeakContainersWorklist* weak_containers_worklist() {
+ return &weak_containers_worklist_;
+ }
void ClearForTesting();
private:
MarkingWorklist marking_worklist_;
NotFullyConstructedWorklist not_fully_constructed_worklist_;
- NotFullyConstructedWorklist previously_not_fully_constructed_worklist_;
+ PreviouslyNotFullyConstructedWorklist
+ previously_not_fully_constructed_worklist_;
WriteBarrierWorklist write_barrier_worklist_;
WeakCallbackWorklist weak_callback_worklist_;
+ ConcurrentMarkingBailoutWorklist concurrent_marking_bailout_worklist_;
+ EphemeronPairsWorklist discovered_ephemeron_pairs_worklist_;
+ EphemeronPairsWorklist ephemeron_pairs_for_processing_worklist_;
+ WeakContainersWorklist weak_containers_worklist_;
+};
+
+template <>
+struct MarkingWorklists::ExternalMarkingWorklist::ConditionalMutexGuard<
+ AccessMode::kNonAtomic> {
+ explicit ConditionalMutexGuard(v8::base::Mutex*) {}
};
+template <>
+struct MarkingWorklists::ExternalMarkingWorklist::ConditionalMutexGuard<
+ AccessMode::kAtomic> {
+ explicit ConditionalMutexGuard(v8::base::Mutex* lock) : guard_(lock) {}
+
+ private:
+ v8::base::MutexGuard guard_;
+};
+
+template <AccessMode mode>
+void MarkingWorklists::ExternalMarkingWorklist::Push(HeapObjectHeader* object) {
+ DCHECK_NOT_NULL(object);
+ ConditionalMutexGuard<mode> guard(&lock_);
+ objects_.insert(object);
+}
+
+template <AccessMode mode>
+void MarkingWorklists::ExternalMarkingWorklist::Erase(
+ HeapObjectHeader* object) {
+ DCHECK_NOT_NULL(object);
+ ConditionalMutexGuard<mode> guard(&lock_);
+ objects_.erase(object);
+}
+
+template <AccessMode mode>
+bool MarkingWorklists::ExternalMarkingWorklist::Contains(
+ HeapObjectHeader* object) {
+ ConditionalMutexGuard<mode> guard(&lock_);
+ return objects_.find(object) != objects_.end();
+}
+
+template <AccessMode mode>
+std::unordered_set<HeapObjectHeader*>
+MarkingWorklists::ExternalMarkingWorklist::Extract() {
+ ConditionalMutexGuard<mode> guard(&lock_);
+ std::unordered_set<HeapObjectHeader*> extracted;
+ std::swap(extracted, objects_);
+ DCHECK(objects_.empty());
+ return extracted;
+}
+
+template <AccessMode mode>
+void MarkingWorklists::ExternalMarkingWorklist::Clear() {
+ ConditionalMutexGuard<mode> guard(&lock_);
+ objects_.clear();
+}
+
+template <AccessMode mode>
+bool MarkingWorklists::ExternalMarkingWorklist::IsEmpty() {
+ ConditionalMutexGuard<mode> guard(&lock_);
+ return objects_.empty();
+}
+
} // namespace internal
} // namespace cppgc
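ExternalMarkingWorklist's ConditionalMutexGuard makes locking a compile-time choice: the kNonAtomic specialization is an empty object, while the kAtomic one holds a real scoped lock. The same trick with only the standard library:

#include <mutex>

enum class AccessMode { kNonAtomic, kAtomic };

template <AccessMode>
struct ConditionalMutexGuard;

template <>
struct ConditionalMutexGuard<AccessMode::kNonAtomic> {
  explicit ConditionalMutexGuard(std::mutex*) {}  // no-op on the fast path
};

template <>
struct ConditionalMutexGuard<AccessMode::kAtomic> {
  explicit ConditionalMutexGuard(std::mutex* mutex) : guard_(*mutex) {}

 private:
  std::lock_guard<std::mutex> guard_;  // real lock for concurrent access
};

template <AccessMode mode>
void TouchShared(std::mutex& mutex, int& shared_value) {
  ConditionalMutexGuard<mode> guard(&mutex);  // zero cost when kNonAtomic
  ++shared_value;
}

Callers that know marking is single-threaded instantiate TouchShared<AccessMode::kNonAtomic> and pay nothing; concurrent callers pick kAtomic and get a proper mutex guard.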
diff --git a/deps/v8/src/heap/cppgc/name-trait.cc b/deps/v8/src/heap/cppgc/name-trait.cc
new file mode 100644
index 0000000000..d42f5229b8
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/name-trait.cc
@@ -0,0 +1,41 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/name-trait.h"
+
+#include <stdio.h>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace cppgc {
+
+// static
+constexpr const char NameProvider::kHiddenName[];
+
+// static
+constexpr const char NameProvider::kNoNameDeducible[];
+
+namespace internal {
+
+// static
+HeapObjectName NameTraitBase::GetNameFromTypeSignature(const char* signature) {
+ // Parsing string of structure:
+ // static HeapObjectName NameTrait<int>::GetNameFor(...) [T = int]
+ if (!signature) return {NameProvider::kNoNameDeducible, true};
+
+ const std::string raw(signature);
+ const auto start_pos = raw.rfind("T = ") + 4;
+ DCHECK_NE(std::string::npos, start_pos);
+ const auto len = raw.length() - start_pos - 1;
+ const std::string name = raw.substr(start_pos, len).c_str();
+ char* name_buffer = new char[name.length() + 1];
+ int written = snprintf(name_buffer, name.length() + 1, "%s", name.c_str());
+ DCHECK_EQ(static_cast<size_t>(written), name.length());
+ USE(written);
+ return {name_buffer, false};
+}
+
+} // namespace internal
+} // namespace cppgc
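GetNameFromTypeSignature extracts the template argument from a compiler-generated signature such as `static HeapObjectName NameTrait<int>::GetNameFor(...) [T = int]`. The same extraction using std::string throughout (without the intentionally long-lived heap buffer the real code returns):

#include <string>

// Returns "int" for "... [T = int]"; falls back when the compiler provides
// no usable signature, as the real trait does with kNoNameDeducible.
std::string NameFromTypeSignature(const char* signature) {
  if (!signature) return "<no name deducible>";
  const std::string raw(signature);
  const size_t pos = raw.rfind("T = ");
  if (pos == std::string::npos) return "<no name deducible>";
  const size_t name_begin = pos + 4;                 // skip over "T = "
  const size_t len = raw.length() - name_begin - 1;  // drop trailing ']'
  return raw.substr(name_begin, len);
}

For example, NameFromTypeSignature("static HeapObjectName GetNameFor() [T = MyClass]") yields "MyClass".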
diff --git a/deps/v8/src/heap/cppgc/object-allocator.h b/deps/v8/src/heap/cppgc/object-allocator.h
index 85a8d29db2..5c857d2478 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.h
+++ b/deps/v8/src/heap/cppgc/object-allocator.h
@@ -118,12 +118,20 @@ void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace* space,
}
void* raw = current_lab.Allocate(size);
- SET_MEMORY_ACCESIBLE(raw, size);
+#if !defined(V8_USE_MEMORY_SANITIZER) && !defined(V8_USE_ADDRESS_SANITIZER) && \
+ DEBUG
+ // For debug builds, unzap only the payload.
+ SET_MEMORY_ACCESSIBLE(static_cast<char*>(raw) + sizeof(HeapObjectHeader),
+ size - sizeof(HeapObjectHeader));
+#else
+ SET_MEMORY_ACCESSIBLE(raw, size);
+#endif
auto* header = new (raw) HeapObjectHeader(size, gcinfo);
+ // The marker needs to find the object start concurrently.
NormalPage::From(BasePage::FromPayload(header))
->object_start_bitmap()
- .SetBit(reinterpret_cast<ConstAddress>(header));
+ .SetBit<AccessMode::kAtomic>(reinterpret_cast<ConstAddress>(header));
return header->Payload();
}
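AllocateObjectOnSpace bump-allocates from the linear allocation buffer, placement-news the header at the front of the block, and returns the payload behind it; the hunk above additionally zaps only the payload in debug builds and sets the object-start bit atomically for concurrent markers. The allocation shape alone, with hypothetical types:

#include <cstddef>
#include <cstdint>
#include <new>

struct HeapObjectHeader {
  uint32_t size;     // full block size, header included
  uint32_t gc_info;  // index describing how to trace the payload
};

struct LinearAllocationBuffer {
  char* cursor;
  char* limit;

  // Bump allocation: nullptr means the LAB must be refilled by the caller.
  void* Allocate(size_t size) {
    if (static_cast<size_t>(limit - cursor) < size) return nullptr;
    char* result = cursor;
    cursor += size;
    return result;
  }
};

void* AllocateObject(LinearAllocationBuffer& lab, size_t payload_size,
                     uint32_t gc_info) {
  const size_t size = sizeof(HeapObjectHeader) + payload_size;
  void* raw = lab.Allocate(size);
  if (!raw) return nullptr;  // real code refills the LAB, maybe triggers GC
  auto* header =
      new (raw) HeapObjectHeader{static_cast<uint32_t>(size), gc_info};
  return header + 1;  // payload starts directly behind the header
}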
diff --git a/deps/v8/src/heap/cppgc/object-start-bitmap.h b/deps/v8/src/heap/cppgc/object-start-bitmap.h
index 2af6493939..38ba5ca886 100644
--- a/deps/v8/src/heap/cppgc/object-start-bitmap.h
+++ b/deps/v8/src/heap/cppgc/object-start-bitmap.h
@@ -44,19 +44,15 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
// Finds an object header based on a
// address_maybe_pointing_to_the_middle_of_object. Will search for an object
// start in decreasing address order.
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
inline HeapObjectHeader* FindHeader(
ConstAddress address_maybe_pointing_to_the_middle_of_object) const;
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
inline void SetBit(ConstAddress);
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
inline void ClearBit(ConstAddress);
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
inline bool CheckBit(ConstAddress) const;
// Iterates all object starts recorded in the bitmap.
@@ -71,11 +67,9 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
inline void Clear();
private:
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
inline void store(size_t cell_index, uint8_t value);
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
inline uint8_t load(size_t cell_index) const;
static constexpr size_t kBitsPerCell = sizeof(uint8_t) * CHAR_BIT;
@@ -88,7 +82,7 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
inline void ObjectStartIndexAndBit(ConstAddress, size_t*, size_t*) const;
- Address offset_;
+ const Address offset_;
// The bitmap contains a bit for every kGranularity-aligned address on
// a NormalPage, i.e., for a page of size kBlinkPageSize.
std::array<uint8_t, kReservedForBitmap> object_start_bit_map_;
@@ -98,7 +92,7 @@ ObjectStartBitmap::ObjectStartBitmap(Address offset) : offset_(offset) {
Clear();
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
HeapObjectHeader* ObjectStartBitmap::FindHeader(
ConstAddress address_maybe_pointing_to_the_middle_of_object) const {
DCHECK_LE(offset_, address_maybe_pointing_to_the_middle_of_object);
@@ -120,7 +114,7 @@ HeapObjectHeader* ObjectStartBitmap::FindHeader(
return reinterpret_cast<HeapObjectHeader*>(object_offset + offset_);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
void ObjectStartBitmap::SetBit(ConstAddress header_address) {
size_t cell_index, object_bit;
ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
@@ -129,7 +123,7 @@ void ObjectStartBitmap::SetBit(ConstAddress header_address) {
static_cast<uint8_t>(load(cell_index) | (1 << object_bit)));
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
void ObjectStartBitmap::ClearBit(ConstAddress header_address) {
size_t cell_index, object_bit;
ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
@@ -137,16 +131,16 @@ void ObjectStartBitmap::ClearBit(ConstAddress header_address) {
static_cast<uint8_t>(load(cell_index) & ~(1 << object_bit)));
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
bool ObjectStartBitmap::CheckBit(ConstAddress header_address) const {
size_t cell_index, object_bit;
ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
return load<mode>(cell_index) & (1 << object_bit);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
void ObjectStartBitmap::store(size_t cell_index, uint8_t value) {
- if (mode == HeapObjectHeader::AccessMode::kNonAtomic) {
+ if (mode == AccessMode::kNonAtomic) {
object_start_bit_map_[cell_index] = value;
return;
}
@@ -154,9 +148,9 @@ void ObjectStartBitmap::store(size_t cell_index, uint8_t value) {
->store(value, std::memory_order_release);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
uint8_t ObjectStartBitmap::load(size_t cell_index) const {
- if (mode == HeapObjectHeader::AccessMode::kNonAtomic) {
+ if (mode == AccessMode::kNonAtomic) {
return object_start_bit_map_[cell_index];
}
return v8::base::AsAtomicPtr(&object_start_bit_map_[cell_index])
@@ -204,15 +198,13 @@ class V8_EXPORT_PRIVATE PlatformAwareObjectStartBitmap
public:
explicit inline PlatformAwareObjectStartBitmap(Address offset);
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
inline void SetBit(ConstAddress);
- template <
- HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
+ template <AccessMode = AccessMode::kNonAtomic>
inline void ClearBit(ConstAddress);
private:
- template <HeapObjectHeader::AccessMode>
+ template <AccessMode>
static bool ShouldForceNonAtomic();
};
@@ -220,11 +212,11 @@ PlatformAwareObjectStartBitmap::PlatformAwareObjectStartBitmap(Address offset)
: ObjectStartBitmap(offset) {}
// static
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
bool PlatformAwareObjectStartBitmap::ShouldForceNonAtomic() {
#if defined(V8_TARGET_ARCH_ARM)
// Use non-atomic accesses on ARMv7 when marking is not active.
- if (mode == HeapObjectHeader::AccessMode::kAtomic) {
+ if (mode == AccessMode::kAtomic) {
if (V8_LIKELY(!ProcessHeap::IsAnyIncrementalOrConcurrentMarking()))
return true;
}
@@ -232,21 +224,19 @@ bool PlatformAwareObjectStartBitmap::ShouldForceNonAtomic() {
return false;
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
void PlatformAwareObjectStartBitmap::SetBit(ConstAddress header_address) {
if (ShouldForceNonAtomic<mode>()) {
- ObjectStartBitmap::SetBit<HeapObjectHeader::AccessMode::kNonAtomic>(
- header_address);
+ ObjectStartBitmap::SetBit<AccessMode::kNonAtomic>(header_address);
return;
}
ObjectStartBitmap::SetBit<mode>(header_address);
}
-template <HeapObjectHeader::AccessMode mode>
+template <AccessMode mode>
void PlatformAwareObjectStartBitmap::ClearBit(ConstAddress header_address) {
if (ShouldForceNonAtomic<mode>()) {
- ObjectStartBitmap::ClearBit<HeapObjectHeader::AccessMode::kNonAtomic>(
- header_address);
+ ObjectStartBitmap::ClearBit<AccessMode::kNonAtomic>(header_address);
return;
}
ObjectStartBitmap::ClearBit<mode>(header_address);
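FindHeader scans the object-start bitmap downwards from the queried address: the nearest set bit at or below it identifies the start of the enclosing object. A compact sketch with one bit per granule; the constants are illustrative, and the real bitmap scans byte-sized cells rather than individual bits, though the search order is the same:

#include <bitset>
#include <cstddef>

constexpr size_t kGranularity = 16;    // bytes covered per bitmap bit
constexpr size_t kPageSize = 1 << 17;  // address range covered by the bitmap

struct SimpleObjectStartBitmap {
  std::bitset<kPageSize / kGranularity> bits;
  char* page_begin;

  void SetBit(const char* object_start) {
    bits.set(static_cast<size_t>(object_start - page_begin) / kGranularity);
  }

  // Walks granules from the queried address towards the page start; the
  // first set bit marks the header of the object containing the address.
  char* FindHeader(const char* inner_address) const {
    size_t index =
        static_cast<size_t>(inner_address - page_begin) / kGranularity;
    while (!bits.test(index)) {
      if (index == 0) return nullptr;  // no object start recorded below
      --index;
    }
    return page_begin + index * kGranularity;
  }
};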
diff --git a/deps/v8/src/heap/cppgc/persistent-node.cc b/deps/v8/src/heap/cppgc/persistent-node.cc
index 9c5113f86a..b9585f4be7 100644
--- a/deps/v8/src/heap/cppgc/persistent-node.cc
+++ b/deps/v8/src/heap/cppgc/persistent-node.cc
@@ -8,6 +8,7 @@
#include <numeric>
#include "include/cppgc/persistent.h"
+#include "src/heap/cppgc/process-heap.h"
namespace cppgc {
namespace internal {
@@ -68,5 +69,13 @@ void PersistentRegion::Trace(Visitor* visitor) {
nodes_.end());
}
+PersistentRegionLock::PersistentRegionLock() {
+ g_process_mutex.Pointer()->Lock();
+}
+
+PersistentRegionLock::~PersistentRegionLock() {
+ g_process_mutex.Pointer()->Unlock();
+}
+
} // namespace internal
} // namespace cppgc
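persistent-node.cc gains PersistentRegionLock, a plain scope guard over the process-global mutex introduced below in process-heap.h. A short usage sketch (the function body is hypothetical):

    // The mutex is held for the whole scope and released on every exit path.
    void MutateCrossThreadRoots() {
      cppgc::internal::PersistentRegionLock guard;  // locks g_process_mutex
      // ... safely touch cross-thread persistent nodes here ...
    }  // ~PersistentRegionLock() unlocks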
diff --git a/deps/v8/src/heap/cppgc/pointer-policies.cc b/deps/v8/src/heap/cppgc/pointer-policies.cc
index 0c45a7ae54..4fc5abb279 100644
--- a/deps/v8/src/heap/cppgc/pointer-policies.cc
+++ b/deps/v8/src/heap/cppgc/pointer-policies.cc
@@ -31,5 +31,17 @@ PersistentRegion& WeakPersistentPolicy::GetPersistentRegion(void* object) {
return heap->GetWeakPersistentRegion();
}
+PersistentRegion& StrongCrossThreadPersistentPolicy::GetPersistentRegion(
+ void* object) {
+ auto* heap = BasePage::FromPayload(object)->heap();
+ return heap->GetStrongCrossThreadPersistentRegion();
+}
+
+PersistentRegion& WeakCrossThreadPersistentPolicy::GetPersistentRegion(
+ void* object) {
+ auto* heap = BasePage::FromPayload(object)->heap();
+ return heap->GetWeakCrossThreadPersistentRegion();
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/process-heap.cc b/deps/v8/src/heap/cppgc/process-heap.cc
index 1408988396..76a4a5dff5 100644
--- a/deps/v8/src/heap/cppgc/process-heap.cc
+++ b/deps/v8/src/heap/cppgc/process-heap.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/heap/cppgc/process-heap.h"
+
#include "include/cppgc/internal/process-heap.h"
namespace cppgc {
@@ -9,5 +11,7 @@ namespace internal {
AtomicEntryFlag ProcessHeap::concurrent_marking_flag_;
+v8::base::LazyMutex g_process_mutex = LAZY_MUTEX_INITIALIZER;
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/process-heap.h b/deps/v8/src/heap/cppgc/process-heap.h
new file mode 100644
index 0000000000..8afc7c88eb
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/process-heap.h
@@ -0,0 +1,18 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_PROCESS_HEAP_H_
+#define V8_HEAP_CPPGC_PROCESS_HEAP_H_
+
+#include "src/base/platform/mutex.h"
+
+namespace cppgc {
+namespace internal {
+
+extern v8::base::LazyMutex g_process_mutex;
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_PROCESS_HEAP_H_
diff --git a/deps/v8/src/heap/cppgc/raw-heap.cc b/deps/v8/src/heap/cppgc/raw-heap.cc
index 19200ae8a2..f45039c870 100644
--- a/deps/v8/src/heap/cppgc/raw-heap.cc
+++ b/deps/v8/src/heap/cppgc/raw-heap.cc
@@ -12,17 +12,20 @@ namespace internal {
// static
constexpr size_t RawHeap::kNumberOfRegularSpaces;
-RawHeap::RawHeap(HeapBase* heap, size_t custom_spaces) : main_heap_(heap) {
+RawHeap::RawHeap(
+ HeapBase* heap,
+ const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces)
+ : main_heap_(heap) {
size_t i = 0;
for (; i < static_cast<size_t>(RegularSpaceType::kLarge); ++i) {
- spaces_.push_back(std::make_unique<NormalPageSpace>(this, i));
+ spaces_.push_back(std::make_unique<NormalPageSpace>(this, i, false));
}
spaces_.push_back(std::make_unique<LargePageSpace>(
this, static_cast<size_t>(RegularSpaceType::kLarge)));
DCHECK_EQ(kNumberOfRegularSpaces, spaces_.size());
- for (size_t j = 0; j < custom_spaces; j++) {
- spaces_.push_back(
- std::make_unique<NormalPageSpace>(this, kNumberOfRegularSpaces + j));
+ for (size_t j = 0; j < custom_spaces.size(); j++) {
+ spaces_.push_back(std::make_unique<NormalPageSpace>(
+ this, kNumberOfRegularSpaces + j, custom_spaces[j]->IsCompactable()));
}
}
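RawHeap now receives the embedder's custom space descriptors rather than a bare count, so each backing NormalPageSpace can record whether its space participates in compaction. A hedged sketch of a descriptor that would flow through this constructor; the class name is illustrative, and kSupportsCompaction is an assumption based on the custom-space.h changes shipped in this update:

    #include "cppgc/custom-space.h"

    // Hypothetical embedder space; RawHeap::RawHeap() queries
    // IsCompactable() on each such descriptor.
    class CompactableNodeSpace final
        : public cppgc::CustomSpace<CompactableNodeSpace> {
     public:
      static constexpr size_t kSpaceIndex = 0;
      static constexpr bool kSupportsCompaction = true;
    };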
diff --git a/deps/v8/src/heap/cppgc/raw-heap.h b/deps/v8/src/heap/cppgc/raw-heap.h
index 79a278546b..fceaeae594 100644
--- a/deps/v8/src/heap/cppgc/raw-heap.h
+++ b/deps/v8/src/heap/cppgc/raw-heap.h
@@ -47,7 +47,8 @@ class V8_EXPORT_PRIVATE RawHeap final {
using iterator = Spaces::iterator;
using const_iterator = Spaces::const_iterator;
- explicit RawHeap(HeapBase* heap, size_t custom_spaces);
+ RawHeap(HeapBase* heap,
+ const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces);
RawHeap(const RawHeap&) = delete;
RawHeap& operator=(const RawHeap&) = delete;
diff --git a/deps/v8/src/heap/cppgc/sanitizers.h b/deps/v8/src/heap/cppgc/sanitizers.h
index 17f6cd7306..c3a8ff684d 100644
--- a/deps/v8/src/heap/cppgc/sanitizers.h
+++ b/deps/v8/src/heap/cppgc/sanitizers.h
@@ -47,27 +47,32 @@
// API for newly allocated or reclaimed memory.
#if defined(V8_USE_MEMORY_SANITIZER)
-#define SET_MEMORY_ACCESIBLE(address, size) \
- MSAN_UNPOISON(address, size); \
- memset((address), 0, (size))
-#define SET_MEMORY_INACCESIBLE(address, size) MSAN_POISON((address), (size))
-#elif DEBUG || defined(V8_USE_ADDRESS_SANITIZER)
-#define SET_MEMORY_ACCESIBLE(address, size) \
- ASAN_UNPOISON_MEMORY_REGION(address, size); \
- memset((address), 0, (size))
-#define SET_MEMORY_INACCESIBLE(address, size) \
- ::cppgc::internal::ZapMemory((address), (size)); \
+#define SET_MEMORY_ACCESSIBLE(address, size) MSAN_UNPOISON(address, size);
+#define SET_MEMORY_INACCESSIBLE(address, size) \
+ memset((address), 0, (size)); \
+ MSAN_POISON((address), (size))
+#elif defined(V8_USE_ADDRESS_SANITIZER)
+#define SET_MEMORY_ACCESSIBLE(address, size) \
+ ASAN_UNPOISON_MEMORY_REGION(address, size);
+#define SET_MEMORY_INACCESSIBLE(address, size) \
+ memset((address), 0, (size)); \
ASAN_POISON_MEMORY_REGION(address, size)
+#elif DEBUG
+#define SET_MEMORY_ACCESSIBLE(address, size) memset((address), 0, (size))
+#define SET_MEMORY_INACCESSIBLE(address, size) \
+ ::cppgc::internal::ZapMemory((address), (size));
#else
-#define SET_MEMORY_ACCESIBLE(address, size) memset((address), 0, (size))
-#define SET_MEMORY_INACCESIBLE(address, size) ((void)(address), (void)(size))
+#define SET_MEMORY_ACCESSIBLE(address, size) ((void)(address), (void)(size))
+#define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size))
#endif
namespace cppgc {
namespace internal {
inline void ZapMemory(void* address, size_t size) {
- static constexpr uint8_t kZappedValue = 0xcd;
+  // The lowest bit of the zapped value should be 0 so that zapped objects
+  // are never viewed as fully constructed objects.
+ static constexpr uint8_t kZappedValue = 0xdc;
memset(address, kZappedValue, size);
}
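Besides fixing the ACCESIBLE/ACCESSIBLE spelling, the rework gives the macros a uniform contract: SET_MEMORY_INACCESSIBLE zeroes or zaps the payload and poisons it where a sanitizer is available, while SET_MEMORY_ACCESSIBLE unpoisons under MSAN/ASAN, zeroes in plain DEBUG, and is a no-op in release. A sketch of the intended call pattern on a sweeper-style free path (function names are illustrative):

    // Retiring an object: the payload traps on use under sanitizers and is
    // deterministically zeroed/zapped otherwise.
    void RetirePayload(void* payload, size_t size) {
      SET_MEMORY_INACCESSIBLE(payload, size);
    }

    // Handing memory back out: lift the poison before reuse (a no-op in
    // builds where the macro expands to nothing).
    void* ReusePayload(void* payload, size_t size) {
      SET_MEMORY_ACCESSIBLE(payload, size);
      return payload;
    }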
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
index 986ea6f4fa..5b28d3659c 100644
--- a/deps/v8/src/heap/cppgc/sweeper.cc
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -114,7 +114,7 @@ using SpaceStates = std::vector<SpaceState>;
void StickyUnmark(HeapObjectHeader* header) {
// Young generation in Oilpan uses sticky mark bits.
#if !defined(CPPGC_YOUNG_GENERATION)
- header->Unmark<HeapObjectHeader::AccessMode::kAtomic>();
+ header->Unmark<AccessMode::kAtomic>();
#endif
}
@@ -127,7 +127,7 @@ class InlinedFinalizationBuilder final {
void AddFinalizer(HeapObjectHeader* header, size_t size) {
header->Finalize();
- SET_MEMORY_INACCESIBLE(header, size);
+ SET_MEMORY_INACCESSIBLE(header, size);
}
void AddFreeListEntry(Address start, size_t size) {
@@ -153,7 +153,7 @@ class DeferredFinalizationBuilder final {
result_.unfinalized_objects.push_back({header});
found_finalizer_ = true;
} else {
- SET_MEMORY_INACCESIBLE(header, size);
+ SET_MEMORY_INACCESSIBLE(header, size);
}
}
@@ -178,7 +178,7 @@ class DeferredFinalizationBuilder final {
template <typename FinalizationBuilder>
typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
- constexpr auto kAtomicAccess = HeapObjectHeader::AccessMode::kAtomic;
+ constexpr auto kAtomicAccess = AccessMode::kAtomic;
FinalizationBuilder builder(page);
PlatformAwareObjectStartBitmap& bitmap = page->object_start_bitmap();
@@ -191,7 +191,7 @@ typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
const size_t size = header->GetSize();
// Check if this is a free list entry.
if (header->IsFree<kAtomicAccess>()) {
- SET_MEMORY_INACCESIBLE(header, std::min(kFreeListEntrySize, size));
+ SET_MEMORY_INACCESSIBLE(header, std::min(kFreeListEntrySize, size));
begin += size;
continue;
}
@@ -273,7 +273,9 @@ class SweepFinalizer final {
// Call finalizers.
for (HeapObjectHeader* object : page_state->unfinalized_objects) {
+ const size_t size = object->GetSize();
object->Finalize();
+ SET_MEMORY_INACCESSIBLE(object, size);
}
// Unmap page if empty.
@@ -444,10 +446,19 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
// - moves all Heap pages to local Sweeper's state (SpaceStates).
class PrepareForSweepVisitor final
: public HeapVisitor<PrepareForSweepVisitor> {
+ using CompactableSpaceHandling =
+ Sweeper::SweepingConfig::CompactableSpaceHandling;
+
public:
- explicit PrepareForSweepVisitor(SpaceStates* states) : states_(states) {}
+ PrepareForSweepVisitor(SpaceStates* states,
+ CompactableSpaceHandling compactable_space_handling)
+ : states_(states),
+ compactable_space_handling_(compactable_space_handling) {}
bool VisitNormalPageSpace(NormalPageSpace* space) {
+ if ((compactable_space_handling_ == CompactableSpaceHandling::kIgnore) &&
+ space->is_compactable())
+ return true;
DCHECK(!space->linear_allocation_buffer().size());
space->free_list().Clear();
ExtractPages(space);
@@ -467,6 +478,7 @@ class PrepareForSweepVisitor final
}
SpaceStates* states_;
+ CompactableSpaceHandling compactable_space_handling_;
};
} // namespace
@@ -483,17 +495,20 @@ class Sweeper::SweeperImpl final {
~SweeperImpl() { CancelSweepers(); }
- void Start(Config config) {
+ void Start(SweepingConfig config) {
is_in_progress_ = true;
#if DEBUG
+ // Verify bitmap for all spaces regardless of |compactable_space_handling|.
ObjectStartBitmapVerifier().Verify(heap_);
#endif
- PrepareForSweepVisitor(&space_states_).Traverse(heap_);
+ PrepareForSweepVisitor(&space_states_, config.compactable_space_handling)
+ .Traverse(heap_);
- if (config == Config::kAtomic) {
+ if (config.sweeping_type == SweepingConfig::SweepingType::kAtomic) {
Finish();
} else {
- DCHECK_EQ(Config::kIncrementalAndConcurrent, config);
+ DCHECK_EQ(SweepingConfig::SweepingType::kIncrementalAndConcurrent,
+ config.sweeping_type);
ScheduleIncrementalSweeping();
ScheduleConcurrentSweeping();
}
@@ -502,6 +517,11 @@ class Sweeper::SweeperImpl final {
void FinishIfRunning() {
if (!is_in_progress_) return;
+ if (concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid() &&
+ concurrent_sweeper_handle_->UpdatePriorityEnabled()) {
+ concurrent_sweeper_handle_->UpdatePriority(
+ cppgc::TaskPriority::kUserBlocking);
+ }
Finish();
}
@@ -524,6 +544,10 @@ class Sweeper::SweeperImpl final {
stats_collector_->NotifySweepingCompleted();
}
+ void WaitForConcurrentSweepingForTesting() {
+ if (concurrent_sweeper_handle_) concurrent_sweeper_handle_->Join();
+ }
+
private:
class IncrementalSweepTask : public cppgc::IdleTask {
public:
@@ -563,14 +587,17 @@ class Sweeper::SweeperImpl final {
};
void ScheduleIncrementalSweeping() {
- if (!platform_ || !foreground_task_runner_) return;
+ DCHECK(platform_);
+ if (!foreground_task_runner_ ||
+ !foreground_task_runner_->IdleTasksEnabled())
+ return;
incremental_sweeper_handle_ =
IncrementalSweepTask::Post(this, foreground_task_runner_.get());
}
void ScheduleConcurrentSweeping() {
- if (!platform_) return;
+ DCHECK(platform_);
concurrent_sweeper_handle_ = platform_->PostJob(
cppgc::TaskPriority::kUserVisible,
@@ -579,7 +606,8 @@ class Sweeper::SweeperImpl final {
void CancelSweepers() {
if (incremental_sweeper_handle_) incremental_sweeper_handle_.Cancel();
- if (concurrent_sweeper_handle_) concurrent_sweeper_handle_->Cancel();
+ if (concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid())
+ concurrent_sweeper_handle_->Cancel();
}
void SynchronizeAndFinalizeConcurrentSweeping() {
@@ -605,8 +633,11 @@ Sweeper::Sweeper(RawHeap* heap, cppgc::Platform* platform,
Sweeper::~Sweeper() = default;
-void Sweeper::Start(Config config) { impl_->Start(config); }
+void Sweeper::Start(SweepingConfig config) { impl_->Start(config); }
void Sweeper::FinishIfRunning() { impl_->FinishIfRunning(); }
+void Sweeper::WaitForConcurrentSweepingForTesting() {
+ impl_->WaitForConcurrentSweepingForTesting();
+}
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/sweeper.h b/deps/v8/src/heap/cppgc/sweeper.h
index e94036521e..cb5824cc8c 100644
--- a/deps/v8/src/heap/cppgc/sweeper.h
+++ b/deps/v8/src/heap/cppgc/sweeper.h
@@ -17,10 +17,18 @@ namespace internal {
class StatsCollector;
class RawHeap;
+class ConcurrentSweeperTest;
class V8_EXPORT_PRIVATE Sweeper final {
public:
- enum class Config { kAtomic, kIncrementalAndConcurrent };
+ struct SweepingConfig {
+ enum class SweepingType : uint8_t { kAtomic, kIncrementalAndConcurrent };
+ enum class CompactableSpaceHandling { kSweep, kIgnore };
+
+ SweepingType sweeping_type = SweepingType::kIncrementalAndConcurrent;
+ CompactableSpaceHandling compactable_space_handling =
+ CompactableSpaceHandling::kSweep;
+ };
Sweeper(RawHeap*, cppgc::Platform*, StatsCollector*);
~Sweeper();
@@ -29,12 +37,16 @@ class V8_EXPORT_PRIVATE Sweeper final {
Sweeper& operator=(const Sweeper&) = delete;
// Sweeper::Start assumes the heap holds no linear allocation buffers.
- void Start(Config);
+ void Start(SweepingConfig);
void FinishIfRunning();
private:
+ void WaitForConcurrentSweepingForTesting();
+
class SweeperImpl;
std::unique_ptr<SweeperImpl> impl_;
+
+ friend class ConcurrentSweeperTest;
};
} // namespace internal
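Sweeper::Start now takes a config struct instead of a bare enum, which leaves room for the compaction knob without growing the parameter list. A sketch of a fully spelled-out call site, using only the fields visible in this hunk and assuming a live Sweeper reference:

    void StartSweeping(cppgc::internal::Sweeper& sweeper) {
      using Config = cppgc::internal::Sweeper::SweepingConfig;
      Config config;  // defaults: kIncrementalAndConcurrent, kSweep
      config.sweeping_type = Config::SweepingType::kAtomic;
      config.compactable_space_handling =
          Config::CompactableSpaceHandling::kIgnore;
      sweeper.Start(config);  // atomic sweep that skips compactable spaces
    }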
diff --git a/deps/v8/src/heap/cppgc/trace-trait.cc b/deps/v8/src/heap/cppgc/trace-trait.cc
index c0e9b342db..9f410b9c12 100644
--- a/deps/v8/src/heap/cppgc/trace-trait.cc
+++ b/deps/v8/src/heap/cppgc/trace-trait.cc
@@ -16,12 +16,10 @@ TraceDescriptor TraceTraitFromInnerAddressImpl::GetTraceDescriptor(
// mixins.
const HeapObjectHeader& header =
BasePage::FromPayload(address)
- ->ObjectHeaderFromInnerAddress<HeapObjectHeader::AccessMode::kAtomic>(
- address);
- return {header.Payload(),
- GlobalGCInfoTable::GCInfoFromIndex(
- header.GetGCInfoIndex<HeapObjectHeader::AccessMode::kAtomic>())
- .trace};
+ ->ObjectHeaderFromInnerAddress<AccessMode::kAtomic>(address);
+ return {header.Payload(), GlobalGCInfoTable::GCInfoFromIndex(
+ header.GetGCInfoIndex<AccessMode::kAtomic>())
+ .trace};
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/visitor.cc b/deps/v8/src/heap/cppgc/visitor.cc
index 61eedf3bd9..33786f6fce 100644
--- a/deps/v8/src/heap/cppgc/visitor.cc
+++ b/deps/v8/src/heap/cppgc/visitor.cc
@@ -68,15 +68,20 @@ void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
HeapObjectHeader& header) {
- if (!header.IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>()) {
- visitor_.Visit(
- header.Payload(),
- {header.Payload(),
- GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
+ if (!header.IsInConstruction<AccessMode::kNonAtomic>()) {
+ VisitFullyConstructedConservatively(header);
} else {
- VisitConservatively(header, TraceConservatively);
+ VisitInConstructionConservatively(header, TraceConservatively);
}
}
+void ConservativeTracingVisitor::VisitFullyConstructedConservatively(
+ HeapObjectHeader& header) {
+ visitor_.Visit(
+ header.Payload(),
+ {header.Payload(),
+ GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/visitor.h b/deps/v8/src/heap/cppgc/visitor.h
index c8395ffa98..3b0f185ccb 100644
--- a/deps/v8/src/heap/cppgc/visitor.h
+++ b/deps/v8/src/heap/cppgc/visitor.h
@@ -31,14 +31,8 @@ class VisitorBase : public cppgc::Visitor {
VisitorBase(const VisitorBase&) = delete;
VisitorBase& operator=(const VisitorBase&) = delete;
- template <typename T>
- void TraceRootForTesting(const Persistent<T>& p, const SourceLocation& loc) {
- TraceRoot(p, loc);
- }
-
- template <typename T>
- void TraceRootForTesting(const WeakPersistent<T>& p,
- const SourceLocation& loc) {
+ template <typename Persistent>
+ void TraceRootForTesting(const Persistent& p, const SourceLocation& loc) {
TraceRoot(p, loc);
}
};
@@ -59,8 +53,10 @@ class ConservativeTracingVisitor {
protected:
using TraceConservativelyCallback = void(ConservativeTracingVisitor*,
const HeapObjectHeader&);
- virtual void VisitConservatively(HeapObjectHeader&,
- TraceConservativelyCallback) {}
+ virtual void V8_EXPORT_PRIVATE
+ VisitFullyConstructedConservatively(HeapObjectHeader&);
+ virtual void VisitInConstructionConservatively(HeapObjectHeader&,
+ TraceConservativelyCallback) {}
HeapBase& heap_;
PageBackend& page_backend_;
diff --git a/deps/v8/src/heap/cppgc/write-barrier.cc b/deps/v8/src/heap/cppgc/write-barrier.cc
index 4a076e8653..795bed8439 100644
--- a/deps/v8/src/heap/cppgc/write-barrier.cc
+++ b/deps/v8/src/heap/cppgc/write-barrier.cc
@@ -34,9 +34,12 @@ void MarkValue(const BasePage* page, MarkerBase* marker, const void* value) {
DCHECK(marker);
- if (V8_UNLIKELY(
- header
- .IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>())) {
+ if (V8_UNLIKELY(header.IsInConstruction<AccessMode::kNonAtomic>())) {
+    // In-construction objects are traced only if they are unmarked. If marking
+    // reaches this object again once it is fully constructed, it will re-mark
+    // it, and the code path that traces previously not-fully-constructed
+    // objects knows to bail out.
+ header.Unmark<AccessMode::kAtomic>();
marker->WriteBarrierForInConstructionObject(header);
return;
}
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index 9c926bed69..01c0402f7e 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -86,19 +86,33 @@ LocalEmbedderHeapTracer::ProcessingScope::~ProcessingScope() {
}
}
+// static
+LocalEmbedderHeapTracer::WrapperInfo
+LocalEmbedderHeapTracer::ExtractWrapperInfo(Isolate* isolate,
+ JSObject js_object) {
+ DCHECK_GE(js_object.GetEmbedderFieldCount(), 2);
+ DCHECK(js_object.IsApiWrapper());
+
+ WrapperInfo info;
+ if (EmbedderDataSlot(js_object, 0)
+ .ToAlignedPointerSafe(isolate, &info.first) &&
+ info.first &&
+ EmbedderDataSlot(js_object, 1)
+ .ToAlignedPointerSafe(isolate, &info.second)) {
+ return info;
+ }
+ return {nullptr, nullptr};
+}
+
void LocalEmbedderHeapTracer::ProcessingScope::TracePossibleWrapper(
JSObject js_object) {
DCHECK(js_object.IsApiWrapper());
if (js_object.GetEmbedderFieldCount() < 2) return;
- void* pointer0;
- void* pointer1;
- if (EmbedderDataSlot(js_object, 0)
- .ToAlignedPointer(tracer_->isolate_, &pointer0) &&
- pointer0 &&
- EmbedderDataSlot(js_object, 1)
- .ToAlignedPointer(tracer_->isolate_, &pointer1)) {
- wrapper_cache_.push_back({pointer0, pointer1});
+ WrapperInfo info =
+ LocalEmbedderHeapTracer::ExtractWrapperInfo(tracer_->isolate_, js_object);
+ if (VerboseWrapperInfo(info).is_valid()) {
+ wrapper_cache_.push_back(std::move(info));
}
FlushWrapperCacheIfFull();
}
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 728ede4452..5aff187ed3 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -20,6 +20,23 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
using WrapperInfo = std::pair<void*, void*>;
using WrapperCache = std::vector<WrapperInfo>;
+  // WrapperInfo is passed over the API. Use VerboseWrapperInfo to access pair
+  // internals in a named way. See ProcessingScope::TracePossibleWrapper()
+  // below for how a V8 object is parsed to gather the information.
+ struct VerboseWrapperInfo {
+ explicit VerboseWrapperInfo(const WrapperInfo& raw_info)
+ : raw_info(raw_info) {}
+
+ // Information describing the type pointed to via instance().
+ void* type_info() const { return raw_info.first; }
+ // Direct pointer to an instance described by type_info().
+ void* instance() const { return raw_info.second; }
+
+ bool is_valid() const { return type_info(); }
+
+ const WrapperInfo& raw_info;
+ };
+
class V8_EXPORT_PRIVATE ProcessingScope {
public:
explicit ProcessingScope(LocalEmbedderHeapTracer* tracer);
@@ -38,6 +55,8 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
WrapperCache wrapper_cache_;
};
+ static WrapperInfo ExtractWrapperInfo(Isolate* isolate, JSObject js_object);
+
explicit LocalEmbedderHeapTracer(Isolate* isolate) : isolate_(isolate) {}
~LocalEmbedderHeapTracer() {
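VerboseWrapperInfo is a zero-cost view that names the two slots of the pair crossing the embedder API. A short sketch mirroring the TracePossibleWrapper rewrite above (Dispatch is hypothetical):

    void HandleWrapper(Isolate* isolate, JSObject js_object) {
      LocalEmbedderHeapTracer::WrapperInfo info =
          LocalEmbedderHeapTracer::ExtractWrapperInfo(isolate, js_object);
      LocalEmbedderHeapTracer::VerboseWrapperInfo named(info);
      if (!named.is_valid()) return;
      // Slot 0 describes the C++ type; slot 1 points at the instance.
      Dispatch(named.type_info(), named.instance());
    }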
diff --git a/deps/v8/src/heap/factory-base.cc b/deps/v8/src/heap/factory-base.cc
index 51a856809a..a87611e068 100644
--- a/deps/v8/src/heap/factory-base.cc
+++ b/deps/v8/src/heap/factory-base.cc
@@ -21,6 +21,7 @@
#include "src/objects/shared-function-info-inl.h"
#include "src/objects/source-text-module.h"
#include "src/objects/string-inl.h"
+#include "src/objects/string.h"
#include "src/objects/template-objects-inl.h"
namespace v8 {
@@ -195,8 +196,8 @@ Handle<BytecodeArray> FactoryBase<Impl>::NewBytecodeArray(
instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
instance->set_constant_pool(*constant_pool);
instance->set_handler_table(read_only_roots().empty_byte_array());
- instance->set_synchronized_source_position_table(
- read_only_roots().undefined_value());
+ instance->set_source_position_table(read_only_roots().undefined_value(),
+ kReleaseStore);
CopyBytes(reinterpret_cast<byte*>(instance->GetFirstBytecodeAddress()),
raw_bytecodes, length);
instance->clear_padding();
@@ -312,9 +313,9 @@ Handle<SharedFunctionInfo> FactoryBase<Impl>::NewSharedFunctionInfo(
bool has_shared_name = maybe_name.ToHandle(&shared_name);
if (has_shared_name) {
DCHECK(shared_name->IsFlat());
- shared->set_name_or_scope_info(*shared_name);
+ shared->set_name_or_scope_info(*shared_name, kReleaseStore);
} else {
- DCHECK_EQ(shared->name_or_scope_info(),
+ DCHECK_EQ(shared->name_or_scope_info(kAcquireLoad),
SharedFunctionInfo::kNoSharedNameSentinel);
}
@@ -325,11 +326,12 @@ Handle<SharedFunctionInfo> FactoryBase<Impl>::NewSharedFunctionInfo(
DCHECK(!Builtins::IsBuiltinId(maybe_builtin_index));
DCHECK_IMPLIES(function_data->IsCode(),
!Code::cast(*function_data).is_builtin());
- shared->set_function_data(*function_data);
+ shared->set_function_data(*function_data, kReleaseStore);
} else if (Builtins::IsBuiltinId(maybe_builtin_index)) {
shared->set_builtin_id(maybe_builtin_index);
} else {
- shared->set_builtin_id(Builtins::kIllegal);
+ DCHECK(shared->HasBuiltinId());
+ DCHECK_EQ(Builtins::kIllegal, shared->builtin_id());
}
shared->CalculateConstructAsBuiltin();
@@ -409,14 +411,14 @@ FactoryBase<Impl>::NewTemplateObjectDescription(
template <typename Impl>
Handle<FeedbackMetadata> FactoryBase<Impl>::NewFeedbackMetadata(
- int slot_count, int feedback_cell_count, AllocationType allocation) {
+ int slot_count, int create_closure_slot_count, AllocationType allocation) {
DCHECK_LE(0, slot_count);
int size = FeedbackMetadata::SizeFor(slot_count);
HeapObject result = AllocateRawWithImmortalMap(
size, allocation, read_only_roots().feedback_metadata_map());
Handle<FeedbackMetadata> data(FeedbackMetadata::cast(result), isolate());
data->set_slot_count(slot_count);
- data->set_closure_feedback_cell_count(feedback_cell_count);
+ data->set_create_closure_slot_count(create_closure_slot_count);
// Initialize the data section to 0.
int data_size = size - FeedbackMetadata::kHeaderSize;
@@ -435,7 +437,7 @@ Handle<CoverageInfo> FactoryBase<Impl>::NewCoverageInfo(
int size = CoverageInfo::SizeFor(slot_count);
Map map = read_only_roots().coverage_info_map();
HeapObject result =
- AllocateRawWithImmortalMap(size, AllocationType::kYoung, map);
+ AllocateRawWithImmortalMap(size, AllocationType::kOld, map);
Handle<CoverageInfo> info(CoverageInfo::cast(result), isolate());
info->set_slot_count(slot_count);
@@ -507,7 +509,8 @@ Handle<SeqOneByteString> FactoryBase<Impl>::NewOneByteInternalizedString(
Handle<SeqOneByteString> result =
AllocateRawOneByteInternalizedString(str.length(), hash_field);
DisallowHeapAllocation no_gc;
- MemCopy(result->GetChars(no_gc), str.begin(), str.length());
+ MemCopy(result->GetChars(no_gc, SharedStringAccessGuardIfNeeded::NotNeeded()),
+ str.begin(), str.length());
return result;
}
@@ -517,7 +520,8 @@ Handle<SeqTwoByteString> FactoryBase<Impl>::NewTwoByteInternalizedString(
Handle<SeqTwoByteString> result =
AllocateRawTwoByteInternalizedString(str.length(), hash_field);
DisallowHeapAllocation no_gc;
- MemCopy(result->GetChars(no_gc), str.begin(), str.length() * kUC16Size);
+ MemCopy(result->GetChars(no_gc, SharedStringAccessGuardIfNeeded::NotNeeded()),
+ str.begin(), str.length() * kUC16Size);
return result;
}
@@ -605,21 +609,31 @@ MaybeHandle<String> FactoryBase<Impl>::NewConsString(
Handle<SeqOneByteString> result =
NewRawOneByteString(length, allocation).ToHandleChecked();
DisallowHeapAllocation no_gc;
- uint8_t* dest = result->GetChars(no_gc);
+ uint8_t* dest =
+ result->GetChars(no_gc, SharedStringAccessGuardIfNeeded::NotNeeded());
// Copy left part.
- const uint8_t* src = left->template GetChars<uint8_t>(no_gc);
- CopyChars(dest, src, left_length);
+ {
+ SharedStringAccessGuardIfNeeded access_guard(*left);
+ const uint8_t* src =
+ left->template GetChars<uint8_t>(no_gc, access_guard);
+ CopyChars(dest, src, left_length);
+ }
// Copy right part.
- src = right->template GetChars<uint8_t>(no_gc);
- CopyChars(dest + left_length, src, right_length);
+ {
+ SharedStringAccessGuardIfNeeded access_guard(*right);
+ const uint8_t* src =
+ right->template GetChars<uint8_t>(no_gc, access_guard);
+ CopyChars(dest + left_length, src, right_length);
+ }
return result;
}
Handle<SeqTwoByteString> result =
NewRawTwoByteString(length, allocation).ToHandleChecked();
- DisallowHeapAllocation pointer_stays_valid;
- uc16* sink = result->GetChars(pointer_stays_valid);
+ DisallowHeapAllocation no_gc;
+ uc16* sink =
+ result->GetChars(no_gc, SharedStringAccessGuardIfNeeded::NotNeeded());
String::WriteToFlat(*left, sink, 0, left->length());
String::WriteToFlat(*right, sink + left->length(), 0, right->length());
return result;
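GetChars now requires an explicit access guard: reads of strings that may be shared across threads construct a SharedStringAccessGuardIfNeeded scoped to the read, while call sites that can only see thread-local strings pass NotNeeded(). A condensed sketch of the guarded-read shape used for the left/right copies above:

    // Sketch: copying characters out of a possibly-shared sequential string.
    void CopyOut(Handle<SeqOneByteString> str, uint8_t* dest) {
      DisallowHeapAllocation no_gc;
      SharedStringAccessGuardIfNeeded access_guard(*str);
      const uint8_t* chars = str->GetChars(no_gc, access_guard);
      CopyChars(dest, chars, str->length());
    }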
@@ -777,7 +791,9 @@ template <typename Impl>
HeapObject FactoryBase<Impl>::AllocateRawArray(int size,
AllocationType allocation) {
HeapObject result = AllocateRaw(size, allocation);
- if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
+ if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
+ (size > Heap::MaxRegularHeapObjectSize(allocation)) &&
+ FLAG_use_marking_progress_bar) {
BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(result);
chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
}
diff --git a/deps/v8/src/heap/factory-base.h b/deps/v8/src/heap/factory-base.h
index dc95e16a6a..847802629e 100644
--- a/deps/v8/src/heap/factory-base.h
+++ b/deps/v8/src/heap/factory-base.h
@@ -137,7 +137,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase {
// Allocates a FeedbackMetadata object and zeroes the data section.
Handle<FeedbackMetadata> NewFeedbackMetadata(
- int slot_count, int feedback_cell_count,
+ int slot_count, int create_closure_slot_count,
AllocationType allocation = AllocationType::kOld);
Handle<CoverageInfo> NewCoverageInfo(const ZoneVector<SourceRange>& slots);
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index 7e66123681..3c2f2d167e 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -59,6 +59,7 @@
#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/string-set-inl.h"
#include "src/objects/struct-inl.h"
+#include "src/objects/synthetic-module-inl.h"
#include "src/objects/template-objects-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/roots/roots.h"
@@ -67,25 +68,6 @@
namespace v8 {
namespace internal {
-namespace {
-
-int ComputeCodeObjectSize(const CodeDesc& desc) {
- bool has_unwinding_info = desc.unwinding_info != nullptr;
- DCHECK((has_unwinding_info && desc.unwinding_info_size > 0) ||
- (!has_unwinding_info && desc.unwinding_info_size == 0));
- int body_size = desc.instr_size;
- int unwinding_info_size_field_size = kInt64Size;
- if (has_unwinding_info) {
- body_size = RoundUp(body_size, kInt64Size) + desc.unwinding_info_size +
- unwinding_info_size_field_size;
- }
- int object_size = Code::SizeFor(RoundUp(body_size, kObjectAlignment));
- DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
- return object_size;
-}
-
-} // namespace
-
Factory::CodeBuilder::CodeBuilder(Isolate* isolate, const CodeDesc& desc,
CodeKind kind)
: isolate_(isolate),
@@ -138,32 +120,27 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
isolate_->heap()->SetBasicBlockProfilingData(new_list);
}
+ STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
+ const int object_size = Code::SizeFor(code_desc_.body_size());
+
Handle<Code> code;
{
- int object_size = ComputeCodeObjectSize(code_desc_);
Heap* heap = isolate_->heap();
CodePageCollectionMemoryModificationScope code_allocation(heap);
HeapObject result;
AllocationType allocation_type =
is_executable_ ? AllocationType::kCode : AllocationType::kReadOnly;
- AllocationAlignment alignment = is_executable_
- ? AllocationAlignment::kCodeAligned
- : AllocationAlignment::kWordAligned;
if (retry_allocation_or_fail) {
result = heap->AllocateRawWith<Heap::kRetryOrFail>(
- object_size, allocation_type, AllocationOrigin::kRuntime, alignment);
+ object_size, allocation_type, AllocationOrigin::kRuntime);
} else {
result = heap->AllocateRawWith<Heap::kLightRetry>(
- object_size, allocation_type, AllocationOrigin::kRuntime, alignment);
+ object_size, allocation_type, AllocationOrigin::kRuntime);
// Return an empty handle if we cannot allocate the code object.
if (result.is_null()) return MaybeHandle<Code>();
}
- if (!is_movable_) {
- result = heap->EnsureImmovableCode(result, object_size);
- }
-
// The code object has not been fully initialized yet. We rely on the
// fact that no allocation will happen from this point on.
DisallowHeapAllocation no_gc;
@@ -179,21 +156,22 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
}
constexpr bool kIsNotOffHeapTrampoline = false;
- const bool has_unwinding_info = code_desc_.unwinding_info != nullptr;
- code->set_raw_instruction_size(code_desc_.instr_size);
+ code->set_raw_instruction_size(code_desc_.instruction_size());
+ code->set_raw_metadata_size(code_desc_.metadata_size());
code->set_relocation_info(*reloc_info);
- code->initialize_flags(kind_, has_unwinding_info, is_turbofanned_,
- stack_slots_, kIsNotOffHeapTrampoline);
+ code->initialize_flags(kind_, is_turbofanned_, stack_slots_,
+ kIsNotOffHeapTrampoline);
code->set_builtin_index(builtin_index_);
code->set_inlined_bytecode_size(inlined_bytecode_size_);
- code->set_code_data_container(*data_container);
+ code->set_code_data_container(*data_container, kReleaseStore);
code->set_deoptimization_data(*deoptimization_data_);
code->set_source_position_table(*source_position_table_);
- code->set_safepoint_table_offset(code_desc_.safepoint_table_offset);
- code->set_handler_table_offset(code_desc_.handler_table_offset);
- code->set_constant_pool_offset(code_desc_.constant_pool_offset);
- code->set_code_comments_offset(code_desc_.code_comments_offset);
+ code->set_handler_table_offset(code_desc_.handler_table_offset_relative());
+ code->set_constant_pool_offset(code_desc_.constant_pool_offset_relative());
+ code->set_code_comments_offset(code_desc_.code_comments_offset_relative());
+ code->set_unwinding_info_offset(
+ code_desc_.unwinding_info_offset_relative());
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
@@ -396,7 +374,8 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(
AllocationResult allocation = heap->AllocateRaw(size, allocation_type);
HeapObject result;
if (!allocation.To(&result)) return MaybeHandle<FixedArray>();
- if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
+ if ((size > Heap::MaxRegularHeapObjectSize(allocation_type)) &&
+ FLAG_use_marking_progress_bar) {
BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(result);
chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
}
@@ -445,13 +424,12 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
*feedback_vector_map());
Handle<FeedbackVector> vector(FeedbackVector::cast(result), isolate());
vector->set_shared_function_info(*shared);
- vector->set_optimized_code_weak_or_smi(MaybeObject::FromSmi(Smi::FromEnum(
- FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
- : OptimizationMarker::kNone)));
+ vector->set_maybe_optimized_code(
+ HeapObjectReference::ClearedValue(isolate()));
vector->set_length(length);
vector->set_invocation_count(0);
vector->set_profiler_ticks(0);
- vector->clear_padding();
+ vector->InitializeOptimizationState();
vector->set_closure_feedback_cell_array(*closure_feedback_cell_array);
// TODO(leszeks): Initialize based on the feedback metadata.
@@ -473,6 +451,10 @@ Handle<EmbedderDataArray> Factory::NewEmbedderDataArray(int length) {
ObjectSlot end(array->slots_end());
size_t slot_count = end - start;
MemsetTagged(start, *undefined_value(), slot_count);
+ for (int i = 0; i < length; i++) {
+ // TODO(v8:10391, saelo): Handle external pointers in EmbedderDataSlot
+ EmbedderDataSlot(*array, i).AllocateExternalPointerEntry(isolate());
+ }
}
return array;
}
@@ -538,21 +520,27 @@ Handle<SmallOrderedNameDictionary> Factory::NewSmallOrderedNameDictionary(
}
Handle<OrderedHashSet> Factory::NewOrderedHashSet() {
- return OrderedHashSet::Allocate(isolate(), OrderedHashSet::kMinCapacity)
+ return OrderedHashSet::Allocate(isolate(), OrderedHashSet::kInitialCapacity,
+ AllocationType::kYoung)
.ToHandleChecked();
}
Handle<OrderedHashMap> Factory::NewOrderedHashMap() {
- return OrderedHashMap::Allocate(isolate(), OrderedHashMap::kMinCapacity)
+ return OrderedHashMap::Allocate(isolate(), OrderedHashMap::kInitialCapacity,
+ AllocationType::kYoung)
.ToHandleChecked();
}
-Handle<OrderedNameDictionary> Factory::NewOrderedNameDictionary() {
- return OrderedNameDictionary::Allocate(isolate(),
- OrderedNameDictionary::kMinCapacity)
+Handle<OrderedNameDictionary> Factory::NewOrderedNameDictionary(int capacity) {
+ return OrderedNameDictionary::Allocate(isolate(), capacity,
+ AllocationType::kYoung)
.ToHandleChecked();
}
+Handle<NameDictionary> Factory::NewNameDictionary(int at_least_space_for) {
+ return NameDictionary::New(isolate(), at_least_space_for);
+}
+
Handle<PropertyDescriptorObject> Factory::NewPropertyDescriptorObject() {
Handle<PropertyDescriptorObject> object =
Handle<PropertyDescriptorObject>::cast(
@@ -834,6 +822,7 @@ Handle<StringClass> Factory::InternalizeExternalString(Handle<String> string) {
Handle<Map> map = GetInternalizedStringMap(this, string).ToHandleChecked();
Handle<StringClass> external_string(
StringClass::cast(New(map, AllocationType::kOld)), isolate());
+ external_string->AllocateExternalPointerEntries(isolate());
external_string->set_length(cast_string->length());
external_string->set_hash_field(cast_string->hash_field());
external_string->SetResource(isolate(), nullptr);
@@ -959,6 +948,7 @@ MaybeHandle<String> Factory::NewExternalStringFromOneByte(
: uncached_external_one_byte_string_map();
Handle<ExternalOneByteString> external_string(
ExternalOneByteString::cast(New(map, AllocationType::kOld)), isolate());
+ external_string->AllocateExternalPointerEntries(isolate());
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
external_string->SetResource(isolate(), resource);
@@ -979,6 +969,7 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
: uncached_external_string_map();
Handle<ExternalTwoByteString> external_string(
ExternalTwoByteString::cast(New(map, AllocationType::kOld)), isolate());
+ external_string->AllocateExternalPointerEntries(isolate());
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
external_string->SetResource(isolate(), resource);
@@ -1063,6 +1054,7 @@ Handle<NativeContext> Factory::NewNativeContext() {
AllocationType::kOld));
context->set_native_context_map(*map);
map->set_native_context(*context);
+ context->AllocateExternalPointerEntries(isolate());
context->set_scope_info(ReadOnlyRoots(isolate()).native_scope_info());
context->set_previous(Context::unchecked_cast(Smi::zero()));
context->set_extension(*undefined_value());
@@ -1313,6 +1305,7 @@ Handle<Foreign> Factory::NewForeign(Address addr) {
HeapObject result = AllocateRawWithImmortalMap(map.instance_size(),
AllocationType::kYoung, map);
Handle<Foreign> foreign(Foreign::cast(result), isolate());
+ foreign->AllocateExternalPointerEntries(isolate());
foreign->set_foreign_address(isolate(), addr);
return foreign;
}
@@ -1324,6 +1317,7 @@ Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(Address type_address,
HeapObject result = AllocateRawWithImmortalMap(map.instance_size(),
AllocationType::kYoung, map);
Handle<WasmTypeInfo> info(WasmTypeInfo::cast(result), isolate());
+ info->AllocateExternalPointerEntries(isolate());
info->set_foreign_address(isolate(), type_address);
info->set_parent(*parent);
info->set_subtypes(*subtypes);
@@ -1463,7 +1457,8 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
map.SetInObjectUnusedPropertyFields(inobject_properties);
map.SetInstanceDescriptors(isolate(), *empty_descriptor_array(), 0);
if (FLAG_unbox_double_fields) {
- map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout(),
+ kReleaseStore);
}
// Must be called only after |instance_type|, |instance_size| and
// |layout_descriptor| are set.
@@ -1820,122 +1815,6 @@ DEFINE_ERROR(WasmLinkError, wasm_link_error)
DEFINE_ERROR(WasmRuntimeError, wasm_runtime_error)
#undef DEFINE_ERROR
-Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
- Handle<SharedFunctionInfo> info,
- Handle<Context> context,
- AllocationType allocation) {
- Handle<JSFunction> function(JSFunction::cast(New(map, allocation)),
- isolate());
-
- Handle<Code> code;
- bool have_cached_code = info->TryGetCachedCode(isolate()).ToHandle(&code);
-
- function->initialize_properties(isolate());
- function->initialize_elements();
- function->set_shared(*info);
- function->set_code(have_cached_code ? *code : info->GetCode());
- function->set_context(*context);
- function->set_raw_feedback_cell(*many_closures_cell());
- int header_size;
- if (map->has_prototype_slot()) {
- header_size = JSFunction::kSizeWithPrototype;
- function->set_prototype_or_initial_map(*the_hole_value());
- } else {
- header_size = JSFunction::kSizeWithoutPrototype;
- }
- InitializeJSObjectBody(function, map, header_size);
-
- if (have_cached_code) {
- IsCompiledScope is_compiled_scope(info->is_compiled_scope(isolate()));
- JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
- if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceHit(info, code);
- }
-
- return function;
-}
-
-Handle<JSFunction> Factory::NewFunctionForTest(Handle<String> name) {
- NewFunctionArgs args = NewFunctionArgs::ForFunctionWithoutCode(
- name, isolate()->sloppy_function_map(), LanguageMode::kSloppy);
- Handle<JSFunction> result = NewFunction(args);
- DCHECK(is_sloppy(result->shared().language_mode()));
- return result;
-}
-
-Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
- DCHECK(!args.name_.is_null());
-
- // Create the SharedFunctionInfo.
- Handle<NativeContext> context(isolate()->native_context());
- Handle<Map> map = args.GetMap(isolate());
- Handle<SharedFunctionInfo> info =
- NewSharedFunctionInfo(args.name_, args.maybe_wasm_function_data_,
- args.maybe_builtin_id_, kNormalFunction);
-
- // Proper language mode in shared function info will be set later.
- DCHECK(is_sloppy(info->language_mode()));
- DCHECK(!map->IsUndefined(isolate()));
-
-#ifdef DEBUG
- if (isolate()->bootstrapper()->IsActive()) {
- Handle<Code> code;
- DCHECK(
- // During bootstrapping some of these maps could be not created yet.
- (*map == context->get(Context::STRICT_FUNCTION_MAP_INDEX)) ||
- (*map ==
- context->get(Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)) ||
- (*map ==
- context->get(
- Context::STRICT_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX)) ||
- // Check if it's a creation of an empty or Proxy function during
- // bootstrapping.
- (args.maybe_builtin_id_ == Builtins::kEmptyFunction ||
- args.maybe_builtin_id_ == Builtins::kProxyConstructor));
- }
-#endif
-
- Handle<JSFunction> result = NewFunction(map, info, context);
-
- if (args.should_set_prototype_) {
- result->set_prototype_or_initial_map(
- *args.maybe_prototype_.ToHandleChecked());
- }
-
- if (args.should_set_language_mode_) {
- result->shared().set_language_mode(args.language_mode_);
- }
-
- if (args.should_create_and_set_initial_map_) {
- ElementsKind elements_kind;
- switch (args.type_) {
- case JS_ARRAY_TYPE:
- elements_kind = PACKED_SMI_ELEMENTS;
- break;
- case JS_ARGUMENTS_OBJECT_TYPE:
- elements_kind = PACKED_ELEMENTS;
- break;
- default:
- elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
- break;
- }
- Handle<Map> initial_map = NewMap(args.type_, args.instance_size_,
- elements_kind, args.inobject_properties_);
- result->shared().set_expected_nof_properties(args.inobject_properties_);
- // TODO(littledan): Why do we have this is_generator test when
- // NewFunctionPrototype already handles finding an appropriately
- // shared prototype?
- Handle<HeapObject> prototype = args.maybe_prototype_.ToHandleChecked();
- if (!IsResumableFunction(result->shared().kind())) {
- if (prototype->IsTheHole(isolate())) {
- prototype = NewFunctionPrototype(result);
- }
- }
- JSFunction::SetInitialMap(result, initial_map, prototype);
- }
-
- return result;
-}
-
Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
// Make sure to use globals from the function's context, since the function
// can be from a different context.
@@ -1970,71 +1849,6 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
return prototype;
}
-Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> info, Handle<Context> context,
- AllocationType allocation) {
- Handle<Map> initial_map(
- Map::cast(context->native_context().get(info->function_map_index())),
- isolate());
- return NewFunctionFromSharedFunctionInfo(initial_map, info, context,
- allocation);
-}
-
-Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> info, Handle<Context> context,
- Handle<FeedbackCell> feedback_cell, AllocationType allocation) {
- Handle<Map> initial_map(
- Map::cast(context->native_context().get(info->function_map_index())),
- isolate());
- return NewFunctionFromSharedFunctionInfo(initial_map, info, context,
- feedback_cell, allocation);
-}
-
-Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
- Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
- Handle<Context> context, AllocationType allocation) {
- DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
- Handle<JSFunction> result =
- NewFunction(initial_map, info, context, allocation);
-
- // Give compiler a chance to pre-initialize.
- Compiler::PostInstantiation(result);
-
- return result;
-}
-
-Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
- Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
- Handle<Context> context, Handle<FeedbackCell> feedback_cell,
- AllocationType allocation) {
- DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
- Handle<JSFunction> result =
- NewFunction(initial_map, info, context, allocation);
-
- // Bump the closure count that is encoded in the feedback cell's map.
- if (feedback_cell->map() == *no_closures_cell_map()) {
- feedback_cell->set_map(*one_closure_cell_map());
- } else if (feedback_cell->map() == *one_closure_cell_map()) {
- feedback_cell->set_map(*many_closures_cell_map());
- } else {
- DCHECK(feedback_cell->map() == *many_closures_cell_map());
- }
-
- // Check that the optimized code in the feedback cell wasn't marked for
- // deoptimization while not pointed to by any live JSFunction.
- if (feedback_cell->value().IsFeedbackVector()) {
- FeedbackVector::cast(feedback_cell->value())
- .EvictOptimizedCodeMarkedForDeoptimization(
- *info, "new function from shared function info");
- }
- result->set_raw_feedback_cell(*feedback_cell);
-
- // Give compiler a chance to pre-initialize.
- Compiler::PostInstantiation(result);
-
- return result;
-}
-
Handle<JSObject> Factory::NewExternal(void* value) {
Handle<Foreign> foreign = NewForeign(reinterpret_cast<Address>(value));
Handle<JSObject> external = NewJSObjectFromMap(external_map());
@@ -2063,8 +1877,13 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Builtins::CodeObjectIsExecutable(code->builtin_index());
Handle<Code> result = Builtins::GenerateOffHeapTrampolineFor(
isolate(), off_heap_entry,
- code->code_data_container().kind_specific_flags(),
+ code->code_data_container(kAcquireLoad).kind_specific_flags(),
generate_jump_to_instruction_stream);
+
+ // Trampolines may not contain any metadata since all metadata offsets,
+ // stored on the Code object, refer to the off-heap metadata area.
+ CHECK_EQ(result->raw_metadata_size(), 0);
+
// The CodeDataContainer should not be modified beyond this point since it's
// now possibly canonicalized.
@@ -2076,14 +1895,13 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
const bool set_is_off_heap_trampoline = true;
const int stack_slots =
code->has_safepoint_info() ? code->stack_slots() : 0;
- result->initialize_flags(code->kind(), code->has_unwinding_info(),
- code->is_turbofanned(), stack_slots,
+ result->initialize_flags(code->kind(), code->is_turbofanned(), stack_slots,
set_is_off_heap_trampoline);
result->set_builtin_index(code->builtin_index());
- result->set_safepoint_table_offset(code->safepoint_table_offset());
result->set_handler_table_offset(code->handler_table_offset());
result->set_constant_pool_offset(code->constant_pool_offset());
result->set_code_comments_offset(code->code_comments_offset());
+ result->set_unwinding_info_offset(code->unwinding_info_offset());
// Replace the newly generated trampoline's RelocInfo ByteArray with the
// canonical one stored in the roots to avoid duplicating it for every
@@ -2108,7 +1926,8 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Handle<Code> Factory::CopyCode(Handle<Code> code) {
Handle<CodeDataContainer> data_container = NewCodeDataContainer(
- code->code_data_container().kind_specific_flags(), AllocationType::kOld);
+ code->code_data_container(kAcquireLoad).kind_specific_flags(),
+ AllocationType::kOld);
Heap* heap = isolate()->heap();
Handle<Code> new_code;
@@ -2116,8 +1935,7 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
int obj_size = code->Size();
CodePageCollectionMemoryModificationScope code_allocation(heap);
HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>(
- obj_size, AllocationType::kCode, AllocationOrigin::kRuntime,
- AllocationAlignment::kCodeAligned);
+ obj_size, AllocationType::kCode, AllocationOrigin::kRuntime);
// Copy code object.
Address old_addr = code->address();
@@ -2126,7 +1944,7 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
new_code = handle(Code::cast(result), isolate());
// Set the {CodeDataContainer}, it cannot be shared.
- new_code->set_code_data_container(*data_container);
+ new_code->set_code_data_container(*data_container, kReleaseStore);
new_code->Relocate(new_addr - old_addr);
// We have to iterate over the object and process its pointers when black
@@ -2143,6 +1961,7 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
#endif
DCHECK(IsAligned(new_code->address(), kCodeAlignment));
DCHECK_IMPLIES(
+ !V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
!heap->memory_allocator()->code_range().is_empty(),
heap->memory_allocator()->code_range().contains(new_code->address()));
return new_code;
@@ -2162,8 +1981,8 @@ Handle<BytecodeArray> Factory::CopyBytecodeArray(
bytecode_array->incoming_new_target_or_generator_register());
copy->set_constant_pool(bytecode_array->constant_pool());
copy->set_handler_table(bytecode_array->handler_table());
- copy->set_synchronized_source_position_table(
- bytecode_array->synchronized_source_position_table());
+ copy->set_source_position_table(
+ bytecode_array->source_position_table(kAcquireLoad), kReleaseStore);
copy->set_osr_loop_nesting_level(bytecode_array->osr_loop_nesting_level());
copy->set_bytecode_age(bytecode_array->bytecode_age());
bytecode_array->CopyBytecodesTo(*copy);
@@ -2214,7 +2033,8 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
// The global object might be created from an object template with accessors.
// Fill these accessors into the dictionary.
- Handle<DescriptorArray> descs(map->instance_descriptors(), isolate());
+ Handle<DescriptorArray> descs(map->instance_descriptors(kRelaxedLoad),
+ isolate());
for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
// Only accessors are expected.
@@ -2696,6 +2516,7 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
Handle<JSTypedArray> typed_array =
Handle<JSTypedArray>::cast(NewJSArrayBufferView(
map, empty_byte_array(), buffer, byte_offset, byte_length));
+ typed_array->AllocateExternalPointerEntries(isolate());
typed_array->set_length(length);
typed_array->SetOffHeapDataPtr(isolate(), buffer->backing_store(),
byte_offset);
@@ -2709,6 +2530,7 @@ Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer,
isolate());
Handle<JSDataView> obj = Handle<JSDataView>::cast(NewJSArrayBufferView(
map, empty_fixed_array(), buffer, byte_offset, byte_length));
+ obj->AllocateExternalPointerEntries(isolate());
obj->set_data_pointer(
isolate(), static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
return obj;
@@ -3054,8 +2876,7 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
debug_info->set_shared(*shared);
debug_info->set_debugger_hints(0);
DCHECK_EQ(DebugInfo::kNoDebuggingId, debug_info->debugging_id());
- DCHECK(!shared->HasDebugInfo());
- debug_info->set_script(shared->script_or_debug_info());
+ debug_info->set_script(shared->script_or_debug_info(kAcquireLoad));
debug_info->set_original_bytecode_array(
ReadOnlyRoots(heap).undefined_value());
debug_info->set_debug_bytecode_array(ReadOnlyRoots(heap).undefined_value());
@@ -3463,7 +3284,8 @@ Handle<Map> Factory::CreateSloppyFunctionMap(
map->AppendDescriptor(isolate(), &d);
}
DCHECK_EQ(inobject_properties_count, field_index);
- DCHECK_EQ(0, map->instance_descriptors().number_of_slack_descriptors());
+ DCHECK_EQ(
+ 0, map->instance_descriptors(kRelaxedLoad).number_of_slack_descriptors());
LOG(isolate(), MapDetails(*map));
return map;
}
@@ -3546,7 +3368,8 @@ Handle<Map> Factory::CreateStrictFunctionMap(
map->AppendDescriptor(isolate(), &d);
}
DCHECK_EQ(inobject_properties_count, field_index);
- DCHECK_EQ(0, map->instance_descriptors().number_of_slack_descriptors());
+ DCHECK_EQ(
+ 0, map->instance_descriptors(kRelaxedLoad).number_of_slack_descriptors());
LOG(isolate(), MapDetails(*map));
return map;
}
@@ -3774,5 +3597,217 @@ Handle<Map> NewFunctionArgs::GetMap(Isolate* isolate) const {
UNREACHABLE();
}
+Handle<JSFunction> Factory::NewFunctionForTest(Handle<String> name) {
+ NewFunctionArgs args = NewFunctionArgs::ForFunctionWithoutCode(
+ name, isolate()->sloppy_function_map(), LanguageMode::kSloppy);
+ Handle<JSFunction> result = NewFunction(args);
+ DCHECK(is_sloppy(result->shared().language_mode()));
+ return result;
+}
+
+Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
+ DCHECK(!args.name_.is_null());
+
+ // Create the SharedFunctionInfo.
+ Handle<NativeContext> context(isolate()->native_context());
+ Handle<Map> map = args.GetMap(isolate());
+ Handle<SharedFunctionInfo> info =
+ NewSharedFunctionInfo(args.name_, args.maybe_wasm_function_data_,
+ args.maybe_builtin_id_, kNormalFunction);
+
+ // Proper language mode in shared function info will be set later.
+ DCHECK(is_sloppy(info->language_mode()));
+ DCHECK(!map->IsUndefined(isolate()));
+
+ if (args.should_set_language_mode_) {
+ info->set_language_mode(args.language_mode_);
+ }
+
+#ifdef DEBUG
+ if (isolate()->bootstrapper()->IsActive()) {
+ Handle<Code> code;
+ DCHECK(
+      // During bootstrapping some of these maps might not be created yet.
+ (*map == context->get(Context::STRICT_FUNCTION_MAP_INDEX)) ||
+ (*map ==
+ context->get(Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)) ||
+ (*map ==
+ context->get(
+ Context::STRICT_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX)) ||
+ // Check if it's a creation of an empty or Proxy function during
+ // bootstrapping.
+ (args.maybe_builtin_id_ == Builtins::kEmptyFunction ||
+ args.maybe_builtin_id_ == Builtins::kProxyConstructor));
+ }
+#endif
+
+ Handle<JSFunction> result =
+ JSFunctionBuilder{isolate(), info, context}.set_map(map).Build();
+
+ // Both of these write to `prototype_or_initial_map`.
+ // TODO(jgruber): Fix callsites and enable the DCHECK.
+ // DCHECK(!args.should_set_prototype_ ||
+ // !args.should_create_and_set_initial_map_);
+ if (args.should_set_prototype_) {
+ result->set_prototype_or_initial_map(
+ *args.maybe_prototype_.ToHandleChecked());
+ }
+
+ if (args.should_create_and_set_initial_map_) {
+ ElementsKind elements_kind;
+ switch (args.type_) {
+ case JS_ARRAY_TYPE:
+ elements_kind = PACKED_SMI_ELEMENTS;
+ break;
+ case JS_ARGUMENTS_OBJECT_TYPE:
+ elements_kind = PACKED_ELEMENTS;
+ break;
+ default:
+ elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
+ break;
+ }
+ Handle<Map> initial_map = NewMap(args.type_, args.instance_size_,
+ elements_kind, args.inobject_properties_);
+ result->shared().set_expected_nof_properties(args.inobject_properties_);
+ // TODO(littledan): Why do we have this is_generator test when
+ // NewFunctionPrototype already handles finding an appropriately
+ // shared prototype?
+ Handle<HeapObject> prototype = args.maybe_prototype_.ToHandleChecked();
+ if (!IsResumableFunction(result->shared().kind())) {
+ if (prototype->IsTheHole(isolate())) {
+ prototype = NewFunctionPrototype(result);
+ }
+ }
+ JSFunction::SetInitialMap(result, initial_map, prototype);
+ }
+
+ return result;
+}
+
+Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
+ Handle<SharedFunctionInfo> info,
+ Handle<Context> context,
+ AllocationType allocation) {
+ // TODO(jgruber): Remove this function.
+ return JSFunctionBuilder{isolate(), info, context}
+ .set_map(map)
+ .set_allocation_type(allocation)
+ .Build();
+}
+
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+ Handle<SharedFunctionInfo> info, Handle<Context> context,
+ AllocationType allocation) {
+ // TODO(jgruber): Remove this function.
+ return JSFunctionBuilder{isolate(), info, context}
+ .set_allocation_type(allocation)
+ .Build();
+}
+
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+ Handle<SharedFunctionInfo> info, Handle<Context> context,
+ Handle<FeedbackCell> feedback_cell, AllocationType allocation) {
+ // TODO(jgruber): Remove this function.
+ return JSFunctionBuilder{isolate(), info, context}
+ .set_feedback_cell(feedback_cell)
+ .Build();
+}
+
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+ Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
+ Handle<Context> context, AllocationType allocation) {
+ // TODO(jgruber): Remove this function.
+ return JSFunctionBuilder{isolate(), info, context}
+ .set_map(initial_map)
+ .set_allocation_type(allocation)
+ .Build();
+}
+
+Factory::JSFunctionBuilder::JSFunctionBuilder(Isolate* isolate,
+ Handle<SharedFunctionInfo> sfi,
+ Handle<Context> context)
+ : isolate_(isolate), sfi_(sfi), context_(context) {}
+
+Handle<JSFunction> Factory::JSFunctionBuilder::Build() {
+ PrepareMap();
+ PrepareFeedbackCell();
+
+ // Determine the associated Code object.
+ Handle<Code> code;
+ const bool have_cached_code =
+ sfi_->TryGetCachedCode(isolate_).ToHandle(&code);
+ if (!have_cached_code) code = handle(sfi_->GetCode(), isolate_);
+
+ Handle<JSFunction> result = BuildRaw(code);
+
+ if (have_cached_code) {
+ IsCompiledScope is_compiled_scope(sfi_->is_compiled_scope(isolate_));
+ JSFunction::EnsureFeedbackVector(result, &is_compiled_scope);
+ if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceHit(sfi_, code);
+ }
+
+ Compiler::PostInstantiation(result);
+ return result;
+}
+
+Handle<JSFunction> Factory::JSFunctionBuilder::BuildRaw(Handle<Code> code) {
+ Isolate* isolate = isolate_;
+ Factory* factory = isolate_->factory();
+
+ Handle<Map> map = maybe_map_.ToHandleChecked();
+ Handle<FeedbackCell> feedback_cell = maybe_feedback_cell_.ToHandleChecked();
+
+ DCHECK_EQ(JS_FUNCTION_TYPE, map->instance_type());
+
+ // Allocation.
+ Handle<JSFunction> function(
+ JSFunction::cast(factory->New(map, allocation_type_)), isolate);
+
+ // Header initialization.
+ function->initialize_properties(isolate);
+ function->initialize_elements();
+ function->set_shared(*sfi_);
+ function->set_context(*context_);
+ function->set_raw_feedback_cell(*feedback_cell);
+ function->set_code(*code);
+ if (map->has_prototype_slot()) {
+ function->set_prototype_or_initial_map(
+ ReadOnlyRoots(isolate).the_hole_value());
+ }
+
+  // Potential body initialization.
+ factory->InitializeJSObjectBody(
+ function, map, JSFunction::GetHeaderSize(map->has_prototype_slot()));
+
+ return function;
+}
+
+void Factory::JSFunctionBuilder::PrepareMap() {
+ if (maybe_map_.is_null()) {
+ // No specific map requested, use the default.
+ maybe_map_ = handle(
+ Map::cast(context_->native_context().get(sfi_->function_map_index())),
+ isolate_);
+ }
+}
+
+void Factory::JSFunctionBuilder::PrepareFeedbackCell() {
+ Handle<FeedbackCell> feedback_cell;
+ if (maybe_feedback_cell_.ToHandle(&feedback_cell)) {
+ // Track the newly-created closure, and check that the optimized code in
+ // the feedback cell wasn't marked for deoptimization while not pointed to
+ // by any live JSFunction.
+ feedback_cell->IncrementClosureCount(isolate_);
+ if (feedback_cell->value().IsFeedbackVector()) {
+ FeedbackVector::cast(feedback_cell->value())
+ .EvictOptimizedCodeMarkedForDeoptimization(
+ *sfi_, "new function from shared function info");
+ }
+ } else {
+ // Fall back to the many_closures_cell.
+ maybe_feedback_cell_ = isolate_->factory()->many_closures_cell();
+ }
+}
+
} // namespace internal
} // namespace v8
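
For orientation, a minimal usage sketch of the new builder (illustrative only, not part of this patch; `isolate`, `sfi`, `context` and `cell` are assumed to be valid handles in the caller's scope). The deprecated NewFunction* wrappers above simply forward to these fluent calls:

    // Sketch; all four names below are assumed to exist in the caller's scope.
    Handle<JSFunction> fn =
        Factory::JSFunctionBuilder{isolate, sfi, context}
            .set_feedback_cell(cell)                      // default: many_closures_cell
            .set_allocation_type(AllocationType::kYoung)  // default: kOld
            .Build();
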
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 05f271e3a2..350a400035 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -18,7 +18,9 @@
#include "src/objects/dictionary.h"
#include "src/objects/js-array.h"
#include "src/objects/js-regexp.h"
+#include "src/objects/shared-function-info.h"
#include "src/objects/string.h"
+#include "torque-generated/class-forward-declarations.h"
namespace v8 {
namespace internal {
@@ -69,6 +71,11 @@ class WasmCapiFunctionData;
class WasmExportedFunctionData;
class WasmJSFunctionData;
class WeakCell;
+
+namespace wasm {
+class ValueType;
+} // namespace wasm
+
enum class SharedFlag : uint8_t;
enum class InitializedFlag : uint8_t;
@@ -114,6 +121,10 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
#include "torque-generated/factory.inc"
+  // Prevent the Torque-generated factory function from shadowing the one
+  // from FactoryBase.
+ using FactoryBase::NewDescriptorArray;
+
Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number, const char* type_of,
byte kind);
@@ -157,10 +168,17 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<FrameArray> NewFrameArray(int number_of_frames);
+ // Allocates a |NameDictionary| with an internal capacity calculated such that
+ // |at_least_space_for| entries can be added without reallocating.
+ Handle<NameDictionary> NewNameDictionary(int at_least_space_for);
+
+ // Allocates an |OrderedNameDictionary| of the given capacity. This guarantees
+ // that |capacity| entries can be added without reallocating.
+ Handle<OrderedNameDictionary> NewOrderedNameDictionary(
+ int capacity = OrderedNameDictionary::kInitialCapacity);
+
Handle<OrderedHashSet> NewOrderedHashSet();
Handle<OrderedHashMap> NewOrderedHashMap();
- Handle<OrderedNameDictionary> NewOrderedNameDictionary();
-
Handle<SmallOrderedHashSet> NewSmallOrderedHashSet(
int capacity = kSmallOrderedHashSetMinCapacity,
AllocationType allocation = AllocationType::kYoung);
@@ -614,11 +632,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Function creation from SharedFunctionInfo.
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
- Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
- Handle<Context> context, Handle<FeedbackCell> feedback_cell,
- AllocationType allocation = AllocationType::kOld);
-
- Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info, Handle<Context> context,
Handle<FeedbackCell> feedback_cell,
AllocationType allocation = AllocationType::kOld);
@@ -792,6 +805,43 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
return New(map, allocation);
}
+ // Helper class for creating JSFunction objects.
+ class JSFunctionBuilder final {
+ public:
+ JSFunctionBuilder(Isolate* isolate, Handle<SharedFunctionInfo> sfi,
+ Handle<Context> context);
+
+ V8_WARN_UNUSED_RESULT Handle<JSFunction> Build();
+
+ JSFunctionBuilder& set_map(Handle<Map> v) {
+ maybe_map_ = v;
+ return *this;
+ }
+ JSFunctionBuilder& set_allocation_type(AllocationType v) {
+ allocation_type_ = v;
+ return *this;
+ }
+ JSFunctionBuilder& set_feedback_cell(Handle<FeedbackCell> v) {
+ maybe_feedback_cell_ = v;
+ return *this;
+ }
+
+ private:
+ void PrepareMap();
+ void PrepareFeedbackCell();
+
+ V8_WARN_UNUSED_RESULT Handle<JSFunction> BuildRaw(Handle<Code> code);
+
+ Isolate* const isolate_;
+ Handle<SharedFunctionInfo> sfi_;
+ Handle<Context> context_;
+ MaybeHandle<Map> maybe_map_;
+ MaybeHandle<FeedbackCell> maybe_feedback_cell_;
+ AllocationType allocation_type_ = AllocationType::kOld;
+
+ friend class Factory;
+ };
+
// Allows creation of Code objects. It provides two build methods, one of
// which tries to gracefully handle allocation failure.
class V8_EXPORT_PRIVATE CodeBuilder final {
@@ -837,11 +887,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
return *this;
}
- CodeBuilder& set_immovable() {
- is_movable_ = false;
- return *this;
- }
-
CodeBuilder& set_is_turbofanned() {
is_turbofanned_ = true;
return *this;
@@ -888,7 +933,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
BasicBlockProfilerData* profiler_data_ = nullptr;
bool is_executable_ = true;
bool read_only_data_container_ = false;
- bool is_movable_ = true;
bool is_turbofanned_ = false;
int stack_slots_ = 0;
};
diff --git a/deps/v8/src/heap/free-list.cc b/deps/v8/src/heap/free-list.cc
index e9bf77d171..80b4a4f01f 100644
--- a/deps/v8/src/heap/free-list.cc
+++ b/deps/v8/src/heap/free-list.cc
@@ -418,50 +418,6 @@ FreeSpace FreeListManyCachedOrigin::Allocate(size_t size_in_bytes,
}
// ------------------------------------------------
-// FreeListMap implementation
-
-FreeListMap::FreeListMap() {
- // Initializing base (FreeList) fields
- number_of_categories_ = 1;
- last_category_ = kOnlyCategory;
- min_block_size_ = kMinBlockSize;
- categories_ = new FreeListCategory*[number_of_categories_]();
-
- Reset();
-}
-
-size_t FreeListMap::GuaranteedAllocatable(size_t maximum_freed) {
- return maximum_freed;
-}
-
-Page* FreeListMap::GetPageForSize(size_t size_in_bytes) {
- return GetPageForCategoryType(kOnlyCategory);
-}
-
-FreeListMap::~FreeListMap() { delete[] categories_; }
-
-FreeSpace FreeListMap::Allocate(size_t size_in_bytes, size_t* node_size,
- AllocationOrigin origin) {
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
-
-  // The following DCHECK ensures that maps are allocated one by one (i.e.,
- // without folding). This assumption currently holds. However, if it were to
- // become untrue in the future, you'll get an error here. To fix it, I would
- // suggest removing the DCHECK, and replacing TryFindNodeIn by
- // SearchForNodeInList below.
- DCHECK_EQ(size_in_bytes, Map::kSize);
-
- FreeSpace node = TryFindNodeIn(kOnlyCategory, size_in_bytes, node_size);
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK_IMPLIES(node.is_null(), IsEmpty());
- return node;
-}
-
-// ------------------------------------------------
// Generic FreeList methods (non alloc/free related)
void FreeList::Reset() {
diff --git a/deps/v8/src/heap/free-list.h b/deps/v8/src/heap/free-list.h
index 25bba59836..afa23e051a 100644
--- a/deps/v8/src/heap/free-list.h
+++ b/deps/v8/src/heap/free-list.h
@@ -488,31 +488,6 @@ class V8_EXPORT_PRIVATE FreeListManyCachedOrigin
AllocationOrigin origin) override;
};
-// FreeList for maps: since maps are all the same size, uses a single freelist.
-class V8_EXPORT_PRIVATE FreeListMap : public FreeList {
- public:
- size_t GuaranteedAllocatable(size_t maximum_freed) override;
-
- Page* GetPageForSize(size_t size_in_bytes) override;
-
- FreeListMap();
- ~FreeListMap() override;
-
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) override;
-
- private:
- static const size_t kMinBlockSize = Map::kSize;
- static const size_t kMaxBlockSize = MemoryChunk::kPageSize;
- static const FreeListCategoryType kOnlyCategory = 0;
-
- FreeListCategoryType SelectFreeListCategoryType(
- size_t size_in_bytes) override {
- return kOnlyCategory;
- }
-};
-
} // namespace internal
} // namespace v8
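
The FreeListMap removed above was a single-category free list over equal-sized blocks (Map::kSize each), so allocation reduced to popping one node. A standalone sketch of that structure, under the same fixed-block-size assumption (plain C++, not V8 code):

    #include <cstddef>

    struct FreeNode { FreeNode* next; };

    // One category only: every block has the same size, so no size-class
    // selection is needed and allocation is a constant-time list pop.
    class FixedSizeFreeList {
     public:
      explicit FixedSizeFreeList(size_t block_size) : block_size_(block_size) {}
      void Free(void* block) {
        auto* node = static_cast<FreeNode*>(block);
        node->next = head_;
        head_ = node;
      }
      void* Allocate(size_t size) {
        if (size != block_size_ || head_ == nullptr) return nullptr;
        FreeNode* node = head_;
        head_ = node->next;
        return node;
      }
     private:
      const size_t block_size_;
      FreeNode* head_ = nullptr;
    };
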
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index cc10d92f33..0469748c4e 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -576,7 +576,6 @@ void GCTracer::PrintNVP() const {
"fast_promote=%.2f "
"complete.sweep_array_buffers=%.2f "
"scavenge=%.2f "
- "scavenge.process_array_buffers=%.2f "
"scavenge.free_remembered_set=%.2f "
"scavenge.roots=%.2f "
"scavenge.weak=%.2f "
@@ -617,10 +616,9 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::HEAP_EXTERNAL_PROLOGUE],
current_.scopes[Scope::HEAP_EXTERNAL_EPILOGUE],
current_.scopes[Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES],
- current_.scopes[Scope::SCAVENGER_SWEEP_ARRAY_BUFFERS],
current_.scopes[Scope::SCAVENGER_FAST_PROMOTE],
+ current_.scopes[Scope::SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS],
current_.scopes[Scope::SCAVENGER_SCAVENGE],
- current_.scopes[Scope::SCAVENGER_PROCESS_ARRAY_BUFFERS],
current_.scopes[Scope::SCAVENGER_FREE_REMEMBERED_SET],
current_.scopes[Scope::SCAVENGER_SCAVENGE_ROOTS],
current_.scopes[Scope::SCAVENGER_SCAVENGE_WEAK],
@@ -1229,22 +1227,27 @@ void GCTracer::RecordGCPhasesHistograms(TimedHistogram* gc_timer) {
heap_->isolate()->counters()->gc_marking_sum()->AddSample(
static_cast<int>(overall_marking_time));
+  // Filter out samples where:
+  // - we don't have high-resolution timers;
+  // - the size of marked objects is very small;
+  // - the marking time is rounded to 0.
constexpr size_t kMinObjectSizeForReportingThroughput = 1024 * 1024;
if (base::TimeTicks::IsHighResolution() &&
- heap_->SizeOfObjects() > kMinObjectSizeForReportingThroughput) {
- DCHECK_GT(overall_marking_time, 0.0);
+ heap_->SizeOfObjects() > kMinObjectSizeForReportingThroughput &&
+ overall_marking_time > 0) {
const double overall_v8_marking_time =
overall_marking_time -
current_.scopes[Scope::MC_MARK_EMBEDDER_TRACING];
- DCHECK_GT(overall_v8_marking_time, 0.0);
- const int main_thread_marking_throughput_mb_per_s =
- static_cast<int>(static_cast<double>(heap_->SizeOfObjects()) /
- overall_v8_marking_time * 1000 / 1024 / 1024);
- heap_->isolate()
- ->counters()
- ->gc_main_thread_marking_throughput()
- ->AddSample(
- static_cast<int>(main_thread_marking_throughput_mb_per_s));
+ if (overall_v8_marking_time > 0) {
+ const int main_thread_marking_throughput_mb_per_s =
+ static_cast<int>(static_cast<double>(heap_->SizeOfObjects()) /
+ overall_v8_marking_time * 1000 / 1024 / 1024);
+ heap_->isolate()
+ ->counters()
+ ->gc_main_thread_marking_throughput()
+ ->AddSample(
+ static_cast<int>(main_thread_marking_throughput_mb_per_s));
+ }
}
DCHECK_EQ(Scope::LAST_TOP_MC_SCOPE, Scope::MC_SWEEP);
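
The scope timers above record milliseconds, so the throughput expression divides bytes by milliseconds, multiplies by 1000 to get bytes per second, then divides by 1024 twice for MB/s. A standalone check of the arithmetic, with made-up numbers:

    #include <cstdio>

    int main() {
      double marked_bytes = 256.0 * 1024 * 1024;  // pretend 256 MB were marked
      double marking_ms = 2000.0;                 // in 2000 ms
      int mb_per_s =
          static_cast<int>(marked_bytes / marking_ms * 1000 / 1024 / 1024);
      std::printf("%d MB/s\n", mb_per_s);  // prints "128 MB/s"
      return 0;
    }
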
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index d4dc7e2b8c..fe4f2b18bc 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -171,8 +171,8 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(AllowGarbageCollection::IsAllowed());
- DCHECK_IMPLIES(type == AllocationType::kCode,
- alignment == AllocationAlignment::kCodeAligned);
+ DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap,
+ alignment == AllocationAlignment::kWordAligned);
DCHECK_EQ(gc_state(), NOT_IN_GC);
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
@@ -185,10 +185,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
IncrementObjectCounters();
#endif
- size_t large_object_threshold =
- AllocationType::kCode == type
- ? std::min(kMaxRegularHeapObjectSize, code_space()->AreaSize())
- : kMaxRegularHeapObjectSize;
+ size_t large_object_threshold = MaxRegularHeapObjectSize(type);
bool large_object =
static_cast<size_t>(size_in_bytes) > large_object_threshold;
@@ -223,6 +220,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
allocation = old_space_->AllocateRaw(size_in_bytes, alignment, origin);
}
} else if (AllocationType::kCode == type) {
+ DCHECK(AllowCodeAllocation::IsAllowed());
if (large_object) {
allocation = code_lo_space_->AllocateRaw(size_in_bytes);
} else {
@@ -231,7 +229,6 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
} else if (AllocationType::kMap == type) {
allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
} else if (AllocationType::kReadOnly == type) {
- DCHECK(isolate_->serializer_enabled());
DCHECK(!large_object);
DCHECK(CanAllocateInReadOnlySpace());
DCHECK_EQ(AllocationOrigin::kRuntime, origin);
@@ -275,27 +272,24 @@ HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(AllowGarbageCollection::IsAllowed());
- if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- AllocationResult result = AllocateRaw(size, allocation, origin, alignment);
- DCHECK(!result.IsRetry());
- return result.ToObjectChecked();
- }
DCHECK_EQ(gc_state(), NOT_IN_GC);
Heap* heap = isolate()->heap();
- Address* top = heap->NewSpaceAllocationTopAddress();
- Address* limit = heap->NewSpaceAllocationLimitAddress();
- if (allocation == AllocationType::kYoung &&
+ if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
+ allocation == AllocationType::kYoung &&
alignment == AllocationAlignment::kWordAligned &&
- size <= kMaxRegularHeapObjectSize &&
- (*limit - *top >= static_cast<unsigned>(size)) &&
- V8_LIKELY(!FLAG_single_generation && FLAG_inline_new &&
- FLAG_gc_interval == 0)) {
- DCHECK(IsAligned(size, kTaggedSize));
- HeapObject obj = HeapObject::FromAddress(*top);
- *top += size;
- heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size);
- return obj;
+ size <= MaxRegularHeapObjectSize(allocation)) {
+ Address* top = heap->NewSpaceAllocationTopAddress();
+ Address* limit = heap->NewSpaceAllocationLimitAddress();
+ if ((*limit - *top >= static_cast<unsigned>(size)) &&
+ V8_LIKELY(!FLAG_single_generation && FLAG_inline_new &&
+ FLAG_gc_interval == 0)) {
+ DCHECK(IsAligned(size, kTaggedSize));
+ HeapObject obj = HeapObject::FromAddress(*top);
+ *top += size;
+ heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size);
+ return obj;
+ }
}
switch (mode) {
case kLightRetry:
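
The fast path hoisted under the new MaxRegularHeapObjectSize check is ordinary bump-pointer allocation: if the linear allocation buffer has room, the object is carved off by advancing `top`. A standalone sketch of the idea (not V8 code):

    #include <cstddef>
    #include <cstdint>

    struct LinearBuffer {
      uintptr_t top;    // next free address; invariant: top <= limit
      uintptr_t limit;  // end of the buffer
    };

    inline void* TryBumpAllocate(LinearBuffer* lab, size_t size) {
      if (lab->limit - lab->top < size) return nullptr;  // take the slow path
      void* result = reinterpret_cast<void*>(lab->top);
      lab->top += size;  // the allocation is just a pointer bump
      return result;
    }
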
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
index 79265c4db6..6c5fccb551 100644
--- a/deps/v8/src/heap/heap-write-barrier-inl.h
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -191,6 +191,7 @@ inline bool ObjectInYoungGeneration(Object object) {
}
inline bool IsReadOnlyHeapObject(HeapObject object) {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return ReadOnlyHeap::Contains(object);
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
return chunk->InReadOnlySpace();
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 4506ed71aa..e818600d5b 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -35,6 +35,7 @@
#include "src/heap/base/stack.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/code-stats.h"
+#include "src/heap/collection-barrier.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-allocator.h"
#include "src/heap/concurrent-marking.h"
@@ -195,7 +196,7 @@ Heap::Heap()
global_pretenuring_feedback_(kInitialFeedbackCapacity),
safepoint_(new GlobalSafepoint(this)),
external_string_table_(this),
- collection_barrier_(this) {
+ collection_barrier_(new CollectionBarrier(this)) {
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK_EQ(0, max_old_generation_size() & (Page::kPageSize - 1));
@@ -395,7 +396,7 @@ size_t Heap::Available() {
}
bool Heap::CanExpandOldGeneration(size_t size) {
- if (force_oom_) return false;
+ if (force_oom_ || force_gc_on_next_allocation_) return false;
if (OldGenerationCapacity() + size > max_old_generation_size()) return false;
  // The OldGenerationCapacity does not account for compaction spaces used
// during evacuation. Ensure that expanding the old generation does push
@@ -1095,7 +1096,8 @@ void Heap::DeoptMarkedAllocationSites() {
void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
if (collector == MARK_COMPACTOR) {
- memory_pressure_level_ = MemoryPressureLevel::kNone;
+ memory_pressure_level_.store(MemoryPressureLevel::kNone,
+ std::memory_order_relaxed);
}
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_SAFEPOINT);
@@ -1151,6 +1153,9 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
ReduceNewSpaceSize();
}
+
+ // Resume all threads waiting for the GC.
+ collection_barrier_->ResumeThreadsAwaitingCollection();
}
void Heap::GarbageCollectionEpilogue() {
@@ -1212,6 +1217,8 @@ void Heap::HandleGCRequest() {
} else if (HighMemoryPressure()) {
incremental_marking()->reset_request_type();
CheckMemoryPressure();
+ } else if (CollectionRequested()) {
+ CheckCollectionRequested();
} else if (incremental_marking()->request_type() ==
IncrementalMarking::COMPLETE_MARKING) {
incremental_marking()->reset_request_type();
@@ -1502,16 +1509,14 @@ bool Heap::CollectGarbage(AllocationSpace space,
const char* collector_reason = nullptr;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
- current_gc_flags_ & kForcedGC;
+ current_gc_flags_ & kForcedGC ||
+ force_gc_on_next_allocation_;
+ if (force_gc_on_next_allocation_) force_gc_on_next_allocation_ = false;
DevToolsTraceEventScope devtools_trace_event_scope(
this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
GarbageCollectionReasonToString(gc_reason));
- if (!CanPromoteYoungAndExpandOldGeneration(0)) {
- InvokeNearHeapLimitCallback();
- }
-
  // Filter on-stack references below this method.
isolate()
->global_handles()
@@ -1678,8 +1683,6 @@ bool Heap::CollectGarbage(AllocationSpace space,
isolate()->CountUsage(v8::Isolate::kForcedGC);
}
- collection_barrier_.CollectionPerformed();
-
// Start incremental marking for the next cycle. We do this only for scavenger
// to avoid a loop where mark-compact causes another mark-compact.
if (IsYoungGenerationCollector(collector)) {
@@ -1688,6 +1691,13 @@ bool Heap::CollectGarbage(AllocationSpace space,
kGCCallbackScheduleIdleGarbageCollection);
}
+ if (!CanExpandOldGeneration(0)) {
+ InvokeNearHeapLimitCallback();
+ if (!CanExpandOldGeneration(0)) {
+ FatalProcessOutOfMemory("Reached heap limit");
+ }
+ }
+
return freed_global_handles > 0;
}
@@ -1696,7 +1706,7 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
if (!dependant_context) {
tracer()->ResetSurvivalEvents();
old_generation_size_configured_ = false;
- old_generation_allocation_limit_ = initial_old_generation_size_;
+ set_old_generation_allocation_limit(initial_old_generation_size_);
MemoryReducer::Event event;
event.type = MemoryReducer::kPossibleGarbage;
event.time_ms = MonotonicallyIncreasingTimeInMs();
@@ -1878,125 +1888,6 @@ static void VerifyStringTable(Isolate* isolate) {
}
#endif // VERIFY_HEAP
-bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
- bool gc_performed = true;
- int counter = 0;
- static const int kThreshold = 20;
- while (gc_performed && counter++ < kThreshold) {
- gc_performed = false;
- for (int space = FIRST_SPACE;
- space < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces);
- space++) {
- DCHECK_NE(space, NEW_SPACE);
- DCHECK_NE(space, NEW_LO_SPACE);
- Reservation* reservation = &reservations[space];
- DCHECK_LE(1, reservation->size());
- if (reservation->at(0).size == 0) {
- DCHECK_EQ(1, reservation->size());
- continue;
- }
- bool perform_gc = false;
- if (space == MAP_SPACE) {
- // We allocate each map individually to avoid fragmentation.
- maps->clear();
- DCHECK_LE(reservation->size(), 2);
- int reserved_size = 0;
- for (const Chunk& c : *reservation) reserved_size += c.size;
- DCHECK_EQ(0, reserved_size % Map::kSize);
- int num_maps = reserved_size / Map::kSize;
- for (int i = 0; i < num_maps; i++) {
- AllocationResult allocation;
-#if V8_ENABLE_THIRD_PARTY_HEAP_BOOL
- allocation = AllocateRaw(Map::kSize, AllocationType::kMap,
- AllocationOrigin::kRuntime, kWordAligned);
-#else
- allocation = map_space()->AllocateRawUnaligned(Map::kSize);
-#endif
- HeapObject free_space;
- if (allocation.To(&free_space)) {
- // Mark with a free list node, in case we have a GC before
- // deserializing.
- Address free_space_address = free_space.address();
- CreateFillerObjectAt(free_space_address, Map::kSize,
- ClearRecordedSlots::kNo);
- maps->push_back(free_space_address);
- } else {
- perform_gc = true;
- break;
- }
- }
- } else if (space == LO_SPACE) {
- // Just check that we can allocate during deserialization.
- DCHECK_LE(reservation->size(), 2);
- int reserved_size = 0;
- for (const Chunk& c : *reservation) reserved_size += c.size;
- perform_gc = !CanExpandOldGeneration(reserved_size);
- } else {
- for (auto& chunk : *reservation) {
- AllocationResult allocation;
- int size = chunk.size;
- DCHECK_LE(static_cast<size_t>(size),
- MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
- static_cast<AllocationSpace>(space)));
-#if V8_ENABLE_THIRD_PARTY_HEAP_BOOL
- AllocationType type = (space == CODE_SPACE)
- ? AllocationType::kCode
- : (space == RO_SPACE)
- ? AllocationType::kReadOnly
- : AllocationType::kYoung;
- AllocationAlignment align =
- (space == CODE_SPACE) ? kCodeAligned : kWordAligned;
- allocation =
- AllocateRaw(size, type, AllocationOrigin::kRuntime, align);
-#else
- if (space == RO_SPACE) {
- allocation = read_only_space()->AllocateRaw(
- size, AllocationAlignment::kWordAligned);
- } else {
- // The deserializer will update the skip list.
- allocation = paged_space(space)->AllocateRawUnaligned(size);
- }
-#endif
- HeapObject free_space;
- if (allocation.To(&free_space)) {
- // Mark with a free list node, in case we have a GC before
- // deserializing.
- Address free_space_address = free_space.address();
- CreateFillerObjectAt(free_space_address, size,
- ClearRecordedSlots::kNo);
- DCHECK(IsPreAllocatedSpace(static_cast<SnapshotSpace>(space)));
- chunk.start = free_space_address;
- chunk.end = free_space_address + size;
- } else {
- perform_gc = true;
- break;
- }
- }
- }
- if (perform_gc) {
-      // We cannot perform a GC with an uninitialized isolate. This check
- // fails for example if the max old space size is chosen unwisely,
- // so that we cannot allocate space to deserialize the initial heap.
- if (!deserialization_complete_) {
- V8::FatalProcessOutOfMemory(
- isolate(), "insufficient memory to create an Isolate");
- }
- if (counter > 1) {
- CollectAllGarbage(kReduceMemoryFootprintMask,
- GarbageCollectionReason::kDeserializer);
- } else {
- CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kDeserializer);
- }
- gc_performed = true;
- break; // Abort for-loop over spaces and retry.
- }
- }
- }
-
- return !gc_performed;
-}
-
-
void Heap::EnsureFromSpaceIsCommitted() {
if (new_space_->CommitFromSpaceIfNeeded()) return;
@@ -2005,35 +1896,28 @@ void Heap::EnsureFromSpaceIsCommitted() {
FatalProcessOutOfMemory("Committing semi space failed.");
}
-void Heap::CollectionBarrier::CollectionPerformed() {
- base::MutexGuard guard(&mutex_);
- gc_requested_ = false;
- cond_.NotifyAll();
+bool Heap::CollectionRequested() {
+ return collection_barrier_->CollectionRequested();
}
-void Heap::CollectionBarrier::ShutdownRequested() {
- base::MutexGuard guard(&mutex_);
- shutdown_requested_ = true;
- cond_.NotifyAll();
+void Heap::RequestCollectionBackground(LocalHeap* local_heap) {
+ if (local_heap->is_main_thread()) {
+ CollectAllGarbage(current_gc_flags_,
+ GarbageCollectionReason::kBackgroundAllocationFailure,
+ current_gc_callback_flags_);
+ } else {
+ collection_barrier_->AwaitCollectionBackground();
+ }
}
-void Heap::CollectionBarrier::Wait() {
- base::MutexGuard guard(&mutex_);
-
- if (shutdown_requested_) return;
-
- if (!gc_requested_) {
- heap_->MemoryPressureNotification(MemoryPressureLevel::kCritical, false);
- gc_requested_ = true;
- }
+void Heap::CheckCollectionRequested() {
+ if (!collection_barrier_->CollectionRequested()) return;
- while (gc_requested_ && !shutdown_requested_) {
- cond_.Wait(&mutex_);
- }
+ CollectAllGarbage(current_gc_flags_,
+ GarbageCollectionReason::kBackgroundAllocationFailure,
+ current_gc_callback_flags_);
}
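
The protocol above splits into two roles: background threads request a collection and block, while the main thread notices the request (via HandleGCRequest or CheckCollectionRequested), performs the GC, and resumes the waiters. A simplified standalone analogue of that handshake (V8's real class lives in src/heap/collection-barrier.h and additionally tracks timing and shutdown):

    #include <condition_variable>
    #include <mutex>

    class SimpleCollectionBarrier {
     public:
      // Background threads: flag the request, then block until it is served.
      void AwaitCollectionBackground() {
        std::unique_lock<std::mutex> lock(mutex_);
        requested_ = true;
        cv_.wait(lock, [this] { return !requested_; });
      }
      // Main thread: poll whether anyone is waiting for a GC.
      bool CollectionRequested() {
        std::lock_guard<std::mutex> lock(mutex_);
        return requested_;
      }
      // Main thread: after the GC, wake every blocked background thread.
      void ResumeThreadsAwaitingCollection() {
        std::lock_guard<std::mutex> lock(mutex_);
        requested_ = false;
        cv_.notify_all();
      }
     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      bool requested_ = false;
    };
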
-void Heap::RequestAndWaitForCollection() { collection_barrier_.Wait(); }
-
void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
if (start_new_space_size == 0) return;
@@ -2060,6 +1944,11 @@ size_t Heap::PerformGarbageCollection(
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
DisallowJavascriptExecution no_js(isolate());
base::Optional<SafepointScope> optional_safepoint_scope;
+
+ // Stop time-to-collection timer before safepoint - we do not want to measure
+ // time for safepointing.
+ collection_barrier_->StopTimeToCollectionTimer();
+
if (FLAG_local_heaps) {
optional_safepoint_scope.emplace(this);
}
@@ -2182,11 +2071,11 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
if (collector == MARK_COMPACTOR) {
external_memory_.ResetAfterGC();
- old_generation_allocation_limit_ =
+ set_old_generation_allocation_limit(
MemoryController<V8HeapTrait>::CalculateAllocationLimit(
this, old_gen_size, min_old_generation_size_,
max_old_generation_size(), new_space_capacity, v8_growing_factor,
- mode);
+ mode));
if (UseGlobalMemoryScheduling()) {
DCHECK_GT(global_growing_factor, 0);
global_allocation_limit_ =
@@ -2204,8 +2093,8 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
this, old_gen_size, min_old_generation_size_,
max_old_generation_size(), new_space_capacity, v8_growing_factor,
mode);
- if (new_old_generation_limit < old_generation_allocation_limit_) {
- old_generation_allocation_limit_ = new_old_generation_limit;
+ if (new_old_generation_limit < old_generation_allocation_limit()) {
+ set_old_generation_allocation_limit(new_old_generation_limit);
}
if (UseGlobalMemoryScheduling()) {
DCHECK_GT(global_growing_factor, 0);
@@ -2912,11 +2801,11 @@ void Heap::ConfigureInitialOldGenerationSize() {
const size_t new_old_generation_allocation_limit =
Max(OldGenerationSizeOfObjects() + minimum_growing_step,
static_cast<size_t>(
- static_cast<double>(old_generation_allocation_limit_) *
+ static_cast<double>(old_generation_allocation_limit()) *
(tracer()->AverageSurvivalRatio() / 100)));
if (new_old_generation_allocation_limit <
- old_generation_allocation_limit_) {
- old_generation_allocation_limit_ = new_old_generation_allocation_limit;
+ old_generation_allocation_limit()) {
+ set_old_generation_allocation_limit(new_old_generation_allocation_limit);
} else {
old_generation_size_configured_ = true;
}
@@ -3088,6 +2977,7 @@ class LeftTrimmerVerifierRootVisitor : public RootVisitor {
namespace {
bool MayContainRecordedSlots(HeapObject object) {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return false;
// New space object do not have recorded slots.
if (BasicMemoryChunk::FromHeapObject(object)->InYoungGeneration())
return false;
@@ -3538,47 +3428,6 @@ void Heap::FinalizeIncrementalMarkingIncrementally(
InvokeIncrementalMarkingEpilogueCallbacks();
}
-void Heap::RegisterDeserializedObjectsForBlackAllocation(
- Reservation* reservations, const std::vector<HeapObject>& large_objects,
- const std::vector<Address>& maps) {
- // TODO(ulan): pause black allocation during deserialization to avoid
- // iterating all these objects in one go.
-
- if (!incremental_marking()->black_allocation()) return;
-
- // Iterate black objects in old space, code space, map space, and large
- // object space for side effects.
- IncrementalMarking::MarkingState* marking_state =
- incremental_marking()->marking_state();
- for (int i = OLD_SPACE;
- i < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces); i++) {
- const Heap::Reservation& res = reservations[i];
- for (auto& chunk : res) {
- Address addr = chunk.start;
- while (addr < chunk.end) {
- HeapObject obj = HeapObject::FromAddress(addr);
- // Objects can have any color because incremental marking can
- // start in the middle of Heap::ReserveSpace().
- if (marking_state->IsBlack(obj)) {
- incremental_marking()->ProcessBlackAllocatedObject(obj);
- }
- addr += obj.Size();
- }
- }
- }
-
- // Large object space doesn't use reservations, so it needs custom handling.
- for (HeapObject object : large_objects) {
- incremental_marking()->ProcessBlackAllocatedObject(object);
- }
-
- // Map space doesn't use reservations, so it needs custom handling.
- for (Address addr : maps) {
- incremental_marking()->ProcessBlackAllocatedObject(
- HeapObject::FromAddress(addr));
- }
-}
-
void Heap::NotifyObjectLayoutChange(
HeapObject object, const DisallowHeapAllocation&,
InvalidateRecordedSlots invalidate_recorded_slots) {
@@ -3636,13 +3485,19 @@ class SlotCollectingVisitor final : public ObjectVisitor {
void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
if (!FLAG_verify_heap) return;
- // Check that Heap::NotifyObjectLayout was called for object transitions
+ // Check that Heap::NotifyObjectLayoutChange was called for object transitions
// that are not safe for concurrent marking.
// If you see this check triggering for a freshly allocated object,
// use object->set_map_after_allocation() to initialize its map.
if (pending_layout_change_object_.is_null()) {
if (object.IsJSObject()) {
DCHECK(!object.map().TransitionRequiresSynchronizationWithGC(new_map));
+ } else if (object.IsString() &&
+ (new_map == ReadOnlyRoots(this).thin_string_map() ||
+ new_map == ReadOnlyRoots(this).thin_one_byte_string_map())) {
+ // When transitioning a string to ThinString,
+ // Heap::NotifyObjectLayoutChange doesn't need to be invoked because only
+ // tagged fields are introduced.
} else {
// Check that the set of slots before and after the transition match.
SlotCollectingVisitor old_visitor;
@@ -3812,11 +3667,11 @@ void Heap::CheckMemoryPressure() {
// The optimizing compiler may be unnecessarily holding on to memory.
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
}
- MemoryPressureLevel memory_pressure_level = memory_pressure_level_;
// Reset the memory pressure level to avoid recursive GCs triggered by
// CheckMemoryPressure from AdjustAmountOfExternalMemory called by
// the finalizers.
- memory_pressure_level_ = MemoryPressureLevel::kNone;
+ MemoryPressureLevel memory_pressure_level = memory_pressure_level_.exchange(
+ MemoryPressureLevel::kNone, std::memory_order_relaxed);
if (memory_pressure_level == MemoryPressureLevel::kCritical) {
TRACE_EVENT0("devtools.timeline,v8", "V8.CheckMemoryPressure");
CollectGarbageOnMemoryPressure();
@@ -3869,8 +3724,8 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
bool is_isolate_locked) {
TRACE_EVENT1("devtools.timeline,v8", "V8.MemoryPressureNotification", "level",
static_cast<int>(level));
- MemoryPressureLevel previous = memory_pressure_level_;
- memory_pressure_level_ = level;
+ MemoryPressureLevel previous =
+ memory_pressure_level_.exchange(level, std::memory_order_relaxed);
if ((previous != MemoryPressureLevel::kCritical &&
level == MemoryPressureLevel::kCritical) ||
(previous == MemoryPressureLevel::kNone &&
@@ -4048,6 +3903,8 @@ const char* Heap::GarbageCollectionReasonToString(
return "measure memory";
case GarbageCollectionReason::kUnknown:
return "unknown";
+ case GarbageCollectionReason::kBackgroundAllocationFailure:
+ return "background allocation failure";
}
UNREACHABLE();
}
@@ -4149,6 +4006,7 @@ void Heap::Verify() {
// We have to wait here for the sweeper threads to have an iterable heap.
mark_compact_collector()->EnsureSweepingCompleted();
+
array_buffer_sweeper()->EnsureFinished();
VerifyPointersVisitor visitor(this);
@@ -4160,6 +4018,12 @@ void Heap::Verify() {
.NormalizedMapCacheVerify(isolate());
}
+ // The heap verifier can't deal with partially deserialized objects, so
+ // disable it if a deserializer is active.
+ // TODO(leszeks): Enable verification during deserialization, e.g. by only
+ // blocklisting objects that are in a partially deserialized state.
+ if (isolate()->has_active_deserializer()) return;
+
VerifySmisVisitor smis_visitor;
IterateSmiRoots(&smis_visitor);
@@ -4450,11 +4314,12 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
inline void FixHandle(FullObjectSlot p) {
if (!(*p).IsHeapObject()) return;
HeapObject current = HeapObject::cast(*p);
- const MapWord map_word = current.map_word();
- if (!map_word.IsForwardingAddress() && current.IsFreeSpaceOrFiller()) {
+ if (!current.map_word().IsForwardingAddress() &&
+ current.IsFreeSpaceOrFiller()) {
#ifdef DEBUG
// We need to find a FixedArrayBase map after walking the fillers.
- while (current.IsFreeSpaceOrFiller()) {
+ while (!current.map_word().IsForwardingAddress() &&
+ current.IsFreeSpaceOrFiller()) {
Address next = current.ptr();
if (current.map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
next += kTaggedSize;
@@ -4466,7 +4331,8 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
}
current = HeapObject::cast(Object(next));
}
- DCHECK(current.IsFixedArrayBase());
+ DCHECK(current.map_word().IsForwardingAddress() ||
+ current.IsFixedArrayBase());
#endif // DEBUG
p.store(Smi::zero());
}
@@ -4770,9 +4636,9 @@ void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
FLAG_semi_space_growth_factor = 2;
}
- old_generation_allocation_limit_ = initial_old_generation_size_;
+ set_old_generation_allocation_limit(initial_old_generation_size_);
global_allocation_limit_ =
- GlobalMemorySizeFromV8Size(old_generation_allocation_limit_);
+ GlobalMemorySizeFromV8Size(old_generation_allocation_limit());
initial_max_old_generation_size_ = max_old_generation_size();
// We rely on being able to allocate new arrays in paged spaces.
@@ -4881,8 +4747,8 @@ bool Heap::AllocationLimitOvershotByLargeMargin() {
uint64_t size_now =
OldGenerationSizeOfObjects() + AllocatedExternalMemorySinceMarkCompact();
- const size_t v8_overshoot = old_generation_allocation_limit_ < size_now
- ? size_now - old_generation_allocation_limit_
+ const size_t v8_overshoot = old_generation_allocation_limit() < size_now
+ ? size_now - old_generation_allocation_limit()
: 0;
const size_t global_overshoot =
global_allocation_limit_ < GlobalSizeOfObjects()
@@ -4898,8 +4764,8 @@ bool Heap::AllocationLimitOvershotByLargeMargin() {
// Overshoot margin is 50% of allocation limit or half-way to the max heap
// with special handling of small heaps.
const size_t v8_margin =
- Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
- (max_old_generation_size() - old_generation_allocation_limit_) / 2);
+ Min(Max(old_generation_allocation_limit() / 2, kMarginForSmallHeaps),
+ (max_old_generation_size() - old_generation_allocation_limit()) / 2);
const size_t global_margin =
Min(Max(global_allocation_limit_ / 2, kMarginForSmallHeaps),
(max_global_memory_size_ - global_allocation_limit_) / 2);
@@ -4907,6 +4773,15 @@ bool Heap::AllocationLimitOvershotByLargeMargin() {
return v8_overshoot >= v8_margin || global_overshoot >= global_margin;
}
+// static
+int Heap::MaxRegularHeapObjectSize(AllocationType allocation) {
+ if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
+ (allocation == AllocationType::kCode)) {
+ return MemoryChunkLayout::MaxRegularCodeObjectSize();
+ }
+ return kMaxRegularHeapObjectSize;
+}
+
bool Heap::ShouldOptimizeForLoadTime() {
return isolate()->rail_mode() == PERFORMANCE_LOAD &&
!AllocationLimitOvershotByLargeMargin() &&
@@ -4930,6 +4805,9 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap) {
// Ensure that retry of allocation on background thread succeeds
if (IsRetryOfFailedAllocation(local_heap)) return true;
+  // A background thread requested a GC; the allocation should fail.
+ if (CollectionRequested()) return false;
+
if (ShouldOptimizeForMemoryUsage()) return false;
if (ShouldOptimizeForLoadTime()) return true;
@@ -4983,7 +4861,7 @@ double Heap::PercentToOldGenerationLimit() {
double size_now =
OldGenerationSizeOfObjects() + AllocatedExternalMemorySinceMarkCompact();
double current_bytes = size_now - size_at_gc;
- double total_bytes = old_generation_allocation_limit_ - size_at_gc;
+ double total_bytes = old_generation_allocation_limit() - size_at_gc;
return total_bytes > 0 ? (current_bytes / total_bytes) * 100.0 : 0;
}
@@ -4992,7 +4870,7 @@ double Heap::PercentToGlobalMemoryLimit() {
double size_now =
OldGenerationSizeOfObjects() + AllocatedExternalMemorySinceMarkCompact();
double current_bytes = size_now - size_at_gc;
- double total_bytes = old_generation_allocation_limit_ - size_at_gc;
+ double total_bytes = old_generation_allocation_limit() - size_at_gc;
return total_bytes > 0 ? (current_bytes / total_bytes) * 100.0 : 0;
}
@@ -5113,40 +4991,20 @@ void Heap::DisableInlineAllocation() {
}
}
-HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
- // Code objects which should stay at a fixed address are allocated either
- // in the first page of code space, in large object space, or (during
- // snapshot creation) the containing page is marked as immovable.
- DCHECK(!heap_object.is_null());
-#ifndef V8_ENABLE_THIRD_PARTY_HEAP
- DCHECK(code_space_->Contains(heap_object));
-#endif
- DCHECK_GE(object_size, 0);
- if (!Heap::IsImmovable(heap_object)) {
- if (isolate()->serializer_enabled() ||
- code_space_->first_page()->Contains(heap_object.address())) {
- BasicMemoryChunk::FromHeapObject(heap_object)->MarkNeverEvacuate();
- } else {
- // Discard the first code allocation, which was on a page where it could
- // be moved.
- CreateFillerObjectAt(heap_object.address(), object_size,
- ClearRecordedSlots::kNo);
- heap_object = AllocateRawCodeInLargeObjectSpace(object_size);
- UnprotectAndRegisterMemoryChunk(heap_object);
- ZapCodeObject(heap_object.address(), object_size);
- OnAllocationEvent(heap_object, object_size);
- }
- }
- return heap_object;
-}
-
HeapObject Heap::AllocateRawWithLightRetrySlowPath(
int size, AllocationType allocation, AllocationOrigin origin,
AllocationAlignment alignment) {
HeapObject result;
AllocationResult alloc = AllocateRaw(size, allocation, origin, alignment);
if (alloc.To(&result)) {
- DCHECK(result != ReadOnlyRoots(this).exception());
+ // DCHECK that the successful allocation is not "exception". The one
+ // exception to this is when allocating the "exception" object itself, in
+ // which case this must be an ROSpace allocation and the exception object
+ // in the roots has to be unset.
+ DCHECK((CanAllocateInReadOnlySpace() &&
+ allocation == AllocationType::kReadOnly &&
+ ReadOnlyRoots(this).unchecked_exception() == Smi::zero()) ||
+ result != ReadOnlyRoots(this).exception());
return result;
}
    // Two GCs before panicking. In new space this will almost always succeed.
@@ -5185,40 +5043,6 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
return HeapObject();
}
-// TODO(jkummerow): Refactor this. AllocateRaw should take an "immovability"
-// parameter and just do what's necessary.
-HeapObject Heap::AllocateRawCodeInLargeObjectSpace(int size) {
- AllocationResult alloc = code_lo_space()->AllocateRaw(size);
- HeapObject result;
- if (alloc.To(&result)) {
- DCHECK(result != ReadOnlyRoots(this).exception());
- return result;
- }
- // Two GCs before panicking.
- for (int i = 0; i < 2; i++) {
- CollectGarbage(alloc.RetrySpace(),
- GarbageCollectionReason::kAllocationFailure);
- alloc = code_lo_space()->AllocateRaw(size);
- if (alloc.To(&result)) {
- DCHECK(result != ReadOnlyRoots(this).exception());
- return result;
- }
- }
- isolate()->counters()->gc_last_resort_from_handles()->Increment();
- CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
- {
- AlwaysAllocateScope scope(this);
- alloc = code_lo_space()->AllocateRaw(size);
- }
- if (alloc.To(&result)) {
- DCHECK(result != ReadOnlyRoots(this).exception());
- return result;
- }
- // TODO(1181417): Fix this.
- FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
- return HeapObject();
-}
-
void Heap::SetUp() {
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
allocation_timeout_ = NextAllocationTimeout();
@@ -5513,7 +5337,7 @@ void Heap::StartTearDown() {
// process the event queue anymore. Avoid this deadlock by allowing all
// allocations after tear down was requested to make sure all background
// threads finish.
- collection_barrier_.ShutdownRequested();
+ collection_barrier_->ShutdownRequested();
#ifdef VERIFY_HEAP
// {StartTearDown} is called fairly early during Isolate teardown, so it's
@@ -5529,6 +5353,9 @@ void Heap::StartTearDown() {
void Heap::TearDown() {
DCHECK_EQ(gc_state(), TEAR_DOWN);
+ if (FLAG_concurrent_marking || FLAG_parallel_marking)
+ concurrent_marking_->Pause();
+
// It's too late for Heap::Verify() here, as parts of the Isolate are
// already gone by the time this is called.
@@ -6826,7 +6653,7 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
return true;
}
-void Heap::SetEmbedderStackStateForNextFinalizaton(
+void Heap::SetEmbedderStackStateForNextFinalization(
EmbedderHeapTracer::EmbedderStackState stack_state) {
local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
stack_state);
@@ -6839,5 +6666,41 @@ void Heap::IncrementObjectCounters() {
}
#endif // DEBUG
+// StrongRootBlocks are allocated as a block of addresses, prefixed with a
+// StrongRootsEntry pointer:
+//
+// | StrongRootsEntry*
+// | Address 1
+// | ...
+// | Address N
+//
+// The allocate method registers the range "Address 1" to "Address N" with the
+// heap as a strong root array, saves that entry in StrongRootsEntry*, and
+// returns a pointer to Address 1.
+Address* StrongRootBlockAllocator::allocate(size_t n) {
+ void* block = malloc(sizeof(StrongRootsEntry*) + n * sizeof(Address));
+
+ StrongRootsEntry** header = reinterpret_cast<StrongRootsEntry**>(block);
+ Address* ret = reinterpret_cast<Address*>(reinterpret_cast<char*>(block) +
+ sizeof(StrongRootsEntry*));
+
+ memset(ret, kNullAddress, n * sizeof(Address));
+ *header =
+ heap_->RegisterStrongRoots(FullObjectSlot(ret), FullObjectSlot(ret + n));
+
+ return ret;
+}
+
+void StrongRootBlockAllocator::deallocate(Address* p, size_t n) noexcept {
+ // The allocate method returns a pointer to Address 1, so the deallocate
+ // method has to offset that pointer back by sizeof(StrongRootsEntry*).
+ void* block = reinterpret_cast<char*>(p) - sizeof(StrongRootsEntry*);
+ StrongRootsEntry** header = reinterpret_cast<StrongRootsEntry**>(block);
+
+ heap_->UnregisterStrongRoots(*header);
+
+ free(block);
+}
+
} // namespace internal
} // namespace v8
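
A usage sketch for the allocator defined above (illustrative only; `heap` is an assumed Heap* and `obj` an assumed HeapObject): every block the container obtains is registered as a strong-root range, so the stored addresses keep their objects alive across GCs.

    #include <vector>

    StrongRootBlockAllocator alloc(heap);  // `heap` assumed in scope
    std::vector<Address, StrongRootBlockAllocator> block(alloc);
    block.push_back(obj.ptr());  // traced as a strong root while the block lives
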
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index b8220dad5e..18064ac731 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -66,6 +66,7 @@ class ArrayBufferCollector;
class ArrayBufferSweeper;
class BasicMemoryChunk;
class CodeLargeObjectSpace;
+class CollectionBarrier;
class ConcurrentMarking;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
@@ -149,10 +150,11 @@ enum class GarbageCollectionReason {
kTesting = 21,
kExternalFinalize = 22,
kGlobalAllocationLimit = 23,
- kMeasureMemory = 24
+ kMeasureMemory = 24,
+ kBackgroundAllocationFailure = 25,
// If you add new items here, then update the incremental_marking_reason,
// mark_compact_reason, and scavenge_reason counters in counters.h.
- // Also update src/tools/metrics/histograms/histograms.xml in chromium.
+ // Also update src/tools/metrics/histograms/enums.xml in chromium.
};
enum class YoungGenerationHandling {
@@ -542,7 +544,7 @@ class Heap {
bool IsImmovable(HeapObject object);
- static bool IsLargeObject(HeapObject object);
+ V8_EXPORT_PRIVATE static bool IsLargeObject(HeapObject object);
// This method supports the deserialization allocator. All allocations
// are word-aligned. The method should never fail to allocate since the
@@ -658,6 +660,7 @@ class Heap {
}
void SetGCState(HeapState state);
bool IsTearingDown() const { return gc_state() == TEAR_DOWN; }
+ bool force_oom() const { return force_oom_; }
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
@@ -666,10 +669,8 @@ class Heap {
template <FindMementoMode mode>
inline AllocationMemento FindAllocationMemento(Map map, HeapObject object);
- // Returns false if not able to reserve.
- bool ReserveSpace(Reservation* reservations, std::vector<Address>* maps);
-
- void RequestAndWaitForCollection();
+ // Requests collection and blocks until GC is finished.
+ void RequestCollectionBackground(LocalHeap* local_heap);
//
// Support for the API.
@@ -770,9 +771,14 @@ class Heap {
V8_EXPORT_PRIVATE bool ShouldOptimizeForMemoryUsage();
bool HighMemoryPressure() {
- return memory_pressure_level_ != MemoryPressureLevel::kNone;
+ return memory_pressure_level_.load(std::memory_order_relaxed) !=
+ MemoryPressureLevel::kNone;
}
+ bool CollectionRequested();
+
+ void CheckCollectionRequested();
+
void RestoreHeapLimit(size_t heap_limit) {
// Do not set the limit lower than the live size + some slack.
size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
@@ -1061,10 +1067,6 @@ class Heap {
V8_EXPORT_PRIVATE void FinalizeIncrementalMarkingAtomically(
GarbageCollectionReason gc_reason);
- void RegisterDeserializedObjectsForBlackAllocation(
- Reservation* reservations, const std::vector<HeapObject>& large_objects,
- const std::vector<Address>& maps);
-
IncrementalMarking* incremental_marking() {
return incremental_marking_.get();
}
@@ -1126,7 +1128,7 @@ class Heap {
EmbedderHeapTracer* GetEmbedderHeapTracer() const;
void RegisterExternallyReferencedObject(Address* location);
- void SetEmbedderStackStateForNextFinalizaton(
+ V8_EXPORT_PRIVATE void SetEmbedderStackStateForNextFinalization(
EmbedderHeapTracer::EmbedderStackState stack_state);
EmbedderHeapTracer::TraceFlags flags_for_embedder_tracer() const;
@@ -1365,6 +1367,14 @@ class Heap {
// more eager to finalize incremental marking.
bool AllocationLimitOvershotByLargeMargin();
+ // Return the maximum size objects can be before having to allocate them as
+ // large objects. This takes into account allocating in the code space for
+ // which the size of the allocatable space per V8 page may depend on the OS
+ // page size at runtime. You may use kMaxRegularHeapObjectSize as a constant
+ // instead if you know the allocation isn't in the code spaces.
+ V8_EXPORT_PRIVATE static int MaxRegularHeapObjectSize(
+ AllocationType allocation);
+
// ===========================================================================
  // Prologue/epilogue callback methods ========================================
// ===========================================================================
@@ -1574,22 +1584,6 @@ class Heap {
DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
};
- class CollectionBarrier {
- Heap* heap_;
- base::Mutex mutex_;
- base::ConditionVariable cond_;
- bool gc_requested_;
- bool shutdown_requested_;
-
- public:
- explicit CollectionBarrier(Heap* heap)
- : heap_(heap), gc_requested_(false), shutdown_requested_(false) {}
-
- void CollectionPerformed();
- void ShutdownRequested();
- void Wait();
- };
-
struct StringTypeTable {
InstanceType type;
int size;
@@ -1851,8 +1845,8 @@ class Heap {
uint64_t bytes = OldGenerationSizeOfObjects() +
AllocatedExternalMemorySinceMarkCompact();
- if (old_generation_allocation_limit_ <= bytes) return 0;
- return old_generation_allocation_limit_ - static_cast<size_t>(bytes);
+ if (old_generation_allocation_limit() <= bytes) return 0;
+ return old_generation_allocation_limit() - static_cast<size_t>(bytes);
}
void UpdateTotalGCTime(double duration);
@@ -1885,7 +1879,11 @@ class Heap {
bool ShouldOptimizeForLoadTime();
size_t old_generation_allocation_limit() const {
- return old_generation_allocation_limit_;
+ return old_generation_allocation_limit_.load(std::memory_order_relaxed);
+ }
+
+ void set_old_generation_allocation_limit(size_t newlimit) {
+ old_generation_allocation_limit_.store(newlimit, std::memory_order_relaxed);
}
size_t global_allocation_limit() const { return global_allocation_limit_; }
@@ -1984,17 +1982,10 @@ class Heap {
int size, AllocationType allocation, AllocationOrigin origin,
AllocationAlignment alignment = kWordAligned);
- V8_WARN_UNUSED_RESULT HeapObject AllocateRawCodeInLargeObjectSpace(int size);
-
// Allocates a heap object based on the map.
V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map map,
AllocationType allocation);
- // Takes a code object and checks if it is on memory which is not subject to
- // compaction. This method will return a new code object on an immovable
- // memory location if the original code object was movable.
- HeapObject EnsureImmovableCode(HeapObject heap_object, int object_size);
-
// Allocates a partial map for bootstrapping.
V8_WARN_UNUSED_RESULT AllocationResult
AllocatePartialMap(InstanceType instance_type, int instance_size);
@@ -2002,6 +1993,9 @@ class Heap {
void FinalizePartialMap(Map map);
void set_force_oom(bool value) { force_oom_ = value; }
+ void set_force_gc_on_next_allocation() {
+ force_gc_on_next_allocation_ = true;
+ }
// ===========================================================================
// Retaining path tracing ====================================================
@@ -2072,7 +2066,7 @@ class Heap {
// and reset by a mark-compact garbage collection.
std::atomic<MemoryPressureLevel> memory_pressure_level_;
- std::vector<std::pair<v8::NearHeapLimitCallback, void*> >
+ std::vector<std::pair<v8::NearHeapLimitCallback, void*>>
near_heap_limit_callbacks_;
// For keeping track of context disposals.
@@ -2149,7 +2143,7 @@ class Heap {
// is checked when we have already decided to do a GC to help determine
// which collector to invoke, before expanding a paged space in the old
// generation and on every allocation in large object space.
- size_t old_generation_allocation_limit_ = 0;
+ std::atomic<size_t> old_generation_allocation_limit_{0};
size_t global_allocation_limit_ = 0;
// Indicates that inline bump-pointer allocation has been globally disabled
@@ -2275,7 +2269,7 @@ class Heap {
base::Mutex relocation_mutex_;
- CollectionBarrier collection_barrier_;
+ std::unique_ptr<CollectionBarrier> collection_barrier_;
int gc_callbacks_depth_ = 0;
@@ -2285,6 +2279,7 @@ class Heap {
// Used for testing purposes.
bool force_oom_ = false;
+ bool force_gc_on_next_allocation_ = false;
bool delay_sweeper_tasks_for_testing_ = false;
HeapObject pending_layout_change_object_;
@@ -2347,6 +2342,7 @@ class Heap {
// The allocator interface.
friend class Factory;
+ friend class Deserializer;
// The Isolate constructs us.
friend class Isolate;
@@ -2602,6 +2598,32 @@ T ForwardingAddress(T heap_obj) {
}
}
+// Address block allocator compatible with standard containers which registers
+// its allocated range as strong roots.
+class StrongRootBlockAllocator {
+ public:
+ using pointer = Address*;
+ using const_pointer = const Address*;
+ using reference = Address&;
+ using const_reference = const Address&;
+ using value_type = Address;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+ template <class U>
+ struct rebind {
+ STATIC_ASSERT((std::is_same<Address, U>::value));
+ using other = StrongRootBlockAllocator;
+ };
+
+ explicit StrongRootBlockAllocator(Heap* heap) : heap_(heap) {}
+
+ Address* allocate(size_t n);
+ void deallocate(Address* p, size_t n) noexcept;
+
+ private:
+ Heap* heap_;
+};
+
} // namespace internal
} // namespace v8
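
old_generation_allocation_limit_ turned into a std::atomic above because background allocation paths now read the limit while the main thread recomputes it after a GC. Relaxed ordering suffices, since readers only need some recent value rather than ordering with other memory operations. A standalone sketch of the accessor pattern:

    #include <atomic>
    #include <cstddef>

    class AllocationLimits {
     public:
      // Read from any thread; no ordering with surrounding accesses needed.
      size_t old_generation_allocation_limit() const {
        return limit_.load(std::memory_order_relaxed);
      }
      // Written on the main thread when limits are recomputed.
      void set_old_generation_allocation_limit(size_t new_limit) {
        limit_.store(new_limit, std::memory_order_relaxed);
      }
     private:
      std::atomic<size_t> limit_{0};
    };
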
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index c5206adf81..fb0ee2ecab 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -246,7 +246,7 @@ void IncrementalMarking::StartMarking() {
MarkRoots();
if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
- heap_->concurrent_marking()->ScheduleTasks();
+ heap_->concurrent_marking()->ScheduleJob();
}
// Ready to start incremental marking.
@@ -501,109 +501,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
}
});
- UpdateWeakReferencesAfterScavenge();
-}
-
-void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
- weak_objects_->weak_references.Update(
- [](std::pair<HeapObject, HeapObjectSlot> slot_in,
- std::pair<HeapObject, HeapObjectSlot>* slot_out) -> bool {
- HeapObject heap_obj = slot_in.first;
- HeapObject forwarded = ForwardingAddress(heap_obj);
-
- if (!forwarded.is_null()) {
- ptrdiff_t distance_to_slot =
- slot_in.second.address() - slot_in.first.ptr();
- Address new_slot = forwarded.ptr() + distance_to_slot;
- slot_out->first = forwarded;
- slot_out->second = HeapObjectSlot(new_slot);
- return true;
- }
-
- return false;
- });
- weak_objects_->weak_objects_in_code.Update(
- [](std::pair<HeapObject, Code> slot_in,
- std::pair<HeapObject, Code>* slot_out) -> bool {
- HeapObject heap_obj = slot_in.first;
- HeapObject forwarded = ForwardingAddress(heap_obj);
-
- if (!forwarded.is_null()) {
- slot_out->first = forwarded;
- slot_out->second = slot_in.second;
- return true;
- }
-
- return false;
- });
- weak_objects_->ephemeron_hash_tables.Update(
- [](EphemeronHashTable slot_in, EphemeronHashTable* slot_out) -> bool {
- EphemeronHashTable forwarded = ForwardingAddress(slot_in);
-
- if (!forwarded.is_null()) {
- *slot_out = forwarded;
- return true;
- }
-
- return false;
- });
-
- auto ephemeron_updater = [](Ephemeron slot_in, Ephemeron* slot_out) -> bool {
- HeapObject key = slot_in.key;
- HeapObject value = slot_in.value;
- HeapObject forwarded_key = ForwardingAddress(key);
- HeapObject forwarded_value = ForwardingAddress(value);
-
- if (!forwarded_key.is_null() && !forwarded_value.is_null()) {
- *slot_out = Ephemeron{forwarded_key, forwarded_value};
- return true;
- }
-
- return false;
- };
-
- weak_objects_->current_ephemerons.Update(ephemeron_updater);
- weak_objects_->next_ephemerons.Update(ephemeron_updater);
- weak_objects_->discovered_ephemerons.Update(ephemeron_updater);
-
- weak_objects_->flushed_js_functions.Update(
- [](JSFunction slot_in, JSFunction* slot_out) -> bool {
- JSFunction forwarded = ForwardingAddress(slot_in);
-
- if (!forwarded.is_null()) {
- *slot_out = forwarded;
- return true;
- }
-
- return false;
- });
-#ifdef DEBUG
- weak_objects_->bytecode_flushing_candidates.Iterate(
- [](SharedFunctionInfo candidate) {
- DCHECK(!Heap::InYoungGeneration(candidate));
- });
-#endif
-
- if (FLAG_harmony_weak_refs) {
- weak_objects_->js_weak_refs.Update(
- [](JSWeakRef js_weak_ref_in, JSWeakRef* js_weak_ref_out) -> bool {
- JSWeakRef forwarded = ForwardingAddress(js_weak_ref_in);
-
- if (!forwarded.is_null()) {
- *js_weak_ref_out = forwarded;
- return true;
- }
-
- return false;
- });
-
-#ifdef DEBUG
- // TODO(syg, marja): Support WeakCells in the young generation.
- weak_objects_->weak_cells.Iterate([](WeakCell weak_cell) {
- DCHECK(!Heap::InYoungGeneration(weak_cell));
- });
-#endif
- }
+ weak_objects_->UpdateAfterScavenge();
}
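
Each of the lambdas deleted above followed one pattern, now centralized in WeakObjects::UpdateAfterScavenge: map every worklist entry to its forwarding address and drop entries whose object did not survive the scavenge. A standalone sketch of that update shape (simplified; V8's worklists are segmented rather than plain vectors):

    #include <functional>
    #include <vector>

    template <typename T>
    void UpdateWorklist(std::vector<T>* worklist,
                        const std::function<bool(T, T*)>& updater) {
      std::vector<T> updated;
      for (const T& entry : *worklist) {
        T out;
        // updater returns true (with the forwarded entry in `out`) for
        // survivors, false for entries whose object died in the scavenge.
        if (updater(entry, &out)) updated.push_back(out);
      }
      *worklist = std::move(updated);
    }
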
void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
@@ -1104,7 +1002,7 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
}
if (FLAG_concurrent_marking) {
local_marking_worklists()->ShareWork();
- heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+ heap_->concurrent_marking()->RescheduleJobIfNeeded();
}
}
if (state_ == MARKING) {
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 29df137711..b259cacb93 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -81,7 +81,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
static constexpr size_t kGlobalActivationThreshold = 0;
#endif
-#ifdef V8_CONCURRENT_MARKING
+#ifdef V8_ATOMIC_MARKING_STATE
static const AccessMode kAtomicity = AccessMode::ATOMIC;
#else
static const AccessMode kAtomicity = AccessMode::NON_ATOMIC;
@@ -146,7 +146,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
void FinalizeIncrementally();
void UpdateMarkingWorklistAfterScavenge();
- void UpdateWeakReferencesAfterScavenge();
void UpdateMarkedBytesAfterScavenge(size_t dead_bytes_in_new_space);
void Hurry();
diff --git a/deps/v8/src/heap/local-heap-inl.h b/deps/v8/src/heap/local-heap-inl.h
index 770e1cb8e9..89f35ec21f 100644
--- a/deps/v8/src/heap/local-heap-inl.h
+++ b/deps/v8/src/heap/local-heap-inl.h
@@ -20,13 +20,13 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(AllowGarbageCollection::IsAllowed());
- DCHECK_IMPLIES(type == AllocationType::kCode,
- alignment == AllocationAlignment::kCodeAligned);
+ DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap,
+ alignment == AllocationAlignment::kWordAligned);
Heap::HeapState state = heap()->gc_state();
DCHECK(state == Heap::TEAR_DOWN || state == Heap::NOT_IN_GC);
#endif
- bool large_object = size_in_bytes > kMaxRegularHeapObjectSize;
+ bool large_object = size_in_bytes > Heap::MaxRegularHeapObjectSize(type);
CHECK_EQ(type, AllocationType::kOld);
if (large_object)
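
The hunk above routes the large-object decision through a per-allocation-type limit instead of the single kMaxRegularHeapObjectSize constant. A sketch of that routing, with invented placeholder limits; the real values come from Heap::MaxRegularHeapObjectSize.

enum class AllocationType { kOld, kCode, kMap };

// Placeholder limits for illustration only.
constexpr int MaxRegularHeapObjectSize(AllocationType type) {
  return type == AllocationType::kCode ? 32 * 1024 : 128 * 1024;
}

// Objects above the per-type limit take the large-object allocation path.
inline bool IsLargeObject(int size_in_bytes, AllocationType type) {
  return size_in_bytes > MaxRegularHeapObjectSize(type);
}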
diff --git a/deps/v8/src/heap/local-heap.cc b/deps/v8/src/heap/local-heap.cc
index a17c22a6d2..b54df4aae1 100644
--- a/deps/v8/src/heap/local-heap.cc
+++ b/deps/v8/src/heap/local-heap.cc
@@ -24,10 +24,11 @@ thread_local LocalHeap* current_local_heap = nullptr;
LocalHeap* LocalHeap::Current() { return current_local_heap; }
-LocalHeap::LocalHeap(Heap* heap,
+LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
std::unique_ptr<PersistentHandles> persistent_handles)
: heap_(heap),
- state_(ThreadState::Running),
+ is_main_thread_(kind == ThreadKind::kMain),
+ state_(ThreadState::Parked),
safepoint_requested_(false),
allocation_failed_(false),
prev_(nullptr),
@@ -36,34 +37,35 @@ LocalHeap::LocalHeap(Heap* heap,
persistent_handles_(std::move(persistent_handles)),
marking_barrier_(new MarkingBarrier(this)),
old_space_allocator_(this, heap->old_space()) {
- heap_->safepoint()->AddLocalHeap(this);
+ heap_->safepoint()->AddLocalHeap(this, [this] {
+ if (FLAG_local_heaps) {
+ WriteBarrier::SetForThread(marking_barrier_.get());
+ if (heap_->incremental_marking()->IsMarking()) {
+ marking_barrier_->Activate(
+ heap_->incremental_marking()->IsCompacting());
+ }
+ }
+ });
+
if (persistent_handles_) {
persistent_handles_->Attach(this);
}
DCHECK_NULL(current_local_heap);
current_local_heap = this;
- // TODO(ulan): Ensure that LocalHeap cannot be created without --local-heaps.
- if (FLAG_local_heaps) {
- WriteBarrier::SetForThread(marking_barrier_.get());
- if (heap_->incremental_marking()->IsMarking()) {
- marking_barrier_->Activate(heap_->incremental_marking()->IsCompacting());
- }
- }
}
LocalHeap::~LocalHeap() {
- // TODO(ulan): Ensure that LocalHeap cannot be created without --local-heaps.
- if (FLAG_local_heaps) {
- marking_barrier_->Publish();
- WriteBarrier::ClearForThread(marking_barrier_.get());
- }
- // Give up LAB before parking thread
- old_space_allocator_.FreeLinearAllocationArea();
-
// Park thread since removing the local heap could block.
EnsureParkedBeforeDestruction();
- heap_->safepoint()->RemoveLocalHeap(this);
+ heap_->safepoint()->RemoveLocalHeap(this, [this] {
+ old_space_allocator_.FreeLinearAllocationArea();
+
+ if (FLAG_local_heaps) {
+ marking_barrier_->Publish();
+ WriteBarrier::ClearForThread(marking_barrier_.get());
+ }
+ });
DCHECK_EQ(current_local_heap, this);
current_local_heap = nullptr;
@@ -77,6 +79,13 @@ void LocalHeap::EnsurePersistentHandles() {
}
}
+void LocalHeap::AttachPersistentHandles(
+ std::unique_ptr<PersistentHandles> persistent_handles) {
+ DCHECK_NULL(persistent_handles_);
+ persistent_handles_ = std::move(persistent_handles);
+ persistent_handles_->Attach(this);
+}
+
std::unique_ptr<PersistentHandles> LocalHeap::DetachPersistentHandles() {
if (persistent_handles_) persistent_handles_->Detach();
return std::move(persistent_handles_);
@@ -116,6 +125,7 @@ void LocalHeap::Unpark() {
}
void LocalHeap::EnsureParkedBeforeDestruction() {
+ if (IsParked()) return;
base::MutexGuard guard(&state_mutex_);
state_ = ThreadState::Parked;
state_change_.NotifyAll();
@@ -150,6 +160,11 @@ void LocalHeap::UnmarkLinearAllocationArea() {
old_space_allocator_.UnmarkLinearAllocationArea();
}
+void LocalHeap::PerformCollection() {
+ ParkedScope scope(this);
+ heap_->RequestCollectionBackground(this);
+}
+
Address LocalHeap::PerformCollectionAndAllocateAgain(
int object_size, AllocationType type, AllocationOrigin origin,
AllocationAlignment alignment) {
@@ -157,10 +172,7 @@ Address LocalHeap::PerformCollectionAndAllocateAgain(
static const int kMaxNumberOfRetries = 3;
for (int i = 0; i < kMaxNumberOfRetries; i++) {
- {
- ParkedScope scope(this);
- heap_->RequestAndWaitForCollection();
- }
+ PerformCollection();
AllocationResult result = AllocateRaw(object_size, type, origin, alignment);
if (!result.IsRetry()) {
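
The new PerformCollection above wraps the blocking collection request in a ParkedScope, so the requesting thread counts as stopped while it waits and the safepoint cannot deadlock on its own requester. A compilable sketch of that idiom with stub types; only the call pattern mirrors the patch.

#include <mutex>

// Stand-in for LocalHeap's state machine; the parking idiom is the point.
class WorkerHeap {
 public:
  void Park() { std::lock_guard<std::mutex> lock(mutex_); parked_ = true; }
  void Unpark() { std::lock_guard<std::mutex> lock(mutex_); parked_ = false; }
  void RequestCollectionAndWait() { /* blocks until the GC finishes */ }
 private:
  std::mutex mutex_;
  bool parked_ = false;
};

// RAII parking, mirroring ParkedScope: while this scope is alive the GC can
// safepoint without waiting for this thread.
class ParkedScopeSketch {
 public:
  explicit ParkedScopeSketch(WorkerHeap* heap) : heap_(heap) { heap_->Park(); }
  ~ParkedScopeSketch() { heap_->Unpark(); }
 private:
  WorkerHeap* const heap_;
};

void PerformCollection(WorkerHeap* heap) {
  ParkedScopeSketch parked(heap);  // GC must not wait on a blocked requester
  heap->RequestCollectionAndWait();
}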
diff --git a/deps/v8/src/heap/local-heap.h b/deps/v8/src/heap/local-heap.h
index f6244aaefe..bd2a14760c 100644
--- a/deps/v8/src/heap/local-heap.h
+++ b/deps/v8/src/heap/local-heap.h
@@ -22,10 +22,19 @@ class Heap;
class Safepoint;
class LocalHandles;
+// LocalHeap is used by the GC to track all threads with heap access in order to
+// stop them before performing a collection. LocalHeaps can be either Parked or
+// Running and are in Parked mode when initialized.
+// Running: Thread is allowed to access the heap but needs to give the GC the
+// chance to run regularly by manually invoking Safepoint(). The
+// thread can be parked using ParkedScope.
+// Parked: Heap access is not allowed, so the GC will not stop this thread
+// for a collection. Useful when threads do not need heap access for
+// some time or for blocking operations like locking a mutex.
class V8_EXPORT_PRIVATE LocalHeap {
public:
explicit LocalHeap(
- Heap* heap,
+ Heap* heap, ThreadKind kind,
std::unique_ptr<PersistentHandles> persistent_handles = nullptr);
~LocalHeap();
@@ -70,6 +79,8 @@ class V8_EXPORT_PRIVATE LocalHeap {
return kNullMaybeHandle;
}
+ void AttachPersistentHandles(
+ std::unique_ptr<PersistentHandles> persistent_handles);
std::unique_ptr<PersistentHandles> DetachPersistentHandles();
#ifdef DEBUG
bool ContainsPersistentHandle(Address* location);
@@ -115,6 +126,11 @@ class V8_EXPORT_PRIVATE LocalHeap {
AllocationOrigin origin = AllocationOrigin::kRuntime,
AllocationAlignment alignment = kWordAligned);
+ bool is_main_thread() const { return is_main_thread_; }
+
+ // Requests GC and blocks until the collection finishes.
+ void PerformCollection();
+
private:
enum class ThreadState {
// Threads in this state need to be stopped in a safepoint.
@@ -147,6 +163,7 @@ class V8_EXPORT_PRIVATE LocalHeap {
void EnterSafepoint();
Heap* heap_;
+ bool is_main_thread_;
base::Mutex state_mutex_;
base::ConditionVariable state_change_;
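
The class comment added in this header describes the Running/Parked protocol; the stubbed loop below illustrates the obligation it places on a Running thread. Everything here is a stand-in, and only the call pattern comes from the patch: regular Safepoint() calls while Running, Parked around the rest, and threads now starting out Parked.

struct LocalHeapStub {
  void Safepoint() {}  // the real version may block here for a GC
  void Park() {}
  void Unpark() {}
};

struct UnparkedScopeStub {
  explicit UnparkedScopeStub(LocalHeapStub* h) : heap(h) { heap->Unpark(); }
  ~UnparkedScopeStub() { heap->Park(); }
  LocalHeapStub* heap;
};

void BackgroundLoop(LocalHeapStub* local_heap, int work_items) {
  UnparkedScopeStub unparked(local_heap);  // LocalHeaps now start Parked
  for (int i = 0; i < work_items; ++i) {
    local_heap->Safepoint();  // give a pending GC the chance to stop us
    // ... allocate / process one bounded chunk of work ...
  }
}  // scope destructor parks the thread again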
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index c49bad62cc..a9db17f2aa 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -135,8 +135,8 @@ void MainMarkingVisitor<MarkingState>::MarkDescriptorArrayFromWriteBarrier(
}
template <LiveObjectIterationMode mode>
-LiveObjectRange<mode>::iterator::iterator(MemoryChunk* chunk, Bitmap* bitmap,
- Address start)
+LiveObjectRange<mode>::iterator::iterator(const MemoryChunk* chunk,
+ Bitmap* bitmap, Address start)
: chunk_(chunk),
one_word_filler_map_(
ReadOnlyRoots(chunk->heap()).one_pointer_filler_map()),
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 6e00912e61..91a1902182 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -6,6 +6,7 @@
#include <unordered_map>
+#include "src/base/optional.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/compilation-cache.h"
#include "src/deoptimizer/deoptimizer.h"
@@ -19,6 +20,7 @@
#include "src/heap/code-object-registry.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking-inl.h"
+#include "src/heap/index-generator.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/large-spaces.h"
@@ -31,6 +33,7 @@
#include "src/heap/memory-measurement.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/parallel-work-item.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/safepoint.h"
@@ -116,30 +119,28 @@ void MarkingVerifier::VerifyRoots() {
void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
Address end) {
- HeapObject object;
Address next_object_must_be_here_or_later = start;
- for (Address current = start; current < end;) {
- object = HeapObject::FromAddress(current);
- // One word fillers at the end of a black area can be grey.
- if (IsBlackOrGrey(object) &&
- object.map() != ReadOnlyRoots(heap_).one_pointer_filler_map()) {
- CHECK(IsMarked(object));
- CHECK(current >= next_object_must_be_here_or_later);
- object.Iterate(this);
- next_object_must_be_here_or_later = current + object.Size();
- // The object is either part of a black area of black allocation or a
- // regular black object
- CHECK(
- bitmap(page)->AllBitsSetInRange(
+
+ for (auto object_and_size :
+ LiveObjectRange<kAllLiveObjects>(page, bitmap(page))) {
+ HeapObject object = object_and_size.first;
+ size_t size = object_and_size.second;
+ Address current = object.address();
+ if (current < start) continue;
+ if (current >= end) break;
+ CHECK(IsMarked(object));
+ CHECK(current >= next_object_must_be_here_or_later);
+ object.Iterate(this);
+ next_object_must_be_here_or_later = current + size;
+ // The object is either part of a black area created by black allocation
+ // or a regular black object.
+ CHECK(bitmap(page)->AllBitsSetInRange(
page->AddressToMarkbitIndex(current),
page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
bitmap(page)->AllBitsClearInRange(
page->AddressToMarkbitIndex(current + kTaggedSize * 2),
page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
- current = next_object_must_be_here_or_later;
- } else {
- current += kTaggedSize;
- }
+ current = next_object_must_be_here_or_later;
}
}
@@ -390,11 +391,8 @@ int NumberOfAvailableCores() {
} // namespace
-int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
- DCHECK_GT(pages, 0);
- int tasks = FLAG_parallel_compaction ? Min(NumberOfAvailableCores(),
- pages / (MB / Page::kPageSize) + 1)
- : 1;
+int MarkCompactCollectorBase::NumberOfParallelCompactionTasks() {
+ int tasks = FLAG_parallel_compaction ? NumberOfAvailableCores() : 1;
if (!heap_->CanPromoteYoungAndExpandOldGeneration(
static_cast<size_t>(tasks * Page::kPageSize))) {
// Optimize for memory usage near the heap limit.
@@ -403,30 +401,6 @@ int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
return tasks;
}
-int MarkCompactCollectorBase::NumberOfParallelPointerUpdateTasks(int pages,
- int slots) {
- DCHECK_GT(pages, 0);
- // Limit the number of update tasks as task creation often dominates the
- // actual work that is being done.
- const int kMaxPointerUpdateTasks = 8;
- const int kSlotsPerTask = 600;
- const int wanted_tasks =
- (slots >= 0) ? Max(1, Min(pages, slots / kSlotsPerTask)) : pages;
- return FLAG_parallel_pointer_update
- ? Min(kMaxPointerUpdateTasks,
- Min(NumberOfAvailableCores(), wanted_tasks))
- : 1;
-}
-
-int MarkCompactCollectorBase::NumberOfParallelToSpacePointerUpdateTasks(
- int pages) {
- DCHECK_GT(pages, 0);
- // No cap needed because all pages we need to process are fully filled with
- // interesting objects.
- return FLAG_parallel_pointer_update ? Min(NumberOfAvailableCores(), pages)
- : 1;
-}
-
MarkCompactCollector::MarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap),
page_parallel_job_semaphore_(0),
@@ -906,12 +880,11 @@ void MarkCompactCollector::Prepare() {
heap()->new_space()->original_top_acquire());
}
-void MarkCompactCollector::FinishConcurrentMarking(
- ConcurrentMarking::StopRequest stop_request) {
+void MarkCompactCollector::FinishConcurrentMarking() {
// FinishConcurrentMarking is called for both concurrent and parallel
// marking. It is safe to call this function when tasks are already finished.
if (FLAG_parallel_marking || FLAG_concurrent_marking) {
- heap()->concurrent_marking()->Stop(stop_request);
+ heap()->concurrent_marking()->Join();
heap()->concurrent_marking()->FlushMemoryChunkData(
non_atomic_marking_state());
heap()->concurrent_marking()->FlushNativeContexts(&native_context_stats_);
@@ -1665,12 +1638,12 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
if (FLAG_parallel_marking) {
- heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+ heap_->concurrent_marking()->RescheduleJobIfNeeded(
+ TaskPriority::kUserBlocking);
}
work_to_do = ProcessEphemerons();
- FinishConcurrentMarking(
- ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
+ FinishConcurrentMarking();
}
CHECK(weak_objects_.current_ephemerons.IsEmpty());
@@ -1985,12 +1958,12 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
if (FLAG_parallel_marking) {
- heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+ heap_->concurrent_marking()->RescheduleJobIfNeeded(
+ TaskPriority::kUserBlocking);
}
DrainMarkingWorklist();
- FinishConcurrentMarking(
- ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
+ FinishConcurrentMarking();
DrainMarkingWorklist();
}
@@ -2173,8 +2146,8 @@ void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
DCHECK_EQ(map.raw_transitions(), HeapObjectReference::Weak(dead_target));
// Take ownership of the descriptor array.
int number_of_own_descriptors = map.NumberOfOwnDescriptors();
- DescriptorArray descriptors = map.instance_descriptors();
- if (descriptors == dead_target.instance_descriptors() &&
+ DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
+ if (descriptors == dead_target.instance_descriptors(kRelaxedLoad) &&
number_of_own_descriptors > 0) {
TrimDescriptorArray(map, descriptors);
DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
@@ -2244,7 +2217,7 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
// Use the raw function data setter to avoid validity checks, since we're
// performing the unusual task of decompiling.
- shared_info.set_function_data(uncompiled_data);
+ shared_info.set_function_data(uncompiled_data, kReleaseStore);
DCHECK(!shared_info.is_compiled());
}
@@ -2292,11 +2265,19 @@ void MarkCompactCollector::ClearFullMapTransitions() {
// filled. Allow it.
if (array.GetTargetIfExists(0, isolate(), &map)) {
DCHECK(!map.is_null()); // Weak pointers aren't cleared yet.
+ Object constructor_or_backpointer = map.constructor_or_backpointer();
+ if (constructor_or_backpointer.IsSmi()) {
+ DCHECK(isolate()->has_active_deserializer());
+ DCHECK_EQ(constructor_or_backpointer,
+ Deserializer::uninitialized_field_value());
+ continue;
+ }
Map parent = Map::cast(map.constructor_or_backpointer());
bool parent_is_alive =
non_atomic_marking_state()->IsBlackOrGrey(parent);
DescriptorArray descriptors =
- parent_is_alive ? parent.instance_descriptors() : DescriptorArray();
+ parent_is_alive ? parent.instance_descriptors(kRelaxedLoad)
+ : DescriptorArray();
bool descriptors_owner_died =
CompactTransitionArray(parent, array, descriptors);
if (descriptors_owner_died) {
@@ -2320,7 +2301,7 @@ bool MarkCompactCollector::CompactTransitionArray(Map map,
DCHECK_EQ(target.constructor_or_backpointer(), map);
if (non_atomic_marking_state()->IsWhite(target)) {
if (!descriptors.is_null() &&
- target.instance_descriptors() == descriptors) {
+ target.instance_descriptors(kRelaxedLoad) == descriptors) {
DCHECK(!target.is_prototype_map());
descriptors_owner_died = true;
}
@@ -2394,7 +2375,7 @@ void MarkCompactCollector::TrimDescriptorArray(Map map,
descriptors.Sort();
if (FLAG_unbox_double_fields) {
- LayoutDescriptor layout_descriptor = map.layout_descriptor();
+ LayoutDescriptor layout_descriptor = map.layout_descriptor(kAcquireLoad);
layout_descriptor = layout_descriptor.Trim(heap_, map, descriptors,
number_of_own_descriptors);
SLOW_DCHECK(layout_descriptor.IsConsistentWithMap(map, true));
@@ -2702,8 +2683,7 @@ static inline SlotCallbackResult UpdateSlot(TSlot slot,
}
template <AccessMode access_mode, typename TSlot>
-static inline SlotCallbackResult UpdateSlot(const Isolate* isolate,
- TSlot slot) {
+static inline SlotCallbackResult UpdateSlot(IsolateRoot isolate, TSlot slot) {
typename TSlot::TObject obj = slot.Relaxed_Load(isolate);
HeapObject heap_obj;
if (TSlot::kCanBeWeak && obj->GetHeapObjectIfWeak(&heap_obj)) {
@@ -2716,7 +2696,7 @@ static inline SlotCallbackResult UpdateSlot(const Isolate* isolate,
}
template <AccessMode access_mode, typename TSlot>
-static inline SlotCallbackResult UpdateStrongSlot(const Isolate* isolate,
+static inline SlotCallbackResult UpdateStrongSlot(IsolateRoot isolate,
TSlot slot) {
typename TSlot::TObject obj = slot.Relaxed_Load(isolate);
DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(obj.ptr()));
@@ -2734,8 +2714,7 @@ static inline SlotCallbackResult UpdateStrongSlot(const Isolate* isolate,
// It does not expect to encounter pointers to dead objects.
class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
public:
- explicit PointersUpdatingVisitor(const Isolate* isolate)
- : isolate_(isolate) {}
+ explicit PointersUpdatingVisitor(IsolateRoot isolate) : isolate_(isolate) {}
void VisitPointer(HeapObject host, ObjectSlot p) override {
UpdateStrongSlotInternal(isolate_, p);
@@ -2790,32 +2769,32 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
}
private:
- static inline SlotCallbackResult UpdateRootSlotInternal(
- const Isolate* isolate, FullObjectSlot slot) {
+ static inline SlotCallbackResult UpdateRootSlotInternal(IsolateRoot isolate,
+ FullObjectSlot slot) {
return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
}
static inline SlotCallbackResult UpdateRootSlotInternal(
- const Isolate* isolate, OffHeapObjectSlot slot) {
+ IsolateRoot isolate, OffHeapObjectSlot slot) {
return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
}
static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
- const Isolate* isolate, MaybeObjectSlot slot) {
+ IsolateRoot isolate, MaybeObjectSlot slot) {
return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
}
- static inline SlotCallbackResult UpdateStrongSlotInternal(
- const Isolate* isolate, ObjectSlot slot) {
+ static inline SlotCallbackResult UpdateStrongSlotInternal(IsolateRoot isolate,
+ ObjectSlot slot) {
return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
}
- static inline SlotCallbackResult UpdateSlotInternal(const Isolate* isolate,
+ static inline SlotCallbackResult UpdateSlotInternal(IsolateRoot isolate,
MaybeObjectSlot slot) {
return UpdateSlot<AccessMode::NON_ATOMIC>(isolate, slot);
}
- const Isolate* isolate_;
+ IsolateRoot isolate_;
};
static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
@@ -2953,7 +2932,7 @@ class Evacuator : public Malloced {
// Merge back locally cached info sequentially. Note that this method needs
// to be called from the main thread.
- inline void Finalize();
+ virtual void Finalize();
virtual GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() = 0;
virtual GCTracer::Scope::ScopeId GetTracingScope() = 0;
@@ -3052,7 +3031,7 @@ class FullEvacuator : public Evacuator {
return GCTracer::Scope::MC_EVACUATE_COPY_PARALLEL;
}
- inline void Finalize() {
+ void Finalize() override {
Evacuator::Finalize();
for (auto it = ephemeron_remembered_set_.begin();
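
The two hunks above change Evacuator::Finalize from an inline method to a virtual one so FullEvacuator can extend it, here to also flush its ephemeron remembered set. The classic extension pattern, sketched with stand-in classes:

class EvacuatorSketch {
 public:
  virtual ~EvacuatorSketch() = default;
  // Merges locally cached info back; must run on the main thread.
  virtual void Finalize() { /* base bookkeeping */ }
};

class FullEvacuatorSketch : public EvacuatorSketch {
 public:
  void Finalize() override {
    EvacuatorSketch::Finalize();  // keep the base merging
    // ... then flush the ephemeron remembered set, as in the patch ...
  }
};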
@@ -3121,48 +3100,68 @@ void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
}
}
-class EvacuationItem : public ItemParallelJob::Item {
- public:
- explicit EvacuationItem(MemoryChunk* chunk) : chunk_(chunk) {}
- ~EvacuationItem() override = default;
- MemoryChunk* chunk() const { return chunk_; }
-
- private:
- MemoryChunk* chunk_;
-};
-
-class PageEvacuationTask : public ItemParallelJob::Task {
+class PageEvacuationJob : public v8::JobTask {
public:
- PageEvacuationTask(Isolate* isolate, Evacuator* evacuator)
- : ItemParallelJob::Task(isolate),
- evacuator_(evacuator),
+ PageEvacuationJob(
+ Isolate* isolate, std::vector<std::unique_ptr<Evacuator>>* evacuators,
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items)
+ : evacuators_(evacuators),
+ evacuation_items_(std::move(evacuation_items)),
+ remaining_evacuation_items_(evacuation_items_.size()),
+ generator_(evacuation_items_.size()),
tracer_(isolate->heap()->tracer()) {}
- void RunInParallel(Runner runner) override {
- if (runner == Runner::kForeground) {
- TRACE_GC(tracer_, evacuator_->GetTracingScope());
- ProcessItems();
+ void Run(JobDelegate* delegate) override {
+ Evacuator* evacuator = (*evacuators_)[delegate->GetTaskId()].get();
+ if (delegate->IsJoiningThread()) {
+ TRACE_GC(tracer_, evacuator->GetTracingScope());
+ ProcessItems(delegate, evacuator);
} else {
- TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
- ProcessItems();
+ TRACE_BACKGROUND_GC(tracer_, evacuator->GetBackgroundTracingScope());
+ ProcessItems(delegate, evacuator);
+ }
+ }
+
+ void ProcessItems(JobDelegate* delegate, Evacuator* evacuator) {
+ while (remaining_evacuation_items_.load(std::memory_order_relaxed) > 0) {
+ base::Optional<size_t> index = generator_.GetNext();
+ if (!index) return;
+ for (size_t i = *index; i < evacuation_items_.size(); ++i) {
+ auto& work_item = evacuation_items_[i];
+ if (!work_item.first.TryAcquire()) break;
+ evacuator->EvacuatePage(work_item.second);
+ if (remaining_evacuation_items_.fetch_sub(
+ 1, std::memory_order_relaxed) <= 1) {
+ return;
+ }
+ }
}
}
- private:
- void ProcessItems() {
- EvacuationItem* item = nullptr;
- while ((item = GetItem<EvacuationItem>()) != nullptr) {
- evacuator_->EvacuatePage(item->chunk());
- item->MarkFinished();
- }
+ size_t GetMaxConcurrency(size_t worker_count) const override {
+ const size_t kItemsPerWorker = MB / Page::kPageSize;
+ // Ceiling division to ensure enough workers for all
+ // |remaining_evacuation_items_|.
+ const size_t wanted_num_workers =
+ (remaining_evacuation_items_.load(std::memory_order_relaxed) +
+ kItemsPerWorker - 1) /
+ kItemsPerWorker;
+ return std::min<size_t>(wanted_num_workers, evacuators_->size());
}
- Evacuator* evacuator_;
+
+ private:
+ std::vector<std::unique_ptr<Evacuator>>* evacuators_;
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items_;
+ std::atomic<size_t> remaining_evacuation_items_{0};
+ IndexGenerator generator_;
+
GCTracer* tracer_;
};
template <class Evacuator, class Collector>
void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
- Collector* collector, ItemParallelJob* job,
+ Collector* collector,
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items,
MigrationObserver* migration_observer, const intptr_t live_bytes) {
// Used for trace summary.
double compaction_speed = 0;
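
PageEvacuationJob above is the first of several jobs in this patch built from the same two pieces: a one-shot claim flag per item (ParallelWorkItem) and a drain loop that claims runs of consecutive items until the remaining count reaches zero. A self-contained sketch of both follows; the memory orders and the plain atomic cursor standing in for IndexGenerator are assumptions for illustration.

#include <atomic>
#include <cstddef>
#include <vector>

// Sketch of the primitive in src/heap/parallel-work-item.h: whoever flips
// the flag first owns the item. In the patch the flag is paired with a
// payload such as a MemoryChunk*.
class ParallelWorkItemSketch {
 public:
  bool TryAcquire() {
    return !acquired_.exchange(true, std::memory_order_relaxed);
  }
 private:
  std::atomic<bool> acquired_{false};
};

// The shared drain loop: pick a starting index, claim a run of consecutive
// items, back off when another worker already owns the next one, and stop
// early once nothing remains anywhere.
template <typename Item, typename Process>
void DrainItems(std::vector<Item>* items, std::atomic<size_t>* remaining,
                std::atomic<size_t>* cursor, Process process) {
  while (remaining->load(std::memory_order_relaxed) > 0) {
    size_t start = cursor->fetch_add(1, std::memory_order_relaxed);
    if (start >= items->size()) return;
    for (size_t i = start; i < items->size(); ++i) {
      if (!(*items)[i].TryAcquire()) break;  // run owned by another worker
      process(&(*items)[i]);
      if (remaining->fetch_sub(1, std::memory_order_relaxed) <= 1) return;
    }
  }
}

Since std::atomic is not movable, the item vector is built in place, e.g. std::vector<ParallelWorkItemSketch> items(n);.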
@@ -3173,31 +3172,33 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
const bool profiling = isolate()->LogObjectRelocation();
ProfilingMigrationObserver profiling_observer(heap());
- const int wanted_num_tasks =
- NumberOfParallelCompactionTasks(job->NumberOfItems());
- Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
+ const size_t pages_count = evacuation_items.size();
+ std::vector<std::unique_ptr<v8::internal::Evacuator>> evacuators;
+ const int wanted_num_tasks = NumberOfParallelCompactionTasks();
for (int i = 0; i < wanted_num_tasks; i++) {
- evacuators[i] = new Evacuator(collector);
- if (profiling) evacuators[i]->AddObserver(&profiling_observer);
+ auto evacuator = std::make_unique<Evacuator>(collector);
+ if (profiling) evacuator->AddObserver(&profiling_observer);
if (migration_observer != nullptr)
- evacuators[i]->AddObserver(migration_observer);
- job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
+ evacuator->AddObserver(migration_observer);
+ evacuators.push_back(std::move(evacuator));
}
- job->Run();
- for (int i = 0; i < wanted_num_tasks; i++) {
- evacuators[i]->Finalize();
- delete evacuators[i];
- }
- delete[] evacuators;
+ V8::GetCurrentPlatform()
+ ->PostJob(v8::TaskPriority::kUserBlocking,
+ std::make_unique<PageEvacuationJob>(
+ isolate(), &evacuators, std::move(evacuation_items)))
+ ->Join();
+
+ for (auto& evacuator : evacuators) evacuator->Finalize();
+ evacuators.clear();
if (FLAG_trace_evacuation) {
PrintIsolate(isolate(),
- "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
- "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8PRIdPTR
+ "%8.0f ms: evacuation-summary: parallel=%s pages=%zu "
+ "wanted_tasks=%d cores=%d live_bytes=%" V8PRIdPTR
" compaction_speed=%.f\n",
isolate()->time_millis_since_init(),
- FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
- wanted_num_tasks, job->NumberOfTasks(),
+ FLAG_parallel_compaction ? "yes" : "no", pages_count,
+ wanted_num_tasks,
V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1,
live_bytes, compaction_speed);
}
@@ -3214,8 +3215,7 @@ bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes,
}
void MarkCompactCollector::EvacuatePagesInParallel() {
- ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items;
intptr_t live_bytes = 0;
// Evacuation of new space pages cannot be aborted, so it needs to run
@@ -3238,12 +3238,12 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
}
- evacuation_job.AddItem(new EvacuationItem(page));
+ evacuation_items.emplace_back(ParallelWorkItem{}, page);
}
for (Page* page : old_space_evacuation_pages_) {
live_bytes += non_atomic_marking_state()->live_bytes(page);
- evacuation_job.AddItem(new EvacuationItem(page));
+ evacuation_items.emplace_back(ParallelWorkItem{}, page);
}
// Promote young generation large objects.
@@ -3259,18 +3259,18 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
if (marking_state->IsBlack(object)) {
heap_->lo_space()->PromoteNewLargeObject(current);
current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
- evacuation_job.AddItem(new EvacuationItem(current));
+ evacuation_items.emplace_back(ParallelWorkItem{}, current);
}
}
- if (evacuation_job.NumberOfItems() == 0) return;
+ if (evacuation_items.empty()) return;
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"MarkCompactCollector::EvacuatePagesInParallel", "pages",
- evacuation_job.NumberOfItems());
+ evacuation_items.size());
- CreateAndExecuteEvacuationTasks<FullEvacuator>(this, &evacuation_job, nullptr,
- live_bytes);
+ CreateAndExecuteEvacuationTasks<FullEvacuator>(
+ this, std::move(evacuation_items), nullptr, live_bytes);
// After evacuation there might still be swept pages that weren't
// added to one of the compaction space but still reside in the
@@ -3465,40 +3465,75 @@ void MarkCompactCollector::Evacuate() {
#endif
}
-class UpdatingItem : public ItemParallelJob::Item {
+class UpdatingItem : public ParallelWorkItem {
public:
- ~UpdatingItem() override = default;
+ virtual ~UpdatingItem() = default;
virtual void Process() = 0;
};
-class PointersUpdatingTask : public ItemParallelJob::Task {
+class PointersUpdatingJob : public v8::JobTask {
public:
- explicit PointersUpdatingTask(
- Isolate* isolate, GCTracer::Scope::ScopeId scope,
+ explicit PointersUpdatingJob(
+ Isolate* isolate,
+ std::vector<std::unique_ptr<UpdatingItem>> updating_items, int slots,
+ GCTracer::Scope::ScopeId scope,
GCTracer::BackgroundScope::ScopeId background_scope)
- : ItemParallelJob::Task(isolate),
+ : updating_items_(std::move(updating_items)),
+ remaining_updating_items_(updating_items_.size()),
+ generator_(updating_items_.size()),
+ slots_(slots),
tracer_(isolate->heap()->tracer()),
scope_(scope),
background_scope_(background_scope) {}
- void RunInParallel(Runner runner) override {
- if (runner == Runner::kForeground) {
+ void Run(JobDelegate* delegate) override {
+ if (delegate->IsJoiningThread()) {
TRACE_GC(tracer_, scope_);
- UpdatePointers();
+ UpdatePointers(delegate);
} else {
TRACE_BACKGROUND_GC(tracer_, background_scope_);
- UpdatePointers();
+ UpdatePointers(delegate);
}
}
- private:
- void UpdatePointers() {
- UpdatingItem* item = nullptr;
- while ((item = GetItem<UpdatingItem>()) != nullptr) {
- item->Process();
- item->MarkFinished();
+ void UpdatePointers(JobDelegate* delegate) {
+ while (remaining_updating_items_.load(std::memory_order_relaxed) > 0) {
+ base::Optional<size_t> index = generator_.GetNext();
+ if (!index) return;
+ for (size_t i = *index; i < updating_items_.size(); ++i) {
+ auto& work_item = updating_items_[i];
+ if (!work_item->TryAcquire()) break;
+ work_item->Process();
+ if (remaining_updating_items_.fetch_sub(1, std::memory_order_relaxed) <=
+ 1) {
+ return;
+ }
+ }
}
}
+
+ size_t GetMaxConcurrency(size_t worker_count) const override {
+ size_t items = remaining_updating_items_.load(std::memory_order_relaxed);
+ if (!FLAG_parallel_pointer_update) return items > 0;
+ const size_t kMaxPointerUpdateTasks = 8;
+ const size_t kSlotsPerTask = 600;
+ size_t wanted_tasks = items;
+ // Limit the number of update tasks as task creation often dominates the
+ // actual work that is being done.
+ if (slots_ >= 0) {
+ // Round up to ensure enough workers for all items.
+ wanted_tasks =
+ std::min<size_t>(items, (slots_ + kSlotsPerTask - 1) / kSlotsPerTask);
+ }
+ return std::min<size_t>(kMaxPointerUpdateTasks, wanted_tasks);
+ }
+
+ private:
+ std::vector<std::unique_ptr<UpdatingItem>> updating_items_;
+ std::atomic<size_t> remaining_updating_items_{0};
+ IndexGenerator generator_;
+ const int slots_;
+
GCTracer* tracer_;
GCTracer::Scope::ScopeId scope_;
GCTracer::BackgroundScope::ScopeId background_scope_;
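
GetMaxConcurrency above folds the deleted NumberOfParallelPointerUpdateTasks heuristic into the job itself. A worked restatement with the same constants (kMaxPointerUpdateTasks = 8, kSlotsPerTask = 600); the helper is illustrative, not the authoritative code.

#include <algorithm>
#include <cstddef>

size_t WantedPointerUpdateTasks(size_t remaining_items, int slots,
                                bool parallel_pointer_update) {
  if (!parallel_pointer_update) return remaining_items > 0 ? 1 : 0;
  const size_t kMaxPointerUpdateTasks = 8;
  const size_t kSlotsPerTask = 600;
  size_t wanted = remaining_items;
  if (slots >= 0) {
    // Ceiling division: 601 slots still warrant 2 tasks.
    wanted = std::min<size_t>(remaining_items,
                              (slots + kSlotsPerTask - 1) / kSlotsPerTask);
  }
  return std::min<size_t>(kMaxPointerUpdateTasks, wanted);
}

For example, 10 items and 601 tracked slots yield min(8, min(10, 2)) = 2 workers.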
@@ -3692,7 +3727,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
(chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(chunk_);
- const Isolate* isolate = heap_->isolate();
+ IsolateRoot isolate = heap_->isolate();
RememberedSet<OLD_TO_OLD>::Iterate(
chunk_,
[&filter, isolate](MaybeObjectSlot slot) {
@@ -3732,7 +3767,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
Address slot) {
// Using UpdateStrongSlot is OK here, because there are no weak
// typed slots.
- const Isolate* isolate = heap_->isolate();
+ IsolateRoot isolate = heap_->isolate();
return UpdateTypedSlotHelper::UpdateTypedSlot(
heap_, slot_type, slot, [isolate](FullMaybeObjectSlot slot) {
return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
@@ -3747,20 +3782,22 @@ class RememberedSetUpdatingItem : public UpdatingItem {
RememberedSetUpdatingMode updating_mode_;
};
-UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
+std::unique_ptr<UpdatingItem> MarkCompactCollector::CreateToSpaceUpdatingItem(
MemoryChunk* chunk, Address start, Address end) {
- return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
+ return std::make_unique<ToSpaceUpdatingItem<NonAtomicMarkingState>>(
chunk, start, end, non_atomic_marking_state());
}
-UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
+std::unique_ptr<UpdatingItem>
+MarkCompactCollector::CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
- return new RememberedSetUpdatingItem<NonAtomicMarkingState, MARK_COMPACTOR>(
+ return std::make_unique<
+ RememberedSetUpdatingItem<NonAtomicMarkingState, MARK_COMPACTOR>>(
heap(), non_atomic_marking_state(), chunk, updating_mode);
}
int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
- ItemParallelJob* job) {
+ std::vector<std::unique_ptr<UpdatingItem>>* items) {
// Seed to space pages.
const Address space_start = heap()->new_space()->first_allocatable_address();
const Address space_end = heap()->new_space()->top();
@@ -3769,16 +3806,15 @@ int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
Address start =
page->Contains(space_start) ? space_start : page->area_start();
Address end = page->Contains(space_end) ? space_end : page->area_end();
- job->AddItem(CreateToSpaceUpdatingItem(page, start, end));
+ items->emplace_back(CreateToSpaceUpdatingItem(page, start, end));
pages++;
}
- if (pages == 0) return 0;
- return NumberOfParallelToSpacePointerUpdateTasks(pages);
+ return pages;
}
template <typename IterateableSpace>
int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
- ItemParallelJob* job, IterateableSpace* space,
+ std::vector<std::unique_ptr<UpdatingItem>>* items, IterateableSpace* space,
RememberedSetUpdatingMode mode) {
int pages = 0;
for (MemoryChunk* chunk : *space) {
@@ -3802,7 +3838,7 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
contains_old_to_new_sweeping_slots ||
contains_old_to_old_invalidated_slots ||
contains_old_to_new_invalidated_slots) {
- job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode));
+ items->emplace_back(CreateRememberedSetUpdatingItem(chunk, mode));
pages++;
}
}
@@ -3876,35 +3912,29 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN);
- ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
-
- int remembered_set_pages = 0;
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->old_space(), RememberedSetUpdatingMode::ALL);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->code_space(), RememberedSetUpdatingMode::ALL);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->lo_space(), RememberedSetUpdatingMode::ALL);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->code_lo_space(), RememberedSetUpdatingMode::ALL);
- const int remembered_set_tasks =
- remembered_set_pages == 0
- ? 0
- : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
- old_to_new_slots_);
- const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
- const int num_ephemeron_table_updating_tasks = 1;
- const int num_tasks =
- Max(to_space_tasks,
- remembered_set_tasks + num_ephemeron_table_updating_tasks);
- for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpdatingTask(
- isolate(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
- GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
- }
- updating_job.AddItem(new EphemeronTableUpdatingItem(heap()));
- updating_job.Run();
+ std::vector<std::unique_ptr<UpdatingItem>> updating_items;
+
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->old_space(),
+ RememberedSetUpdatingMode::ALL);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->code_space(),
+ RememberedSetUpdatingMode::ALL);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->lo_space(),
+ RememberedSetUpdatingMode::ALL);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->code_lo_space(),
+ RememberedSetUpdatingMode::ALL);
+
+ CollectToSpaceUpdatingItems(&updating_items);
+ updating_items.push_back(
+ std::make_unique<EphemeronTableUpdatingItem>(heap()));
+
+ V8::GetCurrentPlatform()
+ ->PostJob(v8::TaskPriority::kUserBlocking,
+ std::make_unique<PointersUpdatingJob>(
+ isolate(), std::move(updating_items), old_to_new_slots_,
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
+ GCTracer::BackgroundScope::
+ MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
+ ->Join();
}
{
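
Both call sites above use the public v8::Platform pattern that replaces ItemParallelJob throughout this patch: post a v8::JobTask at kUserBlocking priority, then Join() so the posting thread contributes work and blocks until the job is done. A minimal compilable example against the v8-platform.h Jobs API; the counting payload is a placeholder.

#include <atomic>
#include <memory>

#include "v8-platform.h"

class CountingJob : public v8::JobTask {
 public:
  explicit CountingJob(std::atomic<size_t>* remaining)
      : remaining_(remaining) {}

  void Run(v8::JobDelegate* delegate) override {
    size_t prev = remaining_->load(std::memory_order_relaxed);
    while (prev > 0 && !delegate->ShouldYield()) {
      if (remaining_->compare_exchange_weak(prev, prev - 1,
                                            std::memory_order_relaxed)) {
        // ... process one unit of placeholder work ...
        prev = remaining_->load(std::memory_order_relaxed);
      }  // on CAS failure, prev was reloaded by compare_exchange_weak
    }
  }

  // Re-evaluated by the scheduler as work drains; returning 0 ends the job.
  size_t GetMaxConcurrency(size_t /*worker_count*/) const override {
    return remaining_->load(std::memory_order_relaxed);
  }

 private:
  std::atomic<size_t>* const remaining_;
};

void RunToCompletion(v8::Platform* platform, std::atomic<size_t>* remaining) {
  platform
      ->PostJob(v8::TaskPriority::kUserBlocking,
                std::make_unique<CountingJob>(remaining))
      ->Join();  // caller participates, returns when all work is done
}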
@@ -3914,27 +3944,19 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
// byte length which is potentially a HeapNumber.
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE);
- ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
-
- int array_buffer_pages = 0;
-
- int remembered_set_pages = 0;
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->map_space(), RememberedSetUpdatingMode::ALL);
- const int remembered_set_tasks =
- remembered_set_pages == 0
- ? 0
- : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
- old_to_new_slots_);
- const int num_tasks = Max(array_buffer_pages, remembered_set_tasks);
- if (num_tasks > 0) {
- for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpdatingTask(
- isolate(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
- GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
- }
- updating_job.Run();
+ std::vector<std::unique_ptr<UpdatingItem>> updating_items;
+
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
+ RememberedSetUpdatingMode::ALL);
+ if (!updating_items.empty()) {
+ V8::GetCurrentPlatform()
+ ->PostJob(v8::TaskPriority::kUserBlocking,
+ std::make_unique<PointersUpdatingJob>(
+ isolate(), std::move(updating_items), old_to_new_slots_,
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
+ GCTracer::BackgroundScope::
+ MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
+ ->Join();
}
}
@@ -4316,18 +4338,6 @@ MinorMarkCompactCollector::~MinorMarkCompactCollector() {
delete main_marking_visitor_;
}
-int MinorMarkCompactCollector::NumberOfParallelMarkingTasks(int pages) {
- DCHECK_GT(pages, 0);
- if (!FLAG_minor_mc_parallel_marking) return 1;
- // Pages are not private to markers but we can still use them to estimate the
- // amount of marking that is required.
- const int kPagesPerTask = 2;
- const int wanted_tasks = Max(1, pages / kPagesPerTask);
- return Min(NumberOfAvailableCores(),
- Min(wanted_tasks,
- MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks));
-}
-
void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
for (Page* p : sweep_to_iterate_pages_) {
if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
@@ -4413,38 +4423,20 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
PointersUpdatingVisitor updating_visitor(isolate());
- ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
+ std::vector<std::unique_ptr<UpdatingItem>> updating_items;
// Create batches of global handles.
- const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
- int remembered_set_pages = 0;
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->old_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->code_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->map_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->lo_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->code_lo_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- const int remembered_set_tasks =
- remembered_set_pages == 0 ? 0
- : NumberOfParallelPointerUpdateTasks(
- remembered_set_pages, old_to_new_slots_);
- const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
- for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpdatingTask(
- isolate(), GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
- GCTracer::BackgroundScope::
- MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
- }
+ CollectToSpaceUpdatingItems(&updating_items);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->old_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->code_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->lo_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->code_lo_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
{
TRACE_GC(heap()->tracer(),
@@ -4456,7 +4448,15 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
- updating_job.Run();
+ V8::GetCurrentPlatform()
+ ->PostJob(
+ v8::TaskPriority::kUserBlocking,
+ std::make_unique<PointersUpdatingJob>(
+ isolate(), std::move(updating_items), old_to_new_slots_,
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
+ GCTracer::BackgroundScope::
+ MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
+ ->Join();
}
{
@@ -4704,56 +4704,41 @@ void MinorMarkCompactCollector::EvacuateEpilogue() {
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
-UpdatingItem* MinorMarkCompactCollector::CreateToSpaceUpdatingItem(
- MemoryChunk* chunk, Address start, Address end) {
- return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
+std::unique_ptr<UpdatingItem>
+MinorMarkCompactCollector::CreateToSpaceUpdatingItem(MemoryChunk* chunk,
+ Address start,
+ Address end) {
+ return std::make_unique<ToSpaceUpdatingItem<NonAtomicMarkingState>>(
chunk, start, end, non_atomic_marking_state());
}
-UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
+std::unique_ptr<UpdatingItem>
+MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
- return new RememberedSetUpdatingItem<NonAtomicMarkingState,
- MINOR_MARK_COMPACTOR>(
+ return std::make_unique<
+ RememberedSetUpdatingItem<NonAtomicMarkingState, MINOR_MARK_COMPACTOR>>(
heap(), non_atomic_marking_state(), chunk, updating_mode);
}
-class MarkingItem;
class PageMarkingItem;
class RootMarkingItem;
class YoungGenerationMarkingTask;
-class MarkingItem : public ItemParallelJob::Item {
- public:
- ~MarkingItem() override = default;
- virtual void Process(YoungGenerationMarkingTask* task) = 0;
-};
-
-class YoungGenerationMarkingTask : public ItemParallelJob::Task {
+class YoungGenerationMarkingTask {
public:
YoungGenerationMarkingTask(
Isolate* isolate, MinorMarkCompactCollector* collector,
MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
- : ItemParallelJob::Task(isolate),
- collector_(collector),
- marking_worklist_(global_worklist, task_id),
+ : marking_worklist_(global_worklist, task_id),
marking_state_(collector->marking_state()),
visitor_(marking_state_, global_worklist, task_id) {
local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
Page::kPageSize);
}
- void RunInParallel(Runner runner) override {
- if (runner == Runner::kForeground) {
- TRACE_GC(collector_->heap()->tracer(),
- GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
- ProcessItems();
- } else {
- TRACE_BACKGROUND_GC(
- collector_->heap()->tracer(),
- GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
- ProcessItems();
- }
- }
+ int slots() const { return slots_; }
+
+ void IncrementSlots() { ++slots_; }
void MarkObject(Object object) {
if (!Heap::InYoungGeneration(object)) return;
@@ -4764,34 +4749,6 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
}
}
- private:
- void ProcessItems() {
- double marking_time = 0.0;
- {
- TimedScope scope(&marking_time);
- MarkingItem* item = nullptr;
- while ((item = GetItem<MarkingItem>()) != nullptr) {
- item->Process(this);
- item->MarkFinished();
- EmptyLocalMarkingWorklist();
- }
- EmptyMarkingWorklist();
- DCHECK(marking_worklist_.IsLocalEmpty());
- FlushLiveBytes();
- }
- if (FLAG_trace_minor_mc_parallel_marking) {
- PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
- static_cast<void*>(this), marking_time);
- }
- }
- void EmptyLocalMarkingWorklist() {
- HeapObject object;
- while (marking_worklist_.Pop(&object)) {
- const int size = visitor_.Visit(object);
- IncrementLiveBytes(object, size);
- }
- }
-
void EmptyMarkingWorklist() {
HeapObject object;
while (marking_worklist_.Pop(&object)) {
@@ -4810,20 +4767,20 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
}
}
- MinorMarkCompactCollector* collector_;
+ private:
MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
MinorMarkCompactCollector::MarkingState* marking_state_;
YoungGenerationMarkingVisitor visitor_;
std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
+ int slots_ = 0;
};
-class PageMarkingItem : public MarkingItem {
+class PageMarkingItem : public ParallelWorkItem {
public:
- explicit PageMarkingItem(MemoryChunk* chunk, std::atomic<int>* global_slots)
- : chunk_(chunk), global_slots_(global_slots), slots_(0) {}
- ~PageMarkingItem() override { *global_slots_ = *global_slots_ + slots_; }
+ explicit PageMarkingItem(MemoryChunk* chunk) : chunk_(chunk) {}
+ ~PageMarkingItem() = default;
- void Process(YoungGenerationMarkingTask* task) override {
+ void Process(YoungGenerationMarkingTask* task) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"PageMarkingItem::Process");
base::MutexGuard guard(chunk_->mutex());
@@ -4880,23 +4837,102 @@ class PageMarkingItem : public MarkingItem {
USE(success);
DCHECK(success);
task->MarkObject(heap_object);
- slots_++;
+ task->IncrementSlots();
return KEEP_SLOT;
}
return REMOVE_SLOT;
}
MemoryChunk* chunk_;
- std::atomic<int>* global_slots_;
- int slots_;
+};
+
+class YoungGenerationMarkingJob : public v8::JobTask {
+ public:
+ YoungGenerationMarkingJob(
+ Isolate* isolate, MinorMarkCompactCollector* collector,
+ MinorMarkCompactCollector::MarkingWorklist* global_worklist,
+ std::vector<PageMarkingItem> marking_items, std::atomic<int>* slots)
+ : isolate_(isolate),
+ collector_(collector),
+ global_worklist_(global_worklist),
+ marking_items_(std::move(marking_items)),
+ remaining_marking_items_(marking_items_.size()),
+ generator_(marking_items_.size()),
+ slots_(slots) {}
+
+ void Run(JobDelegate* delegate) override {
+ if (delegate->IsJoiningThread()) {
+ TRACE_GC(collector_->heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
+ ProcessItems(delegate);
+ } else {
+ TRACE_BACKGROUND_GC(
+ collector_->heap()->tracer(),
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
+ ProcessItems(delegate);
+ }
+ }
+
+ size_t GetMaxConcurrency(size_t worker_count) const override {
+ // Pages are not private to markers but we can still use them to estimate
+ // the amount of marking that is required.
+ const int kPagesPerTask = 2;
+ size_t items = remaining_marking_items_.load(std::memory_order_relaxed);
+ size_t num_tasks = std::max((items + 1) / kPagesPerTask,
+ global_worklist_->GlobalPoolSize());
+ return std::min<size_t>(
+ num_tasks, MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks);
+ }
+
+ private:
+ void ProcessItems(JobDelegate* delegate) {
+ double marking_time = 0.0;
+ {
+ TimedScope scope(&marking_time);
+ YoungGenerationMarkingTask task(isolate_, collector_, global_worklist_,
+ delegate->GetTaskId());
+ ProcessMarkingItems(&task);
+ task.EmptyMarkingWorklist();
+ task.FlushLiveBytes();
+ *slots_ += task.slots();
+ }
+ if (FLAG_trace_minor_mc_parallel_marking) {
+ PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
+ static_cast<void*>(this), marking_time);
+ }
+ }
+
+ void ProcessMarkingItems(YoungGenerationMarkingTask* task) {
+ while (remaining_marking_items_.load(std::memory_order_relaxed) > 0) {
+ base::Optional<size_t> index = generator_.GetNext();
+ if (!index) return;
+ for (size_t i = *index; i < marking_items_.size(); ++i) {
+ auto& work_item = marking_items_[i];
+ if (!work_item.TryAcquire()) break;
+ work_item.Process(task);
+ task->EmptyMarkingWorklist();
+ if (remaining_marking_items_.fetch_sub(1, std::memory_order_relaxed) <=
+ 1) {
+ return;
+ }
+ }
+ }
+ }
+
+ Isolate* isolate_;
+ MinorMarkCompactCollector* collector_;
+ MinorMarkCompactCollector::MarkingWorklist* global_worklist_;
+ std::vector<PageMarkingItem> marking_items_;
+ std::atomic_size_t remaining_marking_items_{0};
+ IndexGenerator generator_;
+ std::atomic<int>* slots_;
};
void MinorMarkCompactCollector::MarkRootSetInParallel(
RootMarkingVisitor* root_visitor) {
std::atomic<int> slots;
{
- ItemParallelJob job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
+ std::vector<PageMarkingItem> marking_items;
// Seed the root set (roots + old->new set).
{
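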
@@ -4914,22 +4950,25 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
root_visitor);
// Create items for each page.
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
- heap(), [&job, &slots](MemoryChunk* chunk) {
- job.AddItem(new PageMarkingItem(chunk, &slots));
+ heap(), [&marking_items](MemoryChunk* chunk) {
+ marking_items.emplace_back(chunk);
});
}
// Add tasks and run in parallel.
{
+ // The main thread might hold local items while GlobalPoolSize() == 0.
+ // Flush to ensure these items are visible globally and picked up by the
+ // job.
+ worklist()->FlushToGlobal(kMainThreadTask);
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
- const int new_space_pages =
- static_cast<int>(heap()->new_space()->Capacity()) / Page::kPageSize;
- const int num_tasks = NumberOfParallelMarkingTasks(new_space_pages);
- for (int i = 0; i < num_tasks; i++) {
- job.AddTask(
- new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
- }
- job.Run();
+ V8::GetCurrentPlatform()
+ ->PostJob(v8::TaskPriority::kUserBlocking,
+ std::make_unique<YoungGenerationMarkingJob>(
+ isolate(), this, worklist(), std::move(marking_items),
+ &slots))
+ ->Join();
+
DCHECK(worklist()->IsEmpty());
}
}
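
MarkRootSetInParallel now flushes main-thread-local worklist items before posting the job (see the comment above) because YoungGenerationMarkingJob::GetMaxConcurrency keeps workers alive based on both unclaimed pages and the published worklist size. A worked restatement with the patch's kPagesPerTask = 2; the helper itself is illustrative.

#include <algorithm>
#include <cstddef>

size_t YoungGenMarkingConcurrency(size_t remaining_items,
                                  size_t global_pool_size, size_t max_tasks) {
  const size_t kPagesPerTask = 2;
  size_t num_tasks =
      std::max((remaining_items + 1) / kPagesPerTask, global_pool_size);
  return std::min(num_tasks, max_tasks);  // capped by kMaxNumTasks in V8
}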
@@ -5161,8 +5200,7 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
} // namespace
void MinorMarkCompactCollector::EvacuatePagesInParallel() {
- ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items;
intptr_t live_bytes = 0;
for (Page* page : new_space_evacuation_pages_) {
@@ -5176,7 +5214,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
}
- evacuation_job.AddItem(new EvacuationItem(page));
+ evacuation_items.emplace_back(ParallelWorkItem{}, page);
}
// Promote young generation large objects.
@@ -5189,15 +5227,15 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
if (non_atomic_marking_state_.IsGrey(object)) {
heap_->lo_space()->PromoteNewLargeObject(current);
current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
- evacuation_job.AddItem(new EvacuationItem(current));
+ evacuation_items.emplace_back(ParallelWorkItem{}, current);
}
}
- if (evacuation_job.NumberOfItems() == 0) return;
+ if (evacuation_items.empty()) return;
YoungGenerationMigrationObserver observer(heap(),
heap()->mark_compact_collector());
CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
- this, &evacuation_job, &observer, live_bytes);
+ this, std::move(evacuation_items), &observer, live_bytes);
}
#endif // ENABLE_MINOR_MC
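
All of the jobs in this file draw their starting indices from src/heap/index-generator.h, also introduced by this patch. The sketch below keeps the interface, an atomic GetNext() returning an optional index, but dispenses indices sequentially; the real IndexGenerator spreads starting points across the range so concurrent workers begin far apart. std::optional stands in for the base::Optional the patch uses.

#include <atomic>
#include <cstddef>
#include <optional>

class SequentialIndexGenerator {
 public:
  explicit SequentialIndexGenerator(size_t size) : size_(size) {}

  // Hands each caller a distinct starting index, or nullopt once exhausted.
  std::optional<size_t> GetNext() {
    size_t next = next_.fetch_add(1, std::memory_order_relaxed);
    if (next >= size_) return std::nullopt;
    return next;
  }

 private:
  std::atomic<size_t> next_{0};
  const size_t size_;
};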
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index d369ac0183..4d598f71ff 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -13,6 +13,7 @@
#include "src/heap/marking-worklist.h"
#include "src/heap/marking.h"
#include "src/heap/memory-measurement.h"
+#include "src/heap/parallel-work-item.h"
#include "src/heap/spaces.h"
#include "src/heap/sweeper.h"
@@ -31,7 +32,8 @@ class YoungGenerationMarkingVisitor;
class MarkBitCellIterator {
public:
- MarkBitCellIterator(MemoryChunk* chunk, Bitmap* bitmap) : chunk_(chunk) {
+ MarkBitCellIterator(const MemoryChunk* chunk, Bitmap* bitmap)
+ : chunk_(chunk) {
last_cell_index_ =
Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(chunk_->area_end()));
cell_base_ = chunk_->address();
@@ -82,7 +84,7 @@ class MarkBitCellIterator {
}
private:
- MemoryChunk* chunk_;
+ const MemoryChunk* chunk_;
MarkBit::CellType* cells_;
unsigned int last_cell_index_;
unsigned int cell_index_;
@@ -101,7 +103,7 @@ class LiveObjectRange {
using reference = const value_type&;
using iterator_category = std::forward_iterator_tag;
- inline iterator(MemoryChunk* chunk, Bitmap* bitmap, Address start);
+ inline iterator(const MemoryChunk* chunk, Bitmap* bitmap, Address start);
inline iterator& operator++();
inline iterator operator++(int);
@@ -119,7 +121,7 @@ class LiveObjectRange {
private:
inline void AdvanceToNextValidObject();
- MemoryChunk* const chunk_;
+ const MemoryChunk* const chunk_;
Map const one_word_filler_map_;
Map const two_word_filler_map_;
Map const free_space_map_;
@@ -130,7 +132,7 @@ class LiveObjectRange {
int current_size_;
};
- LiveObjectRange(MemoryChunk* chunk, Bitmap* bitmap)
+ LiveObjectRange(const MemoryChunk* chunk, Bitmap* bitmap)
: chunk_(chunk),
bitmap_(bitmap),
start_(chunk_->area_start()),
@@ -142,7 +144,7 @@ class LiveObjectRange {
inline iterator end();
private:
- MemoryChunk* const chunk_;
+ const MemoryChunk* const chunk_;
Bitmap* bitmap_;
Address start_;
Address end_;
@@ -213,30 +215,28 @@ class MarkCompactCollectorBase {
virtual void Evacuate() = 0;
virtual void EvacuatePagesInParallel() = 0;
virtual void UpdatePointersAfterEvacuation() = 0;
- virtual UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk,
- Address start,
- Address end) = 0;
- virtual UpdatingItem* CreateRememberedSetUpdatingItem(
+ virtual std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(
+ MemoryChunk* chunk, Address start, Address end) = 0;
+ virtual std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) = 0;
template <class Evacuator, class Collector>
- void CreateAndExecuteEvacuationTasks(Collector* collector,
- ItemParallelJob* job,
- MigrationObserver* migration_observer,
- const intptr_t live_bytes);
+ void CreateAndExecuteEvacuationTasks(
+ Collector* collector,
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items,
+ MigrationObserver* migration_observer, const intptr_t live_bytes);
// Returns whether this page should be moved according to heuristics.
bool ShouldMovePage(Page* p, intptr_t live_bytes, bool promote_young);
- int CollectToSpaceUpdatingItems(ItemParallelJob* job);
+ int CollectToSpaceUpdatingItems(
+ std::vector<std::unique_ptr<UpdatingItem>>* items);
template <typename IterateableSpace>
- int CollectRememberedSetUpdatingItems(ItemParallelJob* job,
- IterateableSpace* space,
- RememberedSetUpdatingMode mode);
+ int CollectRememberedSetUpdatingItems(
+ std::vector<std::unique_ptr<UpdatingItem>>* items,
+ IterateableSpace* space, RememberedSetUpdatingMode mode);
- int NumberOfParallelCompactionTasks(int pages);
- int NumberOfParallelPointerUpdateTasks(int pages, int slots);
- int NumberOfParallelToSpacePointerUpdateTasks(int pages);
+ int NumberOfParallelCompactionTasks();
Heap* heap_;
// Number of old to new slots. Should be computed during MarkLiveObjects.
@@ -434,11 +434,11 @@ class MainMarkingVisitor final
// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
public:
-#ifdef V8_CONCURRENT_MARKING
+#ifdef V8_ATOMIC_MARKING_STATE
using MarkingState = MajorMarkingState;
#else
using MarkingState = MajorNonAtomicMarkingState;
-#endif // V8_CONCURRENT_MARKING
+#endif // V8_ATOMIC_MARKING_STATE
using AtomicMarkingState = MajorAtomicMarkingState;
using NonAtomicMarkingState = MajorNonAtomicMarkingState;
@@ -478,7 +478,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Stop concurrent marking (either by preempting it right away or waiting for
// it to complete as requested by |stop_request|).
- void FinishConcurrentMarking(ConcurrentMarking::StopRequest stop_request);
+ void FinishConcurrentMarking();
bool StartCompaction();
@@ -710,9 +710,10 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void EvacuatePagesInParallel() override;
void UpdatePointersAfterEvacuation() override;
- UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
- Address end) override;
- UpdatingItem* CreateRememberedSetUpdatingItem(
+ std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(MemoryChunk* chunk,
+ Address start,
+ Address end) override;
+ std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
void ReleaseEvacuationCandidates();
@@ -851,13 +852,12 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void EvacuatePagesInParallel() override;
void UpdatePointersAfterEvacuation() override;
- UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
- Address end) override;
- UpdatingItem* CreateRememberedSetUpdatingItem(
+ std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(MemoryChunk* chunk,
+ Address start,
+ Address end) override;
+ std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
- int NumberOfParallelMarkingTasks(int pages);
-
void SweepArrayBufferExtensions();
MarkingWorklist* worklist_;
@@ -871,6 +871,7 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
NonAtomicMarkingState non_atomic_marking_state_;
friend class YoungGenerationMarkingTask;
+ friend class YoungGenerationMarkingJob;
friend class YoungGenerationMarkingVisitor;
};
diff --git a/deps/v8/src/heap/marking-visitor-inl.h b/deps/v8/src/heap/marking-visitor-inl.h
index 532e1c9fd5..bdc955b4bb 100644
--- a/deps/v8/src/heap/marking-visitor-inl.h
+++ b/deps/v8/src/heap/marking-visitor-inl.h
@@ -9,6 +9,8 @@
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
+#include "src/objects/objects.h"
+#include "src/snapshot/deserializer.h"
namespace v8 {
namespace internal {
@@ -349,8 +351,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitWeakCell(
// ===========================================================================
template <typename ConcreteVisitor, typename MarkingState>
-size_t
-MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkDescriptorArrayBlack(
+int MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkDescriptorArrayBlack(
DescriptorArray descriptors) {
concrete_visitor()->marking_state()->WhiteToGrey(descriptors);
if (concrete_visitor()->marking_state()->GreyToBlack(descriptors)) {
@@ -389,36 +390,64 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitDescriptorArray(
}
template <typename ConcreteVisitor, typename MarkingState>
+int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitDescriptorsForMap(
+ Map map) {
+ if (!map.CanTransition()) return 0;
+
+ // Maps that can transition share their descriptor arrays and require
+ // special visiting logic to avoid memory leaks.
+ // Since descriptor arrays are potentially shared, ensure that only the
+ // descriptors that belong to this map are marked. The first time a
+ // non-empty descriptor array is marked, its header is also visited. The
+ // slot holding the descriptor array will be implicitly recorded when the
+ // pointer fields of this map are visited.
+
+ Object maybe_descriptors =
+ TaggedField<Object, Map::kInstanceDescriptorsOffset>::Acquire_Load(
+ heap_->isolate(), map);
+
+ // If the descriptors are a Smi, then this Map is in the process of being
+ // deserialized, and doesn't yet have an initialized descriptor field.
+ if (maybe_descriptors.IsSmi()) {
+ DCHECK_EQ(maybe_descriptors, Deserializer::uninitialized_field_value());
+ return 0;
+ }
+
+ DescriptorArray descriptors = DescriptorArray::cast(maybe_descriptors);
+
+ // Don't do any special processing of strong descriptor arrays, let them get
+ // marked through the normal visitor mechanism.
+ if (descriptors.IsStrongDescriptorArray()) {
+ return 0;
+ }
+
+ int size = MarkDescriptorArrayBlack(descriptors);
+ int number_of_own_descriptors = map.NumberOfOwnDescriptors();
+ if (number_of_own_descriptors) {
+ // It is possible that the concurrent marker observes the
+ // number_of_own_descriptors out of sync with the descriptors. In that
+ // case the marking write barrier for the descriptor array will ensure
+    // that all required descriptors are marked. The concurrent marker
+    // should just avoid crashing in that case. That's why we need the
+ // std::min<int>() below.
+ VisitDescriptors(descriptors,
+ std::min<int>(number_of_own_descriptors,
+ descriptors.number_of_descriptors()));
+ }
+
+ return size;
+}
+
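VisitDescriptorsForMap above tolerates a Map that is still being deserialized: the descriptor slot is read with acquire semantics and checked against a Smi sentinel before being cast. A minimal standalone sketch of that defensive read, using std::atomic<uintptr_t> in place of V8's TaggedField accessors; the sentinel value and field layout here are illustrative, not V8's actual encoding:

#include <atomic>
#include <cassert>
#include <cstdint>
#include <iostream>

constexpr uintptr_t kUninitializedSentinel = 0;  // Stand-in for V8's Smi sentinel.

struct DescriptorArray {
  int number_of_descriptors;
};

struct Map {
  // Published with release semantics once deserialization finishes.
  std::atomic<uintptr_t> descriptors_slot{kUninitializedSentinel};
};

// Returns the descriptor count, or 0 if the map is still being deserialized.
int VisitDescriptorsForMap(const Map& map) {
  uintptr_t raw = map.descriptors_slot.load(std::memory_order_acquire);
  if (raw == kUninitializedSentinel) return 0;  // Bail out, don't crash.
  return reinterpret_cast<DescriptorArray*>(raw)->number_of_descriptors;
}

int main() {
  Map map;
  assert(VisitDescriptorsForMap(map) == 0);  // Safe before publication.
  DescriptorArray descriptors{3};
  map.descriptors_slot.store(reinterpret_cast<uintptr_t>(&descriptors),
                             std::memory_order_release);
  std::cout << VisitDescriptorsForMap(map) << "\n";  // 3
}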
+template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitMap(Map meta_map,
Map map) {
if (!concrete_visitor()->ShouldVisit(map)) return 0;
int size = Map::BodyDescriptor::SizeOf(meta_map, map);
- if (map.CanTransition()) {
- // Maps that can transition share their descriptor arrays and require
- // special visiting logic to avoid memory leaks.
- // Since descriptor arrays are potentially shared, ensure that only the
- // descriptors that belong to this map are marked. The first time a
- // non-empty descriptor array is marked, its header is also visited. The
- // slot holding the descriptor array will be implicitly recorded when the
- // pointer fields of this map are visited.
- DescriptorArray descriptors = map.synchronized_instance_descriptors();
- size += MarkDescriptorArrayBlack(descriptors);
- int number_of_own_descriptors = map.NumberOfOwnDescriptors();
- if (number_of_own_descriptors) {
- // It is possible that the concurrent marker observes the
- // number_of_own_descriptors out of sync with the descriptors. In that
- // case the marking write barrier for the descriptor array will ensure
- // that all required descriptors are marked. The concurrent marker
- // just should avoid crashing in that case. That's why we need the
- // std::min<int>() below.
- VisitDescriptors(descriptors,
- std::min<int>(number_of_own_descriptors,
- descriptors.number_of_descriptors()));
- }
- // Mark the pointer fields of the Map. Since the transitions array has
- // been marked already, it is fine that one of these fields contains a
- // pointer to it.
- }
+ size += VisitDescriptorsForMap(map);
+
+ // Mark the pointer fields of the Map. If there is a transitions array, it has
+ // been marked already, so it is fine that one of these fields contains a
+ // pointer to it.
Map::BodyDescriptor::IterateBody(meta_map, map, size, this);
return size;
}
diff --git a/deps/v8/src/heap/marking-visitor.h b/deps/v8/src/heap/marking-visitor.h
index 3707fc6031..45dda338d0 100644
--- a/deps/v8/src/heap/marking-visitor.h
+++ b/deps/v8/src/heap/marking-visitor.h
@@ -11,58 +11,12 @@
#include "src/heap/memory-chunk.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
+#include "src/heap/weak-object-worklists.h"
#include "src/heap/worklist.h"
-#include "src/objects/heap-object.h" // For Worklist<HeapObject, ...>
-#include "src/objects/js-weak-refs.h" // For Worklist<WeakCell, ...>
namespace v8 {
namespace internal {
-struct Ephemeron {
- HeapObject key;
- HeapObject value;
-};
-
-using EphemeronWorklist = Worklist<Ephemeron, 64>;
-
-// Weak objects encountered during marking.
-struct WeakObjects {
- Worklist<TransitionArray, 64> transition_arrays;
-
- // Keep track of all EphemeronHashTables in the heap to process
- // them in the atomic pause.
- Worklist<EphemeronHashTable, 64> ephemeron_hash_tables;
-
- // Keep track of all ephemerons for concurrent marking tasks. Only store
- // ephemerons in these Worklists if both key and value are unreachable at the
- // moment.
- //
- // MarkCompactCollector::ProcessEphemeronsUntilFixpoint drains and fills these
- // worklists.
- //
- // current_ephemerons is used as draining worklist in the current fixpoint
- // iteration.
- EphemeronWorklist current_ephemerons;
-
- // Stores ephemerons to visit in the next fixpoint iteration.
- EphemeronWorklist next_ephemerons;
-
- // When draining the marking worklist new discovered ephemerons are pushed
- // into this worklist.
- EphemeronWorklist discovered_ephemerons;
-
- // TODO(marja): For old space, we only need the slot, not the host
- // object. Optimize this by adding a different storage for old space.
- Worklist<std::pair<HeapObject, HeapObjectSlot>, 64> weak_references;
- Worklist<std::pair<HeapObject, Code>, 64> weak_objects_in_code;
-
- Worklist<JSWeakRef, 64> js_weak_refs;
- Worklist<WeakCell, 64> weak_cells;
-
- Worklist<SharedFunctionInfo, 64> bytecode_flushing_candidates;
- Worklist<JSFunction, 64> flushed_js_functions;
-};
-
struct EphemeronMarking {
std::vector<HeapObject> newly_discovered;
bool newly_discovered_overflowed;
@@ -220,6 +174,9 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
V8_INLINE void VisitDescriptors(DescriptorArray descriptors,
int number_of_own_descriptors);
+
+ V8_INLINE int VisitDescriptorsForMap(Map map);
+
template <typename T>
int VisitEmbedderTracingSubclass(Map map, T object);
V8_INLINE int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
@@ -227,7 +184,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
// Marks the descriptor array black without pushing it on the marking work
// list and visits its header. Returns the size of the descriptor array
  // if it was successfully marked as black.
- V8_INLINE size_t MarkDescriptorArrayBlack(DescriptorArray descriptors);
+ V8_INLINE int MarkDescriptorArrayBlack(DescriptorArray descriptors);
// Marks the object grey and pushes it on the marking work list.
V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
diff --git a/deps/v8/src/heap/memory-allocator.cc b/deps/v8/src/heap/memory-allocator.cc
index 2c9daa3ec4..a3d4f0029e 100644
--- a/deps/v8/src/heap/memory-allocator.cc
+++ b/deps/v8/src/heap/memory-allocator.cc
@@ -154,68 +154,55 @@ void MemoryAllocator::TearDown() {
data_page_allocator_ = nullptr;
}
-class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
+class MemoryAllocator::Unmapper::UnmapFreeMemoryJob : public JobTask {
public:
- explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
- : CancelableTask(isolate),
- unmapper_(unmapper),
- tracer_(isolate->heap()->tracer()) {}
+ explicit UnmapFreeMemoryJob(Isolate* isolate, Unmapper* unmapper)
+ : unmapper_(unmapper), tracer_(isolate->heap()->tracer()) {}
- private:
- void RunInternal() override {
+ void Run(JobDelegate* delegate) override {
TRACE_BACKGROUND_GC(tracer_,
GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
- unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
- unmapper_->active_unmapping_tasks_--;
- unmapper_->pending_unmapping_tasks_semaphore_.Signal();
+ unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>(
+ delegate);
if (FLAG_trace_unmapper) {
- PrintIsolate(unmapper_->heap_->isolate(),
- "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
+ PrintIsolate(unmapper_->heap_->isolate(), "UnmapFreeMemoryTask Done\n");
}
}
+ size_t GetMaxConcurrency(size_t worker_count) const override {
+ const size_t kTaskPerChunk = 8;
+ return std::min<size_t>(
+ kMaxUnmapperTasks,
+ worker_count +
+ (unmapper_->NumberOfCommittedChunks() + kTaskPerChunk - 1) /
+ kTaskPerChunk);
+ }
+
+ private:
Unmapper* const unmapper_;
GCTracer* const tracer_;
- DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
+ DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryJob);
};
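GetMaxConcurrency above asks for roughly one worker per kTaskPerChunk queued chunks, rounded up, on top of the workers already running, and caps the result at kMaxUnmapperTasks. A worked sketch of the same arithmetic; kMaxUnmapperTasks = 4 is an assumption here, the real constant lives in memory-allocator.h:

#include <algorithm>
#include <cstddef>
#include <iostream>

// Mirrors the diff: at most kMaxUnmapperTasks workers, and roughly one extra
// worker per kTaskPerChunk queued chunks beyond those already running.
size_t MaxUnmapperConcurrency(size_t worker_count, size_t committed_chunks) {
  const size_t kMaxUnmapperTasks = 4;  // Assumed value for illustration.
  const size_t kTaskPerChunk = 8;
  return std::min<size_t>(
      kMaxUnmapperTasks,
      worker_count + (committed_chunks + kTaskPerChunk - 1) / kTaskPerChunk);
}

int main() {
  // 0 workers, 9 chunks -> ceil(9/8) = 2 workers wanted.
  std::cout << MaxUnmapperConcurrency(0, 9) << "\n";    // 2
  // 2 workers, 100 chunks -> demand 15, capped at 4.
  std::cout << MaxUnmapperConcurrency(2, 100) << "\n";  // 4
}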
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
- if (!MakeRoomForNewTasks()) {
- // kMaxUnmapperTasks are already running. Avoid creating any more.
+ if (job_handle_ && job_handle_->IsValid()) {
+ job_handle_->NotifyConcurrencyIncrease();
+ } else {
+ job_handle_ = V8::GetCurrentPlatform()->PostJob(
+ TaskPriority::kUserVisible,
+ std::make_unique<UnmapFreeMemoryJob>(heap_->isolate(), this));
if (FLAG_trace_unmapper) {
- PrintIsolate(heap_->isolate(),
- "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
- kMaxUnmapperTasks);
+ PrintIsolate(heap_->isolate(), "Unmapper::FreeQueuedChunks: new Job\n");
}
- return;
- }
- auto task = std::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
- if (FLAG_trace_unmapper) {
- PrintIsolate(heap_->isolate(),
- "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
- task->id());
}
- DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
- DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
- DCHECK_GE(active_unmapping_tasks_, 0);
- active_unmapping_tasks_++;
- task_ids_[pending_unmapping_tasks_++] = task->id();
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
} else {
PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
}
}
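The rewritten FreeQueuedChunks keeps one long-lived job instead of spawning bounded task batches: a valid handle just gets NotifyConcurrencyIncrease, otherwise a new job is posted. A self-contained sketch of that reuse-or-post shape; FakeJobHandle is a stub standing in for v8::JobHandle, whose real interface lives in include/v8-platform.h:

#include <iostream>
#include <memory>

// Stub exposing the two JobHandle operations the pattern needs.
struct FakeJobHandle {
  bool IsValid() const { return true; }
  void NotifyConcurrencyIncrease() { std::cout << "notify existing job\n"; }
};

struct Unmapper {
  std::unique_ptr<FakeJobHandle> job_handle_;

  void FreeQueuedChunks() {
    if (job_handle_ && job_handle_->IsValid()) {
      // Job already live: just tell the scheduler more work arrived.
      job_handle_->NotifyConcurrencyIncrease();
    } else {
      // No live job: post a fresh one (posting is stubbed out here).
      job_handle_ = std::make_unique<FakeJobHandle>();
      std::cout << "posted new job\n";
    }
  }
};

int main() {
  Unmapper unmapper;
  unmapper.FreeQueuedChunks();  // posted new job
  unmapper.FreeQueuedChunks();  // notify existing job
}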
void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
- for (int i = 0; i < pending_unmapping_tasks_; i++) {
- if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
- TryAbortResult::kTaskAborted) {
- pending_unmapping_tasks_semaphore_.Wait();
- }
- }
- pending_unmapping_tasks_ = 0;
- active_unmapping_tasks_ = 0;
+ if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();
if (FLAG_trace_unmapper) {
PrintIsolate(
@@ -234,26 +221,18 @@ void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
}
-bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
- DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
-
- if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
- // All previous unmapping tasks have been run to completion.
- // Finalize those tasks to make room for new ones.
- CancelAndWaitForPendingTasks();
- }
- return pending_unmapping_tasks_ != kMaxUnmapperTasks;
-}
-
-void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks(
+ JobDelegate* delegate) {
MemoryChunk* chunk = nullptr;
while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
allocator_->PerformFreeMemory(chunk);
+ if (delegate && delegate->ShouldYield()) return;
}
}
template <MemoryAllocator::Unmapper::FreeMode mode>
-void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
+ JobDelegate* delegate) {
MemoryChunk* chunk = nullptr;
if (FLAG_trace_unmapper) {
PrintIsolate(
@@ -266,6 +245,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
allocator_->PerformFreeMemory(chunk);
if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
+ if (delegate && delegate->ShouldYield()) return;
}
if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
// The previous loop uncommitted any pages marked as pooled and added them
@@ -273,13 +253,14 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
// though.
while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
+ if (delegate && delegate->ShouldYield()) return;
}
}
PerformFreeMemoryOnQueuedNonRegularChunks();
}
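Both drain loops now accept an optional JobDelegate and return early when the scheduler wants the worker back; the nullptr default keeps the old run-to-completion behavior for direct calls. A compact sketch of a drain loop with that optional yield hook, with FakeDelegate as a stand-in for v8::JobDelegate:

#include <deque>
#include <iostream>

struct FakeDelegate {
  int budget;  // Pretend the scheduler preempts us after this many checks.
  bool ShouldYield() { return budget-- <= 0; }
};

// Drains the queue; with a delegate, bails out cooperatively when asked.
void DrainQueue(std::deque<int>& queue, FakeDelegate* delegate = nullptr) {
  while (!queue.empty()) {
    queue.pop_front();  // "Free" one chunk.
    if (delegate && delegate->ShouldYield()) return;  // Resume in a later run.
  }
}

int main() {
  std::deque<int> queue{1, 2, 3, 4, 5};
  FakeDelegate delegate{2};
  DrainQueue(queue, &delegate);
  std::cout << queue.size() << "\n";  // 2: the loop yielded early.
  DrainQueue(queue);                  // No delegate: runs to completion.
  std::cout << queue.size() << "\n";  // 0
}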
void MemoryAllocator::Unmapper::TearDown() {
- CHECK_EQ(0, pending_unmapping_tasks_);
+ CHECK(!job_handle_ || !job_handle_->IsValid());
PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
for (int i = 0; i < kNumberOfChunkQueues; i++) {
DCHECK(chunks_[i].empty());
diff --git a/deps/v8/src/heap/memory-allocator.h b/deps/v8/src/heap/memory-allocator.h
index 7f95c49629..179877e753 100644
--- a/deps/v8/src/heap/memory-allocator.h
+++ b/deps/v8/src/heap/memory-allocator.h
@@ -61,14 +61,10 @@ class MemoryAllocator {
// chunks.
class Unmapper {
public:
- class UnmapFreeMemoryTask;
+ class UnmapFreeMemoryJob;
Unmapper(Heap* heap, MemoryAllocator* allocator)
- : heap_(heap),
- allocator_(allocator),
- pending_unmapping_tasks_semaphore_(0),
- pending_unmapping_tasks_(0),
- active_unmapping_tasks_(0) {
+ : heap_(heap), allocator_(allocator) {
chunks_[kRegular].reserve(kReservedQueueingSlots);
chunks_[kPooled].reserve(kReservedQueueingSlots);
}
@@ -142,18 +138,16 @@ class MemoryAllocator {
bool MakeRoomForNewTasks();
template <FreeMode mode>
- void PerformFreeMemoryOnQueuedChunks();
+ void PerformFreeMemoryOnQueuedChunks(JobDelegate* delegate = nullptr);
- void PerformFreeMemoryOnQueuedNonRegularChunks();
+ void PerformFreeMemoryOnQueuedNonRegularChunks(
+ JobDelegate* delegate = nullptr);
Heap* const heap_;
MemoryAllocator* const allocator_;
base::Mutex mutex_;
std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
- CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
- base::Semaphore pending_unmapping_tasks_semaphore_;
- intptr_t pending_unmapping_tasks_;
- std::atomic<intptr_t> active_unmapping_tasks_;
+ std::unique_ptr<v8::JobHandle> job_handle_;
friend class MemoryAllocator;
};
diff --git a/deps/v8/src/heap/memory-chunk-layout.cc b/deps/v8/src/heap/memory-chunk-layout.cc
index d4e1d1267e..e89a01fb0a 100644
--- a/deps/v8/src/heap/memory-chunk-layout.cc
+++ b/deps/v8/src/heap/memory-chunk-layout.cc
@@ -37,7 +37,6 @@ intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
- DCHECK_LE(kMaxRegularHeapObjectSize, memory);
return memory;
}
@@ -67,5 +66,11 @@ size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
return AllocatableMemoryInDataPage();
}
+int MemoryChunkLayout::MaxRegularCodeObjectSize() {
+ int size = static_cast<int>(AllocatableMemoryInCodePage() / 2);
+ DCHECK_LE(size, kMaxRegularHeapObjectSize);
+ return size;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/memory-chunk-layout.h b/deps/v8/src/heap/memory-chunk-layout.h
index 0a95c70989..41512cbbce 100644
--- a/deps/v8/src/heap/memory-chunk-layout.h
+++ b/deps/v8/src/heap/memory-chunk-layout.h
@@ -83,6 +83,8 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
static size_t AllocatableMemoryInDataPage();
static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
+
+ static int MaxRegularCodeObjectSize();
};
} // namespace internal
diff --git a/deps/v8/src/heap/memory-chunk.h b/deps/v8/src/heap/memory-chunk.h
index ba6c06d026..66196c1f13 100644
--- a/deps/v8/src/heap/memory-chunk.h
+++ b/deps/v8/src/heap/memory-chunk.h
@@ -59,7 +59,6 @@ class MemoryChunk : public BasicMemoryChunk {
// Only works if the object is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromHeapObject(HeapObject o) {
- DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return cast(BasicMemoryChunk::FromHeapObject(o));
}
diff --git a/deps/v8/src/heap/memory-measurement-inl.h b/deps/v8/src/heap/memory-measurement-inl.h
index 905623e744..f6c75b6ca6 100644
--- a/deps/v8/src/heap/memory-measurement-inl.h
+++ b/deps/v8/src/heap/memory-measurement-inl.h
@@ -20,13 +20,13 @@ bool NativeContextInferrer::Infer(Isolate* isolate, Map map, HeapObject object,
Address* native_context) {
switch (map.visitor_id()) {
case kVisitContext:
- *native_context = Context::cast(object).native_context().ptr();
- return true;
+ return InferForContext(isolate, Context::cast(object), native_context);
case kVisitNativeContext:
*native_context = object.ptr();
return true;
case kVisitJSFunction:
- return InferForJSFunction(JSFunction::cast(object), native_context);
+ return InferForJSFunction(isolate, JSFunction::cast(object),
+ native_context);
case kVisitJSApiObject:
case kVisitJSArrayBuffer:
case kVisitJSObject:
diff --git a/deps/v8/src/heap/memory-measurement.cc b/deps/v8/src/heap/memory-measurement.cc
index 4b8f13e6bb..5f79439b05 100644
--- a/deps/v8/src/heap/memory-measurement.cc
+++ b/deps/v8/src/heap/memory-measurement.cc
@@ -337,15 +337,37 @@ std::unique_ptr<v8::MeasureMemoryDelegate> MemoryMeasurement::DefaultDelegate(
mode);
}
-bool NativeContextInferrer::InferForJSFunction(JSFunction function,
- Address* native_context) {
- if (function.has_context()) {
- *native_context = function.context().native_context().ptr();
+bool NativeContextInferrer::InferForContext(Isolate* isolate, Context context,
+ Address* native_context) {
+ Map context_map = context.synchronized_map();
+ Object maybe_native_context =
+ TaggedField<Object, Map::kConstructorOrBackPointerOrNativeContextOffset>::
+ Acquire_Load(isolate, context_map);
+ if (maybe_native_context.IsNativeContext()) {
+ *native_context = maybe_native_context.ptr();
return true;
}
return false;
}
+bool NativeContextInferrer::InferForJSFunction(Isolate* isolate,
+ JSFunction function,
+ Address* native_context) {
+ Object maybe_context =
+ TaggedField<Object, JSFunction::kContextOffset>::Acquire_Load(isolate,
+ function);
+ // The context may be a smi during deserialization.
+ if (maybe_context.IsSmi()) {
+ DCHECK_EQ(maybe_context, Deserializer::uninitialized_field_value());
+ return false;
+ }
+ if (!maybe_context.IsContext()) {
+ // The function does not have a context.
+ return false;
+ }
+ return InferForContext(isolate, Context::cast(maybe_context), native_context);
+}
+
bool NativeContextInferrer::InferForJSObject(Isolate* isolate, Map map,
JSObject object,
Address* native_context) {
@@ -361,7 +383,7 @@ bool NativeContextInferrer::InferForJSObject(Isolate* isolate, Map map,
const int kMaxSteps = 3;
Object maybe_constructor = map.TryGetConstructor(isolate, kMaxSteps);
if (maybe_constructor.IsJSFunction()) {
- return InferForJSFunction(JSFunction::cast(maybe_constructor),
+ return InferForJSFunction(isolate, JSFunction::cast(maybe_constructor),
native_context);
}
return false;
diff --git a/deps/v8/src/heap/memory-measurement.h b/deps/v8/src/heap/memory-measurement.h
index e71bdc1cfe..cf72c57abd 100644
--- a/deps/v8/src/heap/memory-measurement.h
+++ b/deps/v8/src/heap/memory-measurement.h
@@ -73,7 +73,10 @@ class V8_EXPORT_PRIVATE NativeContextInferrer {
Address* native_context);
private:
- bool InferForJSFunction(JSFunction function, Address* native_context);
+ bool InferForContext(Isolate* isolate, Context context,
+ Address* native_context);
+ bool InferForJSFunction(Isolate* isolate, JSFunction function,
+ Address* native_context);
bool InferForJSObject(Isolate* isolate, Map map, JSObject object,
Address* native_context);
};
diff --git a/deps/v8/src/heap/new-spaces.cc b/deps/v8/src/heap/new-spaces.cc
index 98a8c715f4..f1f31d6713 100644
--- a/deps/v8/src/heap/new-spaces.cc
+++ b/deps/v8/src/heap/new-spaces.cc
@@ -418,7 +418,7 @@ void NewSpace::TearDown() {
void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
void NewSpace::Grow() {
- DCHECK(heap()->safepoint()->IsActive());
+ DCHECK_IMPLIES(FLAG_local_heaps, heap()->safepoint()->IsActive());
// Double the semispace size but only up to maximum capacity.
DCHECK(TotalCapacity() < MaximumCapacity());
size_t new_capacity =
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index b84ae26c90..7ce2c07462 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -16,7 +16,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact.h"
#include "src/logging/counters.h"
-#include "src/objects/compilation-cache-inl.h"
+#include "src/objects/compilation-cache-table-inl.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
@@ -150,7 +150,7 @@ FieldStatsCollector::GetInobjectFieldStats(Map map) {
JSObjectFieldStats stats;
stats.embedded_fields_count_ = JSObject::GetEmbedderFieldCount(map);
if (!map.is_dictionary_map()) {
- DescriptorArray descriptors = map.instance_descriptors();
+ DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
for (InternalIndex descriptor : map.IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(descriptor);
if (details.location() == kField) {
@@ -565,9 +565,10 @@ void ObjectStatsCollectorImpl::RecordVirtualFunctionTemplateInfoDetails(
FunctionTemplateInfo fti) {
// named_property_handler and indexed_property_handler are recorded as
// INTERCEPTOR_INFO_TYPE.
- if (!fti.call_code().IsUndefined(isolate())) {
+ HeapObject call_code = fti.call_code(kAcquireLoad);
+ if (!call_code.IsUndefined(isolate())) {
RecordSimpleVirtualObjectStats(
- fti, CallHandlerInfo::cast(fti.call_code()),
+ fti, CallHandlerInfo::cast(call_code),
ObjectStats::FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE);
}
if (!fti.GetInstanceCallHandler().IsUndefined(isolate())) {
@@ -883,7 +884,7 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map map) {
// This will be logged as MAP_TYPE in Phase2.
}
- DescriptorArray array = map.instance_descriptors();
+ DescriptorArray array = map.instance_descriptors(kRelaxedLoad);
if (map.owns_descriptors() &&
array != ReadOnlyRoots(heap_).empty_descriptor_array()) {
// Generally DescriptorArrays have their own instance type already
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index bc532cfa79..395f76bf60 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -5,10 +5,10 @@
#ifndef V8_HEAP_OBJECTS_VISITING_INL_H_
#define V8_HEAP_OBJECTS_VISITING_INL_H_
-#include "src/heap/objects-visiting.h"
-
#include "src/heap/embedder-tracing.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/objects-visiting.h"
+#include "src/objects/arguments.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/module-inl.h"
@@ -16,6 +16,8 @@
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table.h"
+#include "src/objects/synthetic-module-inl.h"
+#include "src/objects/torque-defined-classes.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 218a7a03c9..64a05f48bf 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -87,16 +87,16 @@ static void ClearWeakList(Heap* heap, Object list) {
template <>
struct WeakListVisitor<Code> {
static void SetWeakNext(Code code, Object next) {
- code.code_data_container().set_next_code_link(next,
- UPDATE_WEAK_WRITE_BARRIER);
+ code.code_data_container(kAcquireLoad)
+ .set_next_code_link(next, UPDATE_WEAK_WRITE_BARRIER);
}
static Object WeakNext(Code code) {
- return code.code_data_container().next_code_link();
+ return code.code_data_container(kAcquireLoad).next_code_link();
}
static HeapObject WeakNextHolder(Code code) {
- return code.code_data_container();
+ return code.code_data_container(kAcquireLoad);
}
static int WeakNextOffset() { return CodeDataContainer::kNextCodeLinkOffset; }
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 7ecb66bcee..310ea893cc 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -25,7 +25,6 @@ namespace internal {
V(Context) \
V(CoverageInfo) \
V(DataHandler) \
- V(DescriptorArray) \
V(EmbedderDataArray) \
V(EphemeronHashTable) \
V(FeedbackCell) \
@@ -41,7 +40,6 @@ namespace internal {
V(JSWeakRef) \
V(Map) \
V(NativeContext) \
- V(Oddball) \
V(PreparseData) \
V(PropertyArray) \
V(PropertyCell) \
@@ -57,7 +55,6 @@ namespace internal {
V(UncompiledDataWithoutPreparseData) \
V(UncompiledDataWithPreparseData) \
V(WasmArray) \
- V(WasmCapiFunctionData) \
V(WasmIndirectFunctionTable) \
V(WasmInstanceObject) \
V(WasmStruct) \
diff --git a/deps/v8/src/heap/paged-spaces.cc b/deps/v8/src/heap/paged-spaces.cc
index 5ab30e3aa8..ff6b390ccf 100644
--- a/deps/v8/src/heap/paged-spaces.cc
+++ b/deps/v8/src/heap/paged-spaces.cc
@@ -435,10 +435,9 @@ void PagedSpace::FreeLinearAllocationArea() {
MemoryChunk::FromAddress(current_top));
}
- DCHECK_IMPLIES(
- current_limit - current_top >= 2 * kTaggedSize,
- heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
- HeapObject::FromAddress(current_top)));
+ DCHECK_IMPLIES(current_limit - current_top >= 2 * kTaggedSize,
+ heap()->incremental_marking()->marking_state()->IsWhite(
+ HeapObject::FromAddress(current_top)));
Free(current_top, current_limit - current_top,
SpaceAccountingMode::kSpaceAccounted);
}
@@ -844,6 +843,18 @@ bool CompactionSpace::RefillLabMain(int size_in_bytes,
return RawRefillLabMain(size_in_bytes, origin);
}
+bool PagedSpace::TryExpand(int size_in_bytes, AllocationOrigin origin) {
+ Page* page = Expand();
+ if (!page) return false;
+ if (!is_compaction_space()) {
+ heap()->NotifyOldGenerationExpansion(identity(), page);
+ }
+ DCHECK((CountTotalPages() > 1) ||
+ (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
+ return TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
+ origin);
+}
+
bool PagedSpace::RawRefillLabMain(int size_in_bytes, AllocationOrigin origin) {
// Non-compaction local spaces are not supported.
DCHECK_IMPLIES(is_local_space(), is_compaction_space());
@@ -886,33 +897,22 @@ bool PagedSpace::RawRefillLabMain(int size_in_bytes, AllocationOrigin origin) {
if (heap()->ShouldExpandOldGenerationOnSlowAllocation() &&
heap()->CanExpandOldGeneration(AreaSize())) {
- Page* page = Expand();
- if (page) {
- if (!is_compaction_space()) {
- heap()->NotifyOldGenerationExpansion(identity(), page);
- }
- DCHECK((CountTotalPages() > 1) ||
- (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
- return TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
- origin);
+ if (TryExpand(size_in_bytes, origin)) {
+ return true;
}
}
- if (is_compaction_space()) {
- return ContributeToSweepingMain(0, 0, size_in_bytes, origin);
-
- } else {
- DCHECK(!is_local_space());
- if (collector->sweeping_in_progress()) {
- // Complete sweeping for this space.
- collector->DrainSweepingWorklistForSpace(identity());
- RefillFreeList();
+ // Try sweeping all pages.
+ if (ContributeToSweepingMain(0, 0, size_in_bytes, origin)) {
+ return true;
+ }
- // Last try to acquire memory from free list.
- return TryAllocationFromFreeListMain(size_in_bytes, origin);
- }
- return false;
+ if (heap()->gc_state() != Heap::NOT_IN_GC && !heap()->force_oom()) {
+ // Avoid OOM crash in the GC in order to invoke NearHeapLimitCallback after
+ // GC and give it a chance to increase the heap limit.
+ return TryExpand(size_in_bytes, origin);
}
+ return false;
}
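After this restructuring, the slow path reads as one linear fallback chain: free list, expansion, sweeping contribution, and, only while a GC is already running, a final expansion so NearHeapLimitCallback gets a chance to raise the limit before declaring OOM. A condensed sketch of that ordering; the three Try* probes are placeholders for the heap queries in the diff:

#include <iostream>

struct Heap {
  bool can_expand = true;
  bool in_gc = false;
  bool force_oom = false;
};

bool TryFreeList() { return false; }           // Placeholder probes.
bool TryExpand(Heap& heap) { return heap.can_expand; }
bool TrySweeping() { return false; }

bool RefillSlowPath(Heap& heap) {
  if (TryFreeList()) return true;                       // Cheapest first.
  if (heap.can_expand && TryExpand(heap)) return true;  // Normal growth.
  if (TrySweeping()) return true;                       // Reclaim via sweeper.
  // Inside a GC, avoid an OOM crash so the near-heap-limit callback can
  // still raise the limit; expand one last time.
  if (heap.in_gc && !heap.force_oom) return TryExpand(heap);
  return false;
}

int main() {
  Heap heap;
  std::cout << RefillSlowPath(heap) << "\n";  // 1: expansion succeeded.
}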
bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
@@ -926,12 +926,11 @@ bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
- int max_freed = collector->sweeper()->ParallelSweepSpace(
- identity(), required_freed_bytes, max_pages,
- invalidated_slots_in_free_space);
+ collector->sweeper()->ParallelSweepSpace(identity(), required_freed_bytes,
+ max_pages,
+ invalidated_slots_in_free_space);
RefillFreeList();
- if (max_freed >= size_in_bytes)
- return TryAllocationFromFreeListMain(size_in_bytes, origin);
+ return TryAllocationFromFreeListMain(size_in_bytes, origin);
}
return false;
}
diff --git a/deps/v8/src/heap/paged-spaces.h b/deps/v8/src/heap/paged-spaces.h
index 198f12e103..97670517cd 100644
--- a/deps/v8/src/heap/paged-spaces.h
+++ b/deps/v8/src/heap/paged-spaces.h
@@ -386,6 +386,9 @@ class V8_EXPORT_PRIVATE PagedSpace
AllocationAlignment alignment,
AllocationOrigin origin);
+ V8_WARN_UNUSED_RESULT bool TryExpand(int size_in_bytes,
+ AllocationOrigin origin);
+
Executability executable_;
LocalSpaceKind local_space_kind_;
@@ -512,7 +515,8 @@ class MapSpace : public PagedSpace {
public:
// Creates a map space object.
explicit MapSpace(Heap* heap)
- : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, new FreeListMap()) {}
+ : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE,
+ FreeList::CreateFreeList()) {}
int RoundSizeDownToObjectAlignment(int size) override {
if (base::bits::IsPowerOfTwo(Map::kSize)) {
diff --git a/deps/v8/src/heap/parallel-work-item.h b/deps/v8/src/heap/parallel-work-item.h
new file mode 100644
index 0000000000..9f58d30519
--- /dev/null
+++ b/deps/v8/src/heap/parallel-work-item.h
@@ -0,0 +1,32 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_PARALLEL_WORK_ITEM_H_
+#define V8_HEAP_PARALLEL_WORK_ITEM_H_
+
+#include <atomic>
+
+namespace v8 {
+namespace internal {
+
+class ParallelWorkItem {
+ public:
+ ParallelWorkItem() = default;
+
+ bool TryAcquire() {
+ // memory_order_relaxed is sufficient as the work item's state itself hasn't
+ // been modified since the beginning of its associated job. This is only
+ // atomically acquiring the right to work on it.
+ return reinterpret_cast<std::atomic<bool>*>(&acquire_)->exchange(
+ true, std::memory_order_relaxed) == false;
+ }
+
+ private:
+ bool acquire_{false};
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_PARALLEL_WORK_ITEM_H_
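ParallelWorkItem::TryAcquire type-puns a plain bool into a std::atomic<bool> and claims it with a relaxed exchange; relaxed is enough because the exchange only arbitrates ownership, while the chunk data it guards was published before the job was posted. A sketch of the same claim semantics with a real std::atomic<bool> member instead of the reinterpret_cast (which the original likely uses so items stay copyable for std::vector storage):

#include <atomic>
#include <iostream>
#include <thread>
#include <vector>

class WorkItem {
 public:
  // First caller gets true; everyone else false. Relaxed ordering suffices:
  // this only decides ownership, it publishes no data of its own.
  bool TryAcquire() {
    return !acquired_.exchange(true, std::memory_order_relaxed);
  }

 private:
  std::atomic<bool> acquired_{false};
};

int main() {
  std::vector<WorkItem> items(8);
  std::atomic<int> claimed{0};
  auto worker = [&] {
    for (auto& item : items)
      if (item.TryAcquire()) claimed.fetch_add(1, std::memory_order_relaxed);
  };
  std::thread a(worker), b(worker);
  a.join();
  b.join();
  std::cout << claimed << "\n";  // Always 8: each item claimed exactly once.
}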
diff --git a/deps/v8/src/heap/read-only-heap-inl.h b/deps/v8/src/heap/read-only-heap-inl.h
index d8358c5812..316f455013 100644
--- a/deps/v8/src/heap/read-only-heap-inl.h
+++ b/deps/v8/src/heap/read-only-heap-inl.h
@@ -15,8 +15,8 @@ namespace internal {
// static
ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) {
#ifdef V8_COMPRESS_POINTERS
- const Isolate* isolate = GetIsolateForPtrCompr(object);
- return ReadOnlyRoots(const_cast<Isolate*>(isolate));
+ IsolateRoot isolate = GetIsolateForPtrCompr(object);
+ return ReadOnlyRoots(Isolate::FromRootAddress(isolate.address()));
#else
#ifdef V8_SHARED_RO_HEAP
// This fails if we are creating heap objects and the roots haven't yet been
diff --git a/deps/v8/src/heap/read-only-heap.cc b/deps/v8/src/heap/read-only-heap.cc
index 590b94bc0b..342ad1d031 100644
--- a/deps/v8/src/heap/read-only-heap.cc
+++ b/deps/v8/src/heap/read-only-heap.cc
@@ -60,21 +60,24 @@ bool ReadOnlyHeap::IsSharedMemoryAvailable() {
SoleReadOnlyHeap* SoleReadOnlyHeap::shared_ro_heap_ = nullptr;
// static
-void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
+void ReadOnlyHeap::SetUp(Isolate* isolate,
+ SnapshotData* read_only_snapshot_data,
+ bool can_rehash) {
DCHECK_NOT_NULL(isolate);
if (IsReadOnlySpaceShared()) {
ReadOnlyHeap* ro_heap;
- if (des != nullptr) {
+ if (read_only_snapshot_data != nullptr) {
bool read_only_heap_created = false;
base::MutexGuard guard(read_only_heap_creation_mutex_.Pointer());
std::shared_ptr<ReadOnlyArtifacts> artifacts =
read_only_artifacts_.Get().lock();
if (!artifacts) {
artifacts = InitializeSharedReadOnlyArtifacts();
- artifacts->InitializeChecksum(des);
+ artifacts->InitializeChecksum(read_only_snapshot_data);
ro_heap = CreateInitalHeapForBootstrapping(isolate, artifacts);
- ro_heap->DeseralizeIntoIsolate(isolate, des);
+ ro_heap->DeseralizeIntoIsolate(isolate, read_only_snapshot_data,
+ can_rehash);
read_only_heap_created = true;
} else {
// With pointer compression, there is one ReadOnlyHeap per Isolate.
@@ -82,7 +85,8 @@ void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
ro_heap = artifacts->GetReadOnlyHeapForIsolate(isolate);
isolate->SetUpFromReadOnlyArtifacts(artifacts, ro_heap);
}
- artifacts->VerifyChecksum(des, read_only_heap_created);
+ artifacts->VerifyChecksum(read_only_snapshot_data,
+ read_only_heap_created);
ro_heap->InitializeIsolateRoots(isolate);
} else {
// This path should only be taken in mksnapshot, should only be run once
@@ -94,21 +98,24 @@ void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
artifacts = InitializeSharedReadOnlyArtifacts();
ro_heap = CreateInitalHeapForBootstrapping(isolate, artifacts);
- artifacts->VerifyChecksum(des, true);
+ artifacts->VerifyChecksum(read_only_snapshot_data, true);
}
} else {
auto* ro_heap = new ReadOnlyHeap(new ReadOnlySpace(isolate->heap()));
isolate->SetUpFromReadOnlyArtifacts(nullptr, ro_heap);
- if (des != nullptr) {
- ro_heap->DeseralizeIntoIsolate(isolate, des);
+ if (read_only_snapshot_data != nullptr) {
+ ro_heap->DeseralizeIntoIsolate(isolate, read_only_snapshot_data,
+ can_rehash);
}
}
}
void ReadOnlyHeap::DeseralizeIntoIsolate(Isolate* isolate,
- ReadOnlyDeserializer* des) {
- DCHECK_NOT_NULL(des);
- des->DeserializeInto(isolate);
+ SnapshotData* read_only_snapshot_data,
+ bool can_rehash) {
+ DCHECK_NOT_NULL(read_only_snapshot_data);
+ ReadOnlyDeserializer des(isolate, read_only_snapshot_data, can_rehash);
+ des.DeserializeIntoIsolate();
InitFromIsolate(isolate);
}
@@ -212,7 +219,11 @@ void ReadOnlyHeap::PopulateReadOnlySpaceStatistics(
// static
bool ReadOnlyHeap::Contains(Address address) {
- return BasicMemoryChunk::FromAddress(address)->InReadOnlySpace();
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ return third_party_heap::Heap::InReadOnlySpace(address);
+ } else {
+ return BasicMemoryChunk::FromAddress(address)->InReadOnlySpace();
+ }
}
// static
diff --git a/deps/v8/src/heap/read-only-heap.h b/deps/v8/src/heap/read-only-heap.h
index cc62b71e5b..aea6c8d0a7 100644
--- a/deps/v8/src/heap/read-only-heap.h
+++ b/deps/v8/src/heap/read-only-heap.h
@@ -25,10 +25,10 @@ class BasicMemoryChunk;
class Isolate;
class Page;
class ReadOnlyArtifacts;
-class ReadOnlyDeserializer;
class ReadOnlyPage;
class ReadOnlySpace;
class SharedReadOnlySpace;
+class SnapshotData;
// This class transparently manages read-only space, roots and cache creation
// and destruction.
@@ -47,7 +47,8 @@ class ReadOnlyHeap {
// V8_SHARED_RO_HEAP is enabled, a lock will be held until that method is
// called.
// TODO(v8:7464): Ideally we'd create this without needing a heap.
- static void SetUp(Isolate* isolate, ReadOnlyDeserializer* des);
+ static void SetUp(Isolate* isolate, SnapshotData* read_only_snapshot_data,
+ bool can_rehash);
// Indicates that the isolate has been set up and all read-only space objects
// have been created and will not be written to. This should only be called if
// a deserializer was not previously provided to Setup. When V8_SHARED_RO_HEAP
@@ -101,7 +102,9 @@ class ReadOnlyHeap {
Isolate* isolate, std::shared_ptr<ReadOnlyArtifacts> artifacts);
// Runs the read-only deserializer and calls InitFromIsolate to complete
// read-only heap initialization.
- void DeseralizeIntoIsolate(Isolate* isolate, ReadOnlyDeserializer* des);
+ void DeseralizeIntoIsolate(Isolate* isolate,
+ SnapshotData* read_only_snapshot_data,
+ bool can_rehash);
// Initializes read-only heap from an already set-up isolate, copying
// read-only roots from the isolate. This then seals the space off from
// further writes, marks it as read-only and detaches it from the heap
diff --git a/deps/v8/src/heap/read-only-spaces.cc b/deps/v8/src/heap/read-only-spaces.cc
index 1ceee90a50..b54bfc0389 100644
--- a/deps/v8/src/heap/read-only-spaces.cc
+++ b/deps/v8/src/heap/read-only-spaces.cc
@@ -28,7 +28,7 @@ namespace v8 {
namespace internal {
void CopyAndRebaseRoots(Address* src, Address* dst, Address new_base) {
- Address src_base = GetIsolateRoot(src[0]);
+ Address src_base = GetIsolateRootAddress(src[0]);
for (size_t i = 0; i < ReadOnlyHeap::kEntriesCount; ++i) {
dst[i] = src[i] - src_base + new_base;
}
@@ -39,22 +39,24 @@ void ReadOnlyArtifacts::set_read_only_heap(
read_only_heap_ = std::move(read_only_heap);
}
-void ReadOnlyArtifacts::InitializeChecksum(ReadOnlyDeserializer* des) {
+void ReadOnlyArtifacts::InitializeChecksum(
+ SnapshotData* read_only_snapshot_data) {
#ifdef DEBUG
- read_only_blob_checksum_ = des->GetChecksum();
+ read_only_blob_checksum_ = Checksum(read_only_snapshot_data->Payload());
#endif // DEBUG
}
-void ReadOnlyArtifacts::VerifyChecksum(ReadOnlyDeserializer* des,
+void ReadOnlyArtifacts::VerifyChecksum(SnapshotData* read_only_snapshot_data,
bool read_only_heap_created) {
#ifdef DEBUG
if (read_only_blob_checksum_) {
    // The read-only heap was set up from a snapshot. Make sure it's always
    // the same snapshot.
- CHECK_WITH_MSG(des->GetChecksum(),
+ uint32_t snapshot_checksum = Checksum(read_only_snapshot_data->Payload());
+ CHECK_WITH_MSG(snapshot_checksum,
"Attempt to create the read-only heap after already "
"creating from a snapshot.");
- CHECK_EQ(read_only_blob_checksum_, des->GetChecksum());
+ CHECK_EQ(read_only_blob_checksum_, snapshot_checksum);
} else {
// If there's no checksum, then that means the read-only heap objects are
// being created.
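The checksum bookkeeping now hashes the raw snapshot payload itself instead of asking a deserializer object: the first setup records Checksum(payload), and every later setup recomputes and compares, catching an attempt to rebuild the shared read-only heap from a different snapshot. A sketch of that record-then-verify shape; Checksum here is a toy stand-in for V8's function:

#include <cassert>
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

// Toy stand-in for V8's Checksum(payload): FNV-1a, illustrative only.
uint32_t Checksum(const std::string& payload) {
  uint32_t h = 2166136261u;
  for (unsigned char c : payload) h = (h ^ c) * 16777619u;
  return h;
}

struct ReadOnlyArtifacts {
  std::optional<uint32_t> stored;

  void InitializeChecksum(const std::string& payload) {
    stored = Checksum(payload);  // First creation records the checksum.
  }
  void VerifyChecksum(const std::string& payload) {
    // Later creations must come from byte-identical snapshot data.
    if (stored) assert(*stored == Checksum(payload));
  }
};

int main() {
  ReadOnlyArtifacts artifacts;
  std::string snapshot = "read-only snapshot bytes";
  artifacts.InitializeChecksum(snapshot);
  artifacts.VerifyChecksum(snapshot);  // OK: identical payload.
  std::cout << "checksum verified\n";
}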
@@ -113,7 +115,7 @@ void PointerCompressedReadOnlyArtifacts::InitializeRootsIn(Isolate* isolate) {
auto isolate_ro_roots =
isolate->roots_table().read_only_roots_begin().location();
CopyAndRebaseRoots(read_only_roots_, isolate_ro_roots,
- GetIsolateRoot(isolate));
+ isolate->isolate_root());
}
SharedReadOnlySpace* PointerCompressedReadOnlyArtifacts::CreateReadOnlySpace(
@@ -123,7 +125,7 @@ SharedReadOnlySpace* PointerCompressedReadOnlyArtifacts::CreateReadOnlySpace(
std::vector<std::unique_ptr<v8::PageAllocator::SharedMemoryMapping>> mappings;
std::vector<ReadOnlyPage*> pages;
- Address isolate_root = GetIsolateRoot(isolate);
+ Address isolate_root = isolate->isolate_root();
for (size_t i = 0; i < pages_.size(); ++i) {
const ReadOnlyPage* page = pages_[i];
const Tagged_t offset = OffsetForPage(i);
@@ -167,7 +169,7 @@ ReadOnlyHeap* PointerCompressedReadOnlyArtifacts::GetReadOnlyHeapForIsolate(
// ReadOnlyArtifacts and be decompressed on the fly.
auto original_cache = read_only_heap_->read_only_object_cache_;
auto& cache = read_only_heap->read_only_object_cache_;
- Address isolate_root = GetIsolateRoot(isolate);
+ Address isolate_root = isolate->isolate_root();
for (Object original_object : original_cache) {
Address original_address = original_object.ptr();
Address new_address = isolate_root + CompressTagged(original_address);
@@ -720,6 +722,7 @@ size_t ReadOnlyPage::ShrinkToHighWaterMark() {
}
void ReadOnlySpace::ShrinkPages() {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return;
BasicMemoryChunk::UpdateHighWaterMark(top_);
heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_),
ClearRecordedSlots::kNo);
diff --git a/deps/v8/src/heap/read-only-spaces.h b/deps/v8/src/heap/read-only-spaces.h
index 2bdf09d0ab..ffadcb55b3 100644
--- a/deps/v8/src/heap/read-only-spaces.h
+++ b/deps/v8/src/heap/read-only-spaces.h
@@ -20,9 +20,9 @@
namespace v8 {
namespace internal {
-class ReadOnlyDeserializer;
class MemoryAllocator;
class ReadOnlyHeap;
+class SnapshotData;
class ReadOnlyPage : public BasicMemoryChunk {
public:
@@ -100,8 +100,9 @@ class ReadOnlyArtifacts {
void set_read_only_heap(std::unique_ptr<ReadOnlyHeap> read_only_heap);
ReadOnlyHeap* read_only_heap() const { return read_only_heap_.get(); }
- void InitializeChecksum(ReadOnlyDeserializer* des);
- void VerifyChecksum(ReadOnlyDeserializer* des, bool read_only_heap_created);
+ void InitializeChecksum(SnapshotData* read_only_snapshot_data);
+ void VerifyChecksum(SnapshotData* read_only_snapshot_data,
+ bool read_only_heap_created);
protected:
ReadOnlyArtifacts() = default;
diff --git a/deps/v8/src/heap/safepoint.cc b/deps/v8/src/heap/safepoint.cc
index a306fbde08..2550851b30 100644
--- a/deps/v8/src/heap/safepoint.cc
+++ b/deps/v8/src/heap/safepoint.cc
@@ -21,7 +21,7 @@ void GlobalSafepoint::EnterSafepointScope() {
if (++active_safepoint_scopes_ > 1) return;
- TimedHistogramScope timer(heap_->isolate()->counters()->time_to_safepoint());
+ TimedHistogramScope timer(heap_->isolate()->counters()->stop_the_world());
TRACE_GC(heap_->tracer(), GCTracer::Scope::STOP_THE_WORLD);
local_heaps_mutex_.Lock();
@@ -40,8 +40,10 @@ void GlobalSafepoint::EnterSafepointScope() {
for (LocalHeap* current = local_heaps_head_; current;
current = current->next_) {
if (current == local_heap_of_this_thread_) {
+ DCHECK(current->is_main_thread());
continue;
}
+ DCHECK(!current->is_main_thread());
current->state_mutex_.Lock();
while (current->state_ == LocalHeap::ThreadState::Running) {
@@ -114,23 +116,6 @@ SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
SafepointScope::~SafepointScope() { safepoint_->LeaveSafepointScope(); }
-void GlobalSafepoint::AddLocalHeap(LocalHeap* local_heap) {
- base::MutexGuard guard(&local_heaps_mutex_);
- if (local_heaps_head_) local_heaps_head_->prev_ = local_heap;
- local_heap->prev_ = nullptr;
- local_heap->next_ = local_heaps_head_;
- local_heaps_head_ = local_heap;
-}
-
-void GlobalSafepoint::RemoveLocalHeap(LocalHeap* local_heap) {
- base::MutexGuard guard(&local_heaps_mutex_);
- if (local_heap->next_) local_heap->next_->prev_ = local_heap->prev_;
- if (local_heap->prev_)
- local_heap->prev_->next_ = local_heap->next_;
- else
- local_heaps_head_ = local_heap->next_;
-}
-
bool GlobalSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
base::MutexGuard guard(&local_heaps_mutex_);
LocalHeap* current = local_heaps_head_;
diff --git a/deps/v8/src/heap/safepoint.h b/deps/v8/src/heap/safepoint.h
index efe499ea13..dd2bb421be 100644
--- a/deps/v8/src/heap/safepoint.h
+++ b/deps/v8/src/heap/safepoint.h
@@ -62,8 +62,36 @@ class GlobalSafepoint {
void EnterSafepointScope();
void LeaveSafepointScope();
- void AddLocalHeap(LocalHeap* local_heap);
- void RemoveLocalHeap(LocalHeap* local_heap);
+ template <typename Callback>
+ void AddLocalHeap(LocalHeap* local_heap, Callback callback) {
+ // Safepoint holds this lock in order to stop threads from starting or
+ // stopping.
+ base::MutexGuard guard(&local_heaps_mutex_);
+
+ // Additional code protected from safepoint
+ callback();
+
+    // Add the local heap to the doubly-linked list
+ if (local_heaps_head_) local_heaps_head_->prev_ = local_heap;
+ local_heap->prev_ = nullptr;
+ local_heap->next_ = local_heaps_head_;
+ local_heaps_head_ = local_heap;
+ }
+
+ template <typename Callback>
+ void RemoveLocalHeap(LocalHeap* local_heap, Callback callback) {
+ base::MutexGuard guard(&local_heaps_mutex_);
+
+ // Additional code protected from safepoint
+ callback();
+
+    // Remove the local heap from the doubly-linked list
+ if (local_heap->next_) local_heap->next_->prev_ = local_heap->prev_;
+ if (local_heap->prev_)
+ local_heap->prev_->next_ = local_heap->next_;
+ else
+ local_heaps_head_ = local_heap->next_;
+ }
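AddLocalHeap and RemoveLocalHeap now run a caller-supplied callback while local_heaps_mutex_ is held, so a thread's setup or teardown becomes atomic with respect to any safepoint that iterates the heap list. A small sketch of that shape, with a plain std::list standing in for V8's intrusive doubly-linked list:

#include <iostream>
#include <list>
#include <mutex>

struct LocalHeap {
  int id;
};

class Safepoint {
 public:
  // The callback runs under the same mutex a safepoint must take to iterate
  // heaps, so registration plus setup is atomic w.r.t. safepoints.
  template <typename Callback>
  void AddLocalHeap(LocalHeap* heap, Callback callback) {
    std::lock_guard<std::mutex> guard(mutex_);
    callback();  // e.g. publish thread-local state.
    heaps_.push_front(heap);
  }

  template <typename Callback>
  void RemoveLocalHeap(LocalHeap* heap, Callback callback) {
    std::lock_guard<std::mutex> guard(mutex_);
    callback();  // e.g. retire thread-local state.
    heaps_.remove(heap);
  }

 private:
  std::mutex mutex_;
  std::list<LocalHeap*> heaps_;
};

int main() {
  Safepoint safepoint;
  LocalHeap heap{1};
  safepoint.AddLocalHeap(&heap, [] { std::cout << "setup under lock\n"; });
  safepoint.RemoveLocalHeap(&heap, [] { std::cout << "teardown under lock\n"; });
}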
Barrier barrier_;
Heap* heap_;
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 18933a5ac7..8560b5b62b 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -38,6 +38,10 @@ bool Scavenger::PromotionList::View::Pop(struct PromotionListEntry* entry) {
return promotion_list_->Pop(task_id_, entry);
}
+void Scavenger::PromotionList::View::FlushToGlobal() {
+ promotion_list_->FlushToGlobal(task_id_);
+}
+
bool Scavenger::PromotionList::View::IsGlobalPoolEmpty() {
return promotion_list_->IsGlobalPoolEmpty();
}
@@ -78,6 +82,16 @@ bool Scavenger::PromotionList::Pop(int task_id,
return large_object_promotion_list_.Pop(task_id, entry);
}
+void Scavenger::PromotionList::FlushToGlobal(int task_id) {
+ regular_object_promotion_list_.FlushToGlobal(task_id);
+ large_object_promotion_list_.FlushToGlobal(task_id);
+}
+
+size_t Scavenger::PromotionList::GlobalPoolSize() const {
+ return regular_object_promotion_list_.GlobalPoolSize() +
+ large_object_promotion_list_.GlobalPoolSize();
+}
+
bool Scavenger::PromotionList::IsGlobalPoolEmpty() {
return regular_object_promotion_list_.IsGlobalPoolEmpty() &&
large_object_promotion_list_.IsGlobalPoolEmpty();
@@ -109,7 +123,7 @@ bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
heap()->CopyBlock(target.address() + kTaggedSize,
source.address() + kTaggedSize, size - kTaggedSize);
- if (!source.synchronized_compare_and_swap_map_word(
+ if (!source.release_compare_and_swap_map_word(
MapWord::FromMap(map), MapWord::FromForwardingAddress(target))) {
// Other task migrated the object.
return false;
@@ -214,7 +228,7 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
BasicMemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
DCHECK_EQ(NEW_LO_SPACE,
MemoryChunk::FromHeapObject(object)->owner_identity());
- if (object.synchronized_compare_and_swap_map_word(
+ if (object.release_compare_and_swap_map_word(
MapWord::FromMap(map), MapWord::FromForwardingAddress(object))) {
surviving_new_large_objects_.insert({object, map});
promoted_size_ += object_size;
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index f51a385085..ea4cb90459 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -25,65 +25,6 @@
namespace v8 {
namespace internal {
-class PageScavengingItem final : public ItemParallelJob::Item {
- public:
- explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
- ~PageScavengingItem() override = default;
-
- void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }
-
- private:
- MemoryChunk* const chunk_;
-};
-
-class ScavengingTask final : public ItemParallelJob::Task {
- public:
- ScavengingTask(Heap* heap, Scavenger* scavenger, OneshotBarrier* barrier)
- : ItemParallelJob::Task(heap->isolate()),
- heap_(heap),
- scavenger_(scavenger),
- barrier_(barrier) {}
-
- void RunInParallel(Runner runner) final {
- if (runner == Runner::kForeground) {
- TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
- ProcessItems();
- } else {
- TRACE_BACKGROUND_GC(
- heap_->tracer(),
- GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
- ProcessItems();
- }
- }
-
- private:
- void ProcessItems() {
- double scavenging_time = 0.0;
- {
- barrier_->Start();
- TimedScope scope(&scavenging_time);
- PageScavengingItem* item = nullptr;
- while ((item = GetItem<PageScavengingItem>()) != nullptr) {
- item->Process(scavenger_);
- item->MarkFinished();
- }
- do {
- scavenger_->Process(barrier_);
- } while (!barrier_->Wait());
- scavenger_->Process();
- }
- if (FLAG_trace_parallel_scavenge) {
- PrintIsolate(heap_->isolate(),
- "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
- static_cast<void*>(this), scavenging_time,
- scavenger_->bytes_copied(), scavenger_->bytes_promoted());
- }
- }
- Heap* const heap_;
- Scavenger* const scavenger_;
- OneshotBarrier* const barrier_;
-};
-
class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
public:
IterateAndScavengePromotedObjectsVisitor(Scavenger* scavenger,
@@ -219,8 +160,81 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
}
};
+ScavengerCollector::JobTask::JobTask(
+ ScavengerCollector* outer,
+ std::vector<std::unique_ptr<Scavenger>>* scavengers,
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks,
+ Scavenger::CopiedList* copied_list,
+ Scavenger::PromotionList* promotion_list)
+ : outer_(outer),
+ scavengers_(scavengers),
+ memory_chunks_(std::move(memory_chunks)),
+ remaining_memory_chunks_(memory_chunks_.size()),
+ generator_(memory_chunks_.size()),
+ copied_list_(copied_list),
+ promotion_list_(promotion_list) {}
+
+void ScavengerCollector::JobTask::Run(JobDelegate* delegate) {
+ DCHECK_LT(delegate->GetTaskId(), scavengers_->size());
+ Scavenger* scavenger = (*scavengers_)[delegate->GetTaskId()].get();
+ if (delegate->IsJoiningThread()) {
+ TRACE_GC(outer_->heap_->tracer(),
+ GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
+ ProcessItems(delegate, scavenger);
+ } else {
+ TRACE_BACKGROUND_GC(
+ outer_->heap_->tracer(),
+ GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
+ ProcessItems(delegate, scavenger);
+ }
+}
+
+size_t ScavengerCollector::JobTask::GetMaxConcurrency(
+ size_t worker_count) const {
+ // We need to account for local segments held by worker_count in addition to
+ // GlobalPoolSize() of copied_list_ and promotion_list_.
+ return std::min<size_t>(
+ scavengers_->size(),
+ std::max<size_t>(remaining_memory_chunks_.load(std::memory_order_relaxed),
+ worker_count + copied_list_->GlobalPoolSize() +
+ promotion_list_->GlobalPoolSize()));
+}
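The formula above sizes the job by demand: while chunks remain unclaimed, that count dominates; once they run out, demand is the active workers plus the segments still queued in the shared copied/promotion pools; the number of pre-created Scavenger objects is a hard cap. A worked sketch with illustrative inputs:

#include <algorithm>
#include <cstddef>
#include <iostream>

size_t MaxScavengeConcurrency(size_t num_scavengers, size_t remaining_chunks,
                              size_t worker_count, size_t copied_global,
                              size_t promoted_global) {
  // Demand: untouched chunks, or live workers plus globally queued segments.
  return std::min<size_t>(
      num_scavengers,
      std::max<size_t>(remaining_chunks,
                       worker_count + copied_global + promoted_global));
}

int main() {
  // Early in the pause: 40 chunks pending, the cap of 8 scavengers wins.
  std::cout << MaxScavengeConcurrency(8, 40, 0, 0, 0) << "\n";  // 8
  // Tail end: no chunks left, 2 workers and 3 queued segments -> 5.
  std::cout << MaxScavengeConcurrency(8, 0, 2, 3, 0) << "\n";   // 5
}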
+
+void ScavengerCollector::JobTask::ProcessItems(JobDelegate* delegate,
+ Scavenger* scavenger) {
+ double scavenging_time = 0.0;
+ {
+ TimedScope scope(&scavenging_time);
+ ConcurrentScavengePages(scavenger);
+ scavenger->Process(delegate);
+ }
+ if (FLAG_trace_parallel_scavenge) {
+ PrintIsolate(outer_->heap_->isolate(),
+ "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
+ static_cast<void*>(this), scavenging_time,
+ scavenger->bytes_copied(), scavenger->bytes_promoted());
+ }
+}
+
+void ScavengerCollector::JobTask::ConcurrentScavengePages(
+ Scavenger* scavenger) {
+ while (remaining_memory_chunks_.load(std::memory_order_relaxed) > 0) {
+ base::Optional<size_t> index = generator_.GetNext();
+ if (!index) return;
+ for (size_t i = *index; i < memory_chunks_.size(); ++i) {
+ auto& work_item = memory_chunks_[i];
+ if (!work_item.first.TryAcquire()) break;
+ scavenger->ScavengePage(work_item.second);
+ if (remaining_memory_chunks_.fetch_sub(1, std::memory_order_relaxed) <=
+ 1) {
+ return;
+ }
+ }
+ }
+}
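ConcurrentScavengePages distributes chunks with three cooperating pieces: a relaxed counter of unclaimed chunks, an index generator handing each worker a distinct starting point, and per-chunk TryAcquire; a worker sweeps forward from its index and fetches a fresh index the moment it collides with a chunk someone else owns. A single-threaded sketch of that walk; SequentialGenerator is a trivial stand-in for V8's IndexGenerator:

#include <atomic>
#include <cstddef>
#include <iostream>
#include <optional>
#include <vector>

struct WorkItem {
  std::atomic<bool> acquired{false};
  bool TryAcquire() { return !acquired.exchange(true, std::memory_order_relaxed); }
};

// Trivial stand-in for V8's IndexGenerator: hands out 0, 1, 2, ...
struct SequentialGenerator {
  explicit SequentialGenerator(size_t limit) : limit_(limit) {}
  std::optional<size_t> GetNext() {
    size_t i = next_.fetch_add(1, std::memory_order_relaxed);
    if (i >= limit_) return std::nullopt;
    return i;
  }
  std::atomic<size_t> next_{0};
  size_t limit_;
};

void ScavengeChunks(std::vector<WorkItem>& chunks,
                    std::atomic<size_t>& remaining,
                    SequentialGenerator& generator) {
  while (remaining.load(std::memory_order_relaxed) > 0) {
    std::optional<size_t> index = generator.GetNext();
    if (!index) return;
    for (size_t i = *index; i < chunks.size(); ++i) {
      if (!chunks[i].TryAcquire()) break;  // Owned elsewhere: take a new index.
      // ... scavenge chunks[i] here ...
      if (remaining.fetch_sub(1, std::memory_order_relaxed) <= 1) return;
    }
  }
}

int main() {
  std::vector<WorkItem> chunks(5);
  std::atomic<size_t> remaining{chunks.size()};
  SequentialGenerator generator(chunks.size());
  ScavengeChunks(chunks, remaining, generator);
  std::cout << remaining << "\n";  // 0: each chunk was scavenged exactly once.
}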
+
ScavengerCollector::ScavengerCollector(Heap* heap)
- : isolate_(heap->isolate()), heap_(heap), parallel_scavenge_semaphore_(0) {}
+ : isolate_(heap->isolate()), heap_(heap) {}
// Remove this crashkey after chromium:1010312 is fixed.
class ScopedFullHeapCrashKey {
@@ -246,23 +260,12 @@ void ScavengerCollector::CollectGarbage() {
}
DCHECK(surviving_new_large_objects_.empty());
- ItemParallelJob job(isolate_->cancelable_task_manager(),
- &parallel_scavenge_semaphore_);
- const int kMainThreadId = 0;
- Scavenger* scavengers[kMaxScavengerTasks];
- const bool is_logging = isolate_->LogObjectRelocation();
- const int num_scavenge_tasks = NumberOfScavengeTasks();
- OneshotBarrier barrier(base::TimeDelta::FromMilliseconds(kMaxWaitTimeMs));
+ std::vector<std::unique_ptr<Scavenger>> scavengers;
Worklist<MemoryChunk*, 64> empty_chunks;
+ const int num_scavenge_tasks = NumberOfScavengeTasks();
Scavenger::CopiedList copied_list(num_scavenge_tasks);
Scavenger::PromotionList promotion_list(num_scavenge_tasks);
EphemeronTableList ephemeron_table_list(num_scavenge_tasks);
- for (int i = 0; i < num_scavenge_tasks; i++) {
- scavengers[i] =
- new Scavenger(this, heap_, is_logging, &empty_chunks, &copied_list,
- &promotion_list, &ephemeron_table_list, i);
- job.AddTask(new ScavengingTask(heap_, scavengers[i], &barrier));
- }
{
Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();
@@ -289,12 +292,20 @@ void ScavengerCollector::CollectGarbage() {
return !page->ContainsSlots<OLD_TO_NEW>() && !page->sweeping_slot_set();
});
+ const bool is_logging = isolate_->LogObjectRelocation();
+ for (int i = 0; i < num_scavenge_tasks; ++i) {
+ scavengers.emplace_back(
+ new Scavenger(this, heap_, is_logging, &empty_chunks, &copied_list,
+ &promotion_list, &ephemeron_table_list, i));
+ }
+
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks;
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
- heap_, [&job](MemoryChunk* chunk) {
- job.AddItem(new PageScavengingItem(chunk));
+ heap_, [&memory_chunks](MemoryChunk* chunk) {
+ memory_chunks.emplace_back(ParallelWorkItem{}, chunk);
});
- RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId]);
+ RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId].get());
{
// Identify weak unmodified handles. Requires an unmodified graph.
@@ -319,18 +330,24 @@ void ScavengerCollector::CollectGarbage() {
heap_->IterateRoots(&root_scavenge_visitor, options);
isolate_->global_handles()->IterateYoungStrongAndDependentRoots(
&root_scavenge_visitor);
+ scavengers[kMainThreadId]->Flush();
}
{
// Parallel phase scavenging all copied and promoted objects.
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
- job.Run();
+ V8::GetCurrentPlatform()
+ ->PostJob(v8::TaskPriority::kUserBlocking,
+ std::make_unique<JobTask>(this, &scavengers,
+ std::move(memory_chunks),
+ &copied_list, &promotion_list))
+ ->Join();
DCHECK(copied_list.IsEmpty());
DCHECK(promotion_list.IsEmpty());
}
if (V8_UNLIKELY(FLAG_scavenge_separate_stack_scanning)) {
- IterateStackAndScavenge(&root_scavenge_visitor, scavengers,
- num_scavenge_tasks, kMainThreadId);
+ IterateStackAndScavenge(&root_scavenge_visitor, &scavengers,
+ kMainThreadId);
DCHECK(copied_list.IsEmpty());
DCHECK(promotion_list.IsEmpty());
}
@@ -357,10 +374,10 @@ void ScavengerCollector::CollectGarbage() {
DCHECK(surviving_new_large_objects_.empty());
- for (int i = 0; i < num_scavenge_tasks; i++) {
- scavengers[i]->Finalize();
- delete scavengers[i];
+ for (auto& scavenger : scavengers) {
+ scavenger->Finalize();
}
+ scavengers.clear();
HandleSurvivingNewLargeObjects();
}
@@ -420,23 +437,24 @@ void ScavengerCollector::CollectGarbage() {
}
void ScavengerCollector::IterateStackAndScavenge(
- RootScavengeVisitor* root_scavenge_visitor, Scavenger** scavengers,
- int num_scavenge_tasks, int main_thread_id) {
+
+ RootScavengeVisitor* root_scavenge_visitor,
+ std::vector<std::unique_ptr<Scavenger>>* scavengers, int main_thread_id) {
// Scan the stack, scavenge the newly discovered objects, and report
  // the survival statistics before and after the stack scanning.
// This code is not intended for production.
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_STACK_ROOTS);
size_t survived_bytes_before = 0;
- for (int i = 0; i < num_scavenge_tasks; i++) {
+ for (auto& scavenger : *scavengers) {
survived_bytes_before +=
- scavengers[i]->bytes_copied() + scavengers[i]->bytes_promoted();
+ scavenger->bytes_copied() + scavenger->bytes_promoted();
}
heap_->IterateStackRoots(root_scavenge_visitor);
- scavengers[main_thread_id]->Process();
+ (*scavengers)[main_thread_id]->Process();
size_t survived_bytes_after = 0;
- for (int i = 0; i < num_scavenge_tasks; i++) {
+ for (auto& scavenger : *scavengers) {
survived_bytes_after +=
- scavengers[i]->bytes_copied() + scavengers[i]->bytes_promoted();
+ scavenger->bytes_copied() + scavenger->bytes_promoted();
}
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"V8.GCScavengerStackScanning", "survived_bytes_before",
@@ -590,10 +608,9 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
AddPageToSweeperIfNecessary(page);
}
-void Scavenger::Process(OneshotBarrier* barrier) {
+void Scavenger::Process(JobDelegate* delegate) {
ScavengeVisitor scavenge_visitor(this);
- const bool have_barrier = barrier != nullptr;
bool done;
size_t objects = 0;
do {
@@ -603,9 +620,9 @@ void Scavenger::Process(OneshotBarrier* barrier) {
copied_list_.Pop(&object_and_size)) {
scavenge_visitor.Visit(object_and_size.first);
done = false;
- if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
+ if (delegate && ((++objects % kInterruptThreshold) == 0)) {
if (!copied_list_.IsGlobalPoolEmpty()) {
- barrier->NotifyAll();
+ delegate->NotifyConcurrencyIncrease();
}
}
}
@@ -615,9 +632,9 @@ void Scavenger::Process(OneshotBarrier* barrier) {
HeapObject target = entry.heap_object;
IterateAndScavengePromotedObject(target, entry.map, entry.size);
done = false;
- if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
+ if (delegate && ((++objects % kInterruptThreshold) == 0)) {
if (!promotion_list_.IsGlobalPoolEmpty()) {
- barrier->NotifyAll();
+ delegate->NotifyConcurrencyIncrease();
}
}
}
@@ -705,6 +722,11 @@ void Scavenger::Finalize() {
}
}
+void Scavenger::Flush() {
+ copied_list_.FlushToGlobal();
+ promotion_list_.FlushToGlobal();
+}
+
void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
ephemeron_table_list_.Push(table);
}
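
The scavenger.cc changes above replace the OneshotBarrier and explicit ScavengingTask threads with the v8::JobTask API: the collector posts one job, workers pull items until JobDelegate::ShouldYield() asks them to stop, and Scavenger::Process() calls NotifyConcurrencyIncrease() when a refilled global worklist could feed more workers. A minimal sketch of that pattern follows; WorkQueue and the item type are hypothetical stand-ins, not V8's actual classes.

#include <algorithm>
#include <atomic>
#include <memory>
#include <vector>

#include "v8-platform.h"  // v8::JobTask, v8::JobDelegate, v8::Platform

// Hypothetical shared queue; V8 uses its own Worklist plus an
// IndexGenerator for claiming pages.
struct WorkQueue {
  std::atomic<size_t> next{0};
  std::vector<int> items;

  bool Pop(int* out) {
    size_t i = next.fetch_add(1, std::memory_order_relaxed);
    if (i >= items.size()) return false;
    *out = items[i];
    return true;
  }
  size_t Remaining() const {
    size_t n = next.load(std::memory_order_relaxed);
    return n >= items.size() ? 0 : items.size() - n;
  }
};

class SketchJob : public v8::JobTask {
 public:
  explicit SketchJob(WorkQueue* queue) : queue_(queue) {}

  void Run(v8::JobDelegate* delegate) override {
    int item;
    // Drain items, yielding cooperatively when the scheduler asks. A
    // worker that publishes new shared work would additionally call
    // delegate->NotifyConcurrencyIncrease(), as Scavenger::Process does.
    while (!delegate->ShouldYield() && queue_->Pop(&item)) {
      // ... scavenge one unit of work ...
    }
  }

  // Polled by the scheduler; counting the remaining items lets
  // concurrency shrink as the queue drains.
  size_t GetMaxConcurrency(size_t worker_count) const override {
    return std::min<size_t>(8, worker_count + queue_->Remaining());
  }

 private:
  WorkQueue* queue_;
};

// Usage, mirroring CollectGarbage() above:
//   platform->PostJob(v8::TaskPriority::kUserBlocking,
//                     std::make_unique<SketchJob>(&queue))->Join();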
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index d96219fd51..481ec4d558 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -6,8 +6,10 @@
#define V8_HEAP_SCAVENGER_H_
#include "src/base/platform/condition-variable.h"
+#include "src/heap/index-generator.h"
#include "src/heap/local-allocator.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/parallel-work-item.h"
#include "src/heap/slot-set.h"
#include "src/heap/worklist.h"
@@ -33,38 +35,7 @@ constexpr int kEphemeronTableListSegmentSize = 128;
using EphemeronTableList =
Worklist<EphemeronHashTable, kEphemeronTableListSegmentSize>;
-class ScavengerCollector {
- public:
- static const int kMaxScavengerTasks = 8;
- static const int kMaxWaitTimeMs = 2;
-
- explicit ScavengerCollector(Heap* heap);
-
- void CollectGarbage();
-
- private:
- void MergeSurvivingNewLargeObjects(
- const SurvivingNewLargeObjectsMap& objects);
-
- int NumberOfScavengeTasks();
-
- void ProcessWeakReferences(EphemeronTableList* ephemeron_table_list);
- void ClearYoungEphemerons(EphemeronTableList* ephemeron_table_list);
- void ClearOldEphemerons();
- void HandleSurvivingNewLargeObjects();
-
- void SweepArrayBufferExtensions();
-
- void IterateStackAndScavenge(RootScavengeVisitor* root_scavenge_visitor,
- Scavenger** scavengers, int num_scavenge_tasks,
- int main_thread_id);
- Isolate* const isolate_;
- Heap* const heap_;
- base::Semaphore parallel_scavenge_semaphore_;
- SurvivingNewLargeObjectsMap surviving_new_large_objects_;
-
- friend class Scavenger;
-};
+class ScavengerCollector;
class Scavenger {
public:
@@ -88,6 +59,7 @@ class Scavenger {
inline bool Pop(struct PromotionListEntry* entry);
inline bool IsGlobalPoolEmpty();
inline bool ShouldEagerlyProcessPromotionList();
+ inline void FlushToGlobal();
private:
PromotionList* promotion_list_;
@@ -102,10 +74,12 @@ class Scavenger {
inline void PushLargeObject(int task_id, HeapObject object, Map map,
int size);
inline bool IsEmpty();
+ inline size_t GlobalPoolSize() const;
inline size_t LocalPushSegmentSize(int task_id);
inline bool Pop(int task_id, struct PromotionListEntry* entry);
inline bool IsGlobalPoolEmpty();
inline bool ShouldEagerlyProcessPromotionList(int task_id);
+ inline void FlushToGlobal(int task_id);
private:
static const int kRegularObjectPromotionListSegmentSize = 256;
@@ -134,10 +108,11 @@ class Scavenger {
// Processes remaining work (=objects) after single objects have been
// manually scavenged using ScavengeObject or CheckAndScavengeObject.
- void Process(OneshotBarrier* barrier = nullptr);
+ void Process(JobDelegate* delegate = nullptr);
// Finalize the Scavenger. Needs to be called from the main thread.
void Finalize();
+ void Flush();
void AddEphemeronHashTable(EphemeronHashTable table);
@@ -276,6 +251,66 @@ class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
Scavenger* const scavenger_;
};
+class ScavengerCollector {
+ public:
+ static const int kMaxScavengerTasks = 8;
+ static const int kMainThreadId = 0;
+
+ explicit ScavengerCollector(Heap* heap);
+
+ void CollectGarbage();
+
+ private:
+ class JobTask : public v8::JobTask {
+ public:
+ explicit JobTask(
+ ScavengerCollector* outer,
+ std::vector<std::unique_ptr<Scavenger>>* scavengers,
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks,
+ Scavenger::CopiedList* copied_list,
+ Scavenger::PromotionList* promotion_list);
+
+ void Run(JobDelegate* delegate) override;
+ size_t GetMaxConcurrency(size_t worker_count) const override;
+
+ private:
+ void ProcessItems(JobDelegate* delegate, Scavenger* scavenger);
+ void ConcurrentScavengePages(Scavenger* scavenger);
+
+ ScavengerCollector* outer_;
+
+ std::vector<std::unique_ptr<Scavenger>>* scavengers_;
+ std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks_;
+ std::atomic<size_t> remaining_memory_chunks_{0};
+ IndexGenerator generator_;
+
+ Scavenger::CopiedList* copied_list_;
+ Scavenger::PromotionList* promotion_list_;
+ };
+
+ void MergeSurvivingNewLargeObjects(
+ const SurvivingNewLargeObjectsMap& objects);
+
+ int NumberOfScavengeTasks();
+
+ void ProcessWeakReferences(EphemeronTableList* ephemeron_table_list);
+ void ClearYoungEphemerons(EphemeronTableList* ephemeron_table_list);
+ void ClearOldEphemerons();
+ void HandleSurvivingNewLargeObjects();
+
+ void SweepArrayBufferExtensions();
+
+ void IterateStackAndScavenge(
+ RootScavengeVisitor* root_scavenge_visitor,
+ std::vector<std::unique_ptr<Scavenger>>* scavengers, int main_thread_id);
+
+ Isolate* const isolate_;
+ Heap* const heap_;
+ SurvivingNewLargeObjectsMap surviving_new_large_objects_;
+
+ friend class Scavenger;
+};
+
} // namespace internal
} // namespace v8
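
The JobTask declared here backs the PostJob() call in scavenger.cc. The commit does not show its bodies at this point, but a plausible concurrency hint, assumed for illustration only, would size workers by unclaimed pages plus the globally visible worklist backlog, capped at kMaxScavengerTasks:

// Assumed sketch of the declared override; the real definition lives in
// scavenger.cc and may weigh these inputs differently.
size_t ScavengerCollector::JobTask::GetMaxConcurrency(
    size_t worker_count) const {
  size_t wanted =
      std::max(remaining_memory_chunks_.load(std::memory_order_relaxed),
               worker_count + copied_list_->GlobalPoolSize() +
                   promotion_list_->GlobalPoolSize());
  return std::min<size_t>(kMaxScavengerTasks, wanted);
}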
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index b1844256e8..521d1a10c6 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -42,11 +42,9 @@
#include "src/objects/string.h"
#include "src/objects/synthetic-module.h"
#include "src/objects/template-objects-inl.h"
+#include "src/objects/torque-defined-classes-inl.h"
#include "src/regexp/regexp.h"
#include "src/wasm/wasm-objects.h"
-#include "torque-generated/class-definitions.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/internal-class-definitions-inl.h"
namespace v8 {
namespace internal {
@@ -167,7 +165,8 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
map.set_instance_size(instance_size);
// Initialize to only containing tagged fields.
if (FLAG_unbox_double_fields) {
- map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout(),
+ kReleaseStore);
}
// GetVisitorId requires a properly initialized LayoutDescriptor.
map.set_visitor_id(Map::GetVisitorId(map));
@@ -194,7 +193,8 @@ void Heap::FinalizePartialMap(Map map) {
map.set_raw_transitions(MaybeObject::FromSmi(Smi::zero()));
map.SetInstanceDescriptors(isolate(), roots.empty_descriptor_array(), 0);
if (FLAG_unbox_double_fields) {
- map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout(),
+ kReleaseStore);
}
map.set_prototype(roots.null_value());
map.set_constructor_or_backpointer(roots.null_value());
@@ -427,8 +427,11 @@ bool Heap::CreateInitialMaps() {
TORQUE_DEFINED_FIXED_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_MAP);
#undef TORQUE_ALLOCATE_MAP
-#define TORQUE_ALLOCATE_VARSIZE_MAP(NAME, Name, name) \
- ALLOCATE_VARSIZE_MAP(NAME, name)
+#define TORQUE_ALLOCATE_VARSIZE_MAP(NAME, Name, name) \
+ /* The DescriptorArray map is pre-allocated and initialized above. */ \
+ if (NAME != DESCRIPTOR_ARRAY_TYPE) { \
+ ALLOCATE_VARSIZE_MAP(NAME, name) \
+ }
TORQUE_DEFINED_VARSIZE_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_VARSIZE_MAP);
#undef TORQUE_ALLOCATE_VARSIZE_MAP
@@ -842,25 +845,23 @@ void Heap::CreateInitialObjects() {
set_next_template_serial_number(Smi::zero());
// Allocate the empty OrderedHashMap.
- Handle<FixedArray> empty_ordered_hash_map = factory->NewFixedArray(
- OrderedHashMap::HashTableStartIndex(), AllocationType::kReadOnly);
- empty_ordered_hash_map->set_map_no_write_barrier(
- *factory->ordered_hash_map_map());
- for (int i = 0; i < empty_ordered_hash_map->length(); ++i) {
- empty_ordered_hash_map->set(i, Smi::zero());
- }
+ Handle<OrderedHashMap> empty_ordered_hash_map =
+ OrderedHashMap::AllocateEmpty(isolate(), AllocationType::kReadOnly)
+ .ToHandleChecked();
set_empty_ordered_hash_map(*empty_ordered_hash_map);
// Allocate the empty OrderedHashSet.
- Handle<FixedArray> empty_ordered_hash_set = factory->NewFixedArray(
- OrderedHashSet::HashTableStartIndex(), AllocationType::kReadOnly);
- empty_ordered_hash_set->set_map_no_write_barrier(
- *factory->ordered_hash_set_map());
- for (int i = 0; i < empty_ordered_hash_set->length(); ++i) {
- empty_ordered_hash_set->set(i, Smi::zero());
- }
+ Handle<OrderedHashSet> empty_ordered_hash_set =
+ OrderedHashSet::AllocateEmpty(isolate(), AllocationType::kReadOnly)
+ .ToHandleChecked();
set_empty_ordered_hash_set(*empty_ordered_hash_set);
+ // Allocate the empty OrderedNameDictionary.
+ Handle<OrderedNameDictionary> empty_ordered_property_dictionary =
+ OrderedNameDictionary::AllocateEmpty(isolate(), AllocationType::kReadOnly)
+ .ToHandleChecked();
+ set_empty_ordered_property_dictionary(*empty_ordered_property_dictionary);
+
// Allocate the empty FeedbackMetadata.
Handle<FeedbackMetadata> empty_feedback_metadata =
factory->NewFeedbackMetadata(0, 0, AllocationType::kReadOnly);
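
Several hunks in this commit (here, and in call-optimization.cc and ic.cc below) thread explicit kReleaseStore/kAcquireLoad tags through field accessors so concurrent marker threads read fields safely. For readers unfamiliar with the pairing, here is the idea in standard C++; this is an illustrative toy, not V8's tagged-field machinery:

#include <atomic>
#include <cassert>
#include <thread>

std::atomic<int*> field{nullptr};

void Writer() {
  int* p = new int(42);                       // initialize first...
  field.store(p, std::memory_order_release);  // ...then publish
}

void Reader() {
  int* p;
  while ((p = field.load(std::memory_order_acquire)) == nullptr) {
    // spin until the writer publishes
  }
  assert(*p == 42);  // acquire makes the initialization visible
}

int main() {
  std::thread w(Writer), r(Reader);
  w.join();
  r.join();
  delete field.load(std::memory_order_relaxed);
}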
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 8020226c00..7f2d243aec 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -101,8 +101,10 @@ class SemiSpace;
#define DCHECK_OBJECT_SIZE(size) \
DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))
-#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
- DCHECK((0 < size) && (size <= code_space->AreaSize()))
+#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
+ DCHECK((0 < size) && \
+ (size <= std::min(MemoryChunkLayout::MaxRegularCodeObjectSize(), \
+ code_space->AreaSize())))
// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces that are not
diff --git a/deps/v8/src/heap/third-party/heap-api.h b/deps/v8/src/heap/third-party/heap-api.h
index 5eaae847a7..c4712b988e 100644
--- a/deps/v8/src/heap/third-party/heap-api.h
+++ b/deps/v8/src/heap/third-party/heap-api.h
@@ -30,8 +30,13 @@ class Heap {
static bool InReadOnlySpace(Address address);
+ static bool InLargeObjectSpace(Address address);
+
static bool IsValidHeapObject(HeapObject object);
+ void ResetIterator();
+ HeapObject NextObject();
+
bool CollectGarbage();
};
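
ResetIterator() and NextObject() are declarations only, so their contract is not visible in this diff. Assuming the conventional protocol (reset, then pull objects until a null sentinel), a caller-side walk would look like the following toy model; ToyHeap and the sentinel convention are assumptions, not the third-party heap's documented API:

#include <cassert>
#include <cstddef>
#include <vector>

// Toy stand-in for the assumed iteration protocol: 0 plays the role of
// a null HeapObject terminating the walk.
class ToyHeap {
 public:
  void ResetIterator() { pos_ = 0; }
  int NextObject() { return pos_ < objects_.size() ? objects_[pos_++] : 0; }

 private:
  std::vector<int> objects_ = {10, 20, 30};
  size_t pos_ = 0;
};

int main() {
  ToyHeap heap;
  heap.ResetIterator();
  int seen = 0;
  for (int obj = heap.NextObject(); obj != 0; obj = heap.NextObject()) {
    ++seen;  // visit obj
  }
  assert(seen == 3);
}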
diff --git a/deps/v8/src/heap/weak-object-worklists.cc b/deps/v8/src/heap/weak-object-worklists.cc
new file mode 100644
index 0000000000..532739000f
--- /dev/null
+++ b/deps/v8/src/heap/weak-object-worklists.cc
@@ -0,0 +1,172 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/weak-object-worklists.h"
+
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/worklist.h"
+#include "src/objects/hash-table.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/js-function.h"
+#include "src/objects/js-weak-refs-inl.h"
+#include "src/objects/js-weak-refs.h"
+#include "src/objects/shared-function-info.h"
+#include "src/objects/transitions.h"
+
+namespace v8 {
+
+namespace internal {
+
+void WeakObjects::UpdateAfterScavenge() {
+#define INVOKE_UPDATE(_, name, Name) Update##Name(name);
+ WEAK_OBJECT_WORKLISTS(INVOKE_UPDATE)
+#undef INVOKE_UPDATE
+}
+
+void WeakObjects::UpdateTransitionArrays(
+ WeakObjectWorklist<TransitionArray>& transition_arrays) {
+ DCHECK(!ContainsYoungObjects(transition_arrays));
+}
+
+void WeakObjects::UpdateEphemeronHashTables(
+ WeakObjectWorklist<EphemeronHashTable>& ephemeron_hash_tables) {
+ ephemeron_hash_tables.Update(
+ [](EphemeronHashTable slot_in, EphemeronHashTable* slot_out) -> bool {
+ EphemeronHashTable forwarded = ForwardingAddress(slot_in);
+
+ if (!forwarded.is_null()) {
+ *slot_out = forwarded;
+ return true;
+ }
+
+ return false;
+ });
+}
+
+namespace {
+bool EphemeronUpdater(Ephemeron slot_in, Ephemeron* slot_out) {
+ HeapObject key = slot_in.key;
+ HeapObject value = slot_in.value;
+ HeapObject forwarded_key = ForwardingAddress(key);
+ HeapObject forwarded_value = ForwardingAddress(value);
+
+ if (!forwarded_key.is_null() && !forwarded_value.is_null()) {
+ *slot_out = Ephemeron{forwarded_key, forwarded_value};
+ return true;
+ }
+
+ return false;
+}
+} // anonymous namespace
+
+void WeakObjects::UpdateCurrentEphemerons(
+ WeakObjectWorklist<Ephemeron>& current_ephemerons) {
+ current_ephemerons.Update(EphemeronUpdater);
+}
+
+void WeakObjects::UpdateNextEphemerons(
+ WeakObjectWorklist<Ephemeron>& next_ephemerons) {
+ next_ephemerons.Update(EphemeronUpdater);
+}
+
+void WeakObjects::UpdateDiscoveredEphemerons(
+ WeakObjectWorklist<Ephemeron>& discovered_ephemerons) {
+ discovered_ephemerons.Update(EphemeronUpdater);
+}
+
+void WeakObjects::UpdateWeakReferences(
+ WeakObjectWorklist<HeapObjectAndSlot>& weak_references) {
+ weak_references.Update(
+ [](HeapObjectAndSlot slot_in, HeapObjectAndSlot* slot_out) -> bool {
+ HeapObject heap_obj = slot_in.first;
+ HeapObject forwarded = ForwardingAddress(heap_obj);
+
+ if (!forwarded.is_null()) {
+ ptrdiff_t distance_to_slot =
+ slot_in.second.address() - slot_in.first.ptr();
+ Address new_slot = forwarded.ptr() + distance_to_slot;
+ slot_out->first = forwarded;
+ slot_out->second = HeapObjectSlot(new_slot);
+ return true;
+ }
+
+ return false;
+ });
+}
+
+void WeakObjects::UpdateWeakObjectsInCode(
+ WeakObjectWorklist<HeapObjectAndCode>& weak_objects_in_code) {
+ weak_objects_in_code.Update(
+ [](HeapObjectAndCode slot_in, HeapObjectAndCode* slot_out) -> bool {
+ HeapObject heap_obj = slot_in.first;
+ HeapObject forwarded = ForwardingAddress(heap_obj);
+
+ if (!forwarded.is_null()) {
+ slot_out->first = forwarded;
+ slot_out->second = slot_in.second;
+ return true;
+ }
+
+ return false;
+ });
+}
+
+void WeakObjects::UpdateJSWeakRefs(
+ WeakObjectWorklist<JSWeakRef>& js_weak_refs) {
+ if (FLAG_harmony_weak_refs) {
+ js_weak_refs.Update(
+ [](JSWeakRef js_weak_ref_in, JSWeakRef* js_weak_ref_out) -> bool {
+ JSWeakRef forwarded = ForwardingAddress(js_weak_ref_in);
+
+ if (!forwarded.is_null()) {
+ *js_weak_ref_out = forwarded;
+ return true;
+ }
+
+ return false;
+ });
+ }
+}
+
+void WeakObjects::UpdateWeakCells(WeakObjectWorklist<WeakCell>& weak_cells) {
+ // TODO(syg, marja): Support WeakCells in the young generation.
+ DCHECK(!ContainsYoungObjects(weak_cells));
+}
+
+void WeakObjects::UpdateBytecodeFlushingCandidates(
+ WeakObjectWorklist<SharedFunctionInfo>& bytecode_flushing_candidates) {
+ DCHECK(!ContainsYoungObjects(bytecode_flushing_candidates));
+}
+
+void WeakObjects::UpdateFlushedJSFunctions(
+ WeakObjectWorklist<JSFunction>& flushed_js_functions) {
+ flushed_js_functions.Update(
+ [](JSFunction slot_in, JSFunction* slot_out) -> bool {
+ JSFunction forwarded = ForwardingAddress(slot_in);
+
+ if (!forwarded.is_null()) {
+ *slot_out = forwarded;
+ return true;
+ }
+
+ return false;
+ });
+}
+
+#ifdef DEBUG
+template <typename Type>
+bool WeakObjects::ContainsYoungObjects(WeakObjectWorklist<Type>& worklist) {
+ bool result = false;
+ worklist.Iterate([&result](Type candidate) {
+ if (Heap::InYoungGeneration(candidate)) {
+ result = true;
+ }
+ });
+ return result;
+}
+#endif
+
+} // namespace internal
+} // namespace v8
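
Every Update*() above has the same shape: Worklist::Update(callback) visits each entry, keeps it (possibly rewritten through ForwardingAddress) when the callback returns true, and drops it otherwise. A generic stand-alone version of that contract, using std::vector in place of V8's segmented Worklist and a toy notion of forwarding:

#include <cassert>
#include <functional>
#include <vector>

// Keep entries for which updater returns true, storing the rewritten
// value; drop the rest.
template <typename T>
void UpdateWorklist(std::vector<T>* worklist,
                    const std::function<bool(T, T*)>& updater) {
  std::vector<T> kept;
  for (const T& entry : *worklist) {
    T out;
    if (updater(entry, &out)) kept.push_back(out);
  }
  worklist->swap(kept);
}

int main() {
  std::vector<int> refs = {1, -2, 3};
  // Toy forwarding: negative entries died, survivors moved by +10.
  UpdateWorklist<int>(&refs, [](int in, int* out) {
    if (in < 0) return false;
    *out = in + 10;
    return true;
  });
  assert(refs.size() == 2 && refs[0] == 11 && refs[1] == 13);
}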
diff --git a/deps/v8/src/heap/weak-object-worklists.h b/deps/v8/src/heap/weak-object-worklists.h
new file mode 100644
index 0000000000..67df372b57
--- /dev/null
+++ b/deps/v8/src/heap/weak-object-worklists.h
@@ -0,0 +1,90 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_WEAK_OBJECT_WORKLISTS_H_
+#define V8_HEAP_WEAK_OBJECT_WORKLISTS_H_
+
+#include "src/common/globals.h"
+#include "src/heap/worklist.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/js-weak-refs.h"
+
+namespace v8 {
+namespace internal {
+
+struct Ephemeron {
+ HeapObject key;
+ HeapObject value;
+};
+
+using HeapObjectAndSlot = std::pair<HeapObject, HeapObjectSlot>;
+using HeapObjectAndCode = std::pair<HeapObject, Code>;
+class EphemeronHashTable;
+class JSFunction;
+class SharedFunctionInfo;
+class TransitionArray;
+
+// Weak objects and weak references discovered during incremental/concurrent
+// marking. They are processed in ClearNonLiveReferences after marking.
+// Each entry in this list specifies:
+// 1) Type of the worklist entry.
+// 2) Lower-case name of the worklist.
+// 3) Capitalized name of the worklist.
+//
+// If you add a new entry, then you also need to implement the corresponding
+// Update*() function in the cc file for updating pointers after Scavenge.
+#define WEAK_OBJECT_WORKLISTS(F) \
+ F(TransitionArray, transition_arrays, TransitionArrays) \
+ /* Keep track of all EphemeronHashTables in the heap to process \
+ them in the atomic pause. */ \
+ F(EphemeronHashTable, ephemeron_hash_tables, EphemeronHashTables) \
+ /* Keep track of all ephemerons for concurrent marking tasks. Only store \
+ ephemerons in these worklists if both (key, value) are unreachable at \
+ the moment. \
+ MarkCompactCollector::ProcessEphemeronsUntilFixpoint drains/fills \
+ these worklists. current_ephemerons is used as draining worklist in \
+ the current fixpoint iteration. */ \
+ F(Ephemeron, current_ephemerons, CurrentEphemerons) \
+ /* Stores ephemerons to visit in the next fixpoint iteration. */ \
+ F(Ephemeron, next_ephemerons, NextEphemerons) \
+ /* When draining the marking worklist new discovered ephemerons are pushed \
+ into this worklist. */ \
+ F(Ephemeron, discovered_ephemerons, DiscoveredEphemerons) \
+ /* TODO(marja): For old space, we only need the slot, not the host object. \
+ Optimize this by adding a different storage for old space. */ \
+ F(HeapObjectAndSlot, weak_references, WeakReferences) \
+ F(HeapObjectAndCode, weak_objects_in_code, WeakObjectsInCode) \
+ F(JSWeakRef, js_weak_refs, JSWeakRefs) \
+ F(WeakCell, weak_cells, WeakCells) \
+ F(SharedFunctionInfo, bytecode_flushing_candidates, \
+ BytecodeFlushingCandidates) \
+ F(JSFunction, flushed_js_functions, FlushedJSFunctions)
+
+class WeakObjects {
+ public:
+ template <typename Type>
+ using WeakObjectWorklist = Worklist<Type, 64>;
+
+#define DECLARE_WORKLIST(Type, name, _) WeakObjectWorklist<Type> name;
+ WEAK_OBJECT_WORKLISTS(DECLARE_WORKLIST)
+#undef DECLARE_WORKLIST
+
+ void UpdateAfterScavenge();
+
+ private:
+#define DECLARE_UPDATE_METHODS(Type, _, Name) \
+ void Update##Name(WeakObjectWorklist<Type>&);
+ WEAK_OBJECT_WORKLISTS(DECLARE_UPDATE_METHODS)
+#undef DECLARE_UPDATE_METHODS
+
+#ifdef DEBUG
+ template <typename Type>
+ bool ContainsYoungObjects(WeakObjectWorklist<Type>& worklist);
+#endif
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_WEAK_OBJECT_WORKLISTS_H_
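
WEAK_OBJECT_WORKLISTS(F) is an X-macro: one list drives the member declarations, the Update*() declarations, and the UpdateAfterScavenge() dispatch in the .cc file, so adding a worklist is a one-line change. The technique in miniature, with hypothetical fields:

#include <cstdio>

// One list, many expansions: F(Type, lower_case_name, CamelName).
#define FIELDS(F)      \
  F(int, count, Count) \
  F(double, ratio, Ratio)

struct Stats {
#define DECLARE_FIELD(Type, name, _) Type name{};
  FIELDS(DECLARE_FIELD)
#undef DECLARE_FIELD

  void PrintAll() {
#define PRINT_FIELD(_, name, Name) \
  std::printf(#Name ": %g\n", static_cast<double>(name));
    FIELDS(PRINT_FIELD)
#undef PRINT_FIELD
  }
};

int main() {
  Stats s;
  s.count = 3;
  s.ratio = 0.5;
  s.PrintAll();  // prints "Count: 3" and "Ratio: 0.5"
}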
diff --git a/deps/v8/src/ic/DIR_METADATA b/deps/v8/src/ic/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/ic/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
diff --git a/deps/v8/src/ic/OWNERS b/deps/v8/src/ic/OWNERS
index 816ddb52c5..5bf39a2df1 100644
--- a/deps/v8/src/ic/OWNERS
+++ b/deps/v8/src/ic/OWNERS
@@ -4,5 +4,3 @@ jkummerow@chromium.org
mvstanton@chromium.org
verwaest@chromium.org
mythria@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 40728edf90..c9e517dccb 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -211,8 +211,7 @@ void AccessorAssembler::HandleLoadAccessor(
TNode<Foreign> foreign = LoadObjectField<Foreign>(
call_handler_info, CallHandlerInfo::kJsCallbackOffset);
- TNode<RawPtrT> callback =
- DecodeExternalPointer(LoadForeignForeignAddress(foreign));
+ TNode<RawPtrT> callback = LoadForeignForeignAddressPtr(foreign);
TNode<Object> data =
LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset);
@@ -1669,8 +1668,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
TNode<Foreign> foreign = LoadObjectField<Foreign>(
call_handler_info, CallHandlerInfo::kJsCallbackOffset);
- TNode<RawPtrT> callback =
- DecodeExternalPointer(LoadForeignForeignAddress(foreign));
+ TNode<RawPtrT> callback = LoadForeignForeignAddressPtr(foreign);
TNode<Object> data =
LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset);
@@ -2499,9 +2497,9 @@ void AccessorAssembler::GenericPropertyLoad(
var_holder_map = proto_map;
var_holder_instance_type = proto_instance_type;
Label next_proto(this), return_value(this, &var_value), goto_slow(this);
- TryGetOwnProperty(p->context(), CAST(p->receiver()), CAST(proto),
- proto_map, proto_instance_type, name, &return_value,
- &var_value, &next_proto, &goto_slow);
+ TryGetOwnProperty(p->context(), p->receiver(), CAST(proto), proto_map,
+ proto_instance_type, name, &return_value, &var_value,
+ &next_proto, &goto_slow);
// This trampoline and the next are required to appease Turbofan's
// variable merging.
@@ -3731,11 +3729,11 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
void AccessorAssembler::GenerateLoadIC() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
LoadICParameters p(context, receiver, name, slot, vector);
LoadIC(&p);
@@ -3744,11 +3742,11 @@ void AccessorAssembler::GenerateLoadIC() {
void AccessorAssembler::GenerateLoadIC_Megamorphic() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
ExitPoint direct_exit(this);
TVARIABLE(MaybeObject, var_handler);
@@ -3778,11 +3776,11 @@ void AccessorAssembler::GenerateLoadIC_Megamorphic() {
void AccessorAssembler::GenerateLoadIC_Noninlined() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<FeedbackVector> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<FeedbackVector>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
ExitPoint direct_exit(this);
TVARIABLE(MaybeObject, var_handler);
@@ -3811,10 +3809,10 @@ void AccessorAssembler::GenerateLoadIC_Noninlined() {
void AccessorAssembler::GenerateLoadIC_NoFeedback() {
using Descriptor = LoadNoFeedbackDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Smi> ic_kind = CAST(Parameter(Descriptor::kICKind));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto ic_kind = Parameter<Smi>(Descriptor::kICKind);
LoadICParameters p(context, receiver, name,
TaggedIndexConstant(FeedbackSlot::Invalid().ToInt()),
@@ -3825,10 +3823,10 @@ void AccessorAssembler::GenerateLoadIC_NoFeedback() {
void AccessorAssembler::GenerateLoadICTrampoline() {
using Descriptor = LoadDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kLoadIC, context, receiver, name, slot, vector);
@@ -3837,10 +3835,10 @@ void AccessorAssembler::GenerateLoadICTrampoline() {
void AccessorAssembler::GenerateLoadICTrampoline_Megamorphic() {
using Descriptor = LoadDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kLoadIC_Megamorphic, context, receiver, name, slot,
@@ -3850,13 +3848,12 @@ void AccessorAssembler::GenerateLoadICTrampoline_Megamorphic() {
void AccessorAssembler::GenerateLoadSuperIC() {
using Descriptor = LoadWithReceiverAndVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> lookup_start_object =
- CAST(Parameter(Descriptor::kLookupStartObject));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto lookup_start_object = Parameter<Object>(Descriptor::kLookupStartObject);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
LoadICParameters p(context, receiver, name, slot, vector,
lookup_start_object);
@@ -3866,9 +3863,9 @@ void AccessorAssembler::GenerateLoadSuperIC() {
void AccessorAssembler::GenerateLoadGlobalIC_NoFeedback() {
using Descriptor = LoadGlobalNoFeedbackDescriptor;
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Smi> ic_kind = CAST(Parameter(Descriptor::kICKind));
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto context = Parameter<Context>(Descriptor::kContext);
+ auto ic_kind = Parameter<Smi>(Descriptor::kICKind);
LoadGlobalIC_NoFeedback(context, name, ic_kind);
}
@@ -3876,10 +3873,10 @@ void AccessorAssembler::GenerateLoadGlobalIC_NoFeedback() {
void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
using Descriptor = LoadGlobalWithVectorDescriptor;
- TNode<Name> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto name = Parameter<Name>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
ExitPoint direct_exit(this);
LoadGlobalIC(
@@ -3895,9 +3892,9 @@ void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
void AccessorAssembler::GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode) {
using Descriptor = LoadGlobalDescriptor;
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
Callable callable =
@@ -3908,11 +3905,11 @@ void AccessorAssembler::GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode) {
void AccessorAssembler::GenerateKeyedLoadIC() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadIC(&p, LoadAccessMode::kLoad);
@@ -3921,11 +3918,11 @@ void AccessorAssembler::GenerateKeyedLoadIC() {
void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadICGeneric(&p);
@@ -3934,10 +3931,10 @@ void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
using Descriptor = LoadDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kKeyedLoadIC, context, receiver, name, slot,
@@ -3947,10 +3944,10 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
void AccessorAssembler::GenerateKeyedLoadICTrampoline_Megamorphic() {
using Descriptor = LoadDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kKeyedLoadIC_Megamorphic, context, receiver, name,
@@ -3960,11 +3957,11 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline_Megamorphic() {
void AccessorAssembler::GenerateKeyedLoadIC_PolymorphicName() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<FeedbackVector> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<FeedbackVector>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadICPolymorphicName(&p, LoadAccessMode::kLoad);
@@ -3973,11 +3970,11 @@ void AccessorAssembler::GenerateKeyedLoadIC_PolymorphicName() {
void AccessorAssembler::GenerateStoreGlobalIC() {
using Descriptor = StoreGlobalWithVectorDescriptor;
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
StoreICParameters p(context, base::nullopt, name, value, slot, vector);
StoreGlobalIC(&p);
@@ -3986,10 +3983,10 @@ void AccessorAssembler::GenerateStoreGlobalIC() {
void AccessorAssembler::GenerateStoreGlobalICTrampoline() {
using Descriptor = StoreGlobalDescriptor;
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kStoreGlobalIC, context, name, value, slot, vector);
@@ -3998,12 +3995,12 @@ void AccessorAssembler::GenerateStoreGlobalICTrampoline() {
void AccessorAssembler::GenerateStoreIC() {
using Descriptor = StoreWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
StoreICParameters p(context, receiver, name, value, slot, vector);
StoreIC(&p);
@@ -4012,11 +4009,11 @@ void AccessorAssembler::GenerateStoreIC() {
void AccessorAssembler::GenerateStoreICTrampoline() {
using Descriptor = StoreDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kStoreIC, context, receiver, name, value, slot,
@@ -4026,12 +4023,12 @@ void AccessorAssembler::GenerateStoreICTrampoline() {
void AccessorAssembler::GenerateKeyedStoreIC() {
using Descriptor = StoreWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
StoreICParameters p(context, receiver, name, value, slot, vector);
KeyedStoreIC(&p);
@@ -4040,11 +4037,11 @@ void AccessorAssembler::GenerateKeyedStoreIC() {
void AccessorAssembler::GenerateKeyedStoreICTrampoline() {
using Descriptor = StoreDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
TNode<FeedbackVector> vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kKeyedStoreIC, context, receiver, name, value, slot,
@@ -4054,12 +4051,12 @@ void AccessorAssembler::GenerateKeyedStoreICTrampoline() {
void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
using Descriptor = StoreWithVectorDescriptor;
- TNode<Object> array = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> index = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto array = Parameter<Object>(Descriptor::kReceiver);
+ auto index = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
StoreICParameters p(context, array, index, value, slot, vector);
StoreInArrayLiteralIC(&p);
@@ -4067,9 +4064,9 @@ void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
void AccessorAssembler::GenerateCloneObjectIC_Slow() {
using Descriptor = CloneObjectWithVectorDescriptor;
- TNode<Object> source = CAST(Parameter(Descriptor::kSource));
- TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto source = Parameter<Object>(Descriptor::kSource);
+ auto flags = Parameter<Smi>(Descriptor::kFlags);
+ auto context = Parameter<Context>(Descriptor::kContext);
// The Slow case uses the same call interface as CloneObjectIC, so that it
// can be tail called from it. However, the feedback slot and vector are not
@@ -4120,11 +4117,11 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() {
void AccessorAssembler::GenerateCloneObjectIC() {
using Descriptor = CloneObjectWithVectorDescriptor;
- TNode<Object> source = CAST(Parameter(Descriptor::kSource));
- TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> maybe_vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto source = Parameter<Object>(Descriptor::kSource);
+ auto flags = Parameter<Smi>(Descriptor::kFlags);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto maybe_vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
TVARIABLE(MaybeObject, var_handler);
Label if_handler(this, &var_handler), miss(this, Label::kDeferred),
try_polymorphic(this, Label::kDeferred),
@@ -4269,11 +4266,11 @@ void AccessorAssembler::GenerateCloneObjectIC() {
void AccessorAssembler::GenerateKeyedHasIC() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadIC(&p, LoadAccessMode::kHas);
@@ -4282,9 +4279,9 @@ void AccessorAssembler::GenerateKeyedHasIC() {
void AccessorAssembler::GenerateKeyedHasIC_Megamorphic() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto context = Parameter<Context>(Descriptor::kContext);
// TODO(magardn): implement HasProperty handling in KeyedLoadICGeneric
Return(HasProperty(context, receiver, name,
HasPropertyLookupMode::kHasProperty));
@@ -4293,11 +4290,11 @@ void AccessorAssembler::GenerateKeyedHasIC_Megamorphic() {
void AccessorAssembler::GenerateKeyedHasIC_PolymorphicName() {
using Descriptor = LoadWithVectorDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto vector = Parameter<HeapObject>(Descriptor::kVector);
+ auto context = Parameter<Context>(Descriptor::kContext);
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadICPolymorphicName(&p, LoadAccessMode::kHas);
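
The long mechanical rewrite through accessor-assembler.cc swaps CAST(Parameter(Descriptor::k...)) for the templated Parameter<T>(Descriptor::k...), folding the cast into the accessor so each parameter's type is named exactly once. The same idea outside CodeStubAssembler, sketched with std::any standing in for untyped nodes (names here are illustrative only):

#include <any>
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

std::vector<std::any> args = {std::string("receiver"), 42};

// Untyped accessor, as with the old CAST(Parameter(...)) style.
std::any RawParameter(size_t i) { return args[i]; }

// Typed accessor: the cast happens inside, so call sites state the
// type once and use auto, mirroring Parameter<T>(Descriptor::k...).
template <typename T>
T Parameter(size_t i) {
  return std::any_cast<T>(RawParameter(i));
}

int main() {
  auto receiver = Parameter<std::string>(0);
  auto slot = Parameter<int>(1);
  assert(receiver == "receiver" && slot == 42);
}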
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index 54795d4202..72f43743d2 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -97,9 +97,11 @@ bool CallOptimization::IsCompatibleReceiverMap(Handle<Map> map,
void CallOptimization::Initialize(
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
- if (function_template_info->call_code().IsUndefined(isolate)) return;
+ if (function_template_info->call_code(kAcquireLoad).IsUndefined(isolate))
+ return;
api_call_info_ = handle(
- CallHandlerInfo::cast(function_template_info->call_code()), isolate);
+ CallHandlerInfo::cast(function_template_info->call_code(kAcquireLoad)),
+ isolate);
if (!function_template_info->signature().IsUndefined(isolate)) {
expected_receiver_type_ =
@@ -124,8 +126,9 @@ void CallOptimization::AnalyzePossibleApiFunction(Isolate* isolate,
isolate);
// Require a C++ callback.
- if (info->call_code().IsUndefined(isolate)) return;
- api_call_info_ = handle(CallHandlerInfo::cast(info->call_code()), isolate);
+ HeapObject call_code = info->call_code(kAcquireLoad);
+ if (call_code.IsUndefined(isolate)) return;
+ api_call_info_ = handle(CallHandlerInfo::cast(call_code), isolate);
if (!info->signature().IsUndefined(isolate)) {
expected_receiver_type_ =
diff --git a/deps/v8/src/ic/handler-configuration.cc b/deps/v8/src/ic/handler-configuration.cc
index 73cd228001..8418962172 100644
--- a/deps/v8/src/ic/handler-configuration.cc
+++ b/deps/v8/src/ic/handler-configuration.cc
@@ -225,8 +225,8 @@ MaybeObjectHandle StoreHandler::StoreTransition(Isolate* isolate,
#ifdef DEBUG
if (!is_dictionary_map) {
InternalIndex descriptor = transition_map->LastAdded();
- Handle<DescriptorArray> descriptors(transition_map->instance_descriptors(),
- isolate);
+ Handle<DescriptorArray> descriptors(
+ transition_map->instance_descriptors(kRelaxedLoad), isolate);
PropertyDetails details = descriptors->GetDetails(descriptor);
if (descriptors->GetKey(descriptor).IsPrivate()) {
DCHECK_EQ(DONT_ENUM, details.attributes());
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 35218f7df5..4c1de81ae3 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -37,8 +37,7 @@ bool IC::IsHandler(MaybeObject object) {
bool IC::vector_needs_update() {
if (state() == NO_FEEDBACK) return false;
return (!vector_set_ &&
- (state() != MEGAMORPHIC ||
- nexus()->GetFeedbackExtra().ToSmi().value() != ELEMENT));
+ (state() != MEGAMORPHIC || nexus()->GetKeyType() != ELEMENT));
}
} // namespace internal
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index d8e25f3c74..b077d4eabf 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -620,17 +620,9 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name,
DCHECK_LE(i, maps_and_handlers.size());
}
- // Reorder the deprecated maps to be at the end, so that
- // minimorphic ICs have the best chance of succeeding as they only
- // check the first FLAG_max_minimorphic_map_checks maps.
- if (deprecated_maps_and_handlers.size() > 0) {
- maps_and_handlers.insert(maps_and_handlers.end(),
- deprecated_maps_and_handlers.begin(),
- deprecated_maps_and_handlers.end());
- }
-
- int number_of_maps = static_cast<int>(maps_and_handlers.size());
int deprecated_maps = static_cast<int>(deprecated_maps_and_handlers.size());
+ int number_of_maps =
+ static_cast<int>(maps_and_handlers.size()) + deprecated_maps;
int number_of_valid_maps =
number_of_maps - deprecated_maps - (handler_to_overwrite != -1);
@@ -655,6 +647,15 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name,
maps_and_handlers.push_back(MapAndHandler(map, handler));
}
+ // Reorder the deprecated maps to be at the end, so that
+ // minimorphic ICs have the best chance of succeeding as they only
+ // check the first FLAG_max_minimorphic_map_checks maps.
+ if (deprecated_maps_and_handlers.size() > 0) {
+ maps_and_handlers.insert(maps_and_handlers.end(),
+ deprecated_maps_and_handlers.begin(),
+ deprecated_maps_and_handlers.end());
+ }
+
ConfigureVectorState(name, maps_and_handlers);
}
@@ -1862,7 +1863,9 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode,
Handle<Map> new_receiver_map) {
std::vector<MapAndHandler> target_maps_and_handlers;
- nexus()->ExtractMapsAndHandlers(&target_maps_and_handlers, true);
+ nexus()->ExtractMapsAndHandlers(
+ &target_maps_and_handlers,
+ [this](Handle<Map> map) { return Map::TryUpdate(isolate(), map); });
if (target_maps_and_handlers.empty()) {
Handle<Map> monomorphic_map = receiver_map;
// If we transitioned to a map that is a more general map than incoming
@@ -2711,7 +2714,7 @@ static bool CanFastCloneObject(Handle<Map> map) {
return false;
}
- DescriptorArray descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors(kRelaxedLoad);
for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
Name key = descriptors.GetKey(i);
@@ -2760,8 +2763,8 @@ static Handle<Map> FastCloneObjectMap(Isolate* isolate, Handle<Map> source_map,
map = Map::Copy(isolate, map, "InitializeClonedDescriptors");
}
- Handle<DescriptorArray> source_descriptors(source_map->instance_descriptors(),
- isolate);
+ Handle<DescriptorArray> source_descriptors(
+ source_map->instance_descriptors(kRelaxedLoad), isolate);
int size = source_map->NumberOfOwnDescriptors();
int slack = 0;
Handle<DescriptorArray> descriptors = DescriptorArray::CopyForFastObjectClone(
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 7604e8d8f4..3a0c9076cd 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -1031,10 +1031,10 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(
void KeyedStoreGenericAssembler::KeyedStoreGeneric() {
using Descriptor = StoreDescriptor;
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto context = Parameter<Context>(Descriptor::kContext);
KeyedStoreGeneric(context, receiver, name, value, Nothing<LanguageMode>());
}
@@ -1050,11 +1050,11 @@ void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context,
void KeyedStoreGenericAssembler::StoreIC_NoFeedback() {
using Descriptor = StoreDescriptor;
- TNode<Object> receiver_maybe_smi = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> name = CAST(Parameter(Descriptor::kName));
- TNode<Object> value = CAST(Parameter(Descriptor::kValue));
- TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ auto receiver_maybe_smi = Parameter<Object>(Descriptor::kReceiver);
+ auto name = Parameter<Object>(Descriptor::kName);
+ auto value = Parameter<Object>(Descriptor::kValue);
+ auto slot = Parameter<TaggedIndex>(Descriptor::kSlot);
+ auto context = Parameter<Context>(Descriptor::kContext);
Label miss(this, Label::kDeferred), store_property(this);
diff --git a/deps/v8/src/init/DIR_METADATA b/deps/v8/src/init/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/init/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
diff --git a/deps/v8/src/init/OWNERS b/deps/v8/src/init/OWNERS
index 933637e2e9..8e374f760c 100644
--- a/deps/v8/src/init/OWNERS
+++ b/deps/v8/src/init/OWNERS
@@ -9,5 +9,3 @@ marja@chromium.org
mathias@chromium.org
ulan@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index 990a7804fd..b1a3361919 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -225,6 +225,7 @@ class Genesis {
HARMONY_STAGED(DECLARE_FEATURE_INITIALIZATION)
HARMONY_SHIPPING(DECLARE_FEATURE_INITIALIZATION)
#undef DECLARE_FEATURE_INITIALIZATION
+ void InitializeGlobal_regexp_linear_flag();
enum ArrayBufferKind {
ARRAY_BUFFER,
@@ -365,6 +366,7 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
if (FLAG_track_detached_contexts) {
isolate_->AddDetachedContext(env);
}
+ DCHECK(global_proxy->IsDetached());
env->native_context().set_microtask_queue(isolate_, nullptr);
}
@@ -1107,7 +1109,7 @@ namespace {
void ReplaceAccessors(Isolate* isolate, Handle<Map> map, Handle<String> name,
PropertyAttributes attributes,
Handle<AccessorPair> accessor_pair) {
- DescriptorArray descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors(kRelaxedLoad);
InternalIndex entry = descriptors.SearchWithCache(isolate, *name, *map);
Descriptor d = Descriptor::AccessorConstant(name, accessor_pair, attributes);
descriptors.Replace(entry, &d);
@@ -1569,8 +1571,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kFastFunctionPrototypeBind, 1, false);
SimpleInstallFunction(isolate_, prototype, "call",
Builtins::kFunctionPrototypeCall, 1, false);
- SimpleInstallFunction(isolate_, prototype, "toString",
- Builtins::kFunctionPrototypeToString, 0, false);
+ Handle<JSFunction> function_to_string =
+ SimpleInstallFunction(isolate_, prototype, "toString",
+ Builtins::kFunctionPrototypeToString, 0, false);
+ native_context()->set_function_to_string(*function_to_string);
// Install the @@hasInstance function.
Handle<JSFunction> has_instance = InstallFunctionAtSymbol(
@@ -2299,6 +2303,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, promise_fun, "all", Builtins::kPromiseAll, 1, true);
native_context()->set_promise_all(*promise_all);
+ InstallFunctionWithBuiltinId(isolate_, promise_fun, "allSettled",
+ Builtins::kPromiseAllSettled, 1, true);
+
InstallFunctionWithBuiltinId(isolate_, promise_fun, "race",
Builtins::kPromiseRace, 1, true);
@@ -3848,6 +3855,7 @@ void Genesis::InitializeExperimentalGlobal() {
HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
HARMONY_INPROGRESS(FEATURE_INITIALIZE_GLOBAL)
#undef FEATURE_INITIALIZE_GLOBAL
+ InitializeGlobal_regexp_linear_flag();
}
bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
@@ -4090,10 +4098,6 @@ void Genesis::InitializeCallSiteBuiltins() {
FunctionInfo infos[] = {
{"getColumnNumber", Builtins::kCallSitePrototypeGetColumnNumber},
- {"getEnclosingColumnNumber",
- Builtins::kCallSitePrototypeGetEnclosingColumnNumber},
- {"getEnclosingLineNumber",
- Builtins::kCallSitePrototypeGetEnclosingLineNumber},
{"getEvalOrigin", Builtins::kCallSitePrototypeGetEvalOrigin},
{"getFileName", Builtins::kCallSitePrototypeGetFileName},
{"getFunction", Builtins::kCallSitePrototypeGetFunction},
@@ -4127,13 +4131,11 @@ void Genesis::InitializeCallSiteBuiltins() {
#define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
void Genesis::InitializeGlobal_##id() {}
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_namespace_exports)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_methods)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_sequence)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_top_level_await)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_logical_assignment)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_assertions)
#ifdef V8_INTL_SUPPORT
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_displaynames_date_types)
@@ -4277,12 +4279,6 @@ void Genesis::InitializeGlobal_harmony_promise_any() {
native_context()->set_promise_any(*promise_any);
}
-void Genesis::InitializeGlobal_harmony_promise_all_settled() {
- if (!FLAG_harmony_promise_all_settled) return;
- SimpleInstallFunction(isolate(), isolate()->promise_function(), "allSettled",
- Builtins::kPromiseAllSettled, 1, true);
-}
-
void Genesis::InitializeGlobal_harmony_regexp_match_indices() {
if (!FLAG_harmony_regexp_match_indices) return;
@@ -4306,6 +4302,20 @@ void Genesis::InitializeGlobal_harmony_string_replaceall() {
Builtins::kStringPrototypeReplaceAll, 2, true);
}
+void Genesis::InitializeGlobal_regexp_linear_flag() {
+ if (!FLAG_enable_experimental_regexp_engine) return;
+
+ Handle<JSFunction> regexp_fun(native_context()->regexp_function(), isolate());
+ Handle<JSObject> regexp_prototype(
+ JSObject::cast(regexp_fun->instance_prototype()), isolate());
+ SimpleInstallGetter(isolate(), regexp_prototype,
+ isolate()->factory()->linear_string(),
+ Builtins::kRegExpPrototypeLinearGetter, true);
+
+ // Store regexp prototype map again after change.
+ native_context()->set_regexp_prototype_map(regexp_prototype->map());
+}
+
#ifdef V8_INTL_SUPPORT
void Genesis::InitializeGlobal_harmony_intl_segmenter() {
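For orientation, the getter installed above exposes the experimental linear-time regexp engine's flag to JavaScript. A minimal embedder-side sketch (assuming V8 headers on the include path, a live regexp handle, and `--enable-experimental-regexp-engine`; `HasLinearFlag` is a hypothetical helper, not part of this patch):

```cpp
#include "v8.h"  // assumes V8 headers on the include path

// Hypothetical helper: reports whether a RegExp carries the new 'linear'
// flag that the getter installed above surfaces on RegExp.prototype.
bool HasLinearFlag(v8::Local<v8::RegExp> regexp) {
  return (regexp->GetFlags() & v8::RegExp::Flags::kLinear) != 0;
}
```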
@@ -5032,8 +5042,8 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
// The global template must not create properties that already exist
// in the snapshotted global object.
if (from->HasFastProperties()) {
- Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(from->map().instance_descriptors(), isolate());
+ Handle<DescriptorArray> descs = Handle<DescriptorArray>(
+ from->map().instance_descriptors(kRelaxedLoad), isolate());
for (InternalIndex i : from->map().IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
if (details.location() == kField) {
@@ -5162,7 +5172,8 @@ Handle<Map> Genesis::CreateInitialMapForArraySubclass(int size,
{
JSFunction array_function = native_context()->array_function();
Handle<DescriptorArray> array_descriptors(
- array_function.initial_map().instance_descriptors(), isolate());
+ array_function.initial_map().instance_descriptors(kRelaxedLoad),
+ isolate());
Handle<String> length = factory()->length_string();
InternalIndex old = array_descriptors->SearchWithCache(
isolate(), *length, array_function.initial_map());
@@ -5278,6 +5289,14 @@ Genesis::Genesis(
}
}
+ // TODO(v8:10391): NativeContext::microtask_queue serialization is not
+ // actually supported, so the field is serialized as raw data rather than
+ // as an ExternalReference. As a result, when the V8 heap sandbox is
+ // enabled, no external pointer entry is allocated for the microtask queue
+ // field during deserialization, and we allocate it manually here.
+ native_context()->AllocateExternalPointerEntries(isolate);
+
native_context()->set_microtask_queue(
isolate, microtask_queue ? static_cast<MicrotaskQueue*>(microtask_queue)
: isolate->default_microtask_queue());
diff --git a/deps/v8/src/init/heap-symbols.h b/deps/v8/src/init/heap-symbols.h
index 1188411efc..eaf441d6e8 100644
--- a/deps/v8/src/init/heap-symbols.h
+++ b/deps/v8/src/init/heap-symbols.h
@@ -133,6 +133,7 @@
V(_, ArrayBuffer_string, "ArrayBuffer") \
V(_, ArrayIterator_string, "Array Iterator") \
V(_, as_string, "as") \
+ V(_, assert_string, "assert") \
V(_, async_string, "async") \
V(_, auto_string, "auto") \
V(_, await_string, "await") \
@@ -225,6 +226,7 @@
V(_, length_string, "length") \
V(_, let_string, "let") \
V(_, line_string, "line") \
+ V(_, linear_string, "linear") \
V(_, LinkError_string, "LinkError") \
V(_, long_string, "long") \
V(_, Map_string, "Map") \
@@ -344,7 +346,6 @@
V(_, error_script_symbol) \
V(_, error_start_pos_symbol) \
V(_, frozen_symbol) \
- V(_, generic_symbol) \
V(_, home_object_symbol) \
V(_, interpreter_trampoline_symbol) \
V(_, megamorphic_symbol) \
@@ -495,7 +496,6 @@
F(SCAVENGER_FAST_PROMOTE) \
F(SCAVENGER_FREE_REMEMBERED_SET) \
F(SCAVENGER_SCAVENGE) \
- F(SCAVENGER_PROCESS_ARRAY_BUFFERS) \
F(SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY) \
F(SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS) \
F(SCAVENGER_SCAVENGE_PARALLEL) \
@@ -510,6 +510,7 @@
#define TRACER_BACKGROUND_SCOPES(F) \
F(BACKGROUND_ARRAY_BUFFER_FREE) \
F(BACKGROUND_ARRAY_BUFFER_SWEEP) \
+ F(BACKGROUND_COLLECTION) \
F(BACKGROUND_STORE_BUFFER) \
F(BACKGROUND_UNMAPPER) \
F(MC_BACKGROUND_EVACUATE_COPY) \
diff --git a/deps/v8/src/init/isolate-allocator.cc b/deps/v8/src/init/isolate-allocator.cc
index b9ec6c3f43..01ae416181 100644
--- a/deps/v8/src/init/isolate-allocator.cc
+++ b/deps/v8/src/init/isolate-allocator.cc
@@ -12,20 +12,16 @@
namespace v8 {
namespace internal {
-IsolateAllocator::IsolateAllocator(IsolateAllocationMode mode) {
-#if V8_TARGET_ARCH_64_BIT
- if (mode == IsolateAllocationMode::kInV8Heap) {
- Address heap_reservation_address = InitReservation();
- CommitPagesForIsolate(heap_reservation_address);
- return;
- }
-#endif // V8_TARGET_ARCH_64_BIT
-
+IsolateAllocator::IsolateAllocator() {
+#ifdef V8_COMPRESS_POINTERS
+ Address heap_reservation_address = InitReservation();
+ CommitPagesForIsolate(heap_reservation_address);
+#else
// Allocate Isolate in C++ heap.
- CHECK_EQ(mode, IsolateAllocationMode::kInCppHeap);
page_allocator_ = GetPlatformPageAllocator();
isolate_memory_ = ::operator new(sizeof(Isolate));
DCHECK(!reservation_.IsReserved());
+#endif // V8_COMPRESS_POINTERS
}
IsolateAllocator::~IsolateAllocator() {
@@ -38,7 +34,7 @@ IsolateAllocator::~IsolateAllocator() {
::operator delete(isolate_memory_);
}
-#if V8_TARGET_ARCH_64_BIT
+#ifdef V8_COMPRESS_POINTERS
namespace {
@@ -192,7 +188,7 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_reservation_address) {
}
isolate_memory_ = reinterpret_cast<void*>(isolate_address);
}
-#endif // V8_TARGET_ARCH_64_BIT
+#endif // V8_COMPRESS_POINTERS
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/init/isolate-allocator.h b/deps/v8/src/init/isolate-allocator.h
index c176bf32cb..8d843702cc 100644
--- a/deps/v8/src/init/isolate-allocator.h
+++ b/deps/v8/src/init/isolate-allocator.h
@@ -22,7 +22,8 @@ class BoundedPageAllocator;
namespace internal {
// IsolateAllocator object is responsible for allocating memory for one (!)
-// Isolate object. Depending on the allocation mode the memory can be allocated
+// Isolate object. Depending on whether pointer compression is enabled,
+// the memory can be allocated
// 1) in the C++ heap (when pointer compression is disabled)
// 2) in a proper part of a properly aligned region of a reserved address space
// (when pointer compression is enabled).
@@ -34,18 +35,13 @@ namespace internal {
// Isolate::Delete() takes care of the proper order of the objects destruction.
class V8_EXPORT_PRIVATE IsolateAllocator final {
public:
- explicit IsolateAllocator(IsolateAllocationMode mode);
+ IsolateAllocator();
~IsolateAllocator();
void* isolate_memory() const { return isolate_memory_; }
v8::PageAllocator* page_allocator() const { return page_allocator_; }
- IsolateAllocationMode mode() {
- return reservation_.IsReserved() ? IsolateAllocationMode::kInV8Heap
- : IsolateAllocationMode::kInCppHeap;
- }
-
private:
Address InitReservation();
void CommitPagesForIsolate(Address heap_reservation_address);
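To make the new `#ifdef` structure concrete, here is a stripped-down sketch of the two allocation paths the header comment describes; `FakeIsolate` and the reservation placeholder are stand-ins, not V8 types:

```cpp
#include <new>

struct FakeIsolate { char payload[128]; };  // stand-in for internal::Isolate

// Sketch only: with pointer compression the storage must come out of a
// properly aligned reserved region; without it, the plain C++ heap suffices.
void* AllocateIsolateMemory() {
#ifdef V8_COMPRESS_POINTERS
  return nullptr;  // placeholder for InitReservation/CommitPagesForIsolate
#else
  return ::operator new(sizeof(FakeIsolate));
#endif
}
```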
diff --git a/deps/v8/src/inspector/DIR_METADATA b/deps/v8/src/inspector/DIR_METADATA
new file mode 100644
index 0000000000..3ba1106a5f
--- /dev/null
+++ b/deps/v8/src/inspector/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Platform>DevTools>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/inspector/OWNERS b/deps/v8/src/inspector/OWNERS
index faa8c326b0..ea8456bbe5 100644
--- a/deps/v8/src/inspector/OWNERS
+++ b/deps/v8/src/inspector/OWNERS
@@ -7,5 +7,3 @@ szuend@chromium.org
yangguo@chromium.org
per-file PRESUBMIT.py=file:../../INFRA_OWNERS
-
-# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index acd9609a9c..c72c531178 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -34,6 +34,8 @@
#include <unordered_set>
#include "../../third_party/inspector_protocol/crdtp/json.h"
+#include "include/v8-inspector.h"
+#include "src/debug/debug-interface.h"
#include "src/inspector/custom-preview.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
@@ -46,8 +48,6 @@
#include "src/inspector/v8-value-utils.h"
#include "src/inspector/value-mirror.h"
-#include "include/v8-inspector.h"
-
namespace v8_inspector {
namespace {
@@ -518,19 +518,9 @@ Response InjectedScript::getInternalAndPrivateProperties(
}
void InjectedScript::releaseObject(const String16& objectId) {
- std::vector<uint8_t> cbor;
- v8_crdtp::json::ConvertJSONToCBOR(
- v8_crdtp::span<uint16_t>(objectId.characters16(), objectId.length()),
- &cbor);
- std::unique_ptr<protocol::Value> parsedObjectId =
- protocol::Value::parseBinary(cbor.data(), cbor.size());
- if (!parsedObjectId) return;
- protocol::DictionaryValue* object =
- protocol::DictionaryValue::cast(parsedObjectId.get());
- if (!object) return;
- int boundId = 0;
- if (!object->getInteger("id", &boundId)) return;
- unbindObject(boundId);
+ std::unique_ptr<RemoteObjectId> remoteId;
+ Response response = RemoteObjectId::parse(objectId, &remoteId);
+ if (response.IsSuccess()) unbindObject(remoteId->id());
}
Response InjectedScript::wrapObject(
@@ -722,10 +712,12 @@ Response InjectedScript::resolveCallArgument(
Response response =
RemoteObjectId::parse(callArgument->getObjectId(""), &remoteObjectId);
if (!response.IsSuccess()) return response;
- if (remoteObjectId->contextId() != m_context->contextId())
+ if (remoteObjectId->contextId() != m_context->contextId() ||
+ remoteObjectId->isolateId() != m_context->inspector()->isolateId()) {
return Response::ServerError(
"Argument should belong to the same JavaScript world as target "
"object");
+ }
return findObject(*remoteObjectId, result);
}
if (callArgument->hasValue() || callArgument->hasUnserializableValue()) {
@@ -861,6 +853,7 @@ Response InjectedScript::wrapEvaluateResult(
v8::Local<v8::Object> InjectedScript::commandLineAPI() {
if (m_commandLineAPI.IsEmpty()) {
+ v8::debug::DisableBreakScope disable_break(m_context->isolate());
m_commandLineAPI.Reset(
m_context->isolate(),
m_context->inspector()->console()->createCommandLineAPI(
@@ -1011,10 +1004,8 @@ String16 InjectedScript::bindObject(v8::Local<v8::Value> value,
m_idToObjectGroupName[id] = groupName;
m_nameToObjectGroup[groupName].push_back(id);
}
- // TODO(dgozman): get rid of "injectedScript" notion.
- return String16::concat(
- "{\"injectedScriptId\":", String16::fromInteger(m_context->contextId()),
- ",\"id\":", String16::fromInteger(id), "}");
+ return RemoteObjectId::serialize(m_context->inspector()->isolateId(),
+ m_context->contextId(), id);
}
// static
diff --git a/deps/v8/src/inspector/remote-object-id.cc b/deps/v8/src/inspector/remote-object-id.cc
index e3c67bb6c9..330cdb2b66 100644
--- a/deps/v8/src/inspector/remote-object-id.cc
+++ b/deps/v8/src/inspector/remote-object-id.cc
@@ -10,63 +10,68 @@
namespace v8_inspector {
-RemoteObjectIdBase::RemoteObjectIdBase() : m_injectedScriptId(0) {}
+namespace {
-std::unique_ptr<protocol::DictionaryValue>
-RemoteObjectIdBase::parseInjectedScriptId(const String16& objectId) {
- std::vector<uint8_t> cbor;
- v8_crdtp::json::ConvertJSONToCBOR(
- v8_crdtp::span<uint16_t>(objectId.characters16(), objectId.length()),
- &cbor);
- std::unique_ptr<protocol::Value> parsedValue =
- protocol::Value::parseBinary(cbor.data(), cbor.size());
- if (!parsedValue || parsedValue->type() != protocol::Value::TypeObject)
- return nullptr;
-
- std::unique_ptr<protocol::DictionaryValue> parsedObjectId(
- protocol::DictionaryValue::cast(parsedValue.release()));
- bool success =
- parsedObjectId->getInteger("injectedScriptId", &m_injectedScriptId);
- if (success) return parsedObjectId;
- return nullptr;
+String16 serializeId(uint64_t isolateId, int injectedScriptId, int id) {
+ return String16::concat(
+ String16::fromInteger64(static_cast<int64_t>(isolateId)), ".",
+ String16::fromInteger(injectedScriptId), ".", String16::fromInteger(id));
}
-RemoteObjectId::RemoteObjectId() : RemoteObjectIdBase(), m_id(0) {}
+} // namespace
+
+RemoteObjectIdBase::RemoteObjectIdBase()
+ : m_isolateId(0), m_injectedScriptId(0), m_id(0) {}
+
+bool RemoteObjectIdBase::parseId(const String16& objectId) {
+ const UChar dot = '.';
+ size_t firstDotPos = objectId.find(dot);
+ if (firstDotPos == String16::kNotFound) return false;
+ bool ok = false;
+ int64_t isolateId = objectId.substring(0, firstDotPos).toInteger64(&ok);
+ if (!ok) return false;
+ firstDotPos++;
+ size_t secondDotPos = objectId.find(dot, firstDotPos);
+ if (secondDotPos == String16::kNotFound) return false;
+ int injectedScriptId =
+ objectId.substring(firstDotPos, secondDotPos - firstDotPos)
+ .toInteger(&ok);
+ if (!ok) return false;
+ secondDotPos++;
+ int id = objectId.substring(secondDotPos).toInteger(&ok);
+ if (!ok) return false;
+ m_isolateId = static_cast<uint64_t>(isolateId);
+ m_injectedScriptId = injectedScriptId;
+ m_id = id;
+ return true;
+}
Response RemoteObjectId::parse(const String16& objectId,
std::unique_ptr<RemoteObjectId>* result) {
std::unique_ptr<RemoteObjectId> remoteObjectId(new RemoteObjectId());
- std::unique_ptr<protocol::DictionaryValue> parsedObjectId =
- remoteObjectId->parseInjectedScriptId(objectId);
- if (!parsedObjectId) return Response::ServerError("Invalid remote object id");
-
- bool success = parsedObjectId->getInteger("id", &remoteObjectId->m_id);
- if (!success) return Response::ServerError("Invalid remote object id");
+ if (!remoteObjectId->parseId(objectId))
+ return Response::ServerError("Invalid remote object id");
*result = std::move(remoteObjectId);
return Response::Success();
}
-RemoteCallFrameId::RemoteCallFrameId()
- : RemoteObjectIdBase(), m_frameOrdinal(0) {}
+String16 RemoteObjectId::serialize(uint64_t isolateId, int injectedScriptId,
+ int id) {
+ return serializeId(isolateId, injectedScriptId, id);
+}
Response RemoteCallFrameId::parse(const String16& objectId,
std::unique_ptr<RemoteCallFrameId>* result) {
std::unique_ptr<RemoteCallFrameId> remoteCallFrameId(new RemoteCallFrameId());
- std::unique_ptr<protocol::DictionaryValue> parsedObjectId =
- remoteCallFrameId->parseInjectedScriptId(objectId);
- if (!parsedObjectId) return Response::ServerError("Invalid call frame id");
-
- bool success =
- parsedObjectId->getInteger("ordinal", &remoteCallFrameId->m_frameOrdinal);
- if (!success) return Response::ServerError("Invalid call frame id");
+ if (!remoteCallFrameId->parseId(objectId))
+ return Response::ServerError("Invalid call frame id");
*result = std::move(remoteCallFrameId);
return Response::Success();
}
-String16 RemoteCallFrameId::serialize(int injectedScriptId, int frameOrdinal) {
- return "{\"ordinal\":" + String16::fromInteger(frameOrdinal) +
- ",\"injectedScriptId\":" + String16::fromInteger(injectedScriptId) +
- "}";
+String16 RemoteCallFrameId::serialize(uint64_t isolateId, int injectedScriptId,
+ int frameOrdinal) {
+ return serializeId(isolateId, injectedScriptId, frameOrdinal);
}
} // namespace v8_inspector
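The hunk above replaces the old JSON object ids with a plain `<isolateId>.<injectedScriptId>.<id>` string (e.g. "1.2.7"). A standalone sketch of the same parse, for illustration only, with `std::string` standing in for `String16`:

```cpp
#include <cstdint>
#include <optional>
#include <string>

struct ParsedId {
  uint64_t isolate_id;
  int injected_script_id;
  int id;
};

// Mirrors RemoteObjectIdBase::parseId above; stoll/stoi throw on
// non-numeric components, hence the catch-all returning nullopt.
std::optional<ParsedId> ParseRemoteId(const std::string& object_id) {
  const auto first = object_id.find('.');
  if (first == std::string::npos) return std::nullopt;
  const auto second = object_id.find('.', first + 1);
  if (second == std::string::npos) return std::nullopt;
  try {
    ParsedId out;
    out.isolate_id =
        static_cast<uint64_t>(std::stoll(object_id.substr(0, first)));
    out.injected_script_id =
        std::stoi(object_id.substr(first + 1, second - first - 1));
    out.id = std::stoi(object_id.substr(second + 1));
    return out;
  } catch (...) {
    return std::nullopt;
  }
}
```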
diff --git a/deps/v8/src/inspector/remote-object-id.h b/deps/v8/src/inspector/remote-object-id.h
index 5a35c13e58..1c60124120 100644
--- a/deps/v8/src/inspector/remote-object-id.h
+++ b/deps/v8/src/inspector/remote-object-id.h
@@ -15,16 +15,18 @@ using protocol::Response;
class RemoteObjectIdBase {
public:
+ uint64_t isolateId() const { return m_isolateId; }
int contextId() const { return m_injectedScriptId; }
protected:
RemoteObjectIdBase();
~RemoteObjectIdBase() = default;
- std::unique_ptr<protocol::DictionaryValue> parseInjectedScriptId(
- const String16&);
+ bool parseId(const String16&);
+ uint64_t m_isolateId;
int m_injectedScriptId;
+ int m_id;
};
class RemoteObjectId final : public RemoteObjectIdBase {
@@ -33,10 +35,7 @@ class RemoteObjectId final : public RemoteObjectIdBase {
~RemoteObjectId() = default;
int id() const { return m_id; }
- private:
- RemoteObjectId();
-
- int m_id;
+ static String16 serialize(uint64_t isolateId, int injectedScriptId, int id);
};
class RemoteCallFrameId final : public RemoteObjectIdBase {
@@ -44,14 +43,10 @@ class RemoteCallFrameId final : public RemoteObjectIdBase {
static Response parse(const String16&, std::unique_ptr<RemoteCallFrameId>*);
~RemoteCallFrameId() = default;
- int frameOrdinal() const { return m_frameOrdinal; }
-
- static String16 serialize(int injectedScriptId, int frameOrdinal);
-
- private:
- RemoteCallFrameId();
+ int frameOrdinal() const { return m_id; }
- int m_frameOrdinal;
+ static String16 serialize(uint64_t isolateId, int injectedScriptId,
+ int frameOrdinal);
};
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index 4143f6c311..88a7584255 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -39,10 +39,12 @@ class String16 {
static String16 fromInteger(int);
static String16 fromInteger(size_t);
static String16 fromInteger64(int64_t);
+ static String16 fromUInt64(uint64_t);
static String16 fromDouble(double);
static String16 fromDouble(double, int precision);
int64_t toInteger64(bool* ok = nullptr) const;
+ uint64_t toUInt64(bool* ok = nullptr) const;
int toInteger(bool* ok = nullptr) const;
String16 stripWhiteSpace() const;
const UChar* characters16() const { return m_impl.c_str(); }
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index 6dda6ef90c..12645cecbc 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -872,7 +872,6 @@ V8Console::CommandLineAPIScope::~CommandLineAPIScope() {
->GetOwnPropertyDescriptor(
m_context, v8::Local<v8::String>::Cast(name))
.ToLocal(&descriptor);
- DCHECK(success);
USE(success);
}
}
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 399fa4c409..f82ce98600 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -1446,8 +1446,8 @@ Response V8DebuggerAgentImpl::currentCallFrames(
int contextId = iterator->GetContextId();
InjectedScript* injectedScript = nullptr;
if (contextId) m_session->findInjectedScript(contextId, injectedScript);
- String16 callFrameId =
- RemoteCallFrameId::serialize(contextId, frameOrdinal);
+ String16 callFrameId = RemoteCallFrameId::serialize(
+ m_inspector->isolateId(), contextId, frameOrdinal);
v8::debug::Location loc = iterator->GetSourceLocation();
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 6549308cc4..f1330dcf12 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -502,6 +502,10 @@ size_t HeapLimitForDebugging(size_t initial_heap_limit) {
size_t V8Debugger::nearHeapLimitCallback(void* data, size_t current_heap_limit,
size_t initial_heap_limit) {
V8Debugger* thisPtr = static_cast<V8Debugger*>(data);
+// TODO(solanes, v8:10876): Remove when bug is solved.
+#if DEBUG
+ printf("nearHeapLimitCallback\n");
+#endif
thisPtr->m_originalHeapLimit = current_heap_limit;
thisPtr->m_scheduledOOMBreak = true;
v8::Local<v8::Context> context =
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index 8db491bf68..4303b35c62 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -239,6 +239,8 @@ Response V8InspectorSessionImpl::findInjectedScript(
Response V8InspectorSessionImpl::findInjectedScript(
RemoteObjectIdBase* objectId, InjectedScript*& injectedScript) {
+ if (objectId->isolateId() != m_inspector->isolateId())
+ return Response::ServerError("Cannot find context with specified id");
return findInjectedScript(objectId->contextId(), injectedScript);
}
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index ac505be5cc..2109348d07 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -56,6 +56,7 @@ static const char customObjectFormatterEnabled[] =
"customObjectFormatterEnabled";
static const char runtimeEnabled[] = "runtimeEnabled";
static const char bindings[] = "bindings";
+static const char globalBindingsKey[] = "";
} // namespace V8RuntimeAgentImplState
using protocol::Runtime::RemoteObject;
@@ -663,32 +664,61 @@ void V8RuntimeAgentImpl::terminateExecution(
m_inspector->debugger()->terminateExecution(std::move(callback));
}
+namespace {
+protocol::DictionaryValue* getOrCreateDictionary(
+ protocol::DictionaryValue* dict, const String16& key) {
+ if (protocol::DictionaryValue* bindings = dict->getObject(key))
+ return bindings;
+ dict->setObject(key, protocol::DictionaryValue::create());
+ return dict->getObject(key);
+}
+} // namespace
+
Response V8RuntimeAgentImpl::addBinding(const String16& name,
- Maybe<int> executionContextId) {
- if (!m_state->getObject(V8RuntimeAgentImplState::bindings)) {
- m_state->setObject(V8RuntimeAgentImplState::bindings,
- protocol::DictionaryValue::create());
- }
- protocol::DictionaryValue* bindings =
- m_state->getObject(V8RuntimeAgentImplState::bindings);
- if (bindings->booleanProperty(name, false)) return Response::Success();
+ Maybe<int> executionContextId,
+ Maybe<String16> executionContextName) {
+ if (m_activeBindings.count(name)) return Response::Success();
if (executionContextId.isJust()) {
+ if (executionContextName.isJust()) {
+ return Response::InvalidParams(
+ "executionContextName is mutually exclusive with executionContextId");
+ }
int contextId = executionContextId.fromJust();
InspectedContext* context =
m_inspector->getContext(m_session->contextGroupId(), contextId);
if (!context) {
- return Response::ServerError(
+ return Response::InvalidParams(
"Cannot find execution context with given executionContextId");
}
addBinding(context, name);
- // false means that we should not add this binding later.
- bindings->setBoolean(name, false);
return Response::Success();
}
- bindings->setBoolean(name, true);
+
+ // If it's a globally exposed binding, i.e. no context name specified, use
+ // a special value for the context name.
+ String16 contextKey = V8RuntimeAgentImplState::globalBindingsKey;
+ if (executionContextName.isJust()) {
+ contextKey = executionContextName.fromJust();
+ if (contextKey == V8RuntimeAgentImplState::globalBindingsKey) {
+ return Response::InvalidParams("Invalid executionContextName");
+ }
+ }
+ // Only persist non-context-specific bindings, as contextIds don't make
+ // any sense when state is restored in a different process.
+ protocol::DictionaryValue* bindings =
+ getOrCreateDictionary(m_state, V8RuntimeAgentImplState::bindings);
+ protocol::DictionaryValue* contextBindings =
+ getOrCreateDictionary(bindings, contextKey);
+ contextBindings->setBoolean(name, true);
+
m_inspector->forEachContext(
m_session->contextGroupId(),
- [&name, this](InspectedContext* context) { addBinding(context, name); });
+ [&name, &executionContextName, this](InspectedContext* context) {
+ if (executionContextName.isJust() &&
+ executionContextName.fromJust() != context->humanReadableName())
+ return;
+ addBinding(context, name);
+ });
return Response::Success();
}
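The net effect on persisted state: bindings are now grouped by execution-context name, with the empty string reserved for globally exposed bindings. A toy model of that shape, with std types standing in for `protocol::DictionaryValue`:

```cpp
#include <map>
#include <set>
#include <string>

// Toy model of the persisted layout: binding names grouped per
// execution-context name, where "" marks globally exposed bindings.
using BindingState = std::map<std::string, std::set<std::string>>;

// After addBinding("log") with no context filter and
// addBinding("send", executionContextName = "main"), the state is roughly:
//   { {"", {"log"}}, {"main", {"send"}} }
```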
@@ -730,34 +760,42 @@ void V8RuntimeAgentImpl::addBinding(InspectedContext* context,
.ToLocal(&functionValue)) {
v8::Maybe<bool> success = global->Set(localContext, v8Name, functionValue);
USE(success);
+ m_activeBindings.insert(name);
}
}
Response V8RuntimeAgentImpl::removeBinding(const String16& name) {
protocol::DictionaryValue* bindings =
m_state->getObject(V8RuntimeAgentImplState::bindings);
- if (!bindings) return Response::Success();
- bindings->remove(name);
+ if (bindings) bindings->remove(name);
+ m_activeBindings.erase(name);
return Response::Success();
}
void V8RuntimeAgentImpl::bindingCalled(const String16& name,
const String16& payload,
int executionContextId) {
- protocol::DictionaryValue* bindings =
- m_state->getObject(V8RuntimeAgentImplState::bindings);
- if (!bindings || !bindings->get(name)) return;
+ if (!m_activeBindings.count(name)) return;
m_frontend.bindingCalled(name, payload, executionContextId);
}
void V8RuntimeAgentImpl::addBindings(InspectedContext* context) {
+ const String16 contextName = context->humanReadableName();
if (!m_enabled) return;
protocol::DictionaryValue* bindings =
m_state->getObject(V8RuntimeAgentImplState::bindings);
if (!bindings) return;
- for (size_t i = 0; i < bindings->size(); ++i) {
- if (!bindings->at(i).second) continue;
- addBinding(context, bindings->at(i).first);
+ protocol::DictionaryValue* globalBindings =
+ bindings->getObject(V8RuntimeAgentImplState::globalBindingsKey);
+ if (globalBindings) {
+ for (size_t i = 0; i < globalBindings->size(); ++i)
+ addBinding(context, globalBindings->at(i).first);
+ }
+ protocol::DictionaryValue* contextBindings =
+ contextName.isEmpty() ? nullptr : bindings->getObject(contextName);
+ if (contextBindings) {
+ for (size_t i = 0; i < contextBindings->size(); ++i)
+ addBinding(context, contextBindings->at(i).first);
}
}
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.h b/deps/v8/src/inspector/v8-runtime-agent-impl.h
index d0491eac5a..80c2096fed 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.h
@@ -32,14 +32,14 @@
#define V8_INSPECTOR_V8_RUNTIME_AGENT_IMPL_H_
#include <memory>
+#include <set>
#include <unordered_map>
+#include "include/v8.h"
#include "src/base/macros.h"
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/Runtime.h"
-#include "include/v8.h"
-
namespace v8_inspector {
class InjectedScript;
@@ -117,8 +117,8 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
void terminateExecution(
std::unique_ptr<TerminateExecutionCallback> callback) override;
- Response addBinding(const String16& name,
- Maybe<int> executionContextId) override;
+ Response addBinding(const String16& name, Maybe<int> executionContextId,
+ Maybe<String16> executionContextName) override;
Response removeBinding(const String16& name) override;
void addBindings(InspectedContext* context);
@@ -145,6 +145,7 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
bool m_enabled;
std::unordered_map<String16, std::unique_ptr<v8::Global<v8::Script>>>
m_compiledScripts;
+ std::set<String16> m_activeBindings;
DISALLOW_COPY_AND_ASSIGN(V8RuntimeAgentImpl);
};
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index e08af26ad7..f2fc99e389 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -9,6 +9,7 @@
#include "../../third_party/inspector_protocol/crdtp/json.h"
#include "src/inspector/v8-debugger.h"
#include "src/inspector/v8-inspector-impl.h"
+#include "src/tracing/trace-event.h"
using v8_crdtp::SpanFrom;
using v8_crdtp::json::ConvertCBORToJSON;
@@ -34,6 +35,10 @@ std::vector<std::shared_ptr<StackFrame>> toFramesVector(
int maxStackSize) {
DCHECK(debugger->isolate()->InContext());
int frameCount = std::min(v8StackTrace->GetFrameCount(), maxStackSize);
+
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
+ "SymbolizeStackTrace", "frameCount", frameCount);
+
std::vector<std::shared_ptr<StackFrame>> frames(frameCount);
for (int i = 0; i < frameCount; ++i) {
frames[i] =
@@ -253,6 +258,10 @@ std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::create(
std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::capture(
V8Debugger* debugger, int contextGroupId, int maxStackSize) {
DCHECK(debugger);
+
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
+ "V8StackTraceImpl::capture", "maxFrameCount", maxStackSize);
+
v8::Isolate* isolate = debugger->isolate();
v8::HandleScope handleScope(isolate);
v8::Local<v8::StackTrace> v8StackTrace;
@@ -404,6 +413,9 @@ std::shared_ptr<AsyncStackTrace> AsyncStackTrace::capture(
int maxStackSize) {
DCHECK(debugger);
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
+ "AsyncStackTrace::capture", "maxFrameCount", maxStackSize);
+
v8::Isolate* isolate = debugger->isolate();
v8::HandleScope handleScope(isolate);
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index 6bfb3dc3e4..e6f66c9821 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -272,6 +272,7 @@ String16 descriptionForRegExp(v8::Isolate* isolate,
v8::RegExp::Flags flags = value->GetFlags();
if (flags & v8::RegExp::Flags::kGlobal) description.append('g');
if (flags & v8::RegExp::Flags::kIgnoreCase) description.append('i');
+ if (flags & v8::RegExp::Flags::kLinear) description.append('l');
if (flags & v8::RegExp::Flags::kMultiline) description.append('m');
if (flags & v8::RegExp::Flags::kDotAll) description.append('s');
if (flags & v8::RegExp::Flags::kUnicode) description.append('u');
@@ -1738,14 +1739,36 @@ String16 descriptionForNode(v8::Local<v8::Context> context,
return description;
}
+String16 descriptionForTrustedType(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value) {
+ if (!value->IsObject()) return String16();
+ v8::Local<v8::Object> object = value.As<v8::Object>();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::TryCatch tryCatch(isolate);
+
+ v8::Local<v8::String> description;
+ if (!object->ToString(context).ToLocal(&description)) return String16();
+ return toProtocolString(isolate, description);
+}
+
std::unique_ptr<ValueMirror> clientMirror(v8::Local<v8::Context> context,
v8::Local<v8::Value> value,
const String16& subtype) {
// TODO(alph): description and length retrieval should move to embedder.
+ auto descriptionForValueSubtype =
+ clientFor(context)->descriptionForValueSubtype(context, value);
+ if (descriptionForValueSubtype) {
+ return std::make_unique<ObjectMirror>(
+ value, subtype, toString16(descriptionForValueSubtype->string()));
+ }
if (subtype == "node") {
return std::make_unique<ObjectMirror>(value, subtype,
descriptionForNode(context, value));
}
+ if (subtype == "trustedtype") {
+ return std::make_unique<ObjectMirror>(
+ value, subtype, descriptionForTrustedType(context, value));
+ }
if (subtype == "error") {
return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Error,
diff --git a/deps/v8/src/interpreter/DIR_METADATA b/deps/v8/src/interpreter/DIR_METADATA
new file mode 100644
index 0000000000..3de1f73a3d
--- /dev/null
+++ b/deps/v8/src/interpreter/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Interpreter"
+}
\ No newline at end of file
diff --git a/deps/v8/src/interpreter/OWNERS b/deps/v8/src/interpreter/OWNERS
index f013999565..481caea50b 100644
--- a/deps/v8/src/interpreter/OWNERS
+++ b/deps/v8/src/interpreter/OWNERS
@@ -1,5 +1,3 @@
leszeks@chromium.org
mythria@chromium.org
rmcilroy@chromium.org
-
-# COMPONENT: Blink>JavaScript>Interpreter
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index 86a42a5570..e536c52228 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -91,6 +91,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
BytecodeArrayAccessor(Handle<BytecodeArray> bytecode_array,
int initial_offset);
+ BytecodeArrayAccessor(const BytecodeArrayAccessor&) = delete;
+ BytecodeArrayAccessor& operator=(const BytecodeArrayAccessor&) = delete;
+
void SetOffset(int offset);
void ApplyDebugBreak();
@@ -157,8 +160,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
int bytecode_offset_;
OperandScale operand_scale_;
int prefix_offset_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeArrayAccessor);
};
} // namespace interpreter
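This hunk begins a mechanical migration repeated throughout src/interpreter below: the `DISALLOW_COPY_AND_ASSIGN` macro is dropped in favour of explicitly deleted members near the constructors. Roughly, the idiom (a sketch, not V8 code):

```cpp
// The removed macro expanded to approximately:
//   T(const T&) = delete;
//   T& operator=(const T&) = delete;
// The patch spells those deletions out in the public section instead, which
// keeps them next to the other special members and improves diagnostics.
class Uncopyable {
 public:
  Uncopyable() = default;
  Uncopyable(const Uncopyable&) = delete;
  Uncopyable& operator=(const Uncopyable&) = delete;
};
```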
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 74c2065355..dc41db71c4 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -77,8 +77,7 @@ Register BytecodeArrayBuilder::Receiver() const {
}
Register BytecodeArrayBuilder::Local(int index) const {
- // TODO(marja): Make a DCHECK once crbug.com/706234 is fixed.
- CHECK_LT(index, locals_count());
+ DCHECK_LT(index, locals_count());
return Register(index);
}
@@ -1330,6 +1329,12 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ThrowSuperAlreadyCalledIfNotHole() {
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::ThrowIfNotSuperConstructor(
+ Register constructor) {
+ OutputThrowIfNotSuperConstructor(constructor);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Debugger() {
OutputDebugger();
return *this;
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index ff7e9cb21f..b03cebdd60 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -43,6 +43,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
SourcePositionTableBuilder::RecordingMode source_position_mode =
SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS);
+ BytecodeArrayBuilder(const BytecodeArrayBuilder&) = delete;
+ BytecodeArrayBuilder& operator=(const BytecodeArrayBuilder&) = delete;
+
template <typename LocalIsolate>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
Handle<BytecodeArray> ToBytecodeArray(LocalIsolate* isolate);
@@ -459,6 +462,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& ThrowReferenceErrorIfHole(const AstRawString* name);
BytecodeArrayBuilder& ThrowSuperNotCalledIfHole();
BytecodeArrayBuilder& ThrowSuperAlreadyCalledIfNotHole();
+ BytecodeArrayBuilder& ThrowIfNotSuperConstructor(Register constructor);
// Debugger.
BytecodeArrayBuilder& Debugger();
@@ -643,8 +647,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeRegisterOptimizer* register_optimizer_;
BytecodeSourceInfo latest_source_info_;
BytecodeSourceInfo deferred_source_info_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
};
V8_EXPORT_PRIVATE std::ostream& operator<<(
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index b992ffc037..58b0b1a55a 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -20,11 +20,11 @@ class V8_EXPORT_PRIVATE BytecodeArrayIterator final
explicit BytecodeArrayIterator(Handle<BytecodeArray> array);
+ BytecodeArrayIterator(const BytecodeArrayIterator&) = delete;
+ BytecodeArrayIterator& operator=(const BytecodeArrayIterator&) = delete;
+
void Advance();
bool done() const;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(BytecodeArrayIterator);
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.h b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
index 68905a146c..99fe758bbb 100644
--- a/deps/v8/src/interpreter/bytecode-array-random-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
@@ -23,6 +23,10 @@ class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final
BytecodeArrayRandomIterator(Handle<BytecodeArray> bytecode_array, Zone* zone);
+ BytecodeArrayRandomIterator(const BytecodeArrayRandomIterator&) = delete;
+ BytecodeArrayRandomIterator& operator=(const BytecodeArrayRandomIterator&) =
+ delete;
+
BytecodeArrayRandomIterator& operator++() {
++current_index_;
UpdateOffsetFromIndex();
@@ -72,8 +76,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final
void Initialize();
void UpdateOffsetFromIndex();
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeArrayRandomIterator);
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
index c1f4266e49..6517ad9f5e 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.h
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -36,6 +36,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
BytecodeArrayWriter(
Zone* zone, ConstantArrayBuilder* constant_array_builder,
SourcePositionTableBuilder::RecordingMode source_position_mode);
+ BytecodeArrayWriter(const BytecodeArrayWriter&) = delete;
+ BytecodeArrayWriter& operator=(const BytecodeArrayWriter&) = delete;
void Write(BytecodeNode* node);
void WriteJump(BytecodeNode* node, BytecodeLabel* label);
@@ -126,7 +128,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
bool exit_seen_in_block_;
friend class bytecode_array_writer_unittest::BytecodeArrayWriterUnittest;
- DISALLOW_COPY_AND_ASSIGN(BytecodeArrayWriter);
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 64ffa39b20..675715420b 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -108,6 +108,8 @@ class BytecodeGenerator::ControlScope {
generator_->set_execution_control(this);
}
virtual ~ControlScope() { generator_->set_execution_control(outer()); }
+ ControlScope(const ControlScope&) = delete;
+ ControlScope& operator=(const ControlScope&) = delete;
void Break(Statement* stmt) {
PerformCommand(CMD_BREAK, stmt, kNoSourcePosition);
@@ -154,8 +156,6 @@ class BytecodeGenerator::ControlScope {
BytecodeGenerator* generator_;
ControlScope* outer_;
ContextScope* context_;
-
- DISALLOW_COPY_AND_ASSIGN(ControlScope);
};
// Helper class for a try-finally control scope. It can record intercepted
@@ -562,13 +562,14 @@ class BytecodeGenerator::RegisterAllocationScope final {
outer_next_register_index_);
}
+ RegisterAllocationScope(const RegisterAllocationScope&) = delete;
+ RegisterAllocationScope& operator=(const RegisterAllocationScope&) = delete;
+
BytecodeGenerator* generator() const { return generator_; }
private:
BytecodeGenerator* generator_;
int outer_next_register_index_;
-
- DISALLOW_COPY_AND_ASSIGN(RegisterAllocationScope);
};
class BytecodeGenerator::AccumulatorPreservingScope final {
@@ -591,11 +592,13 @@ class BytecodeGenerator::AccumulatorPreservingScope final {
}
}
+ AccumulatorPreservingScope(const AccumulatorPreservingScope&) = delete;
+ AccumulatorPreservingScope& operator=(const AccumulatorPreservingScope&) =
+ delete;
+
private:
BytecodeGenerator* generator_;
Register saved_accumulator_register_;
-
- DISALLOW_COPY_AND_ASSIGN(AccumulatorPreservingScope);
};
// Scoped base class for determining how the result of an expression will be
@@ -614,6 +617,9 @@ class BytecodeGenerator::ExpressionResultScope {
allocator_.generator()->set_execution_result(outer_);
}
+ ExpressionResultScope(const ExpressionResultScope&) = delete;
+ ExpressionResultScope& operator=(const ExpressionResultScope&) = delete;
+
bool IsEffect() const { return kind_ == Expression::kEffect; }
bool IsValue() const { return kind_ == Expression::kValue; }
bool IsTest() const { return kind_ == Expression::kTest; }
@@ -641,8 +647,6 @@ class BytecodeGenerator::ExpressionResultScope {
RegisterAllocationScope allocator_;
Expression::Context kind_;
TypeHint type_hint_;
-
- DISALLOW_COPY_AND_ASSIGN(ExpressionResultScope);
};
// Scoped class used when the result of the current expression is not
@@ -674,6 +678,9 @@ class BytecodeGenerator::TestResultScope final : public ExpressionResultScope {
then_labels_(then_labels),
else_labels_(else_labels) {}
+ TestResultScope(const TestResultScope&) = delete;
+ TestResultScope& operator=(const TestResultScope&) = delete;
+
// Used when code special cases for TestResultScope and consumes any
// possible value by testing and jumping to a then/else label.
void SetResultConsumedByTest() { result_consumed_by_test_ = true; }
@@ -719,8 +726,6 @@ class BytecodeGenerator::TestResultScope final : public ExpressionResultScope {
TestFallthrough fallthrough_;
BytecodeLabels* then_labels_;
BytecodeLabels* else_labels_;
-
- DISALLOW_COPY_AND_ASSIGN(TestResultScope);
};
// Used to build a list of toplevel declaration data.
@@ -2531,7 +2536,7 @@ void BytecodeGenerator::BuildInstanceMemberInitialization(Register constructor,
void BytecodeGenerator::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {
size_t entry = builder()->AllocateDeferredConstantPoolEntry();
- int index = feedback_spec()->AddFeedbackCellForCreateClosure();
+ int index = feedback_spec()->AddCreateClosureSlot();
uint8_t flags = CreateClosureFlags::Encode(false, false, false);
builder()->CreateClosure(entry, index, flags);
native_function_literals_.push_back(std::make_pair(expr, entry));
@@ -4590,11 +4595,8 @@ void BytecodeGenerator::VisitThrow(Throw* expr) {
void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
if (property->is_optional_chain_link()) {
DCHECK_NOT_NULL(optional_chaining_null_labels_);
- int right_range =
- AllocateBlockCoverageSlotIfEnabled(property, SourceRangeKind::kRight);
builder()->LoadAccumulatorWithRegister(obj).JumpIfUndefinedOrNull(
optional_chaining_null_labels_->New());
- BuildIncrementBlockCoverageCounterIfEnabled(right_range);
}
AssignType property_kind = Property::GetAssignType(property);
@@ -4924,8 +4926,9 @@ void BytecodeGenerator::VisitCall(Call* expr) {
Property* property = chain->expression()->AsProperty();
BuildOptionalChain([&]() {
VisitAndPushIntoRegisterList(property->obj(), &args);
- VisitPropertyLoadForRegister(args.last_register(), property, callee);
+ VisitPropertyLoad(args.last_register(), property);
});
+ builder()->StoreAccumulatorInRegister(callee);
break;
}
case Call::SUPER_CALL:
@@ -4934,11 +4937,8 @@ void BytecodeGenerator::VisitCall(Call* expr) {
if (expr->is_optional_chain_link()) {
DCHECK_NOT_NULL(optional_chaining_null_labels_);
- int right_range =
- AllocateBlockCoverageSlotIfEnabled(expr, SourceRangeKind::kRight);
builder()->LoadAccumulatorWithRegister(callee).JumpIfUndefinedOrNull(
optional_chaining_null_labels_->New());
- BuildIncrementBlockCoverageCounterIfEnabled(right_range);
}
// Evaluate all arguments to the function call and store in sequential args
@@ -5025,6 +5025,9 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
// First generate the array containing all arguments.
BuildCreateArrayLiteral(args, nullptr);
+ // Check that the super constructor is actually a constructor.
+ builder()->ThrowIfNotSuperConstructor(constructor);
+
// Now pass that array to %reflect_construct.
RegisterList construct_args = register_allocator()->NewRegisterList(3);
builder()->StoreAccumulatorInRegister(construct_args[1]);
@@ -5034,6 +5037,10 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
} else {
RegisterList args_regs = register_allocator()->NewGrowableRegisterList();
VisitArguments(args, &args_regs);
+
+ // Check that the super constructor is actually a constructor.
+ builder()->ThrowIfNotSuperConstructor(constructor);
+
// The new target is loaded into the accumulator from the
// {new.target} variable.
VisitForAccumulatorValue(super->new_target_var());
@@ -5210,10 +5217,7 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* unary) {
OptionalChainNullLabelScope label_scope(this);
VisitForAccumulatorValue(property->obj());
if (property->is_optional_chain_link()) {
- int right_range = AllocateBlockCoverageSlotIfEnabled(
- property, SourceRangeKind::kRight);
builder()->JumpIfUndefinedOrNull(label_scope.labels()->New());
- BuildIncrementBlockCoverageCounterIfEnabled(right_range);
}
Register object = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(object);
@@ -6660,7 +6664,7 @@ int BytecodeGenerator::GetCachedCreateClosureSlot(FunctionLiteral* literal) {
if (index != -1) {
return index;
}
- index = feedback_spec()->AddFeedbackCellForCreateClosure();
+ index = feedback_spec()->AddCreateClosureSlot();
feedback_slot_cache()->Put(slot_kind, literal, index);
return index;
}
diff --git a/deps/v8/src/interpreter/bytecode-label.h b/deps/v8/src/interpreter/bytecode-label.h
index 4581f4f4e2..1c9d0e9d6b 100644
--- a/deps/v8/src/interpreter/bytecode-label.h
+++ b/deps/v8/src/interpreter/bytecode-label.h
@@ -84,6 +84,8 @@ class V8_EXPORT_PRIVATE BytecodeLabel final {
class V8_EXPORT_PRIVATE BytecodeLabels {
public:
explicit BytecodeLabels(Zone* zone) : labels_(zone), is_bound_(false) {}
+ BytecodeLabels(const BytecodeLabels&) = delete;
+ BytecodeLabels& operator=(const BytecodeLabels&) = delete;
BytecodeLabel* New();
@@ -103,8 +105,6 @@ class V8_EXPORT_PRIVATE BytecodeLabels {
private:
ZoneLinkedList<BytecodeLabel> labels_;
bool is_bound_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeLabels);
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.h b/deps/v8/src/interpreter/bytecode-register-allocator.h
index b270e3d38b..442e3d27aa 100644
--- a/deps/v8/src/interpreter/bytecode-register-allocator.h
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.h
@@ -30,6 +30,9 @@ class BytecodeRegisterAllocator final {
max_register_count_(start_index),
observer_(nullptr) {}
~BytecodeRegisterAllocator() = default;
+ BytecodeRegisterAllocator(const BytecodeRegisterAllocator&) = delete;
+ BytecodeRegisterAllocator& operator=(const BytecodeRegisterAllocator&) =
+ delete;
// Returns a new register.
Register NewRegister() {
@@ -101,8 +104,6 @@ class BytecodeRegisterAllocator final {
int next_register_index_;
int max_register_count_;
Observer* observer_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeRegisterAllocator);
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.cc b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
index e3bbfaa6f9..3d9c9e1dac 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
@@ -24,6 +24,8 @@ class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
needs_flush_(false),
next_(this),
prev_(this) {}
+ RegisterInfo(const RegisterInfo&) = delete;
+ RegisterInfo& operator=(const RegisterInfo&) = delete;
void AddToEquivalenceSetOf(RegisterInfo* info);
void MoveToNewEquivalenceSet(uint32_t equivalence_id, bool materialized);
@@ -85,8 +87,6 @@ class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
// Equivalence set pointers.
RegisterInfo* next_;
RegisterInfo* prev_;
-
- DISALLOW_COPY_AND_ASSIGN(RegisterInfo);
};
void BytecodeRegisterOptimizer::RegisterInfo::AddToEquivalenceSetOf(
@@ -233,11 +233,7 @@ BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
// a vector of register metadata.
// There is at least one parameter, which is the JS receiver.
DCHECK_NE(parameter_count, 0);
-#ifdef V8_REVERSE_JSARGS
int first_slot_index = parameter_count - 1;
-#else
- int first_slot_index = 0;
-#endif
register_info_table_offset_ =
-Register::FromParameterIndex(first_slot_index, parameter_count).index();
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index 674a4e3ac5..289b8983f3 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -25,14 +25,13 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
public:
BytecodeWriter() = default;
virtual ~BytecodeWriter() = default;
+ BytecodeWriter(const BytecodeWriter&) = delete;
+ BytecodeWriter& operator=(const BytecodeWriter&) = delete;
// Called to emit a register transfer bytecode.
virtual void EmitLdar(Register input) = 0;
virtual void EmitStar(Register output) = 0;
virtual void EmitMov(Register input, Register output) = 0;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(BytecodeWriter);
};
BytecodeRegisterOptimizer(Zone* zone,
@@ -40,6 +39,9 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
int fixed_registers_count, int parameter_count,
BytecodeWriter* bytecode_writer);
~BytecodeRegisterOptimizer() override = default;
+ BytecodeRegisterOptimizer(const BytecodeRegisterOptimizer&) = delete;
+ BytecodeRegisterOptimizer& operator=(const BytecodeRegisterOptimizer&) =
+ delete;
// Perform explicit register transfer operations.
void DoLdar(Register input) {
@@ -201,8 +203,6 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
BytecodeWriter* bytecode_writer_;
bool flush_required_;
Zone* zone_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeRegisterOptimizer);
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-register.cc b/deps/v8/src/interpreter/bytecode-register.cc
index 13d831e8b7..e8eb347f16 100644
--- a/deps/v8/src/interpreter/bytecode-register.cc
+++ b/deps/v8/src/interpreter/bytecode-register.cc
@@ -8,17 +8,10 @@ namespace v8 {
namespace internal {
namespace interpreter {
-#ifdef V8_REVERSE_JSARGS
static const int kFirstParamRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
InterpreterFrameConstants::kFirstParamFromFp) /
kSystemPointerSize;
-#else
-static const int kLastParamRegisterIndex =
- (InterpreterFrameConstants::kRegisterFileFromFp -
- InterpreterFrameConstants::kLastParamFromFp) /
- kSystemPointerSize;
-#endif
static const int kFunctionClosureRegisterIndex =
(InterpreterFrameConstants::kRegisterFileFromFp -
StandardFrameConstants::kFunctionOffset) /
@@ -43,22 +36,14 @@ static const int kCallerPCOffsetRegisterIndex =
Register Register::FromParameterIndex(int index, int parameter_count) {
DCHECK_GE(index, 0);
DCHECK_LT(index, parameter_count);
-#ifdef V8_REVERSE_JSARGS
int register_index = kFirstParamRegisterIndex - index;
-#else
- int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
-#endif
DCHECK_LT(register_index, 0);
return Register(register_index);
}
int Register::ToParameterIndex(int parameter_count) const {
DCHECK(is_parameter());
-#ifdef V8_REVERSE_JSARGS
return kFirstParamRegisterIndex - index();
-#else
- return index() - kLastParamRegisterIndex + parameter_count - 1;
-#endif
}
Register Register::function_closure() {
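With the non-reversed-args path deleted, the surviving mapping is a simple reflection around the first parameter register. A toy model with a hypothetical `kFirstParam` value (the real one is derived from `InterpreterFrameConstants`) shows the round trip:

```cpp
// Hypothetical stand-in for kFirstParamRegisterIndex; parameter register
// indices are negative, counting down from the receiver at index 0.
constexpr int kFirstParam = -3;

constexpr int FromParameterIndex(int index) { return kFirstParam - index; }
constexpr int ToParameterIndex(int reg) { return kFirstParam - reg; }

static_assert(FromParameterIndex(0) == kFirstParam, "receiver slot");
static_assert(ToParameterIndex(FromParameterIndex(2)) == 2, "round trip");
```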
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index fe22559f18..2cff678920 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -345,6 +345,7 @@ namespace interpreter {
V(ThrowReferenceErrorIfHole, AccumulatorUse::kRead, OperandType::kIdx) \
V(ThrowSuperNotCalledIfHole, AccumulatorUse::kRead) \
V(ThrowSuperAlreadyCalledIfNotHole, AccumulatorUse::kRead) \
+ V(ThrowIfNotSuperConstructor, AccumulatorUse::kNone, OperandType::kReg) \
\
/* Generators */ \
V(SwitchOnGeneratorState, AccumulatorUse::kNone, OperandType::kReg, \
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index a44ce0b7a1..87fe0559ae 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -198,6 +198,9 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
struct ConstantArraySlice final : public ZoneObject {
ConstantArraySlice(Zone* zone, size_t start_index, size_t capacity,
OperandSize operand_size);
+ ConstantArraySlice(const ConstantArraySlice&) = delete;
+ ConstantArraySlice& operator=(const ConstantArraySlice&) = delete;
+
void Reserve();
void Unreserve();
size_t Allocate(Entry entry, size_t count = 1);
@@ -223,8 +226,6 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
size_t reserved_;
OperandSize operand_size_;
ZoneVector<Entry> constants_;
-
- DISALLOW_COPY_AND_ASSIGN(ConstantArraySlice);
};
ConstantArraySlice* IndexToSlice(size_t index) const;
diff --git a/deps/v8/src/interpreter/handler-table-builder.h b/deps/v8/src/interpreter/handler-table-builder.h
index 9bf2b17258..f5f264d7c7 100644
--- a/deps/v8/src/interpreter/handler-table-builder.h
+++ b/deps/v8/src/interpreter/handler-table-builder.h
@@ -25,6 +25,8 @@ namespace interpreter {
class V8_EXPORT_PRIVATE HandlerTableBuilder final {
public:
explicit HandlerTableBuilder(Zone* zone);
+ HandlerTableBuilder(const HandlerTableBuilder&) = delete;
+ HandlerTableBuilder& operator=(const HandlerTableBuilder&) = delete;
// Builds the actual handler table by copying the current values into a heap
// object. Any further mutations to the builder won't be reflected.
@@ -55,8 +57,6 @@ class V8_EXPORT_PRIVATE HandlerTableBuilder final {
};
ZoneVector<Entry> entries_;
-
- DISALLOW_COPY_AND_ASSIGN(HandlerTableBuilder);
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index e6fd97ddf2..596783b64f 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -21,7 +21,6 @@ namespace internal {
namespace interpreter {
using compiler::CodeAssemblerState;
-using compiler::Node;
InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
Bytecode bytecode,
@@ -30,19 +29,19 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
bytecode_(bytecode),
operand_scale_(operand_scale),
TVARIABLE_CONSTRUCTOR(interpreted_frame_pointer_),
- TVARIABLE_CONSTRUCTOR(
- bytecode_array_,
- CAST(Parameter(InterpreterDispatchDescriptor::kBytecodeArray))),
+ TVARIABLE_CONSTRUCTOR(bytecode_array_,
+ Parameter<BytecodeArray>(
+ InterpreterDispatchDescriptor::kBytecodeArray)),
TVARIABLE_CONSTRUCTOR(
bytecode_offset_,
- UncheckedCast<IntPtrT>(
- Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))),
- TVARIABLE_CONSTRUCTOR(
- dispatch_table_, UncheckedCast<ExternalReference>(Parameter(
- InterpreterDispatchDescriptor::kDispatchTable))),
+ UncheckedParameter<IntPtrT>(
+ InterpreterDispatchDescriptor::kBytecodeOffset)),
+ TVARIABLE_CONSTRUCTOR(dispatch_table_,
+ UncheckedParameter<ExternalReference>(
+ InterpreterDispatchDescriptor::kDispatchTable)),
TVARIABLE_CONSTRUCTOR(
accumulator_,
- CAST(Parameter(InterpreterDispatchDescriptor::kAccumulator))),
+ Parameter<Object>(InterpreterDispatchDescriptor::kAccumulator)),
accumulator_use_(AccumulatorUse::kNone),
made_call_(false),
reloaded_frame_ptr_(false),
@@ -83,7 +82,8 @@ TNode<RawPtrT> InterpreterAssembler::GetInterpretedFramePointer() {
TNode<IntPtrT> InterpreterAssembler::BytecodeOffset() {
if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
(bytecode_offset_.value() ==
- Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))) {
+ UncheckedParameter<IntPtrT>(
+ InterpreterDispatchDescriptor::kBytecodeOffset))) {
bytecode_offset_ = ReloadBytecodeOffset();
}
return bytecode_offset_.value();
@@ -140,7 +140,8 @@ TNode<BytecodeArray> InterpreterAssembler::BytecodeArrayTaggedPointer() {
TNode<ExternalReference> InterpreterAssembler::DispatchTablePointer() {
if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
(dispatch_table_.value() ==
- Parameter(InterpreterDispatchDescriptor::kDispatchTable))) {
+ UncheckedParameter<ExternalReference>(
+ InterpreterDispatchDescriptor::kDispatchTable))) {
dispatch_table_ = ExternalConstant(
ExternalReference::interpreter_dispatch_table_address(isolate()));
}
@@ -772,15 +773,9 @@ void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function,
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// The first argument parameter (the receiver) is implied to be undefined.
-#ifdef V8_REVERSE_JSARGS
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
context, function, arg_count, args...,
UndefinedConstant());
-#else
- TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
- context, function, arg_count,
- UndefinedConstant(), args...);
-#endif
} else {
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
context, function, arg_count, args...);
@@ -846,10 +841,9 @@ TNode<Object> InterpreterAssembler::Construct(
Comment("call using Construct builtin");
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kOther);
- TNode<Code> code_target = HeapConstant(callable.code());
- var_result = CallStub(callable.descriptor(), code_target, context,
- args.reg_count(), args.base_reg_location(), target,
- new_target, UndefinedConstant());
+ var_result =
+ CallStub(callable, context, args.reg_count(), args.base_reg_location(),
+ target, new_target, UndefinedConstant());
Goto(&return_result);
}
@@ -860,10 +854,9 @@ TNode<Object> InterpreterAssembler::Construct(
Comment("call using ConstructArray builtin");
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kArrayFunction);
- TNode<Code> code_target = HeapConstant(callable.code());
- var_result = CallStub(callable.descriptor(), code_target, context,
- args.reg_count(), args.base_reg_location(), target,
- new_target, var_site.value());
+ var_result =
+ CallStub(callable, context, args.reg_count(), args.base_reg_location(),
+ target, new_target, var_site.value());
Goto(&return_result);
}
@@ -988,19 +981,18 @@ TNode<Object> InterpreterAssembler::ConstructWithSpread(
Comment("call using ConstructWithSpread builtin");
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kWithFinalSpread);
- TNode<Code> code_target = HeapConstant(callable.code());
- return CallStub(callable.descriptor(), code_target, context, args.reg_count(),
- args.base_reg_location(), target, new_target,
- UndefinedConstant());
+ return CallStub(callable, context, args.reg_count(), args.base_reg_location(),
+ target, new_target, UndefinedConstant());
}
-Node* InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
- TNode<Context> context,
- const RegListNodePair& args,
- int result_size) {
+template <class T>
+TNode<T> InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
+ TNode<Context> context,
+ const RegListNodePair& args,
+ int return_count) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK(Bytecodes::IsCallRuntime(bytecode_));
- Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
+ Callable callable = CodeFactory::InterpreterCEntry(isolate(), return_count);
TNode<Code> code_target = HeapConstant(callable.code());
// Get the function entry from the function id.
@@ -1013,11 +1005,20 @@ Node* InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
TNode<RawPtrT> function_entry = Load<RawPtrT>(
function, IntPtrConstant(offsetof(Runtime::Function, entry)));
- return CallStubR(StubCallMode::kCallCodeObject, callable.descriptor(),
- result_size, code_target, context, args.reg_count(),
- args.base_reg_location(), function_entry);
+ return CallStub<T>(callable.descriptor(), code_target, context,
+ args.reg_count(), args.base_reg_location(),
+ function_entry);
}
+template V8_EXPORT_PRIVATE TNode<Object> InterpreterAssembler::CallRuntimeN(
+ TNode<Uint32T> function_id, TNode<Context> context,
+ const RegListNodePair& args, int return_count);
+template V8_EXPORT_PRIVATE TNode<PairT<Object, Object>>
+InterpreterAssembler::CallRuntimeN(TNode<Uint32T> function_id,
+ TNode<Context> context,
+ const RegListNodePair& args,
+ int return_count);
+
void InterpreterAssembler::UpdateInterruptBudget(TNode<Int32T> weight,
bool backward) {
Comment("[ UpdateInterruptBudget");
@@ -1399,14 +1400,8 @@ TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
// Iterate over parameters and write them into the array.
Label loop(this, &var_index), done_loop(this);
-#ifdef V8_REVERSE_JSARGS
TNode<IntPtrT> reg_base =
IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() + 1);
-#else
- TNode<IntPtrT> reg_base = IntPtrAdd(
- IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() - 1),
- formal_parameter_count_intptr);
-#endif
Goto(&loop);
BIND(&loop);
@@ -1415,11 +1410,7 @@ TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr),
&done_loop);
-#ifdef V8_REVERSE_JSARGS
TNode<IntPtrT> reg_index = IntPtrAdd(reg_base, index);
-#else
- TNode<IntPtrT> reg_index = IntPtrSub(reg_base, index);
-#endif
TNode<Object> value = LoadRegister(reg_index);
StoreFixedArrayElement(array, index, value);
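CallRuntimeN now returns a typed TNode<T> and is defined as a template in the .cc file, with the two needed instantiations (a single result and an Object pair) exported explicitly so the header keeps only the declaration. A rough single-file sketch of that explicit-instantiation linkage pattern, with illustrative names and stand-in types:

#include <iostream>

// header part: only the declaration is visible to other translation units.
template <class T>
T CallN(int id);

// source part: definition plus the explicit instantiations that get linked.
template <class T>
T CallN(int id) {
  return static_cast<T>(id);  // stand-in for the real runtime-call sequence
}
template int CallN<int>(int);        // plays the role of TNode<Object>
template double CallN<double>(int);  // plays the role of TNode<PairT<Object, Object>>

int main() {
  std::cout << CallN<int>(1) << " " << CallN<double>(2) << "\n";
  return 0;
}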
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 729e23c7a6..2884aaed1a 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -22,6 +22,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
InterpreterAssembler(compiler::CodeAssemblerState* state, Bytecode bytecode,
OperandScale operand_scale);
~InterpreterAssembler();
+ InterpreterAssembler(const InterpreterAssembler&) = delete;
+ InterpreterAssembler& operator=(const InterpreterAssembler&) = delete;
// Returns the 32-bit unsigned count immediate for bytecode operand
// |operand_index| in the current bytecode.
@@ -191,12 +193,10 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
TNode<UintPtrT> slot_id,
TNode<HeapObject> maybe_feedback_vector);
- // Call runtime function with |args| arguments which will return |return_size|
- // number of values.
- compiler::Node* CallRuntimeN(TNode<Uint32T> function_id,
- TNode<Context> context,
- const RegListNodePair& args,
- int return_size = 1);
+ // Call runtime function with |args| arguments.
+ template <class T = Object>
+ TNode<T> CallRuntimeN(TNode<Uint32T> function_id, TNode<Context> context,
+ const RegListNodePair& args, int return_count);
// Jump forward relative to the current bytecode by the |jump_offset|.
void Jump(TNode<IntPtrT> jump_offset);
@@ -402,8 +402,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
bool made_call_;
bool reloaded_frame_ptr_;
bool bytecode_array_valid_;
-
- DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index b3ca4a1b9c..3b7172867e 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -36,7 +36,6 @@ namespace interpreter {
namespace {
using compiler::CodeAssemblerState;
-using compiler::Node;
using Label = CodeStubAssembler::Label;
#define IGNITION_HANDLER(Name, BaseAssembler) \
@@ -45,12 +44,13 @@ using Label = CodeStubAssembler::Label;
explicit Name##Assembler(compiler::CodeAssemblerState* state, \
Bytecode bytecode, OperandScale scale) \
: BaseAssembler(state, bytecode, scale) {} \
+ Name##Assembler(const Name##Assembler&) = delete; \
+ Name##Assembler& operator=(const Name##Assembler&) = delete; \
static void Generate(compiler::CodeAssemblerState* state, \
OperandScale scale); \
\
private: \
void GenerateImpl(); \
- DISALLOW_COPY_AND_ASSIGN(Name##Assembler); \
}; \
void Name##Assembler::Generate(compiler::CodeAssemblerState* state, \
OperandScale scale) { \
@@ -600,7 +600,6 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
: InterpreterAssembler(state, bytecode, operand_scale) {}
void StaNamedProperty(Callable ic, NamedPropertyType property_type) {
- TNode<Code> code_target = HeapConstant(ic.code());
TNode<Object> object = LoadRegisterAtOperandIndex(0);
TNode<Name> name = CAST(LoadConstantPoolEntryAtOperandIndex(1));
TNode<Object> value = GetAccumulator();
@@ -609,8 +608,7 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
TNode<Context> context = GetContext();
TVARIABLE(Object, var_result);
- var_result = CallStub(ic.descriptor(), code_target, context, object, name,
- value, slot, maybe_vector);
+ var_result = CallStub(ic, context, object, name, value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
@@ -1340,8 +1338,7 @@ IGNITION_HANDLER(DeletePropertySloppy, InterpreterAssembler) {
// The result is stored in register |reg|.
IGNITION_HANDLER(GetSuperConstructor, InterpreterAssembler) {
TNode<JSFunction> active_function = CAST(GetAccumulator());
- TNode<Context> context = GetContext();
- TNode<Object> result = GetSuperConstructor(context, active_function);
+ TNode<Object> result = GetSuperConstructor(active_function);
StoreRegisterAtOperandIndex(result, 0);
Dispatch();
}
@@ -1407,32 +1404,17 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex));
break;
case 2:
-#ifdef V8_REVERSE_JSARGS
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1),
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex));
-#else
- CallJSAndDispatch(
- function, context, Int32Constant(arg_count), receiver_mode,
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1));
-#endif
break;
case 3:
-#ifdef V8_REVERSE_JSARGS
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2),
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1),
LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex));
-#else
- CallJSAndDispatch(
- function, context, Int32Constant(arg_count), receiver_mode,
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex),
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 1),
- LoadRegisterAtOperandIndex(kFirstArgumentOperandIndex + 2));
-#endif
break;
default:
UNREACHABLE();
@@ -1494,7 +1476,7 @@ IGNITION_HANDLER(CallRuntime, InterpreterAssembler) {
TNode<Uint32T> function_id = BytecodeOperandRuntimeId(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
TNode<Context> context = GetContext();
- TNode<Object> result = CAST(CallRuntimeN(function_id, context, args));
+ TNode<Object> result = CallRuntimeN(function_id, context, args, 1);
SetAccumulator(result);
Dispatch();
}
@@ -1525,10 +1507,11 @@ IGNITION_HANDLER(CallRuntimeForPair, InterpreterAssembler) {
TNode<Uint32T> function_id = BytecodeOperandRuntimeId(0);
RegListNodePair args = GetRegisterListAtOperandIndex(1);
TNode<Context> context = GetContext();
- Node* result_pair = CallRuntimeN(function_id, context, args, 2);
+ auto result_pair =
+ CallRuntimeN<PairT<Object, Object>>(function_id, context, args, 2);
// Store the results in <first_return> and <first_return + 1>
- TNode<Object> result0 = CAST(Projection(0, result_pair));
- TNode<Object> result1 = CAST(Projection(1, result_pair));
+ TNode<Object> result0 = Projection<0>(result_pair);
+ TNode<Object> result1 = Projection<1>(result_pair);
StoreRegisterPairAtOperandIndex(result0, result1, 3);
Dispatch();
}
@@ -2209,8 +2192,7 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
BIND(&osr_armed);
{
Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
- TNode<Code> target = HeapConstant(callable.code());
- CallStub(callable.descriptor(), target, context);
+ CallStub(callable, context);
JumpBackward(relative_jump);
}
}
@@ -2737,7 +2719,7 @@ IGNITION_HANDLER(ThrowSuperNotCalledIfHole, InterpreterAssembler) {
// ThrowSuperAlreadyCalledIfNotHole
//
-// Throws SuperAleradyCalled exception if the value in the accumulator is not
+// Throws SuperAlreadyCalled exception if the value in the accumulator is not
// TheHole.
IGNITION_HANDLER(ThrowSuperAlreadyCalledIfNotHole, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
@@ -2755,6 +2737,31 @@ IGNITION_HANDLER(ThrowSuperAlreadyCalledIfNotHole, InterpreterAssembler) {
}
}
+// ThrowIfNotSuperConstructor <constructor>
+//
+// Throws an exception if the value in |constructor| is not in fact a
+// constructor.
+IGNITION_HANDLER(ThrowIfNotSuperConstructor, InterpreterAssembler) {
+ TNode<HeapObject> constructor = CAST(LoadRegisterAtOperandIndex(0));
+ TNode<Context> context = GetContext();
+
+ Label is_not_constructor(this, Label::kDeferred);
+ TNode<Map> constructor_map = LoadMap(constructor);
+ GotoIfNot(IsConstructorMap(constructor_map), &is_not_constructor);
+ Dispatch();
+
+ BIND(&is_not_constructor);
+ {
+ TNode<JSFunction> function =
+ CAST(LoadRegister(Register::function_closure()));
+ CallRuntime(Runtime::kThrowNotSuperConstructor, context, constructor,
+ function);
+ // We shouldn't ever return from a throw.
+ Abort(AbortReason::kUnexpectedReturnFromThrow);
+ Unreachable();
+ }
+}
+
// Debugger
//
// Call runtime to handle debugger statement.
@@ -2771,10 +2778,10 @@ IGNITION_HANDLER(Debugger, InterpreterAssembler) {
IGNITION_HANDLER(Name, InterpreterAssembler) { \
TNode<Context> context = GetContext(); \
TNode<Object> accumulator = GetAccumulator(); \
- TNode<Object> result_pair = \
- CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
- TNode<Object> return_value = CAST(Projection(0, result_pair)); \
- TNode<IntPtrT> original_bytecode = SmiUntag(Projection(1, result_pair)); \
+ TNode<PairT<Object, Smi>> result_pair = CallRuntime<PairT<Object, Smi>>( \
+ Runtime::kDebugBreakOnBytecode, context, accumulator); \
+ TNode<Object> return_value = Projection<0>(result_pair); \
+ TNode<IntPtrT> original_bytecode = SmiUntag(Projection<1>(result_pair)); \
MaybeDropFrames(context); \
SetAccumulator(return_value); \
DispatchToBytecode(original_bytecode, BytecodeOffset()); \
@@ -2841,57 +2848,14 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
TNode<UintPtrT> vector_index = BytecodeOperandIdx(1);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
- // Check if we're using an enum cache.
- Label if_fast(this), if_slow(this);
- Branch(IsMap(enumerator), &if_fast, &if_slow);
-
- BIND(&if_fast);
- {
- // Load the enumeration length and cache from the {enumerator}.
- TNode<Map> map_enumerator = CAST(enumerator);
- TNode<WordT> enum_length = LoadMapEnumLength(map_enumerator);
- CSA_ASSERT(this, WordNotEqual(enum_length,
- IntPtrConstant(kInvalidEnumCacheSentinel)));
- TNode<DescriptorArray> descriptors = LoadMapDescriptors(map_enumerator);
- TNode<EnumCache> enum_cache = LoadObjectField<EnumCache>(
- descriptors, DescriptorArray::kEnumCacheOffset);
- TNode<FixedArray> enum_keys =
- LoadObjectField<FixedArray>(enum_cache, EnumCache::kKeysOffset);
-
- // Check if we have enum indices available.
- TNode<FixedArray> enum_indices =
- LoadObjectField<FixedArray>(enum_cache, EnumCache::kIndicesOffset);
- TNode<IntPtrT> enum_indices_length =
- LoadAndUntagFixedArrayBaseLength(enum_indices);
- TNode<Smi> feedback = SelectSmiConstant(
- IntPtrLessThanOrEqual(enum_length, enum_indices_length),
- ForInFeedback::kEnumCacheKeysAndIndices, ForInFeedback::kEnumCacheKeys);
- UpdateFeedback(feedback, maybe_feedback_vector, vector_index);
-
- // Construct the cache info triple.
- TNode<Map> cache_type = map_enumerator;
- TNode<FixedArray> cache_array = enum_keys;
- TNode<Smi> cache_length = SmiTag(Signed(enum_length));
- StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
- Dispatch();
- }
+  TNode<HeapObject> cache_type = enumerator;  // Just to clarify the naming.
+ TNode<FixedArray> cache_array;
+ TNode<Smi> cache_length;
+ ForInPrepare(enumerator, vector_index, maybe_feedback_vector, &cache_array,
+ &cache_length);
- BIND(&if_slow);
- {
- // The {enumerator} is a FixedArray with all the keys to iterate.
- TNode<FixedArray> array_enumerator = CAST(enumerator);
-
- // Record the fact that we hit the for-in slow-path.
- UpdateFeedback(SmiConstant(ForInFeedback::kAny), maybe_feedback_vector,
- vector_index);
-
- // Construct the cache info triple.
- TNode<FixedArray> cache_type = array_enumerator;
- TNode<FixedArray> cache_array = array_enumerator;
- TNode<Smi> cache_length = LoadFixedArrayBaseLength(array_enumerator);
- StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
- Dispatch();
- }
+ StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
+ Dispatch();
}
// ForInNext <receiver> <index> <cache_info_pair>
@@ -2921,14 +2885,9 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
}
BIND(&if_slow);
{
- // Record the fact that we hit the for-in slow-path.
- UpdateFeedback(SmiConstant(ForInFeedback::kAny), maybe_feedback_vector,
- vector_index);
-
- // Need to filter the {key} for the {receiver}.
- TNode<Context> context = GetContext();
TNode<Object> result =
- CallBuiltin(Builtins::kForInFilter, context, key, receiver);
+ ForInNextSlow(GetContext(), vector_index, receiver, key, cache_type,
+ maybe_feedback_vector);
SetAccumulator(result);
Dispatch();
}
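Several handlers above also move from Projection(index, pair) plus a CAST to Projection<Index>(pair), fixing the projection index at compile time so the result type follows from it. A loose standard-library analog using std::tuple (the V8 CSA types themselves cannot be reproduced standalone):

#include <iostream>
#include <tuple>

int main() {
  std::tuple<const char*, int> pair{"value", 42};
  // Old shape: index supplied at runtime, caller casts the result.
  // New shape: index as a template argument, type checked at compile time.
  auto first = std::get<0>(pair);   // const char*, no cast needed
  auto second = std::get<1>(pair);  // int
  std::cout << first << " " << second << "\n";
  return 0;
}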
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index 03ca61de52..b9975e66ea 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -20,14 +20,14 @@ namespace v8 {
namespace internal {
namespace interpreter {
-using compiler::Node;
-
class IntrinsicsGenerator {
public:
explicit IntrinsicsGenerator(InterpreterAssembler* assembler)
: isolate_(assembler->isolate()),
zone_(assembler->zone()),
assembler_(assembler) {}
+ IntrinsicsGenerator(const IntrinsicsGenerator&) = delete;
+ IntrinsicsGenerator& operator=(const IntrinsicsGenerator&) = delete;
TNode<Object> InvokeIntrinsic(
TNode<Uint32T> function_id, TNode<Context> context,
@@ -42,17 +42,14 @@ class IntrinsicsGenerator {
TNode<Oddball> IsInstanceType(TNode<Object> input, int type);
TNode<BoolT> CompareInstanceType(TNode<HeapObject> map, int type,
InstanceTypeCompareMode mode);
- TNode<Object> IntrinsicAsStubCall(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
- Callable const& callable);
TNode<Object> IntrinsicAsBuiltinCall(
const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
- Builtins::Name name);
+ Builtins::Name name, int arg_count);
void AbortIfArgCountMismatch(int expected, TNode<Word32T> actual);
#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \
TNode<Object> name(const InterpreterAssembler::RegListNodePair& args, \
- TNode<Context> context);
+ TNode<Context> context, int arg_count);
INTRINSICS_LIST(DECLARE_INTRINSIC_HELPER)
#undef DECLARE_INTRINSIC_HELPER
@@ -63,8 +60,6 @@ class IntrinsicsGenerator {
Isolate* isolate_;
Zone* zone_;
InterpreterAssembler* assembler_;
-
- DISALLOW_COPY_AND_ASSIGN(IntrinsicsGenerator);
};
TNode<Object> GenerateInvokeIntrinsic(
@@ -103,7 +98,7 @@ TNode<Object> IntrinsicsGenerator::InvokeIntrinsic(
if (FLAG_debug_code && expected_arg_count >= 0) { \
AbortIfArgCountMismatch(expected_arg_count, args.reg_count()); \
} \
- TNode<Object> value = name(args, context); \
+ TNode<Object> value = name(args, context, expected_arg_count); \
if (value) { \
result = value; \
__ Goto(&end); \
@@ -146,8 +141,34 @@ TNode<Oddball> IntrinsicsGenerator::IsInstanceType(TNode<Object> input,
return result;
}
+TNode<Object> IntrinsicsGenerator::IntrinsicAsBuiltinCall(
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ Builtins::Name name, int arg_count) {
+ Callable callable = Builtins::CallableFor(isolate_, name);
+ switch (arg_count) {
+ case 1:
+ return __ CallStub(callable, context,
+ __ LoadRegisterFromRegisterList(args, 0));
+ break;
+ case 2:
+ return __ CallStub(callable, context,
+ __ LoadRegisterFromRegisterList(args, 0),
+ __ LoadRegisterFromRegisterList(args, 1));
+ break;
+ case 3:
+ return __ CallStub(callable, context,
+ __ LoadRegisterFromRegisterList(args, 0),
+ __ LoadRegisterFromRegisterList(args, 1),
+ __ LoadRegisterFromRegisterList(args, 2));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
TNode<Object> IntrinsicsGenerator::IsJSReceiver(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
TNode<Object> input = __ LoadRegisterFromRegisterList(args, 0);
TNode<Oddball> result = __ Select<Oddball>(
__ TaggedIsSmi(input), [=] { return __ FalseConstant(); },
@@ -158,81 +179,61 @@ TNode<Object> IntrinsicsGenerator::IsJSReceiver(
}
TNode<Object> IntrinsicsGenerator::IsArray(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
TNode<Object> input = __ LoadRegisterFromRegisterList(args, 0);
return IsInstanceType(input, JS_ARRAY_TYPE);
}
TNode<Object> IntrinsicsGenerator::IsSmi(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
TNode<Object> input = __ LoadRegisterFromRegisterList(args, 0);
return __ SelectBooleanConstant(__ TaggedIsSmi(input));
}
-TNode<Object> IntrinsicsGenerator::IntrinsicAsStubCall(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
- Callable const& callable) {
- int param_count = callable.descriptor().GetParameterCount();
- int input_count = param_count + 2; // +2 for target and context
- Node** stub_args = zone()->NewArray<Node*>(input_count);
- int index = 0;
- stub_args[index++] = __ HeapConstant(callable.code());
- for (int i = 0; i < param_count; i++) {
- stub_args[index++] = __ LoadRegisterFromRegisterList(args, i);
- }
- stub_args[index++] = context;
- return __ CAST(__ CallStubN(StubCallMode::kCallCodeObject,
- callable.descriptor(), 1, input_count,
- stub_args));
-}
-
-TNode<Object> IntrinsicsGenerator::IntrinsicAsBuiltinCall(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
- Builtins::Name name) {
- Callable callable = Builtins::CallableFor(isolate_, name);
- return IntrinsicAsStubCall(args, context, callable);
-}
-
TNode<Object> IntrinsicsGenerator::CopyDataProperties(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsStubCall(
- args, context,
- Builtins::CallableFor(isolate(), Builtins::kCopyDataProperties));
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kCopyDataProperties,
+ arg_count);
}
TNode<Object> IntrinsicsGenerator::CreateIterResultObject(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsStubCall(
- args, context,
- Builtins::CallableFor(isolate(), Builtins::kCreateIterResultObject));
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context,
+ Builtins::kCreateIterResultObject, arg_count);
}
TNode<Object> IntrinsicsGenerator::HasProperty(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsStubCall(
- args, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty));
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kHasProperty,
+ arg_count);
}
TNode<Object> IntrinsicsGenerator::ToString(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsStubCall(
- args, context, Builtins::CallableFor(isolate(), Builtins::kToString));
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kToString, arg_count);
}
TNode<Object> IntrinsicsGenerator::ToLength(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsStubCall(
- args, context, Builtins::CallableFor(isolate(), Builtins::kToLength));
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kToLength, arg_count);
}
TNode<Object> IntrinsicsGenerator::ToObject(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsStubCall(
- args, context, Builtins::CallableFor(isolate(), Builtins::kToObject));
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kToObject, arg_count);
}
TNode<Object> IntrinsicsGenerator::Call(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
// First argument register contains the function target.
TNode<Object> function = __ LoadRegisterFromRegisterList(args, 0);
@@ -258,7 +259,8 @@ TNode<Object> IntrinsicsGenerator::Call(
}
TNode<Object> IntrinsicsGenerator::CreateAsyncFromSyncIterator(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
InterpreterAssembler::Label not_receiver(
assembler_, InterpreterAssembler::Label::kDeferred);
InterpreterAssembler::Label done(assembler_);
@@ -299,13 +301,15 @@ TNode<Object> IntrinsicsGenerator::CreateAsyncFromSyncIterator(
}
TNode<Object> IntrinsicsGenerator::CreateJSGeneratorObject(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context,
- Builtins::kCreateGeneratorObject);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kCreateGeneratorObject,
+ arg_count);
}
TNode<Object> IntrinsicsGenerator::GeneratorGetResumeMode(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
TNode<JSGeneratorObject> generator =
__ CAST(__ LoadRegisterFromRegisterList(args, 0));
const TNode<Object> value =
@@ -315,7 +319,8 @@ TNode<Object> IntrinsicsGenerator::GeneratorGetResumeMode(
}
TNode<Object> IntrinsicsGenerator::GeneratorClose(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
TNode<JSGeneratorObject> generator =
__ CAST(__ LoadRegisterFromRegisterList(args, 0));
__ StoreObjectFieldNoWriteBarrier(
@@ -325,7 +330,8 @@ TNode<Object> IntrinsicsGenerator::GeneratorClose(
}
TNode<Object> IntrinsicsGenerator::GetImportMetaObject(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
const TNode<Context> module_context = __ LoadModuleContext(context);
const TNode<HeapObject> module =
__ CAST(__ LoadContextElement(module_context, Context::EXTENSION_INDEX));
@@ -346,58 +352,73 @@ TNode<Object> IntrinsicsGenerator::GetImportMetaObject(
}
TNode<Object> IntrinsicsGenerator::AsyncFunctionAwaitCaught(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
return IntrinsicAsBuiltinCall(args, context,
- Builtins::kAsyncFunctionAwaitCaught);
+ Builtins::kAsyncFunctionAwaitCaught, arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncFunctionAwaitUncaught(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context,
- Builtins::kAsyncFunctionAwaitUncaught);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(
+ args, context, Builtins::kAsyncFunctionAwaitUncaught, arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncFunctionEnter(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionEnter);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionEnter,
+ arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncFunctionReject(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionReject);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionReject,
+ arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncFunctionResolve(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionResolve);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncFunctionResolve,
+ arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncGeneratorAwaitCaught(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context,
- Builtins::kAsyncGeneratorAwaitCaught);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(
+ args, context, Builtins::kAsyncGeneratorAwaitCaught, arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncGeneratorAwaitUncaught(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context,
- Builtins::kAsyncGeneratorAwaitUncaught);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(
+ args, context, Builtins::kAsyncGeneratorAwaitUncaught, arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncGeneratorReject(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorReject);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorReject,
+ arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncGeneratorResolve(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context,
- Builtins::kAsyncGeneratorResolve);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorResolve,
+ arg_count);
}
TNode<Object> IntrinsicsGenerator::AsyncGeneratorYield(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context) {
- return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorYield);
+ const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
+ int arg_count) {
+ return IntrinsicAsBuiltinCall(args, context, Builtins::kAsyncGeneratorYield,
+ arg_count);
}
void IntrinsicsGenerator::AbortIfArgCountMismatch(int expected,
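IntrinsicAsBuiltinCall now receives the intrinsic's expected argument count and expands it through a switch into a fixed-arity CallStub, replacing the old IntrinsicAsStubCall path that assembled an argument array in a zone. A minimal sketch of that dispatch shape, with illustrative names:

#include <cassert>
#include <iostream>

// A runtime count selects a fixed-arity call, as in IntrinsicAsBuiltinCall.
int CallBuiltin(int a) { return a; }
int CallBuiltin(int a, int b) { return a + b; }
int CallBuiltin(int a, int b, int c) { return a + b + c; }

int Dispatch(const int* regs, int arg_count) {
  switch (arg_count) {
    case 1:
      return CallBuiltin(regs[0]);
    case 2:
      return CallBuiltin(regs[0], regs[1]);
    case 3:
      return CallBuiltin(regs[0], regs[1], regs[2]);
    default:
      assert(false && "unreachable");  // mirrors UNREACHABLE()
      return 0;
  }
}

int main() {
  int regs[3] = {1, 2, 3};
  std::cout << Dispatch(regs, 3) << "\n";  // prints 6
  return 0;
}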
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 702c0474bc..40fdc60337 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -35,6 +35,9 @@ class InterpreterCompilationJob final : public UnoptimizedCompilationJob {
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
std::vector<FunctionLiteral*>* eager_inner_literals);
+ InterpreterCompilationJob(const InterpreterCompilationJob&) = delete;
+ InterpreterCompilationJob& operator=(const InterpreterCompilationJob&) =
+ delete;
protected:
Status ExecuteJobImpl() final;
@@ -57,8 +60,6 @@ class InterpreterCompilationJob final : public UnoptimizedCompilationJob {
Zone zone_;
UnoptimizedCompilationInfo compilation_info_;
BytecodeGenerator generator_;
-
- DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob);
};
Interpreter::Interpreter(Isolate* isolate)
@@ -78,12 +79,21 @@ Interpreter::Interpreter(Isolate* isolate)
namespace {
int BuiltinIndexFromBytecode(Bytecode bytecode, OperandScale operand_scale) {
- int index = BytecodeOperands::OperandScaleAsIndex(operand_scale) *
- kNumberOfBytecodeHandlers +
- static_cast<int>(bytecode);
- int offset = kBytecodeToBuiltinsMapping[index];
- return offset >= 0 ? Builtins::kFirstBytecodeHandler + offset
- : Builtins::kIllegalHandler;
+ int index = static_cast<int>(bytecode);
+ if (operand_scale != OperandScale::kSingle) {
+ // The table contains uint8_t offsets starting at 0 with
+ // kIllegalBytecodeHandlerEncoding for illegal bytecode/scale combinations.
+ uint8_t offset = kWideBytecodeToBuiltinsMapping[index];
+ if (offset == kIllegalBytecodeHandlerEncoding) {
+ return Builtins::kIllegalHandler;
+ } else {
+ index = kNumberOfBytecodeHandlers + offset;
+ if (operand_scale == OperandScale::kQuadruple) {
+ index += kNumberOfWideBytecodeHandlers;
+ }
+ }
+ }
+ return Builtins::kFirstBytecodeHandler + index;
}
} // namespace
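The rewritten BuiltinIndexFromBytecode assumes handlers are laid out as one dense block of single-width handlers followed by wide and extra-wide blocks, with a uint8_t side table giving each bytecode's offset inside those blocks (or a sentinel when no wide form exists). A sketch of the arithmetic with made-up table sizes and contents:

#include <cstdint>
#include <iostream>

// Layout: [single-width handlers][wide handlers][extra-wide handlers].
constexpr int kNumberOfBytecodeHandlers = 4;      // illustrative
constexpr int kNumberOfWideBytecodeHandlers = 2;  // illustrative
constexpr uint8_t kIllegalBytecodeHandlerEncoding = 0xFF;
// Offset of each bytecode inside the wide block; 0xFF = no wide form.
constexpr uint8_t kWideBytecodeToBuiltinsMapping[kNumberOfBytecodeHandlers] = {
    0, kIllegalBytecodeHandlerEncoding, 1, kIllegalBytecodeHandlerEncoding};

enum class Scale { kSingle, kDouble, kQuadruple };

int HandlerIndex(int bytecode, Scale scale, int first_handler, int illegal) {
  int index = bytecode;
  if (scale != Scale::kSingle) {
    uint8_t offset = kWideBytecodeToBuiltinsMapping[index];
    if (offset == kIllegalBytecodeHandlerEncoding) return illegal;
    index = kNumberOfBytecodeHandlers + offset;
    if (scale == Scale::kQuadruple) index += kNumberOfWideBytecodeHandlers;
  }
  return first_handler + index;
}

int main() {
  std::cout << HandlerIndex(2, Scale::kDouble, 100, -1) << "\n";  // 105
  return 0;
}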
@@ -250,7 +260,7 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::DoFinalizeJobImpl(
SourcePositionTableBuilder::RecordingMode::RECORD_SOURCE_POSITIONS) {
Handle<ByteArray> source_position_table =
generator()->FinalizeSourcePositionTable(isolate);
- bytecodes->set_synchronized_source_position_table(*source_position_table);
+ bytecodes->set_source_position_table(*source_position_table, kReleaseStore);
}
if (ShouldPrintBytecode(shared_info)) {
@@ -286,7 +296,7 @@ Interpreter::NewSourcePositionCollectionJob(
auto job = std::make_unique<InterpreterCompilationJob>(parse_info, literal,
allocator, nullptr);
job->compilation_info()->SetBytecodeArray(existing_bytecode);
- return std::unique_ptr<UnoptimizedCompilationJob> { static_cast<UnoptimizedCompilationJob*>(job.release()) };
+ return job;
}
void Interpreter::ForEachBytecode(
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 3ef28fdfbf..3bbd93fffb 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -37,6 +37,8 @@ class Interpreter {
public:
explicit Interpreter(Isolate* isolate);
virtual ~Interpreter() = default;
+ Interpreter(const Interpreter&) = delete;
+ Interpreter& operator=(const Interpreter&) = delete;
// Creates a compilation job which will generate bytecode for |literal|.
// Additionally, if |eager_inner_literals| is not null, adds any eagerly
@@ -105,8 +107,6 @@ class Interpreter {
Address dispatch_table_[kDispatchTableSize];
std::unique_ptr<uintptr_t[]> bytecode_dispatch_counters_table_;
Address interpreter_entry_trampoline_instruction_start_;
-
- DISALLOW_COPY_AND_ASSIGN(Interpreter);
};
} // namespace interpreter
diff --git a/deps/v8/src/json/DIR_METADATA b/deps/v8/src/json/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/json/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
diff --git a/deps/v8/src/json/OWNERS b/deps/v8/src/json/OWNERS
index 48b6dfd658..85599ae570 100644
--- a/deps/v8/src/json/OWNERS
+++ b/deps/v8/src/json/OWNERS
@@ -1,5 +1,3 @@
ishell@chromium.org
jkummerow@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/json/json-parser.cc b/deps/v8/src/json/json-parser.cc
index d099fa36cb..13d4d6c91d 100644
--- a/deps/v8/src/json/json-parser.cc
+++ b/deps/v8/src/json/json-parser.cc
@@ -273,8 +273,7 @@ void JsonParser<Char>::ReportUnexpectedToken(JsonToken token) {
// separated source file.
isolate()->debug()->OnCompileError(script);
MessageLocation location(script, pos, pos + 1);
- Handle<Object> error = factory->NewSyntaxError(message, arg1, arg2);
- isolate()->Throw(*error, &location);
+ isolate()->ThrowAt(factory->NewSyntaxError(message, arg1, arg2), &location);
// Move the cursor to the end so we won't be able to proceed parsing.
cursor_ = end_;
@@ -464,9 +463,10 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
Handle<Map> target;
InternalIndex descriptor_index(descriptor);
if (descriptor < feedback_descriptors) {
- expected = handle(String::cast(feedback->instance_descriptors().GetKey(
- descriptor_index)),
- isolate_);
+ expected =
+ handle(String::cast(feedback->instance_descriptors(kRelaxedLoad)
+ .GetKey(descriptor_index)),
+ isolate_);
} else {
DisallowHeapAllocation no_gc;
TransitionsAccessor transitions(isolate(), *map, &no_gc);
@@ -497,7 +497,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
Handle<Object> value = property.value;
PropertyDetails details =
- target->instance_descriptors().GetDetails(descriptor_index);
+ target->instance_descriptors(kRelaxedLoad).GetDetails(descriptor_index);
Representation expected_representation = details.representation();
if (!value->FitsRepresentation(expected_representation)) {
@@ -512,7 +512,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
Map::GeneralizeField(isolate(), target, descriptor_index,
details.constness(), representation, value_type);
} else if (expected_representation.IsHeapObject() &&
- !target->instance_descriptors()
+ !target->instance_descriptors(kRelaxedLoad)
.GetFieldType(descriptor_index)
.NowContains(value)) {
Handle<FieldType> value_type =
@@ -525,7 +525,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
new_mutable_double++;
}
- DCHECK(target->instance_descriptors()
+ DCHECK(target->instance_descriptors(kRelaxedLoad)
.GetFieldType(descriptor_index)
.NowContains(value));
map = target;
@@ -575,7 +575,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
if (property.string.is_index()) continue;
InternalIndex descriptor_index(descriptor);
PropertyDetails details =
- map->instance_descriptors().GetDetails(descriptor_index);
+ map->instance_descriptors(kRelaxedLoad).GetDetails(descriptor_index);
Object value = *property.value;
FieldIndex index = FieldIndex::ForDescriptor(*map, descriptor_index);
descriptor++;
diff --git a/deps/v8/src/json/json-stringifier.cc b/deps/v8/src/json/json-stringifier.cc
index 47d6a0ddad..0dabdd3082 100644
--- a/deps/v8/src/json/json-stringifier.cc
+++ b/deps/v8/src/json/json-stringifier.cc
@@ -772,11 +772,13 @@ JsonStringifier::Result JsonStringifier::SerializeJSObject(
Indent();
bool comma = false;
for (InternalIndex i : map->IterateOwnDescriptors()) {
- Handle<Name> name(map->instance_descriptors().GetKey(i), isolate_);
+ Handle<Name> name(map->instance_descriptors(kRelaxedLoad).GetKey(i),
+ isolate_);
// TODO(rossberg): Should this throw?
if (!name->IsString()) continue;
Handle<String> key = Handle<String>::cast(name);
- PropertyDetails details = map->instance_descriptors().GetDetails(i);
+ PropertyDetails details =
+ map->instance_descriptors(kRelaxedLoad).GetDetails(i);
if (details.IsDontEnum()) continue;
Handle<Object> property;
if (details.location() == kField && *map == object->map()) {
diff --git a/deps/v8/src/libplatform/DIR_METADATA b/deps/v8/src/libplatform/DIR_METADATA
new file mode 100644
index 0000000000..a27ea1b53a
--- /dev/null
+++ b/deps/v8/src/libplatform/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>API"
+}
\ No newline at end of file
diff --git a/deps/v8/src/libplatform/OWNERS b/deps/v8/src/libplatform/OWNERS
index 7ec6602147..65e7ba445c 100644
--- a/deps/v8/src/libplatform/OWNERS
+++ b/deps/v8/src/libplatform/OWNERS
@@ -1,4 +1,2 @@
mlippautz@chromium.org
ulan@chromium.org
-
-# COMPONENT: Blink>JavaScript>API
diff --git a/deps/v8/src/libplatform/default-job.cc b/deps/v8/src/libplatform/default-job.cc
index 728c1f5b28..8a8064c24c 100644
--- a/deps/v8/src/libplatform/default-job.cc
+++ b/deps/v8/src/libplatform/default-job.cc
@@ -122,10 +122,15 @@ void DefaultJobState::CancelAndWait() {
}
}
-bool DefaultJobState::IsCompleted() {
+void DefaultJobState::CancelAndDetach() {
base::MutexGuard guard(&mutex_);
- return job_task_->GetMaxConcurrency(active_workers_) == 0 &&
- active_workers_ == 0;
+ is_canceled_.store(true, std::memory_order_relaxed);
+}
+
+bool DefaultJobState::IsActive() {
+ base::MutexGuard guard(&mutex_);
+ return job_task_->GetMaxConcurrency(active_workers_) != 0 ||
+ active_workers_ != 0;
}
bool DefaultJobState::CanRunFirstTask() {
@@ -204,6 +209,11 @@ void DefaultJobState::CallOnWorkerThread(TaskPriority priority,
}
}
+void DefaultJobState::UpdatePriority(TaskPriority priority) {
+ base::MutexGuard guard(&mutex_);
+ priority_ = priority;
+}
+
DefaultJobHandle::DefaultJobHandle(std::shared_ptr<DefaultJobState> state)
: state_(std::move(state)) {
state_->NotifyConcurrencyIncrease();
@@ -220,7 +230,16 @@ void DefaultJobHandle::Cancel() {
state_ = nullptr;
}
-bool DefaultJobHandle::IsCompleted() { return state_->IsCompleted(); }
+void DefaultJobHandle::CancelAndDetach() {
+ state_->CancelAndDetach();
+ state_ = nullptr;
+}
+
+bool DefaultJobHandle::IsActive() { return state_->IsActive(); }
+
+void DefaultJobHandle::UpdatePriority(TaskPriority priority) {
+ state_->UpdatePriority(priority);
+}
} // namespace platform
} // namespace v8
diff --git a/deps/v8/src/libplatform/default-job.h b/deps/v8/src/libplatform/default-job.h
index 15517f49ac..082fa1ef6f 100644
--- a/deps/v8/src/libplatform/default-job.h
+++ b/deps/v8/src/libplatform/default-job.h
@@ -54,7 +54,8 @@ class V8_PLATFORM_EXPORT DefaultJobState
void Join();
void CancelAndWait();
- bool IsCompleted();
+ void CancelAndDetach();
+ bool IsActive();
// Must be called before running |job_task_| for the first time. If it returns
// true, then the worker thread must contribute and must call DidRunTask(), or
@@ -64,6 +65,8 @@ class V8_PLATFORM_EXPORT DefaultJobState
// must contribute again, or false if it should return.
bool DidRunTask();
+ void UpdatePriority(TaskPriority);
+
private:
// Called from the joining thread. Waits for the worker count to be below or
// equal to max concurrency (will happen when a worker calls
@@ -109,8 +112,15 @@ class V8_PLATFORM_EXPORT DefaultJobHandle : public JobHandle {
void Join() override;
void Cancel() override;
- bool IsCompleted() override;
- bool IsRunning() override { return state_ != nullptr; }
+ void CancelAndDetach() override;
+ bool IsCompleted() override { return !IsActive(); }
+ bool IsActive() override;
+ bool IsRunning() override { return IsValid(); }
+ bool IsValid() override { return state_ != nullptr; }
+
+ bool UpdatePriorityEnabled() const override { return true; }
+
+ void UpdatePriority(TaskPriority) override;
private:
std::shared_ptr<DefaultJobState> state_;
@@ -127,9 +137,11 @@ class DefaultJobWorker : public Task {
void Run() override {
auto shared_state = state_.lock();
if (!shared_state) return;
- DefaultJobState::JobDelegate delegate(shared_state.get());
if (!shared_state->CanRunFirstTask()) return;
do {
+ // Scope of |delegate| must not outlive DidRunTask() so that associated
+ // state is freed before the worker becomes inactive.
+ DefaultJobState::JobDelegate delegate(shared_state.get());
job_task_->Run(&delegate);
} while (shared_state->DidRunTask());
}
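The job API changes above split cancellation into CancelAndWait (drain workers) and the new CancelAndDetach (flip the cancel flag and return immediately), and add IsActive() plus a mutex-guarded UpdatePriority(). A stripped-down sketch of that state surface; the plain fields stand in for the real job-task callbacks:

#include <atomic>
#include <mutex>

enum class TaskPriority { kBestEffort, kUserVisible, kUserBlocking };

class JobState {
 public:
  void CancelAndDetach() {
    std::lock_guard<std::mutex> guard(mutex_);
    is_canceled_.store(true, std::memory_order_relaxed);  // no waiting
  }
  bool IsActive() {
    std::lock_guard<std::mutex> guard(mutex_);
    // Active while the task still wants concurrency or workers are running.
    return max_concurrency_ != 0 || active_workers_ != 0;
  }
  void UpdatePriority(TaskPriority priority) {
    std::lock_guard<std::mutex> guard(mutex_);
    priority_ = priority;
  }

 private:
  std::mutex mutex_;
  std::atomic<bool> is_canceled_{false};
  int max_concurrency_ = 0;  // stands in for job_task_->GetMaxConcurrency()
  int active_workers_ = 0;
  TaskPriority priority_ = TaskPriority::kUserVisible;
};

int main() {
  JobState state;
  state.UpdatePriority(TaskPriority::kBestEffort);
  state.CancelAndDetach();
  return state.IsActive() ? 1 : 0;
}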
diff --git a/deps/v8/src/libsampler/DIR_METADATA b/deps/v8/src/libsampler/DIR_METADATA
new file mode 100644
index 0000000000..3ba1106a5f
--- /dev/null
+++ b/deps/v8/src/libsampler/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Platform>DevTools>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/libsampler/OWNERS b/deps/v8/src/libsampler/OWNERS
index 7ab7c063da..6afd4d0fee 100644
--- a/deps/v8/src/libsampler/OWNERS
+++ b/deps/v8/src/libsampler/OWNERS
@@ -1,4 +1,2 @@
alph@chromium.org
petermarshall@chromium.org
-
-# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index 9631d2f478..1dac546262 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -329,9 +329,9 @@ class SignalHandler {
sa.sa_sigaction = &HandleProfilerSignal;
sigemptyset(&sa.sa_mask);
#if V8_OS_QNX
- sa.sa_flags = SA_SIGINFO;
+ sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
#else
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ sa.sa_flags = SA_RESTART | SA_SIGINFO | SA_ONSTACK;
#endif
signal_handler_installed_ =
(sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
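Adding SA_ONSTACK makes the SIGPROF handler run on the thread's alternate signal stack, which only takes effect once sigaltstack() has registered one. A minimal POSIX sketch pairing the two (error handling trimmed):

#include <csignal>
#include <cstdlib>

static void Handler(int, siginfo_t*, void*) { /* sample here */ }

int main() {
  // Register an alternate stack; SA_ONSTACK is a no-op without it.
  stack_t ss;
  ss.ss_sp = std::malloc(SIGSTKSZ);
  ss.ss_size = SIGSTKSZ;
  ss.ss_flags = 0;
  if (sigaltstack(&ss, nullptr) != 0) return 1;

  struct sigaction sa;
  sa.sa_sigaction = &Handler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_RESTART | SA_SIGINFO | SA_ONSTACK;  // as in the diff
  if (sigaction(SIGPROF, &sa, nullptr) != 0) return 1;

  raise(SIGPROF);  // handler now runs on the alternate stack
  return 0;
}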
diff --git a/deps/v8/src/logging/counters-definitions.h b/deps/v8/src/logging/counters-definitions.h
index 4623dc4b8a..9cb58dd9fc 100644
--- a/deps/v8/src/logging/counters-definitions.h
+++ b/deps/v8/src/logging/counters-definitions.h
@@ -16,9 +16,9 @@ namespace internal {
HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6) \
HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20) \
HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7) \
- HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 22, 23) \
+ HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 25, 26) \
HR(incremental_marking_sum, V8.GCIncrementalMarkingSum, 0, 10000, 101) \
- HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 22, 23) \
+ HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 25, 26) \
HR(gc_finalize_clear, V8.GCFinalizeMC.Clear, 0, 10000, 101) \
HR(gc_finalize_epilogue, V8.GCFinalizeMC.Epilogue, 0, 10000, 101) \
HR(gc_finalize_evacuate, V8.GCFinalizeMC.Evacuate, 0, 10000, 101) \
@@ -33,7 +33,7 @@ namespace internal {
/* Range and bucket matches BlinkGC.MainThreadMarkingThroughput. */ \
HR(gc_main_thread_marking_throughput, V8.GCMainThreadMarkingThroughput, 0, \
100000, 50) \
- HR(scavenge_reason, V8.GCScavengeReason, 0, 22, 23) \
+ HR(scavenge_reason, V8.GCScavengeReason, 0, 25, 26) \
HR(young_generation_handling, V8.GCYoungGenerationHandling, 0, 2, 3) \
/* Asm/Wasm. */ \
HR(wasm_functions_per_asm_module, V8.WasmFunctionsPerModule.asm, 1, 1000000, \
@@ -138,9 +138,10 @@ namespace internal {
HT(gc_scavenger, V8.GCScavenger, 10000, MILLISECOND) \
HT(gc_scavenger_background, V8.GCScavengerBackground, 10000, MILLISECOND) \
HT(gc_scavenger_foreground, V8.GCScavengerForeground, 10000, MILLISECOND) \
- HT(time_to_safepoint, V8.TimeToSafepoint, 10000, MILLISECOND) \
HT(measure_memory_delay_ms, V8.MeasureMemoryDelayMilliseconds, 100000, \
MILLISECOND) \
+ HT(stop_the_world, V8.StopTheWorld, 10000, MICROSECOND) \
+ HT(time_to_collection, V8.TimeToCollection, 10000, MICROSECOND) \
/* TurboFan timers. */ \
HT(turbofan_optimize_prepare, V8.TurboFanOptimizePrepare, 1000000, \
MICROSECOND) \
@@ -183,8 +184,6 @@ namespace internal {
1000000, MICROSECOND) \
HT(wasm_compile_wasm_function_time, V8.WasmCompileFunctionMicroSeconds.wasm, \
1000000, MICROSECOND) \
- HT(liftoff_compile_time, V8.LiftoffCompileMicroSeconds, 10000000, \
- MICROSECOND) \
HT(wasm_instantiate_wasm_module_time, \
V8.WasmInstantiateModuleMicroSeconds.wasm, 10000000, MICROSECOND) \
HT(wasm_instantiate_asm_module_time, \
diff --git a/deps/v8/src/logging/counters.cc b/deps/v8/src/logging/counters.cc
index 986848361e..c9c9aa0ebe 100644
--- a/deps/v8/src/logging/counters.cc
+++ b/deps/v8/src/logging/counters.cc
@@ -7,6 +7,7 @@
#include <iomanip>
#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
#include "src/builtins/builtins-definitions.h"
#include "src/execution/isolate.h"
#include "src/logging/counters-inl.h"
@@ -79,6 +80,15 @@ void* Histogram::CreateHistogram() const {
return counters_->CreateHistogram(name_, min_, max_, num_buckets_);
}
+void TimedHistogram::AddTimedSample(base::TimeDelta sample) {
+ if (Enabled()) {
+ int64_t sample_int = resolution_ == HistogramTimerResolution::MICROSECOND
+ ? sample.InMicroseconds()
+ : sample.InMilliseconds();
+ AddSample(static_cast<int>(sample_int));
+ }
+}
+
void TimedHistogram::Start(base::ElapsedTimer* timer, Isolate* isolate) {
if (Enabled()) timer->Start();
if (isolate) Logger::CallEventLogger(isolate, name(), Logger::START, true);
@@ -86,11 +96,9 @@ void TimedHistogram::Start(base::ElapsedTimer* timer, Isolate* isolate) {
void TimedHistogram::Stop(base::ElapsedTimer* timer, Isolate* isolate) {
if (Enabled()) {
- int64_t sample = resolution_ == HistogramTimerResolution::MICROSECOND
- ? timer->Elapsed().InMicroseconds()
- : timer->Elapsed().InMilliseconds();
+ base::TimeDelta delta = timer->Elapsed();
timer->Stop();
- AddSample(static_cast<int>(sample));
+ AddTimedSample(delta);
}
if (isolate != nullptr) {
Logger::CallEventLogger(isolate, name(), Logger::END, true);
@@ -601,6 +609,7 @@ WorkerThreadRuntimeCallStats::~WorkerThreadRuntimeCallStats() {
}
base::Thread::LocalStorageKey WorkerThreadRuntimeCallStats::GetKey() {
+ base::MutexGuard lock(&mutex_);
DCHECK(TracingFlags::is_runtime_stats_enabled());
if (!tls_key_) tls_key_ = base::Thread::CreateThreadLocalKey();
return *tls_key_;
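TimedHistogram::Stop() now funnels through the new AddTimedSample(), so the resolution-dependent conversion from a time delta to an integer sample lives in one place and callers can also record pre-measured deltas. A chrono-based sketch of that conversion point, with illustrative names:

#include <chrono>
#include <cstdint>
#include <iostream>

enum class Resolution { kMicrosecond, kMillisecond };

class TimedHistogram {
 public:
  explicit TimedHistogram(Resolution r) : resolution_(r) {}
  // One conversion point from a duration to a sample, as in the diff.
  void AddTimedSample(std::chrono::nanoseconds sample) {
    using namespace std::chrono;
    int64_t value = resolution_ == Resolution::kMicrosecond
                        ? duration_cast<microseconds>(sample).count()
                        : duration_cast<milliseconds>(sample).count();
    AddSample(static_cast<int>(value));
  }

 private:
  void AddSample(int value) { std::cout << "sample: " << value << "\n"; }
  Resolution resolution_;
};

int main() {
  TimedHistogram h(Resolution::kMicrosecond);
  h.AddTimedSample(std::chrono::milliseconds(3));  // records 3000
  return 0;
}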
diff --git a/deps/v8/src/logging/counters.h b/deps/v8/src/logging/counters.h
index b1b61aca0a..cb879e3c23 100644
--- a/deps/v8/src/logging/counters.h
+++ b/deps/v8/src/logging/counters.h
@@ -276,6 +276,9 @@ class TimedHistogram : public Histogram {
// that never got to run in a given scenario. Log if isolate non-null.
void RecordAbandon(base::ElapsedTimer* timer, Isolate* isolate);
+ // Add a single sample to this histogram.
+ void AddTimedSample(base::TimeDelta sample);
+
protected:
friend class Counters;
HistogramTimerResolution resolution_;
@@ -736,6 +739,7 @@ class RuntimeCallTimer final {
V(Float64Array_New) \
V(Function_Call) \
V(Function_New) \
+ V(Function_FunctionProtoToString) \
V(Function_NewInstance) \
V(FunctionTemplate_GetFunction) \
V(FunctionTemplate_New) \
@@ -747,7 +751,6 @@ class RuntimeCallTimer final {
V(Int8Array_New) \
V(Isolate_DateTimeConfigurationChangeNotification) \
V(Isolate_LocaleConfigurationChangeNotification) \
- V(JSMemberBase_New) \
V(JSON_Parse) \
V(JSON_Stringify) \
V(Map_AsArray) \
@@ -788,6 +791,7 @@ class RuntimeCallTimer final {
V(Object_HasRealIndexedProperty) \
V(Object_HasRealNamedCallbackProperty) \
V(Object_HasRealNamedProperty) \
+ V(Object_IsCodeLike) \
V(Object_New) \
V(Object_ObjectProtoToString) \
V(Object_Set) \
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index efd7c2b5f3..61909ae967 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -78,11 +78,13 @@ static v8::CodeEventType GetCodeEventTypeForTag(
}
static const char* ComputeMarker(SharedFunctionInfo shared, AbstractCode code) {
+ // TODO(mythria,jgruber): Use different markers for Turboprop/NCI.
switch (code.kind()) {
case CodeKind::INTERPRETED_FUNCTION:
return shared.optimization_disabled() ? "" : "~";
- case CodeKind::OPTIMIZED_FUNCTION:
+ case CodeKind::TURBOFAN:
case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
+ case CodeKind::TURBOPROP:
return "*";
default:
return "";
@@ -991,6 +993,13 @@ Logger::~Logger() = default;
const LogSeparator Logger::kNext = LogSeparator::kSeparator;
+int64_t Logger::Time() {
+ if (V8_UNLIKELY(FLAG_verify_predictable)) {
+ return isolate_->heap()->MonotonicallyIncreasingTimeInMs() * 1000;
+ }
+ return timer_.Elapsed().InMicroseconds();
+}
+
void Logger::AddCodeEventListener(CodeEventListener* listener) {
bool result = isolate_->code_event_dispatcher()->AddListener(listener);
CHECK(result);
@@ -1069,7 +1078,7 @@ void Logger::CurrentTimeEvent() {
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- msg << "current-time" << kNext << timer_.Elapsed().InMicroseconds();
+ msg << "current-time" << kNext << Time();
msg.WriteToLogFile();
}
@@ -1087,7 +1096,7 @@ void Logger::TimerEvent(Logger::StartEnd se, const char* name) {
case STAMP:
msg << "timer-event";
}
- msg << kNext << name << kNext << timer_.Elapsed().InMicroseconds();
+ msg << kNext << name << kNext << Time();
msg.WriteToLogFile();
}
@@ -1190,22 +1199,20 @@ namespace {
void AppendCodeCreateHeader(
Log::MessageBuilder& msg, // NOLINT(runtime/references)
CodeEventListener::LogEventsAndTags tag, CodeKind kind, uint8_t* address,
- int size, base::ElapsedTimer* timer) {
+ int size, uint64_t time) {
msg << kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT]
<< Logger::kNext << kLogEventsNames[tag] << Logger::kNext
- << static_cast<int>(kind) << Logger::kNext
- << timer->Elapsed().InMicroseconds() << Logger::kNext
+ << static_cast<int>(kind) << Logger::kNext << time << Logger::kNext
<< reinterpret_cast<void*>(address) << Logger::kNext << size
<< Logger::kNext;
}
void AppendCodeCreateHeader(
Log::MessageBuilder& msg, // NOLINT(runtime/references)
- CodeEventListener::LogEventsAndTags tag, AbstractCode code,
- base::ElapsedTimer* timer) {
+ CodeEventListener::LogEventsAndTags tag, AbstractCode code, uint64_t time) {
AppendCodeCreateHeader(msg, tag, code.kind(),
reinterpret_cast<uint8_t*>(code.InstructionStart()),
- code.InstructionSize(), timer);
+ code.InstructionSize(), time);
}
} // namespace
@@ -1217,7 +1224,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Handle<AbstractCode> code,
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- AppendCodeCreateHeader(msg, tag, *code, &timer_);
+ AppendCodeCreateHeader(msg, tag, *code, Time());
msg << name;
msg.WriteToLogFile();
}
@@ -1229,7 +1236,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Handle<AbstractCode> code,
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- AppendCodeCreateHeader(msg, tag, *code, &timer_);
+ AppendCodeCreateHeader(msg, tag, *code, Time());
msg << *name;
msg.WriteToLogFile();
}
@@ -1247,7 +1254,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Handle<AbstractCode> code,
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- AppendCodeCreateHeader(msg, tag, *code, &timer_);
+ AppendCodeCreateHeader(msg, tag, *code, Time());
msg << *script_name << kNext << reinterpret_cast<void*>(shared->address())
<< kNext << ComputeMarker(*shared, *code);
msg.WriteToLogFile();
@@ -1265,7 +1272,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Handle<AbstractCode> code,
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- AppendCodeCreateHeader(msg, tag, *code, &timer_);
+ AppendCodeCreateHeader(msg, tag, *code, Time());
msg << shared->DebugName() << " " << *script_name << ":" << line << ":"
<< column << kNext << reinterpret_cast<void*>(shared->address())
<< kNext << ComputeMarker(*shared, *code);
@@ -1365,7 +1372,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
Log::MessageBuilder& msg = *msg_ptr.get();
AppendCodeCreateHeader(msg, tag, CodeKind::WASM_FUNCTION,
code->instructions().begin(),
- code->instructions().length(), &timer_);
+ code->instructions().length(), Time());
DCHECK(!name.empty());
msg.AppendString(name);
@@ -1388,9 +1395,8 @@ void Logger::CallbackEventInternal(const char* prefix, Handle<Name> name,
Log::MessageBuilder& msg = *msg_ptr.get();
msg << kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT] << kNext
<< kLogEventsNames[CodeEventListener::CALLBACK_TAG] << kNext << -2
- << kNext << timer_.Elapsed().InMicroseconds() << kNext
- << reinterpret_cast<void*>(entry_point) << kNext << 1 << kNext << prefix
- << *name;
+ << kNext << Time() << kNext << reinterpret_cast<void*>(entry_point)
+ << kNext << 1 << kNext << prefix << *name;
msg.WriteToLogFile();
}
@@ -1413,7 +1419,7 @@ void Logger::RegExpCodeCreateEvent(Handle<AbstractCode> code,
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- AppendCodeCreateHeader(msg, CodeEventListener::REG_EXP_TAG, *code, &timer_);
+ AppendCodeCreateHeader(msg, CodeEventListener::REG_EXP_TAG, *code, Time());
msg << *source;
msg.WriteToLogFile();
}
@@ -1453,8 +1459,7 @@ void Logger::ProcessDeoptEvent(Handle<Code> code, SourcePosition position,
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- msg << "code-deopt" << kNext << timer_.Elapsed().InMicroseconds() << kNext
- << code->CodeSize() << kNext
+ msg << "code-deopt" << kNext << Time() << kNext << code->CodeSize() << kNext
<< reinterpret_cast<void*>(code->InstructionStart());
std::ostringstream deopt_location;
@@ -1578,11 +1583,16 @@ namespace {
void AppendFunctionMessage(
Log::MessageBuilder& msg, // NOLINT(runtime/references)
const char* reason, int script_id, double time_delta, int start_position,
- int end_position, base::ElapsedTimer* timer) {
+ int end_position, uint64_t time) {
msg << "function" << Logger::kNext << reason << Logger::kNext << script_id
<< Logger::kNext << start_position << Logger::kNext << end_position
- << Logger::kNext << time_delta << Logger::kNext
- << timer->Elapsed().InMicroseconds() << Logger::kNext;
+ << Logger::kNext;
+ if (V8_UNLIKELY(FLAG_predictable)) {
+ msg << 0.1;
+ } else {
+ msg << time_delta;
+ }
+ msg << Logger::kNext << time << Logger::kNext;
}
} // namespace
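[Note] Under --predictable a real wall-clock delta would make otherwise-identical runs produce different logs, so the function event writes a constant 0.1 placeholder instead. A sketch of the resulting log line (field order read off the stream operations above; values illustrative):

    function,<reason>,<script-id>,<start>,<end>,0.1,<time>,<name>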
@@ -1594,7 +1604,7 @@ void Logger::FunctionEvent(const char* reason, int script_id, double time_delta,
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
AppendFunctionMessage(msg, reason, script_id, time_delta, start_position,
- end_position, &timer_);
+ end_position, Time());
if (!function_name.is_null()) msg << function_name;
msg.WriteToLogFile();
}
@@ -1608,7 +1618,7 @@ void Logger::FunctionEvent(const char* reason, int script_id, double time_delta,
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
AppendFunctionMessage(msg, reason, script_id, time_delta, start_position,
- end_position, &timer_);
+ end_position, Time());
if (function_name_length > 0) {
msg.AppendString(function_name, function_name_length, is_one_byte);
}
@@ -1628,7 +1638,7 @@ void Logger::CompilationCacheEvent(const char* action, const char* cache_type,
msg << "compilation-cache" << Logger::kNext << action << Logger::kNext
<< cache_type << Logger::kNext << script_id << Logger::kNext
<< sfi.StartPosition() << Logger::kNext << sfi.EndPosition()
- << Logger::kNext << timer_.Elapsed().InMicroseconds();
+ << Logger::kNext << Time();
msg.WriteToLogFile();
}
@@ -1655,8 +1665,7 @@ void Logger::ScriptEvent(ScriptEventType type, int script_id) {
msg << "streaming-compile";
break;
}
- msg << Logger::kNext << script_id << Logger::kNext
- << timer_.Elapsed().InMicroseconds();
+ msg << Logger::kNext << script_id << Logger::kNext << Time();
msg.WriteToLogFile();
}
@@ -1730,8 +1739,7 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
msg << kLogEventsNames[CodeEventListener::TICK_EVENT] << kNext
- << reinterpret_cast<void*>(sample->pc) << kNext
- << timer_.Elapsed().InMicroseconds();
+ << reinterpret_cast<void*>(sample->pc) << kNext << Time();
if (sample->has_external_callback) {
msg << kNext << 1 << kNext
<< reinterpret_cast<void*>(sample->external_callback_entry);
@@ -1757,9 +1765,9 @@ void Logger::ICEvent(const char* type, bool keyed, Handle<Map> map,
int line;
int column;
Address pc = isolate_->GetAbstractPC(&line, &column);
- msg << type << kNext << reinterpret_cast<void*>(pc) << kNext
- << timer_.Elapsed().InMicroseconds() << kNext << line << kNext << column
- << kNext << old_state << kNext << new_state << kNext
+ msg << type << kNext << reinterpret_cast<void*>(pc) << kNext << Time()
+ << kNext << line << kNext << column << kNext << old_state << kNext
+ << new_state << kNext
<< AsHex::Address(map.is_null() ? kNullAddress : map->ptr()) << kNext;
if (key->IsSmi()) {
msg << Smi::ToInt(*key);
@@ -1789,11 +1797,11 @@ void Logger::MapEvent(const char* type, Handle<Map> from, Handle<Map> to,
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- msg << "map" << kNext << type << kNext << timer_.Elapsed().InMicroseconds()
- << kNext << AsHex::Address(from.is_null() ? kNullAddress : from->ptr())
- << kNext << AsHex::Address(to.is_null() ? kNullAddress : to->ptr())
- << kNext << AsHex::Address(pc) << kNext << line << kNext << column
- << kNext << reason << kNext;
+ msg << "map" << kNext << type << kNext << Time() << kNext
+ << AsHex::Address(from.is_null() ? kNullAddress : from->ptr()) << kNext
+ << AsHex::Address(to.is_null() ? kNullAddress : to->ptr()) << kNext
+ << AsHex::Address(pc) << kNext << line << kNext << column << kNext
+ << reason << kNext;
if (!name_or_sfi.is_null()) {
if (name_or_sfi->IsName()) {
@@ -1815,8 +1823,7 @@ void Logger::MapCreate(Map map) {
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- msg << "map-create" << kNext << timer_.Elapsed().InMicroseconds() << kNext
- << AsHex::Address(map.ptr());
+ msg << "map-create" << kNext << Time() << kNext << AsHex::Address(map.ptr());
msg.WriteToLogFile();
}
@@ -1826,8 +1833,8 @@ void Logger::MapDetails(Map map) {
std::unique_ptr<Log::MessageBuilder> msg_ptr = log_->NewMessageBuilder();
if (!msg_ptr) return;
Log::MessageBuilder& msg = *msg_ptr.get();
- msg << "map-details" << kNext << timer_.Elapsed().InMicroseconds() << kNext
- << AsHex::Address(map.ptr()) << kNext;
+ msg << "map-details" << kNext << Time() << kNext << AsHex::Address(map.ptr())
+ << kNext;
if (FLAG_trace_maps_details) {
std::ostringstream buffer;
map.PrintMapDetails(buffer);
@@ -2061,22 +2068,17 @@ bool Logger::SetUp(Isolate* isolate) {
ticker_ = std::make_unique<Ticker>(isolate, FLAG_prof_sampling_interval);
- bool activate_logging = false;
-
- if (Log::InitLogAtStart()) activate_logging = true;
+ if (Log::InitLogAtStart()) UpdateIsLogging(true);
timer_.Start();
if (FLAG_prof_cpp) {
+ UpdateIsLogging(true);
profiler_ = std::make_unique<Profiler>(isolate);
- activate_logging = true;
profiler_->Engage();
}
- if (activate_logging) {
- AddCodeEventListener(this);
- UpdateIsLogging(true);
- }
+ if (is_logging_) AddCodeEventListener(this);
return true;
}
@@ -2162,12 +2164,13 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
const char* description = "Unknown code from before profiling";
switch (abstract_code->kind()) {
case CodeKind::INTERPRETED_FUNCTION:
- case CodeKind::OPTIMIZED_FUNCTION:
+ case CodeKind::TURBOFAN:
case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
+ case CodeKind::TURBOPROP:
return; // We log this later using LogCompiledFunctions.
case CodeKind::BYTECODE_HANDLER:
return; // We log it later by walking the dispatch table.
- case CodeKind::STUB:
+ case CodeKind::FOR_TESTING:
description = "STUB code";
tag = CodeEventListener::STUB_TAG;
break;
@@ -2237,7 +2240,7 @@ void ExistingCodeLogger::LogCompiledFunctions() {
// GetScriptLineNumber call.
for (int i = 0; i < compiled_funcs_count; ++i) {
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate_, sfis[i]);
- if (sfis[i]->function_data().IsInterpreterData()) {
+ if (sfis[i]->function_data(kAcquireLoad).IsInterpreterData()) {
LogExistingFunction(
sfis[i],
Handle<AbstractCode>(
@@ -2287,7 +2290,7 @@ void ExistingCodeLogger::LogExistingFunction(
} else if (shared->IsApiFunction()) {
// API function.
FunctionTemplateInfo fun_data = shared->get_api_func_data();
- Object raw_call_data = fun_data.call_code();
+ Object raw_call_data = fun_data.call_code(kAcquireLoad);
if (!raw_call_data.IsUndefined(isolate_)) {
CallHandlerInfo call_data = CallHandlerInfo::cast(raw_call_data);
Object callback_obj = call_data.callback();
diff --git a/deps/v8/src/logging/log.h b/deps/v8/src/logging/log.h
index aa9b0c4237..303fbd236a 100644
--- a/deps/v8/src/logging/log.h
+++ b/deps/v8/src/logging/log.h
@@ -314,6 +314,8 @@ class Logger : public CodeEventListener {
// each script is logged only once.
bool EnsureLogScriptSource(Script script);
+ int64_t Time();
+
Isolate* isolate_;
// The sampler used by the profiler and the sliding state window.
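[Note] The new Time() helper is only declared in this hunk; its definition does not appear in this diff. A minimal sketch of what the call sites above imply it centralizes (an assumption -- the real body may also special-case predictable mode):

    // Sketch only; not part of this diff.
    int64_t Logger::Time() {
      return timer_.Elapsed().InMicroseconds();
    }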
diff --git a/deps/v8/src/logging/metrics.h b/deps/v8/src/logging/metrics.h
index 615b8b5498..0b59deb1fd 100644
--- a/deps/v8/src/logging/metrics.h
+++ b/deps/v8/src/logging/metrics.h
@@ -85,9 +85,7 @@ template <class T, int64_t (base::TimeDelta::*precision)() const =
&base::TimeDelta::InMicroseconds>
class TimedScope {
public:
- TimedScope(T* event, int64_t T::*time) : event_(event), time_(time) {
- Start();
- }
+ explicit TimedScope(T* event) : event_(event) { Start(); }
~TimedScope() { Stop(); }
void Start() { start_time_ = base::TimeTicks::Now(); }
@@ -95,13 +93,12 @@ class TimedScope {
void Stop() {
if (start_time_.IsMin()) return;
base::TimeDelta duration = base::TimeTicks::Now() - start_time_;
- event_->*time_ = (duration.*precision)();
+ event_->wall_clock_duration_in_us = (duration.*precision)();
start_time_ = base::TimeTicks::Min();
}
private:
T* event_;
- int64_t T::*time_;
base::TimeTicks start_time_;
};
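[Note] With the pointer-to-member parameter removed, TimedScope now assumes every event type it wraps exposes a wall_clock_duration_in_us field. A hedged before/after sketch of a call site (the event type is illustrative):

    // Before: the destination field was named at the call site.
    //   TimedScope<WasmModuleDecoded> timed(
    //       &event, &WasmModuleDecoded::wall_clock_duration_in_us);
    // After: the field name is fixed by convention.
    //   TimedScope<WasmModuleDecoded> timed(&event);
    // On destruction, the elapsed microseconds land in
    // event.wall_clock_duration_in_us.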
diff --git a/deps/v8/src/numbers/DIR_METADATA b/deps/v8/src/numbers/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/numbers/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
diff --git a/deps/v8/src/numbers/OWNERS b/deps/v8/src/numbers/OWNERS
index 882d275fe8..c4022e3ada 100644
--- a/deps/v8/src/numbers/OWNERS
+++ b/deps/v8/src/numbers/OWNERS
@@ -3,5 +3,3 @@ jgruber@chromium.org
jkummerow@chromium.org
sigurds@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/objects/DIR_METADATA b/deps/v8/src/objects/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/objects/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
diff --git a/deps/v8/src/objects/OWNERS b/deps/v8/src/objects/OWNERS
index f52e1c9ca8..48d72aea5e 100644
--- a/deps/v8/src/objects/OWNERS
+++ b/deps/v8/src/objects/OWNERS
@@ -1,3 +1 @@
file:../../COMMON_OWNERS
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/objects/all-objects-inl.h b/deps/v8/src/objects/all-objects-inl.h
new file mode 100644
index 0000000000..6e7c7a59ce
--- /dev/null
+++ b/deps/v8/src/objects/all-objects-inl.h
@@ -0,0 +1,104 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_ALL_OBJECTS_INL_H_
+#define V8_OBJECTS_ALL_OBJECTS_INL_H_
+
+// This file includes all inline headers from src/objects, which is handy for
+// compilation units that need it like object printing or verification.
+// New inline headers should be added here.
+
+#include "src/objects/allocation-site-inl.h"
+#include "src/objects/allocation-site-scopes-inl.h"
+#include "src/objects/api-callbacks-inl.h"
+#include "src/objects/arguments-inl.h"
+#include "src/objects/bigint-inl.h"
+#include "src/objects/cell-inl.h"
+#include "src/objects/code-inl.h"
+#include "src/objects/compilation-cache-table-inl.h"
+#include "src/objects/compressed-slots-inl.h"
+#include "src/objects/contexts-inl.h"
+#include "src/objects/data-handler-inl.h"
+#include "src/objects/debug-objects-inl.h"
+#include "src/objects/descriptor-array-inl.h"
+#include "src/objects/dictionary-inl.h"
+#include "src/objects/elements-inl.h"
+#include "src/objects/embedder-data-array-inl.h"
+#include "src/objects/embedder-data-slot-inl.h"
+#include "src/objects/feedback-cell-inl.h"
+#include "src/objects/feedback-vector-inl.h"
+#include "src/objects/field-index-inl.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/foreign-inl.h"
+#include "src/objects/frame-array-inl.h"
+#include "src/objects/free-space-inl.h"
+#include "src/objects/hash-table-inl.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/heap-object-inl.h"
+#include "src/objects/instance-type-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
+#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-function-inl.h"
+#include "src/objects/js-generator-inl.h"
+#include "src/objects/js-objects-inl.h"
+#include "src/objects/js-promise-inl.h"
+#include "src/objects/js-proxy-inl.h"
+#include "src/objects/js-regexp-inl.h"
+#include "src/objects/js-regexp-string-iterator-inl.h"
+#include "src/objects/js-weak-refs-inl.h"
+#include "src/objects/layout-descriptor-inl.h"
+#include "src/objects/literal-objects-inl.h"
+#include "src/objects/lookup-cache-inl.h"
+#include "src/objects/lookup-inl.h"
+#include "src/objects/map-inl.h"
+#include "src/objects/maybe-object-inl.h"
+#include "src/objects/microtask-inl.h"
+#include "src/objects/module-inl.h"
+#include "src/objects/name-inl.h"
+#include "src/objects/objects-body-descriptors-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/oddball-inl.h"
+#include "src/objects/ordered-hash-table-inl.h"
+#include "src/objects/osr-optimized-code-cache-inl.h"
+#include "src/objects/primitive-heap-object-inl.h"
+#include "src/objects/promise-inl.h"
+#include "src/objects/property-array-inl.h"
+#include "src/objects/property-cell-inl.h"
+#include "src/objects/property-descriptor-object-inl.h"
+#include "src/objects/prototype-info-inl.h"
+#include "src/objects/script-inl.h"
+#include "src/objects/shared-function-info-inl.h"
+#include "src/objects/slots-atomic-inl.h"
+#include "src/objects/slots-inl.h"
+#include "src/objects/stack-frame-info-inl.h"
+#include "src/objects/string-inl.h"
+#include "src/objects/string-set-inl.h"
+#include "src/objects/string-table-inl.h"
+#include "src/objects/struct-inl.h"
+#include "src/objects/synthetic-module-inl.h"
+#include "src/objects/tagged-field-inl.h"
+#include "src/objects/tagged-impl-inl.h"
+#include "src/objects/tagged-value-inl.h"
+#include "src/objects/template-objects-inl.h"
+#include "src/objects/templates-inl.h"
+#include "src/objects/torque-defined-classes-inl.h"
+#include "src/objects/transitions-inl.h"
+
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-break-iterator-inl.h"
+#include "src/objects/js-collator-inl.h"
+#include "src/objects/js-date-time-format-inl.h"
+#include "src/objects/js-display-names-inl.h"
+#include "src/objects/js-list-format-inl.h"
+#include "src/objects/js-locale-inl.h"
+#include "src/objects/js-number-format-inl.h"
+#include "src/objects/js-plural-rules-inl.h"
+#include "src/objects/js-relative-time-format-inl.h"
+#include "src/objects/js-segment-iterator-inl.h"
+#include "src/objects/js-segmenter-inl.h"
+#include "src/objects/js-segments-inl.h"
+#endif // V8_INTL_SUPPORT
+
+#endif // V8_OBJECTS_ALL_OBJECTS_INL_H_
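[Note] A minimal usage sketch for the new umbrella header (the including file is illustrative; the diff does not show which compilation units adopt it):

    // e.g. in an object-printing or verification translation unit:
    #include "src/objects/all-objects-inl.h"  // pulls in every src/objects -inl.h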
diff --git a/deps/v8/src/objects/allocation-site-inl.h b/deps/v8/src/objects/allocation-site-inl.h
index 7447eb5ec3..d9911bc826 100644
--- a/deps/v8/src/objects/allocation-site-inl.h
+++ b/deps/v8/src/objects/allocation-site-inl.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/allocation-site-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(AllocationMemento)
OBJECT_CONSTRUCTORS_IMPL(AllocationSite, Struct)
diff --git a/deps/v8/src/objects/allocation-site.h b/deps/v8/src/objects/allocation-site.h
index 1da5925bee..437876d94c 100644
--- a/deps/v8/src/objects/allocation-site.h
+++ b/deps/v8/src/objects/allocation-site.h
@@ -16,6 +16,8 @@ namespace internal {
enum InstanceType : uint16_t;
+#include "torque-generated/src/objects/allocation-site-tq.inc"
+
class AllocationSite : public Struct {
public:
NEVER_READ_ONLY_SPACE
diff --git a/deps/v8/src/objects/api-callbacks-inl.h b/deps/v8/src/objects/api-callbacks-inl.h
index 8fa0f40c73..1572a3b352 100644
--- a/deps/v8/src/objects/api-callbacks-inl.h
+++ b/deps/v8/src/objects/api-callbacks-inl.h
@@ -13,7 +13,6 @@
#include "src/objects/js-objects-inl.h"
#include "src/objects/name.h"
#include "src/objects/templates.h"
-#include "torque-generated/class-definitions-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -21,6 +20,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/api-callbacks-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(AccessCheckInfo)
TQ_OBJECT_CONSTRUCTORS_IMPL(AccessorInfo)
TQ_OBJECT_CONSTRUCTORS_IMPL(InterceptorInfo)
diff --git a/deps/v8/src/objects/api-callbacks.h b/deps/v8/src/objects/api-callbacks.h
index ffd9e9f02e..f5d81dd986 100644
--- a/deps/v8/src/objects/api-callbacks.h
+++ b/deps/v8/src/objects/api-callbacks.h
@@ -7,7 +7,6 @@
#include "src/objects/struct.h"
#include "torque-generated/bit-fields.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -15,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/api-callbacks-tq.inc"
+
// An accessor must have a getter, but can have no setter.
//
// When setting a property, V8 searches accessors in prototypes.
diff --git a/deps/v8/src/objects/arguments-inl.h b/deps/v8/src/objects/arguments-inl.h
index 494a8960bd..b2576a6c8b 100644
--- a/deps/v8/src/objects/arguments-inl.h
+++ b/deps/v8/src/objects/arguments-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/arguments-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSArgumentsObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(AliasedArgumentsEntry)
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index d8cbdbae50..372fc745e4 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/arguments-tq.inc"
+
// Superclass for all objects with instance type {JS_ARGUMENTS_OBJECT_TYPE}
class JSArgumentsObject
: public TorqueGeneratedJSArgumentsObject<JSArgumentsObject, JSObject> {
diff --git a/deps/v8/src/objects/backing-store.cc b/deps/v8/src/objects/backing-store.cc
index c67fff0fa9..9f755f5d04 100644
--- a/deps/v8/src/objects/backing-store.cc
+++ b/deps/v8/src/objects/backing-store.cc
@@ -9,6 +9,7 @@
#include "src/execution/isolate.h"
#include "src/handles/global-handles.h"
#include "src/logging/counters.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
@@ -23,12 +24,6 @@ namespace v8 {
namespace internal {
namespace {
-#if V8_TARGET_ARCH_64_BIT
-constexpr bool kUseGuardRegions = true;
-#else
-constexpr bool kUseGuardRegions = false;
-#endif
-
#if V8_TARGET_ARCH_MIPS64
// MIPS64 has a user space of 2^40 bytes on most processors,
// address space limits needs to be smaller.
@@ -39,11 +34,10 @@ constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB
constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB
#endif
-constexpr uint64_t kOneGiB = 1024 * 1024 * 1024;
-constexpr uint64_t kNegativeGuardSize = 2 * kOneGiB;
+constexpr uint64_t kNegativeGuardSize = uint64_t{2} * GB;
#if V8_TARGET_ARCH_64_BIT
-constexpr uint64_t kFullGuardSize = 10 * kOneGiB;
+constexpr uint64_t kFullGuardSize = uint64_t{10} * GB;
#endif
std::atomic<uint64_t> reserved_address_space_{0};
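[Note] The rewritten constants keep the original values; for a 4 GiB maximum Wasm memory the 10 GiB full reservation decomposes as 2 GiB negative guard + 4 GiB addressable range + 4 GiB positive guard. A sketch of the arithmetic (GB is V8's 1-GiB constant):

    static_assert(uint64_t{2} * GB + uint64_t{4} * GB + uint64_t{4} * GB ==
                      uint64_t{10} * GB,
                  "negative guard + memory range + positive guard");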
@@ -62,30 +56,26 @@ enum class AllocationStatus {
kOtherFailure // Failed for an unknown reason
};
+base::AddressRegion GetReservedRegion(bool has_guard_regions,
+ void* buffer_start,
+ size_t byte_capacity) {
#if V8_TARGET_ARCH_64_BIT
-base::AddressRegion GetGuardedRegion(void* buffer_start, size_t byte_length) {
- // Guard regions always look like this:
- // |xxx(2GiB)xxx|.......(4GiB)..xxxxx|xxxxxx(4GiB)xxxxxx|
- // ^ buffer_start
- // ^ byte_length
- // ^ negative guard region ^ positive guard region
-
- Address start = reinterpret_cast<Address>(buffer_start);
- DCHECK_EQ(8, sizeof(size_t)); // only use on 64-bit
- DCHECK_EQ(0, start % AllocatePageSize());
- return base::AddressRegion(start - (2 * kOneGiB),
- static_cast<size_t>(kFullGuardSize));
-}
+ if (has_guard_regions) {
+ // Guard regions always look like this:
+ // |xxx(2GiB)xxx|.......(4GiB)..xxxxx|xxxxxx(4GiB)xxxxxx|
+ // ^ buffer_start
+ // ^ byte_length
+ // ^ negative guard region ^ positive guard region
+
+ Address start = reinterpret_cast<Address>(buffer_start);
+ DCHECK_EQ(8, sizeof(size_t)); // only use on 64-bit
+ DCHECK_EQ(0, start % AllocatePageSize());
+ return base::AddressRegion(start - kNegativeGuardSize,
+ static_cast<size_t>(kFullGuardSize));
+ }
#endif
-base::AddressRegion GetRegion(bool has_guard_regions, void* buffer_start,
- size_t byte_length, size_t byte_capacity) {
-#if V8_TARGET_ARCH_64_BIT
- if (has_guard_regions) return GetGuardedRegion(buffer_start, byte_length);
-#else
DCHECK(!has_guard_regions);
-#endif
-
return base::AddressRegion(reinterpret_cast<Address>(buffer_start),
byte_capacity);
}
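[Note] GetReservedRegion folds the guarded and unguarded cases into one helper. A hedged summary of the two results for a buffer starting at address start (layout restated from the comment above):

    // With guard regions (64-bit only):
    //   [start - 2 GiB, start + 8 GiB)   // kFullGuardSize == 10 GiB total
    // Without guard regions:
    //   [start, start + byte_capacity)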
@@ -173,8 +163,11 @@ BackingStore::~BackingStore() {
if (is_wasm_memory_) {
DCHECK(free_on_destruct_);
DCHECK(!custom_deleter_);
- TRACE_BS("BSw:free bs=%p mem=%p (length=%zu, capacity=%zu)\n", this,
- buffer_start_, byte_length(), byte_capacity_);
+ size_t reservation_size =
+ GetReservationSize(has_guard_regions_, byte_capacity_);
+ TRACE_BS(
+ "BSw:free bs=%p mem=%p (length=%zu, capacity=%zu, reservation=%zu)\n",
+ this, buffer_start_, byte_length(), byte_capacity_, reservation_size);
if (is_shared_) {
// Deallocate the list of attached memory objects.
SharedWasmMemoryData* shared_data = get_shared_wasm_memory_data();
@@ -183,22 +176,21 @@ BackingStore::~BackingStore() {
}
// Wasm memories are always allocated through the page allocator.
- auto region = GetRegion(has_guard_regions_, buffer_start_, byte_length_,
- byte_capacity_);
+ auto region =
+ GetReservedRegion(has_guard_regions_, buffer_start_, byte_capacity_);
bool pages_were_freed =
region.size() == 0 /* no need to free any pages */ ||
FreePages(GetPlatformPageAllocator(),
reinterpret_cast<void*>(region.begin()), region.size());
CHECK(pages_were_freed);
- BackingStore::ReleaseReservation(
- GetReservationSize(has_guard_regions_, byte_capacity_));
+ BackingStore::ReleaseReservation(reservation_size);
Clear();
return;
}
if (custom_deleter_) {
DCHECK(free_on_destruct_);
- TRACE_BS("BS:custome deleter bs=%p mem=%p (length=%zu, capacity=%zu)\n",
+ TRACE_BS("BS:custom deleter bs=%p mem=%p (length=%zu, capacity=%zu)\n",
this, buffer_start_, byte_length(), byte_capacity_);
type_specific_data_.deleter.callback(buffer_start_, byte_length_,
type_specific_data_.deleter.data);
@@ -304,7 +296,7 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
TRACE_BS("BSw:try %zu pages, %zu max\n", initial_pages, maximum_pages);
- bool guards = kUseGuardRegions;
+ bool guards = trap_handler::IsTrapHandlerEnabled();
// For accounting purposes, whether a GC was necessary.
bool did_retry = false;
@@ -348,7 +340,8 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
FATAL("could not allocate wasm memory backing store");
}
RecordStatus(isolate, AllocationStatus::kAddressSpaceLimitReachedFailure);
- TRACE_BS("BSw:try failed to reserve address space\n");
+ TRACE_BS("BSw:try failed to reserve address space (size %zu)\n",
+ reservation_size);
return {};
}
@@ -385,9 +378,10 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
PageAllocator::kReadWrite);
};
if (!gc_retry(commit_memory)) {
+ TRACE_BS("BSw:try failed to set permissions (%p, %zu)\n", buffer_start,
+ byte_length);
// SetPermissions put us over the process memory limit.
V8::FatalProcessOutOfMemory(nullptr, "BackingStore::AllocateWasmMemory()");
- TRACE_BS("BSw:try failed to set permissions\n");
}
DebugCheckZero(buffer_start, byte_length); // touch the bytes.
@@ -405,8 +399,10 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
false, // custom_deleter
false); // empty_deleter
- TRACE_BS("BSw:alloc bs=%p mem=%p (length=%zu, capacity=%zu)\n", result,
- result->buffer_start(), byte_length, byte_capacity);
+ TRACE_BS(
+ "BSw:alloc bs=%p mem=%p (length=%zu, capacity=%zu, reservation=%zu)\n",
+ result, result->buffer_start(), byte_length, byte_capacity,
+ reservation_size);
// Shared Wasm memories need an anchor for the memory object list.
if (shared == SharedFlag::kShared) {
diff --git a/deps/v8/src/objects/backing-store.h b/deps/v8/src/objects/backing-store.h
index 0a460cef8a..18505baf67 100644
--- a/deps/v8/src/objects/backing-store.h
+++ b/deps/v8/src/objects/backing-store.h
@@ -157,6 +157,8 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
globally_registered_(false),
custom_deleter_(custom_deleter),
empty_deleter_(empty_deleter) {}
+ BackingStore(const BackingStore&) = delete;
+ BackingStore& operator=(const BackingStore&) = delete;
void SetAllocatorFromIsolate(Isolate* isolate);
void* buffer_start_ = nullptr;
@@ -209,8 +211,6 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
static std::unique_ptr<BackingStore> TryAllocateWasmMemory(
Isolate* isolate, size_t initial_pages, size_t maximum_pages,
SharedFlag shared);
-
- DISALLOW_COPY_AND_ASSIGN(BackingStore);
};
// A global, per-process mapping from buffer addresses to backing stores.
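[Note] The explicit deleted members replace the DISALLOW_COPY_AND_ASSIGN macro, which (per the Chromium/V8 base macros) expanded to roughly the same two declarations:

    // Approximate macro being replaced:
    #define DISALLOW_COPY_AND_ASSIGN(TypeName) \
      TypeName(const TypeName&) = delete;      \
      TypeName& operator=(const TypeName&) = delete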
diff --git a/deps/v8/src/objects/bigint-inl.h b/deps/v8/src/objects/bigint-inl.h
new file mode 100644
index 0000000000..1455bed92d
--- /dev/null
+++ b/deps/v8/src/objects/bigint-inl.h
@@ -0,0 +1,24 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_BIGINT_INL_H_
+#define V8_OBJECTS_BIGINT_INL_H_
+
+#include "src/objects/bigint.h"
+#include "src/objects/objects-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/bigint-tq-inl.inc"
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_BIGINT_INL_H_
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index fbbfbeb69d..129bb14b20 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -715,7 +715,7 @@ MaybeHandle<MutableBigInt> MutableBigInt::BitwiseAnd(Isolate* isolate,
if (!x->sign() && !y->sign()) {
return AbsoluteAnd(isolate, x, y);
} else if (x->sign() && y->sign()) {
- int result_length = Max(x->length(), y->length()) + 1;
+ int result_length = std::max(x->length(), y->length()) + 1;
// (-x) & (-y) == ~(x-1) & ~(y-1) == ~((x-1) | (y-1))
// == -(((x-1) | (y-1)) + 1)
Handle<MutableBigInt> result;
@@ -746,7 +746,7 @@ MaybeHandle<MutableBigInt> MutableBigInt::BitwiseXor(Isolate* isolate,
if (!x->sign() && !y->sign()) {
return AbsoluteXor(isolate, x, y);
} else if (x->sign() && y->sign()) {
- int result_length = Max(x->length(), y->length());
+ int result_length = std::max(x->length(), y->length());
// (-x) ^ (-y) == ~(x-1) ^ ~(y-1) == (x-1) ^ (y-1)
Handle<MutableBigInt> result =
AbsoluteSubOne(isolate, x, result_length).ToHandleChecked();
@@ -754,7 +754,7 @@ MaybeHandle<MutableBigInt> MutableBigInt::BitwiseXor(Isolate* isolate,
return AbsoluteXor(isolate, result, y_1, *result);
} else {
DCHECK(x->sign() != y->sign());
- int result_length = Max(x->length(), y->length()) + 1;
+ int result_length = std::max(x->length(), y->length()) + 1;
// Assume that x is the positive BigInt.
if (x->sign()) std::swap(x, y);
// x ^ (-y) == x ^ ~(y-1) == ~(x ^ (y-1)) == -((x ^ (y-1)) + 1)
@@ -775,7 +775,7 @@ MaybeHandle<BigInt> BigInt::BitwiseOr(Isolate* isolate, Handle<BigInt> x,
MaybeHandle<MutableBigInt> MutableBigInt::BitwiseOr(Isolate* isolate,
Handle<BigInt> x,
Handle<BigInt> y) {
- int result_length = Max(x->length(), y->length());
+ int result_length = std::max(x->length(), y->length());
if (!x->sign() && !y->sign()) {
return AbsoluteOr(isolate, x, y);
} else if (x->sign() && y->sign()) {
@@ -1371,7 +1371,7 @@ inline Handle<MutableBigInt> MutableBigInt::AbsoluteBitwiseOp(
std::swap(x_length, y_length);
}
}
- DCHECK(num_pairs == Min(x_length, y_length));
+ DCHECK(num_pairs == std::min(x_length, y_length));
Handle<MutableBigInt> result(result_storage, isolate);
int result_length = extra_digits == kCopy ? x_length : num_pairs;
if (result_storage.is_null()) {
@@ -1872,6 +1872,8 @@ Handle<BigInt> MutableBigInt::RightShiftByAbsolute(Isolate* isolate,
DCHECK_LE(result_length, length);
Handle<MutableBigInt> result = New(isolate, result_length).ToHandleChecked();
if (bits_shift == 0) {
+ // Zero out any overflow digit (see "rounding_can_overflow" above).
+ result->set_digit(result_length - 1, 0);
for (int i = digit_shift; i < length; i++) {
result->set_digit(i - digit_shift, x->digit(i));
}
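[Note] This is a correctness fix: when rounding up may overflow into a fresh top digit, result_length includes one digit that the bits_shift == 0 copy loop never writes, so it is cleared up front. A hedged illustration with 64-bit digits:

    // x = {low, 0xFFFF'FFFF'FFFF'FFFF}, shift == 64:
    //   digit_shift == 1, bits_shift == 0, and the retained digit is all
    //   ones, so rounding up *may* carry into a new digit; result_length == 2.
    // The copy loop writes only result digit 0; if the round-up does not
    // actually happen, digit 1 would otherwise hold uninitialized memory.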
@@ -2392,7 +2394,7 @@ Handle<BigInt> MutableBigInt::TruncateAndSubFromPowerOfTwo(Isolate* isolate,
int x_length = x->length();
digit_t borrow = 0;
// Take digits from {x} unless its length is exhausted.
- int limit = Min(last, x_length);
+ int limit = std::min(last, x_length);
for (; i < limit; i++) {
digit_t new_borrow = 0;
digit_t difference = digit_sub(0, x->digit(i), &new_borrow);
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index 4fdd9b1501..a7494a54c5 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -26,6 +26,8 @@ class BigInt;
class ValueDeserializer;
class ValueSerializer;
+#include "torque-generated/src/objects/bigint-tq.inc"
+
// BigIntBase is just the raw data object underlying a BigInt. Use with care!
// Most code should be using BigInts instead.
class BigIntBase : public PrimitiveHeapObject {
diff --git a/deps/v8/src/objects/bigint.tq b/deps/v8/src/objects/bigint.tq
new file mode 100644
index 0000000000..60be844cc6
--- /dev/null
+++ b/deps/v8/src/objects/bigint.tq
@@ -0,0 +1,21 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO(nicohartmann): Discuss whether types used by multiple builtins should be
+// in global namespace
+extern class BigIntBase extends PrimitiveHeapObject
+ generates 'TNode<BigInt>' {}
+
+type BigInt extends BigIntBase;
+
+@noVerifier
+@hasSameInstanceTypeAsParent
+@doNotGenerateCast
+extern class MutableBigInt extends BigIntBase generates 'TNode<BigInt>' {
+}
+
+Convert<BigInt, MutableBigInt>(i: MutableBigInt): BigInt {
+ assert(bigint::IsCanonicalized(i));
+ return %RawDownCast<BigInt>(Convert<BigIntBase>(i));
+}
diff --git a/deps/v8/src/objects/cell-inl.h b/deps/v8/src/objects/cell-inl.h
index 0bd6808fbc..dbfdb3ae98 100644
--- a/deps/v8/src/objects/cell-inl.h
+++ b/deps/v8/src/objects/cell-inl.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/cell-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(Cell)
Cell Cell::FromValueAddress(Address value) {
diff --git a/deps/v8/src/objects/cell.h b/deps/v8/src/objects/cell.h
index de43897350..da75249990 100644
--- a/deps/v8/src/objects/cell.h
+++ b/deps/v8/src/objects/cell.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_CELL_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -14,6 +13,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/cell-tq.inc"
+
class Cell : public TorqueGeneratedCell<Cell, HeapObject> {
public:
static inline Cell FromValueAddress(Address value);
diff --git a/deps/v8/src/objects/class-definitions-tq-deps-inl.h b/deps/v8/src/objects/class-definitions-tq-deps-inl.h
deleted file mode 100644
index ad046cbf8b..0000000000
--- a/deps/v8/src/objects/class-definitions-tq-deps-inl.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_OBJECTS_CLASS_DEFINITIONS_TQ_DEPS_INL_H_
-#define V8_OBJECTS_CLASS_DEFINITIONS_TQ_DEPS_INL_H_
-
-// This is a collection of -inl.h files required by the generated file
-// class-definitions.cc. Generally, classes using @generateCppClass need an
-// entry here.
-#include "src/objects/allocation-site-inl.h"
-#include "src/objects/arguments-inl.h"
-#include "src/objects/embedder-data-array-inl.h"
-#include "src/objects/free-space-inl.h"
-#include "src/objects/js-collection-inl.h"
-#include "src/objects/js-generator-inl.h"
-#include "src/objects/js-regexp-inl.h"
-#include "src/objects/js-regexp-string-iterator-inl.h"
-#include "src/objects/js-weak-refs-inl.h"
-#include "src/objects/literal-objects-inl.h"
-#include "src/objects/microtask-inl.h"
-#include "src/objects/module-inl.h"
-#include "src/objects/promise-inl.h"
-#include "src/objects/property-descriptor-object-inl.h"
-#include "src/objects/stack-frame-info-inl.h"
-#include "src/objects/struct-inl.h"
-#include "src/objects/template-objects-inl.h"
-
-#ifdef V8_INTL_SUPPORT
-#include "src/objects/js-break-iterator-inl.h"
-#include "src/objects/js-collator-inl.h"
-#include "src/objects/js-date-time-format-inl.h"
-#include "src/objects/js-display-names-inl.h"
-#include "src/objects/js-list-format-inl.h"
-#include "src/objects/js-locale-inl.h"
-#include "src/objects/js-number-format-inl.h"
-#include "src/objects/js-plural-rules-inl.h"
-#include "src/objects/js-relative-time-format-inl.h"
-#include "src/objects/js-segment-iterator-inl.h"
-#include "src/objects/js-segmenter-inl.h"
-#include "src/objects/js-segments-inl.h"
-#endif
-
-#endif // V8_OBJECTS_CLASS_DEFINITIONS_TQ_DEPS_INL_H_
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 3c772f855a..1a928b20f1 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -70,13 +70,6 @@ int AbstractCode::SizeIncludingMetadata() {
return GetBytecodeArray().SizeIncludingMetadata();
}
}
-int AbstractCode::ExecutableSize() {
- if (IsCode()) {
- return GetCode().ExecutableSize();
- } else {
- return GetBytecodeArray().BytecodeArraySize();
- }
-}
Address AbstractCode::raw_instruction_start() {
if (IsCode()) {
@@ -171,24 +164,25 @@ OBJECT_CONSTRUCTORS_IMPL(Code, HeapObject)
NEVER_READ_ONLY_SPACE_IMPL(Code)
INT_ACCESSORS(Code, raw_instruction_size, kInstructionSizeOffset)
-INT_ACCESSORS(Code, safepoint_table_offset, kSafepointTableOffsetOffset)
+INT_ACCESSORS(Code, raw_metadata_size, kMetadataSizeOffset)
INT_ACCESSORS(Code, handler_table_offset, kHandlerTableOffsetOffset)
INT_ACCESSORS(Code, code_comments_offset, kCodeCommentsOffsetOffset)
+INT32_ACCESSORS(Code, unwinding_info_offset, kUnwindingInfoOffsetOffset)
#define CODE_ACCESSORS(name, type, offset) \
ACCESSORS_CHECKED2(Code, name, type, offset, true, \
!ObjectInYoungGeneration(value))
-#define SYNCHRONIZED_CODE_ACCESSORS(name, type, offset) \
- SYNCHRONIZED_ACCESSORS_CHECKED2(Code, name, type, offset, true, \
- !ObjectInYoungGeneration(value))
+#define RELEASE_ACQUIRE_CODE_ACCESSORS(name, type, offset) \
+ RELEASE_ACQUIRE_ACCESSORS_CHECKED2(Code, name, type, offset, true, \
+ !ObjectInYoungGeneration(value))
CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
CODE_ACCESSORS(source_position_table, Object, kSourcePositionTableOffset)
// Concurrent marker needs to access kind specific flags in code data container.
-SYNCHRONIZED_CODE_ACCESSORS(code_data_container, CodeDataContainer,
- kCodeDataContainerOffset)
+RELEASE_ACQUIRE_CODE_ACCESSORS(code_data_container, CodeDataContainer,
+ kCodeDataContainerOffset)
#undef CODE_ACCESSORS
-#undef SYNCHRONIZED_CODE_ACCESSORS
+#undef RELEASE_ACQUIRE_CODE_ACCESSORS
void Code::WipeOutHeader() {
WRITE_FIELD(*this, kRelocationInfoOffset, Smi::FromInt(0));
@@ -198,14 +192,16 @@ void Code::WipeOutHeader() {
}
void Code::clear_padding() {
+ // Clear the padding between the header and `raw_body_start`.
if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
memset(reinterpret_cast<void*>(address() + kOptionalPaddingOffset), 0,
FIELD_SIZE(kOptionalPaddingOffset));
}
- Address data_end =
- has_unwinding_info() ? unwinding_info_end() : raw_instruction_end();
- memset(reinterpret_cast<void*>(data_end), 0,
- CodeSize() - (data_end - address()));
+
+ // Clear the padding after `raw_body_end`.
+ size_t trailing_padding_size =
+ CodeSize() - Code::kHeaderSize - raw_body_size();
+ memset(reinterpret_cast<void*>(raw_body_end()), 0, trailing_padding_size);
}
ByteArray Code::SourcePositionTable() const {
@@ -217,25 +213,35 @@ ByteArray Code::SourcePositionTable() const {
}
Object Code::next_code_link() const {
- return code_data_container().next_code_link();
+ return code_data_container(kAcquireLoad).next_code_link();
}
void Code::set_next_code_link(Object value) {
- code_data_container().set_next_code_link(value);
+ code_data_container(kAcquireLoad).set_next_code_link(value);
+}
+
+Address Code::raw_body_start() const { return raw_instruction_start(); }
+
+Address Code::raw_body_end() const {
+ return raw_body_start() + raw_body_size();
+}
+
+int Code::raw_body_size() const {
+ return raw_instruction_size() + raw_metadata_size();
}
int Code::InstructionSize() const {
- if (is_off_heap_trampoline()) return OffHeapInstructionSize();
- return raw_instruction_size();
+ return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapInstructionSize()
+ : raw_instruction_size();
}
Address Code::raw_instruction_start() const {
- return FIELD_ADDR(*this, kHeaderSize);
+ return field_address(kHeaderSize);
}
Address Code::InstructionStart() const {
- if (is_off_heap_trampoline()) return OffHeapInstructionStart();
- return raw_instruction_start();
+ return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapInstructionStart()
+ : raw_instruction_start();
}
Address Code::raw_instruction_end() const {
@@ -243,41 +249,32 @@ Address Code::raw_instruction_end() const {
}
Address Code::InstructionEnd() const {
- if (is_off_heap_trampoline()) return OffHeapInstructionEnd();
- return raw_instruction_end();
-}
-
-int Code::GetUnwindingInfoSizeOffset() const {
- DCHECK(has_unwinding_info());
- return RoundUp(kHeaderSize + raw_instruction_size(), kInt64Size);
+ return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapInstructionEnd()
+ : raw_instruction_end();
}
-int Code::unwinding_info_size() const {
- DCHECK(has_unwinding_info());
- return static_cast<int>(ReadField<uint64_t>(GetUnwindingInfoSizeOffset()));
+Address Code::raw_metadata_start() const {
+ return raw_instruction_start() + raw_instruction_size();
}
-void Code::set_unwinding_info_size(int value) {
- DCHECK(has_unwinding_info());
- WriteField<uint64_t>(GetUnwindingInfoSizeOffset(), value);
+Address Code::MetadataStart() const {
+ STATIC_ASSERT(kOnHeapBodyIsContiguous);
+ return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapMetadataStart()
+ : raw_metadata_start();
}
-Address Code::unwinding_info_start() const {
- DCHECK(has_unwinding_info());
- return FIELD_ADDR(*this, GetUnwindingInfoSizeOffset()) + kInt64Size;
+Address Code::raw_metadata_end() const {
+ return raw_metadata_start() + raw_metadata_size();
}
-Address Code::unwinding_info_end() const {
- DCHECK(has_unwinding_info());
- return unwinding_info_start() + unwinding_info_size();
+Address Code::MetadataEnd() const {
+ return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapMetadataEnd()
+ : raw_metadata_end();
}
-int Code::body_size() const {
- int unpadded_body_size =
- has_unwinding_info()
- ? static_cast<int>(unwinding_info_end() - raw_instruction_start())
- : raw_instruction_size();
- return RoundUp(unpadded_body_size, kObjectAlignment);
+int Code::MetadataSize() const {
+ return V8_UNLIKELY(is_off_heap_trampoline()) ? OffHeapMetadataSize()
+ : raw_metadata_size();
}
int Code::SizeIncludingMetadata() const {
@@ -288,7 +285,7 @@ int Code::SizeIncludingMetadata() const {
}
ByteArray Code::unchecked_relocation_info() const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return ByteArray::unchecked_cast(
TaggedField<HeapObject, kRelocationInfoOffset>::load(isolate, *this));
}
@@ -317,13 +314,6 @@ bool Code::contains(Address inner_pointer) {
return (address() <= inner_pointer) && (inner_pointer < address() + Size());
}
-int Code::ExecutableSize() const {
- // Check that the assumptions about the layout of the code object holds.
- DCHECK_EQ(static_cast<int>(raw_instruction_start() - address()),
- Code::kHeaderSize);
- return raw_instruction_size() + Code::kHeaderSize;
-}
-
// static
void Code::CopyRelocInfoToByteArray(ByteArray dest, const CodeDesc& desc) {
DCHECK_EQ(dest.length(), desc.reloc_size);
@@ -332,20 +322,18 @@ void Code::CopyRelocInfoToByteArray(ByteArray dest, const CodeDesc& desc) {
static_cast<size_t>(desc.reloc_size));
}
-int Code::CodeSize() const { return SizeFor(body_size()); }
+int Code::CodeSize() const { return SizeFor(raw_body_size()); }
CodeKind Code::kind() const {
STATIC_ASSERT(FIELD_SIZE(kFlagsOffset) == kInt32Size);
return KindField::decode(ReadField<uint32_t>(kFlagsOffset));
}
-void Code::initialize_flags(CodeKind kind, bool has_unwinding_info,
- bool is_turbofanned, int stack_slots,
+void Code::initialize_flags(CodeKind kind, bool is_turbofanned, int stack_slots,
bool is_off_heap_trampoline) {
CHECK(0 <= stack_slots && stack_slots < StackSlotsField::kMax);
DCHECK(!CodeKindIsInterpretedJSFunction(kind));
- uint32_t flags = HasUnwindingInfoField::encode(has_unwinding_info) |
- KindField::encode(kind) |
+ uint32_t flags = KindField::encode(kind) |
IsTurbofannedField::encode(is_turbofanned) |
StackSlotsField::encode(stack_slots) |
IsOffHeapTrampoline::encode(is_off_heap_trampoline);
@@ -368,7 +356,7 @@ inline bool Code::checks_optimization_marker() const {
bool checks_marker =
(builtin_index() == Builtins::kCompileLazy ||
builtin_index() == Builtins::kInterpreterEntryTrampoline ||
- CodeKindChecksOptimizationMarker(kind()));
+ CodeKindCanTierUp(kind()));
return checks_marker ||
(CodeKindCanDeoptimize(kind()) && marked_for_deoptimization());
}
@@ -378,51 +366,50 @@ inline bool Code::has_tagged_params() const {
kind() != CodeKind::C_WASM_ENTRY && kind() != CodeKind::WASM_FUNCTION;
}
-inline bool Code::has_unwinding_info() const {
- return HasUnwindingInfoField::decode(ReadField<uint32_t>(kFlagsOffset));
-}
-
inline bool Code::is_turbofanned() const {
return IsTurbofannedField::decode(ReadField<uint32_t>(kFlagsOffset));
}
inline bool Code::can_have_weak_objects() const {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
- int32_t flags = code_data_container().kind_specific_flags();
+ int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
return CanHaveWeakObjectsField::decode(flags);
}
inline void Code::set_can_have_weak_objects(bool value) {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
- int32_t previous = code_data_container().kind_specific_flags();
+ CodeDataContainer container = code_data_container(kAcquireLoad);
+ int32_t previous = container.kind_specific_flags();
int32_t updated = CanHaveWeakObjectsField::update(previous, value);
- code_data_container().set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated);
}
inline bool Code::is_promise_rejection() const {
DCHECK(kind() == CodeKind::BUILTIN);
- int32_t flags = code_data_container().kind_specific_flags();
+ int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
return IsPromiseRejectionField::decode(flags);
}
inline void Code::set_is_promise_rejection(bool value) {
DCHECK(kind() == CodeKind::BUILTIN);
- int32_t previous = code_data_container().kind_specific_flags();
+ CodeDataContainer container = code_data_container(kAcquireLoad);
+ int32_t previous = container.kind_specific_flags();
int32_t updated = IsPromiseRejectionField::update(previous, value);
- code_data_container().set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated);
}
inline bool Code::is_exception_caught() const {
DCHECK(kind() == CodeKind::BUILTIN);
- int32_t flags = code_data_container().kind_specific_flags();
+ int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
return IsExceptionCaughtField::decode(flags);
}
inline void Code::set_is_exception_caught(bool value) {
DCHECK(kind() == CodeKind::BUILTIN);
- int32_t previous = code_data_container().kind_specific_flags();
+ CodeDataContainer container = code_data_container(kAcquireLoad);
+ int32_t previous = container.kind_specific_flags();
int32_t updated = IsExceptionCaughtField::update(previous, value);
- code_data_container().set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated);
}
inline bool Code::is_off_heap_trampoline() const {
@@ -437,16 +424,18 @@ inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() {
int Code::builtin_index() const {
int index = ReadField<int>(kBuiltinIndexOffset);
- DCHECK(index == -1 || Builtins::IsBuiltinId(index));
+ DCHECK(index == Builtins::kNoBuiltinId || Builtins::IsBuiltinId(index));
return index;
}
void Code::set_builtin_index(int index) {
- DCHECK(index == -1 || Builtins::IsBuiltinId(index));
+ DCHECK(index == Builtins::kNoBuiltinId || Builtins::IsBuiltinId(index));
WriteField<int>(kBuiltinIndexOffset, index);
}
-bool Code::is_builtin() const { return builtin_index() != -1; }
+bool Code::is_builtin() const {
+ return builtin_index() != Builtins::kNoBuiltinId;
+}
unsigned Code::inlined_bytecode_size() const {
DCHECK(CodeKindIsOptimizedJSFunction(kind()) ||
@@ -470,21 +459,22 @@ int Code::stack_slots() const {
bool Code::marked_for_deoptimization() const {
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags = code_data_container().kind_specific_flags();
+ int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
return MarkedForDeoptimizationField::decode(flags);
}
void Code::set_marked_for_deoptimization(bool flag) {
DCHECK(CodeKindCanDeoptimize(kind()));
DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
- int32_t previous = code_data_container().kind_specific_flags();
+ CodeDataContainer container = code_data_container(kAcquireLoad);
+ int32_t previous = container.kind_specific_flags();
int32_t updated = MarkedForDeoptimizationField::update(previous, flag);
- code_data_container().set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated);
}
int Code::deoptimization_count() const {
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags = code_data_container().kind_specific_flags();
+ int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
int count = DeoptCountField::decode(flags);
DCHECK_GE(count, 0);
return count;
@@ -492,40 +482,43 @@ int Code::deoptimization_count() const {
void Code::increment_deoptimization_count() {
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags = code_data_container().kind_specific_flags();
+ CodeDataContainer container = code_data_container(kAcquireLoad);
+ int32_t flags = container.kind_specific_flags();
int32_t count = DeoptCountField::decode(flags);
DCHECK_GE(count, 0);
CHECK_LE(count + 1, DeoptCountField::kMax);
int32_t updated = DeoptCountField::update(flags, count + 1);
- code_data_container().set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated);
}
bool Code::embedded_objects_cleared() const {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
- int32_t flags = code_data_container().kind_specific_flags();
+ int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
return EmbeddedObjectsClearedField::decode(flags);
}
void Code::set_embedded_objects_cleared(bool flag) {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
DCHECK_IMPLIES(flag, marked_for_deoptimization());
- int32_t previous = code_data_container().kind_specific_flags();
+ CodeDataContainer container = code_data_container(kAcquireLoad);
+ int32_t previous = container.kind_specific_flags();
int32_t updated = EmbeddedObjectsClearedField::update(previous, flag);
- code_data_container().set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated);
}
bool Code::deopt_already_counted() const {
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags = code_data_container().kind_specific_flags();
+ int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
return DeoptAlreadyCountedField::decode(flags);
}
void Code::set_deopt_already_counted(bool flag) {
DCHECK(CodeKindCanDeoptimize(kind()));
DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
- int32_t previous = code_data_container().kind_specific_flags();
+ CodeDataContainer container = code_data_container(kAcquireLoad);
+ int32_t previous = container.kind_specific_flags();
int32_t updated = DeoptAlreadyCountedField::update(previous, flag);
- code_data_container().set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated);
}
bool Code::is_optimized_code() const {
@@ -534,25 +527,44 @@ bool Code::is_optimized_code() const {
bool Code::is_wasm_code() const { return kind() == CodeKind::WASM_FUNCTION; }
int Code::constant_pool_offset() const {
- if (!FLAG_enable_embedded_constant_pool) return code_comments_offset();
+ if (!FLAG_enable_embedded_constant_pool) {
+ // Redirection needed since the field doesn't exist in this case.
+ return code_comments_offset();
+ }
return ReadField<int>(kConstantPoolOffsetOffset);
}
void Code::set_constant_pool_offset(int value) {
- if (!FLAG_enable_embedded_constant_pool) return;
- DCHECK_LE(value, InstructionSize());
+ if (!FLAG_enable_embedded_constant_pool) {
+ // Redirection needed since the field doesn't exist in this case.
+ return;
+ }
+ DCHECK_LE(value, MetadataSize());
WriteField<int>(kConstantPoolOffsetOffset, value);
}
Address Code::constant_pool() const {
if (!has_constant_pool()) return kNullAddress;
- return InstructionStart() + constant_pool_offset();
+ return MetadataStart() + constant_pool_offset();
}
Address Code::code_comments() const {
- return InstructionStart() + code_comments_offset();
+ return MetadataStart() + code_comments_offset();
}
+Address Code::unwinding_info_start() const {
+ return MetadataStart() + unwinding_info_offset();
+}
+
+Address Code::unwinding_info_end() const { return MetadataEnd(); }
+
+int Code::unwinding_info_size() const {
+ DCHECK_GE(unwinding_info_end(), unwinding_info_start());
+ return static_cast<int>(unwinding_info_end() - unwinding_info_start());
+}
+
+bool Code::has_unwinding_info() const { return unwinding_info_size() > 0; }
+
Code Code::GetCodeFromTargetAddress(Address address) {
{
// TODO(jgruber,v8:6666): Support embedded builtins here. We'd need to pass
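[Note] Taken together, the new accessors describe a contiguous on-heap Code body with all metadata tables moved behind the instruction area. A sketch of the implied layout (ordering inferred from the offset and size computations in this diff):

    // +---------------------------+ <- address() + kHeaderSize == raw_body_start()
    // | instructions              |    raw_instruction_size() bytes
    // +---------------------------+ <- raw_metadata_start() / MetadataStart()
    // | safepoint table           |    offsets below are relative to MetadataStart()
    // | handler table             |
    // | constant pool (optional)  |
    // | code comments             |
    // | unwinding info            |    ends at raw_body_end() == MetadataEnd()
    // +---------------------------+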
@@ -701,8 +713,8 @@ int32_t BytecodeArray::parameter_count() const {
ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
ACCESSORS(BytecodeArray, handler_table, ByteArray, kHandlerTableOffset)
-SYNCHRONIZED_ACCESSORS(BytecodeArray, synchronized_source_position_table,
- Object, kSourcePositionTableOffset)
+RELEASE_ACQUIRE_ACCESSORS(BytecodeArray, source_position_table, Object,
+ kSourcePositionTableOffset)
void BytecodeArray::clear_padding() {
int data_size = kHeaderSize + length();
@@ -715,22 +727,22 @@ Address BytecodeArray::GetFirstBytecodeAddress() {
}
bool BytecodeArray::HasSourcePositionTable() const {
- Object maybe_table = synchronized_source_position_table();
+ Object maybe_table = source_position_table(kAcquireLoad);
return !(maybe_table.IsUndefined() || DidSourcePositionGenerationFail());
}
bool BytecodeArray::DidSourcePositionGenerationFail() const {
- return synchronized_source_position_table().IsException();
+ return source_position_table(kAcquireLoad).IsException();
}
void BytecodeArray::SetSourcePositionsFailedToCollect() {
- set_synchronized_source_position_table(GetReadOnlyRoots().exception());
+ set_source_position_table(GetReadOnlyRoots().exception(), kReleaseStore);
}
ByteArray BytecodeArray::SourcePositionTable() const {
// WARNING: This function may be called from a background thread, hence
// changes to how it accesses the heap can easily lead to bugs.
- Object maybe_table = synchronized_source_position_table();
+ Object maybe_table = source_position_table(kAcquireLoad);
if (maybe_table.IsByteArray()) return ByteArray::cast(maybe_table);
ReadOnlyRoots roots = GetReadOnlyRoots();
DCHECK(maybe_table.IsUndefined(roots) || maybe_table.IsException(roots));
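[Note] The SYNCHRONIZED_ to RELEASE_ACQUIRE_ renames make the memory ordering explicit at each call site instead of hiding it in the accessor name. A hedged sketch of the pairing this enforces:

    // Publisher (main thread):
    set_source_position_table(table, kReleaseStore);
    // Consumer (possibly a background thread):
    Object t = source_position_table(kAcquireLoad);
    // The acquire load observes the table fully initialized before the
    // release store published it.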
diff --git a/deps/v8/src/objects/code-kind.h b/deps/v8/src/objects/code-kind.h
index a1f9b43900..6314005649 100644
--- a/deps/v8/src/objects/code-kind.h
+++ b/deps/v8/src/objects/code-kind.h
@@ -11,30 +11,35 @@
namespace v8 {
namespace internal {
-// TODO(jgruber,rmcilroy): Rename OPTIMIZED_FUNCTION once we've fully
-// disambiguated Turboprop, Turbofan, and NCI code kinds.
-// TODO(jgruber): Rename STUB to DEOPT_ENTRIES_OR_FOR_TESTING, or split it into
-// DEOPT_ENTRIES and FOR_TESTING, or convert DEOPT_ENTRIES into a builtin.
-#define CODE_KIND_LIST(V) \
- V(OPTIMIZED_FUNCTION) \
- V(BYTECODE_HANDLER) \
- V(STUB) \
- V(BUILTIN) \
- V(REGEXP) \
- V(WASM_FUNCTION) \
- V(WASM_TO_CAPI_FUNCTION) \
- V(WASM_TO_JS_FUNCTION) \
- V(JS_TO_WASM_FUNCTION) \
- V(JS_TO_JS_FUNCTION) \
- V(C_WASM_ENTRY) \
- V(INTERPRETED_FUNCTION) \
- V(NATIVE_CONTEXT_INDEPENDENT)
+// The order of INTERPRETED_FUNCTION to TURBOFAN is important. We use it to
+// check the relative ordering of the tiers when fetching / installing optimized
+// code.
+#define CODE_KIND_LIST(V) \
+ V(BYTECODE_HANDLER) \
+ V(FOR_TESTING) \
+ V(BUILTIN) \
+ V(REGEXP) \
+ V(WASM_FUNCTION) \
+ V(WASM_TO_CAPI_FUNCTION) \
+ V(WASM_TO_JS_FUNCTION) \
+ V(JS_TO_WASM_FUNCTION) \
+ V(JS_TO_JS_FUNCTION) \
+ V(C_WASM_ENTRY) \
+ V(INTERPRETED_FUNCTION) \
+ V(NATIVE_CONTEXT_INDEPENDENT) \
+ V(TURBOPROP) \
+ V(TURBOFAN)
enum class CodeKind {
#define DEFINE_CODE_KIND_ENUM(name) name,
CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
#undef DEFINE_CODE_KIND_ENUM
};
+STATIC_ASSERT(CodeKind::INTERPRETED_FUNCTION < CodeKind::TURBOPROP &&
+ CodeKind::INTERPRETED_FUNCTION <
+ CodeKind::NATIVE_CONTEXT_INDEPENDENT);
+STATIC_ASSERT(CodeKind::TURBOPROP < CodeKind::TURBOFAN &&
+ CodeKind::NATIVE_CONTEXT_INDEPENDENT < CodeKind::TURBOFAN);
#define V(...) +1
static constexpr int kCodeKindCount = CODE_KIND_LIST(V);
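[Note] The static asserts pin the enum ordering so tier comparisons can rely on operator< directly. An illustrative check the ordering enables (the helper name is an assumption, not part of this diff):

    bool IsAtLeastAsHighTier(CodeKind a, CodeKind b) {
      return static_cast<int>(a) >= static_cast<int>(b);
    }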
@@ -52,8 +57,9 @@ inline constexpr bool CodeKindIsNativeContextIndependentJSFunction(
}
inline constexpr bool CodeKindIsOptimizedJSFunction(CodeKind kind) {
- return kind == CodeKind::OPTIMIZED_FUNCTION ||
- kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
+ return kind == CodeKind::TURBOFAN ||
+ kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT ||
+ kind == CodeKind::TURBOPROP;
}
inline constexpr bool CodeKindIsJSFunction(CodeKind kind) {
@@ -72,9 +78,18 @@ inline constexpr bool CodeKindCanDeoptimize(CodeKind kind) {
return CodeKindIsOptimizedJSFunction(kind);
}
-inline constexpr bool CodeKindChecksOptimizationMarker(CodeKind kind) {
+inline constexpr bool CodeKindCanOSR(CodeKind kind) {
+ return kind == CodeKind::TURBOFAN || kind == CodeKind::TURBOPROP;
+}
+
+inline constexpr bool CodeKindIsOptimizedAndCanTierUp(CodeKind kind) {
+ return kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT ||
+ (FLAG_turboprop_as_midtier && kind == CodeKind::TURBOPROP);
+}
+
+inline constexpr bool CodeKindCanTierUp(CodeKind kind) {
return kind == CodeKind::INTERPRETED_FUNCTION ||
- kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
+ CodeKindIsOptimizedAndCanTierUp(kind);
}
// The optimization marker field on the feedback vector has a dual purpose of
@@ -82,10 +97,32 @@ inline constexpr bool CodeKindChecksOptimizationMarker(CodeKind kind) {
// access from multiple closures. The marker is not used for all code kinds
// though, in particular it is not used when generating NCI code.
inline constexpr bool CodeKindIsStoredInOptimizedCodeCache(CodeKind kind) {
- return kind == CodeKind::OPTIMIZED_FUNCTION;
+ return kind == CodeKind::TURBOFAN || kind == CodeKind::TURBOPROP;
}
-inline CodeKind CodeKindForTopTier() { return CodeKind::OPTIMIZED_FUNCTION; }
+inline OptimizationTier GetTierForCodeKind(CodeKind kind) {
+ if (kind == CodeKind::TURBOFAN) return OptimizationTier::kTopTier;
+ if (kind == CodeKind::TURBOPROP) {
+ return FLAG_turboprop_as_midtier ? OptimizationTier::kMidTier
+ : OptimizationTier::kTopTier;
+ }
+ if (kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT) {
+ return FLAG_turbo_nci_as_midtier ? OptimizationTier::kMidTier
+ : OptimizationTier::kTopTier;
+ }
+ return OptimizationTier::kNone;
+}
+
+inline CodeKind CodeKindForTopTier() {
+ // TODO(turboprop, mythria): We should make FLAG_turboprop mean turboprop is
+ // the mid-tier compiler and replace FLAG_turboprop_as_midtier with
+ // FLAG_turboprop_as_top_tier to tier up to only Turboprop once
+ // FLAG_turboprop_as_midtier is stable and major regressions are addressed.
+ if (V8_UNLIKELY(FLAG_turboprop)) {
+ return FLAG_turboprop_as_midtier ? CodeKind::TURBOFAN : CodeKind::TURBOPROP;
+ }
+ return CodeKind::TURBOFAN;
+}
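
As a reading aid, a sketch of the mapping GetTierForCodeKind implements, with the two FLAG_* globals reduced to plain booleans (simplified names and types, not the patch's exact API):

    enum class Tier { kNone, kMidTier, kTopTier };
    enum class Kind { kInterpreted, kNci, kTurboprop, kTurbofan };

    constexpr Tier TierFor(Kind kind, bool turboprop_as_midtier,
                           bool nci_as_midtier) {
      switch (kind) {
        case Kind::kTurbofan:
          return Tier::kTopTier;  // always the top tier
        case Kind::kTurboprop:
          return turboprop_as_midtier ? Tier::kMidTier : Tier::kTopTier;
        case Kind::kNci:
          return nci_as_midtier ? Tier::kMidTier : Tier::kTopTier;
        default:
          return Tier::kNone;  // unoptimized kinds have no tier
      }
    }

    static_assert(TierFor(Kind::kTurboprop, true, false) == Tier::kMidTier,
                  "Turboprop acts as mid-tier when the flag is set");
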
// The dedicated CodeKindFlag enum represents all code kinds in a format
// suitable for bit sets.
@@ -107,11 +144,11 @@ using CodeKinds = base::Flags<CodeKindFlag>;
DEFINE_OPERATORS_FOR_FLAGS(CodeKinds)
static constexpr CodeKinds kJSFunctionCodeKindsMask{
- CodeKindFlag::INTERPRETED_FUNCTION | CodeKindFlag::OPTIMIZED_FUNCTION |
- CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT};
+ CodeKindFlag::INTERPRETED_FUNCTION | CodeKindFlag::TURBOFAN |
+ CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT | CodeKindFlag::TURBOPROP};
static constexpr CodeKinds kOptimizedJSFunctionCodeKindsMask{
- CodeKindFlag::OPTIMIZED_FUNCTION |
- CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT};
+ CodeKindFlag::TURBOFAN | CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT |
+ CodeKindFlag::TURBOPROP};
} // namespace internal
} // namespace v8
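
A small sketch of the bit-set membership test these masks enable; the bit values are illustrative powers of two, standing in for CodeKindFlag:

    #include <cstdint>

    enum Flag : uint32_t {
      kInterpretedBit = 1u << 0,
      kTurbofanBit = 1u << 1,
      kNciBit = 1u << 2,
      kTurbopropBit = 1u << 3,
    };

    constexpr uint32_t kOptimizedMask =
        kTurbofanBit | kNciBit | kTurbopropBit;

    constexpr bool IsOptimized(uint32_t flag) {
      return (flag & kOptimizedMask) != 0;
    }

    static_assert(IsOptimized(kTurbopropBit), "Turboprop is in the mask");
    static_assert(!IsOptimized(kInterpretedBit), "interpreted code is not");
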
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index c796904718..1004180669 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -31,7 +31,7 @@ namespace v8 {
namespace internal {
Address Code::SafepointTableAddress() const {
- return InstructionStart() + safepoint_table_offset();
+ return MetadataStart() + safepoint_table_offset();
}
int Code::safepoint_table_size() const {
@@ -42,7 +42,7 @@ int Code::safepoint_table_size() const {
bool Code::has_safepoint_table() const { return safepoint_table_size() > 0; }
Address Code::HandlerTableAddress() const {
- return InstructionStart() + handler_table_offset();
+ return MetadataStart() + handler_table_offset();
}
int Code::handler_table_size() const {
@@ -62,14 +62,12 @@ int Code::constant_pool_size() const {
bool Code::has_constant_pool() const { return constant_pool_size() > 0; }
int Code::code_comments_size() const {
- DCHECK_GE(InstructionSize() - code_comments_offset(), 0);
- return InstructionSize() - code_comments_offset();
+ DCHECK_GE(unwinding_info_offset() - code_comments_offset(), 0);
+ return unwinding_info_offset() - code_comments_offset();
}
bool Code::has_code_comments() const { return code_comments_size() > 0; }
-int Code::ExecutableInstructionSize() const { return safepoint_table_offset(); }
-
void Code::ClearEmbeddedObjects(Heap* heap) {
HeapObject undefined = ReadOnlyRoots(heap).undefined_value();
int mode_mask = RelocInfo::EmbeddedObjectModeMask();
@@ -93,17 +91,12 @@ void Code::FlushICache() const {
void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
// Copy code.
+ STATIC_ASSERT(kOnHeapBodyIsContiguous);
CopyBytes(reinterpret_cast<byte*>(raw_instruction_start()), desc.buffer,
static_cast<size_t>(desc.instr_size));
-
- // Copy unwinding info, if any.
- if (desc.unwinding_info) {
- DCHECK_GT(desc.unwinding_info_size, 0);
- set_unwinding_info_size(desc.unwinding_info_size);
- CopyBytes(reinterpret_cast<byte*>(unwinding_info_start()),
- desc.unwinding_info,
- static_cast<size_t>(desc.unwinding_info_size));
- }
+ // TODO(jgruber,v8:11036): Merge with the above.
+ CopyBytes(reinterpret_cast<byte*>(raw_instruction_start() + desc.instr_size),
+ desc.unwinding_info, static_cast<size_t>(desc.unwinding_info_size));
// Copy reloc info.
CopyRelocInfoToByteArray(unchecked_relocation_info(), desc);
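
In sketch form (memcpy standing in for CopyBytes, all names invented), the contiguous-body copy above amounts to writing the two byte ranges back-to-back:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Instructions first, unwinding info directly after them.
    void CopyBody(uint8_t* body, const uint8_t* instr, size_t instr_size,
                  const uint8_t* unwind, size_t unwind_size) {
      std::memcpy(body, instr, instr_size);
      std::memcpy(body + instr_size, unwind, unwind_size);
    }
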
@@ -143,29 +136,60 @@ SafepointEntry Code::GetSafepointEntry(Address pc) {
int Code::OffHeapInstructionSize() const {
DCHECK(is_off_heap_trampoline());
- if (Isolate::CurrentEmbeddedBlobCode() == nullptr)
+ if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
return raw_instruction_size();
+ }
EmbeddedData d = EmbeddedData::FromBlob();
return d.InstructionSizeOfBuiltin(builtin_index());
}
Address Code::OffHeapInstructionStart() const {
DCHECK(is_off_heap_trampoline());
- if (Isolate::CurrentEmbeddedBlobCode() == nullptr)
- return raw_instruction_start();
+ if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
+ return raw_instruction_size();
+ }
EmbeddedData d = EmbeddedData::FromBlob();
return d.InstructionStartOfBuiltin(builtin_index());
}
Address Code::OffHeapInstructionEnd() const {
DCHECK(is_off_heap_trampoline());
- if (Isolate::CurrentEmbeddedBlobCode() == nullptr)
- return raw_instruction_end();
+ if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
+ return raw_instruction_size();
+ }
EmbeddedData d = EmbeddedData::FromBlob();
return d.InstructionStartOfBuiltin(builtin_index()) +
d.InstructionSizeOfBuiltin(builtin_index());
}
+int Code::OffHeapMetadataSize() const {
+ DCHECK(is_off_heap_trampoline());
+ if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
+ return raw_instruction_size();
+ }
+ EmbeddedData d = EmbeddedData::FromBlob();
+ return d.MetadataSizeOfBuiltin(builtin_index());
+}
+
+Address Code::OffHeapMetadataStart() const {
+ DCHECK(is_off_heap_trampoline());
+ if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
+ return raw_instruction_size();
+ }
+ EmbeddedData d = EmbeddedData::FromBlob();
+ return d.MetadataStartOfBuiltin(builtin_index());
+}
+
+Address Code::OffHeapMetadataEnd() const {
+ DCHECK(is_off_heap_trampoline());
+ if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
+ return raw_instruction_size();
+ }
+ EmbeddedData d = EmbeddedData::FromBlob();
+ return d.MetadataStartOfBuiltin(builtin_index()) +
+ d.MetadataSizeOfBuiltin(builtin_index());
+}
+
int AbstractCode::SourcePosition(int offset) {
Object maybe_table = source_position_table();
if (maybe_table.IsException()) return kNoSourcePosition;
@@ -679,8 +703,7 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
}
{
- // Stop before reaching any embedded tables
- int code_size = ExecutableInstructionSize();
+ int code_size = InstructionSize();
os << "Instructions (size = " << code_size << ")\n";
DisassembleCodeRange(isolate, os, *this, InstructionStart(), code_size,
current_pc);
@@ -689,8 +712,8 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
DCHECK_EQ(pool_size & kPointerAlignmentMask, 0);
os << "\nConstant Pool (size = " << pool_size << ")\n";
Vector<char> buf = Vector<char>::New(50);
- intptr_t* ptr = reinterpret_cast<intptr_t*>(InstructionStart() +
- constant_pool_offset());
+ intptr_t* ptr =
+ reinterpret_cast<intptr_t*>(MetadataStart() + constant_pool_offset());
for (int i = 0; i < pool_size; i += kSystemPointerSize, ptr++) {
SNPrintF(buf, "%4d %08" V8PRIxPTR, i, *ptr);
os << static_cast<const void*>(ptr) << " " << buf.begin() << "\n";
@@ -920,6 +943,11 @@ void DependentCode::InstallDependency(Isolate* isolate,
const MaybeObjectHandle& code,
Handle<HeapObject> object,
DependencyGroup group) {
+ if (V8_UNLIKELY(FLAG_trace_code_dependencies)) {
+ StdoutStream{} << "Installing dependency of [" << code->GetHeapObject()
+ << "] on [" << object << "] in group ["
+ << DependencyGroupName(group) << "]\n";
+ }
Handle<DependentCode> old_deps(DependentCode::GetDependentCode(object),
isolate);
Handle<DependentCode> new_deps =
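
By V8's usual convention a FLAG_foo_bar global is toggled with the --foo-bar command-line switch, so the new trace output can presumably be enabled like this (an assumption from the naming convention, not stated in the patch):

    d8 --trace-code-dependencies script.js
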
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index d71a0b1132..201f17773a 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -78,6 +78,131 @@ class Code : public HeapObject {
// cache state, and arguments count.
using Flags = uint32_t;
+ // All Code objects have the following layout:
+ //
+ // +--------------------------+
+ // | header |
+ // | padded to code alignment |
+ // +--------------------------+ <-- raw_body_start()
+ // | instructions | == raw_instruction_start()
+ // | ... |
+ // | padded to meta alignment | see kMetadataAlignment
+ // +--------------------------+ <-- raw_instruction_end()
+ // | metadata | == raw_metadata_start() (MS)
+ // | ... |
+ // | | <-- MS + handler_table_offset()
+ // | | <-- MS + constant_pool_offset()
+ // | | <-- MS + code_comments_offset()
+ // | | <-- MS + unwinding_info_offset()
+ // | padded to obj alignment |
+ // +--------------------------+ <-- raw_metadata_end() == raw_body_end()
+ // | padded to code alignment |
+ // +--------------------------+
+ //
+ // In other words, the variable-size 'body' consists of 'instructions' and
+ // 'metadata'.
+ //
+ // Note the accessor functions below may be prefixed with 'raw'. In this case,
+ // raw accessors (e.g. raw_instruction_start) always refer to the on-heap
+ // Code object, while camel-case accessors (e.g. InstructionStart) may refer
+ // to an off-heap area in the case of embedded builtins.
+ //
+ // Embedded builtins are on-heap Code objects, with an out-of-line body
+ // section. The on-heap Code object contains an essentially empty body
+ // section, while accessors, as mentioned above, redirect to the off-heap
+ // area. Metadata table offsets remain relative to MetadataStart(), i.e. they
+ // point into the off-heap metadata section. The off-heap layout is described
+ // in detail in the EmbeddedData class, but at a high level one can assume a
+ // dedicated, out-of-line, instruction and metadata section for each embedded
+ // builtin *in addition* to the on-heap Code object:
+ //
+ // +--------------------------+ <-- InstructionStart()
+ // | off-heap instructions |
+ // | ... |
+ // +--------------------------+ <-- InstructionEnd()
+ //
+ // +--------------------------+ <-- MetadataStart() (MS)
+ // | off-heap metadata |
+ // | ... | <-- MS + handler_table_offset()
+ // | | <-- MS + constant_pool_offset()
+ // | | <-- MS + code_comments_offset()
+ // | | <-- MS + unwinding_info_offset()
+ // +--------------------------+ <-- MetadataEnd()
+
+ // Constants for use in static asserts, stating whether the body is
+ // contiguous, i.e. whether the instruction and metadata areas are adjacent.
+ static constexpr bool kOnHeapBodyIsContiguous = true;
+ static constexpr bool kOffHeapBodyIsContiguous = false;
+ static constexpr bool kBodyIsContiguous =
+ kOnHeapBodyIsContiguous && kOffHeapBodyIsContiguous;
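
The payoff of the contiguous on-heap layout is that only sizes need to be stored; every boundary is derived. A simplified sketch (invented struct, not the real accessors):

    #include <cstdint>

    using Address = uintptr_t;

    struct BodyLayout {
      Address body_start;
      int instruction_size;  // already padded to kMetadataAlignment
      int metadata_size;

      Address instruction_start() const { return body_start; }
      Address metadata_start() const { return body_start + instruction_size; }
      Address body_end() const { return metadata_start() + metadata_size; }
    };
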
+
+ inline Address raw_body_start() const;
+ inline Address raw_body_end() const;
+ inline int raw_body_size() const;
+
+ inline Address raw_instruction_start() const;
+ inline Address InstructionStart() const;
+ V8_EXPORT_PRIVATE Address OffHeapInstructionStart() const;
+
+ inline Address raw_instruction_end() const;
+ inline Address InstructionEnd() const;
+ V8_EXPORT_PRIVATE Address OffHeapInstructionEnd() const;
+
+ inline int raw_instruction_size() const;
+ inline void set_raw_instruction_size(int value);
+ inline int InstructionSize() const;
+ V8_EXPORT_PRIVATE int OffHeapInstructionSize() const;
+
+ inline Address raw_metadata_start() const;
+ inline Address MetadataStart() const;
+ V8_EXPORT_PRIVATE Address OffHeapMetadataStart() const;
+ inline Address raw_metadata_end() const;
+ inline Address MetadataEnd() const;
+ V8_EXPORT_PRIVATE Address OffHeapMetadataEnd() const;
+ inline int raw_metadata_size() const;
+ inline void set_raw_metadata_size(int value);
+ inline int MetadataSize() const;
+ int OffHeapMetadataSize() const;
+
+ // The metadata section is aligned to this value.
+ static constexpr int kMetadataAlignment = kIntSize;
+
+ // [safepoint_table_offset]: The offset where the safepoint table starts.
+ inline int safepoint_table_offset() const { return 0; }
+ Address SafepointTableAddress() const;
+ int safepoint_table_size() const;
+ bool has_safepoint_table() const;
+
+ // [handler_table_offset]: The offset where the exception handler table
+ // starts.
+ inline int handler_table_offset() const;
+ inline void set_handler_table_offset(int offset);
+ Address HandlerTableAddress() const;
+ int handler_table_size() const;
+ bool has_handler_table() const;
+
+ // [constant_pool offset]: Offset of the constant pool.
+ inline int constant_pool_offset() const;
+ inline void set_constant_pool_offset(int offset);
+ inline Address constant_pool() const;
+ int constant_pool_size() const;
+ bool has_constant_pool() const;
+
+ // [code_comments_offset]: Offset of the code comment section.
+ inline int code_comments_offset() const;
+ inline void set_code_comments_offset(int offset);
+ inline Address code_comments() const;
+ V8_EXPORT_PRIVATE int code_comments_size() const;
+ V8_EXPORT_PRIVATE bool has_code_comments() const;
+
+ // [unwinding_info_offset]: Offset of the unwinding info section.
+ inline int32_t unwinding_info_offset() const;
+ inline void set_unwinding_info_offset(int32_t offset);
+ inline Address unwinding_info_start() const;
+ inline Address unwinding_info_end() const;
+ inline int unwinding_info_size() const;
+ inline bool has_unwinding_info() const;
+
#ifdef ENABLE_DISASSEMBLER
const char* GetName(Isolate* isolate) const;
V8_EXPORT_PRIVATE void Disassemble(const char* name, std::ostream& os,
@@ -85,19 +210,6 @@ class Code : public HeapObject {
Address current_pc = kNullAddress);
#endif
- // [instruction_size]: Size of the native instructions, including embedded
- // data such as the safepoints table.
- inline int raw_instruction_size() const;
- inline void set_raw_instruction_size(int value);
-
- // Returns the size of the native instructions, including embedded
- // data such as the safepoints table. For off-heap code objects
- // this may differ from instruction_size in that this will return the size of
- // the off-heap instruction stream rather than the on-heap trampoline located
- // at instruction_start.
- inline int InstructionSize() const;
- V8_EXPORT_PRIVATE int OffHeapInstructionSize() const;
-
// [relocation_info]: Code relocation information
DECL_ACCESSORS(relocation_info, ByteArray)
@@ -115,7 +227,7 @@ class Code : public HeapObject {
inline ByteArray SourcePositionTable() const;
// [code_data_container]: A container indirection for all mutable fields.
- DECL_ACCESSORS(code_data_container, CodeDataContainer)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(code_data_container, CodeDataContainer)
// [next_code_link]: Link for lists of optimized or deoptimized code.
// Note that this field is stored in the {CodeDataContainer} to be mutable.
@@ -153,8 +265,8 @@ class Code : public HeapObject {
inline void set_can_have_weak_objects(bool value);
// [builtin_index]: For builtins, tells which builtin index the code object
- // has. The builtin index is a non-negative integer for builtins, and -1
- // otherwise.
+ // has. The builtin index is a non-negative integer for builtins, and
+ // Builtins::kNoBuiltinId (-1) otherwise.
inline int builtin_index() const;
inline void set_builtin_index(int id);
inline bool is_builtin() const;
@@ -168,39 +280,6 @@ class Code : public HeapObject {
// reserved in the code prologue.
inline int stack_slots() const;
- // [safepoint_table_offset]: If {has_safepoint_info()}, the offset in the
- // instruction stream where the safepoint table starts.
- inline int safepoint_table_offset() const;
- inline void set_safepoint_table_offset(int offset);
- Address SafepointTableAddress() const;
- int safepoint_table_size() const;
- bool has_safepoint_table() const;
-
- // [handler_table_offset]: The offset in the instruction stream where the
- // exception handler table starts.
- inline int handler_table_offset() const;
- inline void set_handler_table_offset(int offset);
- Address HandlerTableAddress() const;
- int handler_table_size() const;
- bool has_handler_table() const;
-
- // [constant_pool offset]: Offset of the constant pool.
- // Valid for FLAG_enable_embedded_constant_pool only
- inline int constant_pool_offset() const;
- inline void set_constant_pool_offset(int offset);
- int constant_pool_size() const;
- bool has_constant_pool() const;
-
- // [code_comments_offset]: Offset of the code comment section.
- inline int code_comments_offset() const;
- inline void set_code_comments_offset(int offset);
- inline Address code_comments() const;
- V8_EXPORT_PRIVATE int code_comments_size() const;
- V8_EXPORT_PRIVATE bool has_code_comments() const;
-
- // The size of the executable instruction area, without embedded metadata.
- int ExecutableInstructionSize() const;
-
// [marked_for_deoptimization]: If CodeKindCanDeoptimize(kind), tells whether
// the code is going to be deoptimized.
inline bool marked_for_deoptimization() const;
@@ -241,9 +320,6 @@ class Code : public HeapObject {
// this is a trampoline to an off-heap builtin.
inline bool is_off_heap_trampoline() const;
- // [constant_pool]: The constant pool for this function.
- inline Address constant_pool() const;
-
// Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Address pc);
@@ -261,9 +337,8 @@ class Code : public HeapObject {
inline void clear_padding();
// Initialize the flags field. Similar to clear_padding above, this ensures that
// the snapshot content is deterministic.
- inline void initialize_flags(CodeKind kind, bool has_unwinding_info,
- bool is_turbofanned, int stack_slots,
- bool is_off_heap_trampoline);
+ inline void initialize_flags(CodeKind kind, bool is_turbofanned,
+ int stack_slots, bool is_off_heap_trampoline);
// Convert a target address into a code object.
static inline Code GetCodeFromTargetAddress(Address address);
@@ -271,30 +346,8 @@ class Code : public HeapObject {
// Convert an entry address into an object.
static inline Code GetObjectFromEntryAddress(Address location_of_address);
- // Returns the address of the first instruction.
- inline Address raw_instruction_start() const;
-
- // Returns the address of the first instruction. For off-heap code objects
- // this differs from instruction_start (which would point to the off-heap
- // trampoline instead).
- inline Address InstructionStart() const;
- V8_EXPORT_PRIVATE Address OffHeapInstructionStart() const;
-
- // Returns the address right after the last instruction.
- inline Address raw_instruction_end() const;
-
- // Returns the address right after the last instruction. For off-heap code
- // objects this differs from instruction_end (which would point to the
- // off-heap trampoline instead).
- inline Address InstructionEnd() const;
- V8_EXPORT_PRIVATE Address OffHeapInstructionEnd() const;
-
- // Returns the size of the instructions, padding, relocation and unwinding
- // information.
- inline int body_size() const;
-
// Returns the size of code and its metadata. This includes the size of code
- // relocation information, deoptimization data and handler table.
+ // relocation information and deoptimization data.
inline int SizeIncludingMetadata() const;
// Returns the address of the first relocation info (read backwards!).
@@ -303,52 +356,6 @@ class Code : public HeapObject {
// Returns the address right after the relocation info (read backwards!).
inline byte* relocation_end() const;
- // [has_unwinding_info]: Whether this code object has unwinding information.
- // If it doesn't, unwinding_information_start() will point to invalid data.
- //
- // The body of all code objects has the following layout.
- //
- // +--------------------------+ <-- raw_instruction_start()
- // | instructions |
- // | ... |
- // +--------------------------+
- // | embedded metadata | <-- safepoint_table_offset()
- // | ... | <-- handler_table_offset()
- // | | <-- constant_pool_offset()
- // | | <-- code_comments_offset()
- // | |
- // +--------------------------+ <-- raw_instruction_end()
- //
- // If has_unwinding_info() is false, raw_instruction_end() points to the first
- // memory location after the end of the code object. Otherwise, the body
- // continues as follows:
- //
- // +--------------------------+
- // | padding to the next |
- // | 8-byte aligned address |
- // +--------------------------+ <-- raw_instruction_end()
- // | [unwinding_info_size] |
- // | as uint64_t |
- // +--------------------------+ <-- unwinding_info_start()
- // | unwinding info |
- // | ... |
- // +--------------------------+ <-- unwinding_info_end()
- //
- // and unwinding_info_end() points to the first memory location after the end
- // of the code object.
- //
- inline bool has_unwinding_info() const;
-
- // [unwinding_info_size]: Size of the unwinding information.
- inline int unwinding_info_size() const;
- inline void set_unwinding_info_size(int value);
-
- // Returns the address of the unwinding information, if any.
- inline Address unwinding_info_start() const;
-
- // Returns the address right after the end of the unwinding information.
- inline Address unwinding_info_end() const;
-
// Code entry point.
inline Address entry() const;
@@ -373,14 +380,9 @@ class Code : public HeapObject {
// Returns the object size for a given body (used for allocation).
static int SizeFor(int body_size) {
- DCHECK_SIZE_TAG_ALIGNED(body_size);
return RoundUp(kHeaderSize + body_size, kCodeAlignment);
}
- // Calculate the size of the code object to report for log events. This takes
- // the layout of the code object into account.
- inline int ExecutableSize() const;
-
DECL_CAST(Code)
// Dispatched behavior.
@@ -414,28 +416,30 @@ class Code : public HeapObject {
class OptimizedCodeIterator;
// Layout description.
-#define CODE_FIELDS(V) \
- V(kRelocationInfoOffset, kTaggedSize) \
- V(kDeoptimizationDataOffset, kTaggedSize) \
- V(kSourcePositionTableOffset, kTaggedSize) \
- V(kCodeDataContainerOffset, kTaggedSize) \
- /* Data or code not directly visited by GC directly starts here. */ \
- /* The serializer needs to copy bytes starting from here verbatim. */ \
- /* Objects embedded into code is visited via reloc info. */ \
- V(kDataStart, 0) \
- V(kInstructionSizeOffset, kIntSize) \
- V(kFlagsOffset, kInt32Size) \
- V(kSafepointTableOffsetOffset, kIntSize) \
- V(kHandlerTableOffsetOffset, kIntSize) \
- V(kConstantPoolOffsetOffset, \
- FLAG_enable_embedded_constant_pool ? kIntSize : 0) \
- V(kCodeCommentsOffsetOffset, kIntSize) \
- V(kBuiltinIndexOffset, kIntSize) \
- V(kInlinedBytecodeSizeOffset, kIntSize) \
- V(kUnalignedHeaderSize, 0) \
- /* Add padding to align the instruction start following right after */ \
- /* the Code object header. */ \
- V(kOptionalPaddingOffset, CODE_POINTER_PADDING(kOptionalPaddingOffset)) \
+#define CODE_FIELDS(V) \
+ V(kRelocationInfoOffset, kTaggedSize) \
+ V(kDeoptimizationDataOffset, kTaggedSize) \
+ V(kSourcePositionTableOffset, kTaggedSize) \
+ V(kCodeDataContainerOffset, kTaggedSize) \
+ /* Data or code not directly visited by GC starts here. */ \
+ /* The serializer needs to copy bytes starting from here verbatim. */ \
+ /* Objects embedded into code are visited via reloc info. */ \
+ V(kDataStart, 0) \
+ V(kInstructionSizeOffset, kIntSize) \
+ V(kMetadataSizeOffset, kIntSize) \
+ V(kFlagsOffset, kInt32Size) \
+ V(kBuiltinIndexOffset, kIntSize) \
+ V(kInlinedBytecodeSizeOffset, kIntSize) \
+ /* Offsets describing inline metadata tables, relative to MetadataStart. */ \
+ V(kHandlerTableOffsetOffset, kIntSize) \
+ V(kConstantPoolOffsetOffset, \
+ FLAG_enable_embedded_constant_pool ? kIntSize : 0) \
+ V(kCodeCommentsOffsetOffset, kIntSize) \
+ V(kUnwindingInfoOffsetOffset, kInt32Size) \
+ V(kUnalignedHeaderSize, 0) \
+ /* Add padding to align the instruction start following right after */ \
+ /* the Code object header. */ \
+ V(kOptionalPaddingOffset, CODE_POINTER_PADDING(kOptionalPaddingOffset)) \
V(kHeaderSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, CODE_FIELDS)
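
For orientation, DEFINE_FIELD_OFFSET_CONSTANTS turns the list above into offsets that accumulate in declaration order. A hand-rolled sketch of the first few fields (sizes assume a 64-bit build without pointer compression; the header base is taken as 0 for brevity):

    constexpr int kTaggedSize = 8;
    constexpr int kIntSize = 4;

    constexpr int kRelocationInfoOffset = 0;
    constexpr int kDeoptimizationDataOffset =
        kRelocationInfoOffset + kTaggedSize;
    constexpr int kSourcePositionTableOffset =
        kDeoptimizationDataOffset + kTaggedSize;
    constexpr int kCodeDataContainerOffset =
        kSourcePositionTableOffset + kTaggedSize;
    constexpr int kDataStart = kCodeDataContainerOffset + kTaggedSize;
    constexpr int kInstructionSizeOffset = kDataStart;  // zero-size marker
    constexpr int kMetadataSizeOffset = kInstructionSizeOffset + kIntSize;
    // ... and so on through kHeaderSize.
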
@@ -444,35 +448,32 @@ class Code : public HeapObject {
// This documents the amount of free space we have in each Code object header
// due to padding for code alignment.
#if V8_TARGET_ARCH_ARM64
- static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 16 : 28;
+ static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 24;
#elif V8_TARGET_ARCH_MIPS64
- static constexpr int kHeaderPaddingSize = 28;
+ static constexpr int kHeaderPaddingSize = 24;
#elif V8_TARGET_ARCH_X64
- static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 16 : 28;
+ static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 24;
#elif V8_TARGET_ARCH_ARM
- static constexpr int kHeaderPaddingSize = 16;
+ static constexpr int kHeaderPaddingSize = 12;
#elif V8_TARGET_ARCH_IA32
- static constexpr int kHeaderPaddingSize = 16;
+ static constexpr int kHeaderPaddingSize = 12;
#elif V8_TARGET_ARCH_MIPS
- static constexpr int kHeaderPaddingSize = 16;
+ static constexpr int kHeaderPaddingSize = 12;
#elif V8_TARGET_ARCH_PPC64
static constexpr int kHeaderPaddingSize =
- FLAG_enable_embedded_constant_pool ? (COMPRESS_POINTERS_BOOL ? 12 : 24)
- : (COMPRESS_POINTERS_BOOL ? 16 : 28);
+ FLAG_enable_embedded_constant_pool ? (COMPRESS_POINTERS_BOOL ? 8 : 20)
+ : (COMPRESS_POINTERS_BOOL ? 12 : 24);
#elif V8_TARGET_ARCH_S390X
- static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 16 : 28;
+ static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 24;
#else
#error Unknown architecture.
#endif
STATIC_ASSERT(FIELD_SIZE(kOptionalPaddingOffset) == kHeaderPaddingSize);
- inline int GetUnwindingInfoSizeOffset() const;
-
class BodyDescriptor;
// Flags layout. base::BitField<type, shift, size>.
#define CODE_FLAGS_BIT_FIELDS(V, _) \
- V(HasUnwindingInfoField, bool, 1, _) \
V(KindField, CodeKind, 4, _) \
V(IsTurbofannedField, bool, 1, _) \
V(StackSlotsField, int, 24, _) \
@@ -480,7 +481,7 @@ class Code : public HeapObject {
DEFINE_BIT_FIELDS(CODE_FLAGS_BIT_FIELDS)
#undef CODE_FLAGS_BIT_FIELDS
STATIC_ASSERT(kCodeKindCount <= KindField::kNumValues);
- STATIC_ASSERT(CODE_FLAGS_BIT_FIELDS_Ranges::kBitsCount == 31);
+ STATIC_ASSERT(CODE_FLAGS_BIT_FIELDS_Ranges::kBitsCount == 30);
STATIC_ASSERT(CODE_FLAGS_BIT_FIELDS_Ranges::kBitsCount <=
FIELD_SIZE(kFlagsOffset) * kBitsPerByte);
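
A sketch of the base::BitField-style packing behind these fields: with the unwinding-info bit gone, kind (4) + is_turbofanned (1) + stack_slots (24) plus the remaining 1-bit field total the 30 bits asserted above (shifts illustrative):

    #include <cstdint>

    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static constexpr T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> kShift);
      }
    };

    using KindField = BitField<int, 0, 4>;
    using IsTurbofannedField = BitField<bool, 4, 1>;
    using StackSlotsField = BitField<int, 5, 24>;

    static_assert(StackSlotsField::decode(StackSlotsField::encode(7)) == 7,
                  "round-trips through the packed word");
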
@@ -520,6 +521,8 @@ class Code : public HeapObject {
class Code::OptimizedCodeIterator {
public:
explicit OptimizedCodeIterator(Isolate* isolate);
+ OptimizedCodeIterator(const OptimizedCodeIterator&) = delete;
+ OptimizedCodeIterator& operator=(const OptimizedCodeIterator&) = delete;
Code Next();
private:
@@ -528,7 +531,6 @@ class Code::OptimizedCodeIterator {
Isolate* isolate_;
DISALLOW_HEAP_ALLOCATION(no_gc)
- DISALLOW_COPY_AND_ASSIGN(OptimizedCodeIterator);
};
class AbstractCode : public HeapObject {
@@ -578,10 +580,6 @@ class AbstractCode : public HeapObject {
// Returns the kind of the code.
inline CodeKind kind();
- // Calculate the size of the code object to report for log events. This takes
- // the layout of the code object into account.
- inline int ExecutableSize();
-
DECL_CAST(AbstractCode)
inline Code GetCode();
inline BytecodeArray GetBytecodeArray();
@@ -644,7 +642,7 @@ class DependentCode : public WeakFixedArray {
kAllocationSiteTransitionChangedGroup
};
- // Register a code dependency of {cell} on {object}.
+ // Register a dependency of {code} on {object}, of the kind given by {group}.
V8_EXPORT_PRIVATE static void InstallDependency(Isolate* isolate,
const MaybeObjectHandle& code,
Handle<HeapObject> object,
@@ -774,7 +772,7 @@ class BytecodeArray : public FixedArrayBase {
// * ByteArray (when source positions have been collected for the bytecode)
// * exception (when an error occurred while explicitly collecting source
// positions for pre-existing bytecode).
- DECL_SYNCHRONIZED_ACCESSORS(source_position_table, Object)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(source_position_table, Object)
inline bool HasSourcePositionTable() const;
inline bool DidSourcePositionGenerationFail() const;
diff --git a/deps/v8/src/objects/compilation-cache-inl.h b/deps/v8/src/objects/compilation-cache-table-inl.h
index 324b40f7ea..473eed496c 100644
--- a/deps/v8/src/objects/compilation-cache-inl.h
+++ b/deps/v8/src/objects/compilation-cache-table-inl.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OBJECTS_COMPILATION_CACHE_INL_H_
-#define V8_OBJECTS_COMPILATION_CACHE_INL_H_
-
-#include "src/objects/compilation-cache.h"
+#ifndef V8_OBJECTS_COMPILATION_CACHE_TABLE_INL_H_
+#define V8_OBJECTS_COMPILATION_CACHE_TABLE_INL_H_
+#include "src/objects/compilation-cache-table.h"
#include "src/objects/name-inl.h"
#include "src/objects/script-inl.h"
#include "src/objects/shared-function-info.h"
@@ -93,4 +92,4 @@ InfoCellPair::InfoCellPair(Isolate* isolate, SharedFunctionInfo shared,
#include "src/objects/object-macros-undef.h"
-#endif // V8_OBJECTS_COMPILATION_CACHE_INL_H_
+#endif // V8_OBJECTS_COMPILATION_CACHE_TABLE_INL_H_
diff --git a/deps/v8/src/objects/compilation-cache-table.cc b/deps/v8/src/objects/compilation-cache-table.cc
new file mode 100644
index 0000000000..57cbeb040c
--- /dev/null
+++ b/deps/v8/src/objects/compilation-cache-table.cc
@@ -0,0 +1,447 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/compilation-cache-table.h"
+
+#include "src/objects/compilation-cache-table-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+const int kLiteralEntryLength = 2;
+const int kLiteralInitialLength = 2;
+const int kLiteralContextOffset = 0;
+const int kLiteralLiteralsOffset = 1;
+
+// The initial placeholder insertion of the eval cache survives this many GCs.
+const int kHashGenerations = 10;
+
+int SearchLiteralsMapEntry(CompilationCacheTable cache, int cache_entry,
+ Context native_context) {
+ DisallowHeapAllocation no_gc;
+ DCHECK(native_context.IsNativeContext());
+ Object obj = cache.get(cache_entry);
+
+ // Check that there's no confusion between FixedArray and WeakFixedArray (the
+ // object used to be a FixedArray here).
+ DCHECK(!obj.IsFixedArray());
+ if (obj.IsWeakFixedArray()) {
+ WeakFixedArray literals_map = WeakFixedArray::cast(obj);
+ int length = literals_map.length();
+ for (int i = 0; i < length; i += kLiteralEntryLength) {
+ DCHECK(literals_map.Get(i + kLiteralContextOffset)->IsWeakOrCleared());
+ if (literals_map.Get(i + kLiteralContextOffset) ==
+ HeapObjectReference::Weak(native_context)) {
+ return i;
+ }
+ }
+ }
+ return -1;
+}
+
+void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
+ Handle<Context> native_context,
+ Handle<FeedbackCell> feedback_cell) {
+ Isolate* isolate = native_context->GetIsolate();
+ DCHECK(native_context->IsNativeContext());
+ STATIC_ASSERT(kLiteralEntryLength == 2);
+ Handle<WeakFixedArray> new_literals_map;
+ int entry;
+
+ Object obj = cache->get(cache_entry);
+
+ // Check that there's no confusion between FixedArray and WeakFixedArray (the
+ // object used to be a FixedArray here).
+ DCHECK(!obj.IsFixedArray());
+ if (!obj.IsWeakFixedArray() || WeakFixedArray::cast(obj).length() == 0) {
+ new_literals_map = isolate->factory()->NewWeakFixedArray(
+ kLiteralInitialLength, AllocationType::kOld);
+ entry = 0;
+ } else {
+ Handle<WeakFixedArray> old_literals_map(WeakFixedArray::cast(obj), isolate);
+ entry = SearchLiteralsMapEntry(*cache, cache_entry, *native_context);
+ if (entry >= 0) {
+ // Just set the code of the entry.
+ old_literals_map->Set(entry + kLiteralLiteralsOffset,
+ HeapObjectReference::Weak(*feedback_cell));
+ return;
+ }
+
+ // Can we reuse an entry?
+ DCHECK_LT(entry, 0);
+ int length = old_literals_map->length();
+ for (int i = 0; i < length; i += kLiteralEntryLength) {
+ if (old_literals_map->Get(i + kLiteralContextOffset)->IsCleared()) {
+ new_literals_map = old_literals_map;
+ entry = i;
+ break;
+ }
+ }
+
+ if (entry < 0) {
+ // Copy the old literals map and append one new entry.
+ new_literals_map = isolate->factory()->CopyWeakFixedArrayAndGrow(
+ old_literals_map, kLiteralEntryLength);
+ entry = old_literals_map->length();
+ }
+ }
+
+ new_literals_map->Set(entry + kLiteralContextOffset,
+ HeapObjectReference::Weak(*native_context));
+ new_literals_map->Set(entry + kLiteralLiteralsOffset,
+ HeapObjectReference::Weak(*feedback_cell));
+
+#ifdef DEBUG
+ for (int i = 0; i < new_literals_map->length(); i += kLiteralEntryLength) {
+ MaybeObject object = new_literals_map->Get(i + kLiteralContextOffset);
+ DCHECK(object->IsCleared() ||
+ object->GetHeapObjectAssumeWeak().IsNativeContext());
+ object = new_literals_map->Get(i + kLiteralLiteralsOffset);
+ DCHECK(object->IsCleared() ||
+ object->GetHeapObjectAssumeWeak().IsFeedbackCell());
+ }
+#endif
+
+ Object old_literals_map = cache->get(cache_entry);
+ if (old_literals_map != *new_literals_map) {
+ cache->set(cache_entry, *new_literals_map);
+ }
+}
+
+FeedbackCell SearchLiteralsMap(CompilationCacheTable cache, int cache_entry,
+ Context native_context) {
+ FeedbackCell result;
+ int entry = SearchLiteralsMapEntry(cache, cache_entry, native_context);
+ if (entry >= 0) {
+ WeakFixedArray literals_map = WeakFixedArray::cast(cache.get(cache_entry));
+ DCHECK_LE(entry + kLiteralEntryLength, literals_map.length());
+ MaybeObject object = literals_map.Get(entry + kLiteralLiteralsOffset);
+
+ if (!object->IsCleared()) {
+ result = FeedbackCell::cast(object->GetHeapObjectAssumeWeak());
+ }
+ }
+ DCHECK(result.is_null() || result.IsFeedbackCell());
+ return result;
+}
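
The literals map scanned above is a flat weak array with a stride of two: even slots hold a weak native-context reference, odd slots the matching feedback cell. A sketch with raw words standing in for MaybeObject (illustrative only):

    #include <cstdint>

    constexpr int kEntryLength = 2;
    constexpr int kContextOffset = 0;

    int SearchEntry(const uintptr_t* map, int length, uintptr_t weak_context) {
      for (int i = 0; i < length; i += kEntryLength) {
        if (map[i + kContextOffset] == weak_context) return i;
      }
      return -1;  // mirrors SearchLiteralsMapEntry's not-found result
    }
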
+
+// StringSharedKeys are used as keys in the eval cache.
+class StringSharedKey : public HashTableKey {
+ public:
+ // This tuple unambiguously identifies calls to eval() or
+ // CreateDynamicFunction() (such as through the Function() constructor).
+ // * source is the string passed into eval(). For dynamic functions, this is
+ // the effective source for the function, some of which is implicitly
+ // generated.
+ // * shared is the shared function info for the function containing the call
+ // to eval(). For dynamic functions, shared is the native context closure.
+ // * When positive, position is the position in the source where eval is
+ // called. When negative, position is the negation of the position in the
+ // dynamic function's effective source where the ')' ends the parameters.
+ StringSharedKey(Handle<String> source, Handle<SharedFunctionInfo> shared,
+ LanguageMode language_mode, int position)
+ : HashTableKey(CompilationCacheShape::StringSharedHash(
+ *source, *shared, language_mode, position)),
+ source_(source),
+ shared_(shared),
+ language_mode_(language_mode),
+ position_(position) {}
+
+ bool IsMatch(Object other) override {
+ DisallowHeapAllocation no_allocation;
+ if (!other.IsFixedArray()) {
+ DCHECK(other.IsNumber());
+ uint32_t other_hash = static_cast<uint32_t>(other.Number());
+ return Hash() == other_hash;
+ }
+ FixedArray other_array = FixedArray::cast(other);
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(other_array.get(0));
+ if (shared != *shared_) return false;
+ int language_unchecked = Smi::ToInt(other_array.get(2));
+ DCHECK(is_valid_language_mode(language_unchecked));
+ LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
+ if (language_mode != language_mode_) return false;
+ int position = Smi::ToInt(other_array.get(3));
+ if (position != position_) return false;
+ String source = String::cast(other_array.get(1));
+ return source.Equals(*source_);
+ }
+
+ Handle<Object> AsHandle(Isolate* isolate) {
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(4);
+ array->set(0, *shared_);
+ array->set(1, *source_);
+ array->set(2, Smi::FromEnum(language_mode_));
+ array->set(3, Smi::FromInt(position_));
+ array->set_map(ReadOnlyRoots(isolate).fixed_cow_array_map());
+ return array;
+ }
+
+ private:
+ Handle<String> source_;
+ Handle<SharedFunctionInfo> shared_;
+ LanguageMode language_mode_;
+ int position_;
+};
+
+// RegExpKey carries the source and flags of a regular expression as key.
+class RegExpKey : public HashTableKey {
+ public:
+ RegExpKey(Handle<String> string, JSRegExp::Flags flags)
+ : HashTableKey(
+ CompilationCacheShape::RegExpHash(*string, Smi::FromInt(flags))),
+ string_(string),
+ flags_(Smi::FromInt(flags)) {}
+
+ // Rather than storing the key in the hash table, a pointer to the
+ // stored value is stored where the key should be. IsMatch then
+ // compares the search key to the found object, rather than comparing
+ // a key to a key.
+ bool IsMatch(Object obj) override {
+ FixedArray val = FixedArray::cast(obj);
+ return string_->Equals(String::cast(val.get(JSRegExp::kSourceIndex))) &&
+ (flags_ == val.get(JSRegExp::kFlagsIndex));
+ }
+
+ Handle<String> string_;
+ Smi flags_;
+};
+
+// CodeKey carries the SharedFunctionInfo key associated with a Code
+// object value.
+class CodeKey : public HashTableKey {
+ public:
+ explicit CodeKey(Handle<SharedFunctionInfo> key)
+ : HashTableKey(key->Hash()), key_(key) {}
+
+ bool IsMatch(Object string) override { return *key_ == string; }
+
+ Handle<SharedFunctionInfo> key_;
+};
+
+} // namespace
+
+MaybeHandle<SharedFunctionInfo> CompilationCacheTable::LookupScript(
+ Handle<CompilationCacheTable> table, Handle<String> src,
+ Handle<Context> native_context, LanguageMode language_mode) {
+ // We use the empty function SFI as part of the key. Although the
+ // empty_function is native context dependent, the SFI is de-duped on
+ // snapshot builds by the StartupObjectCache, and so this does not prevent
+ // reuse of scripts in the compilation cache across native contexts.
+ Handle<SharedFunctionInfo> shared(native_context->empty_function().shared(),
+ native_context->GetIsolate());
+ Isolate* isolate = native_context->GetIsolate();
+ src = String::Flatten(isolate, src);
+ StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
+ InternalIndex entry = table->FindEntry(isolate, &key);
+ if (entry.is_not_found()) return MaybeHandle<SharedFunctionInfo>();
+ int index = EntryToIndex(entry);
+ if (!table->get(index).IsFixedArray()) {
+ return MaybeHandle<SharedFunctionInfo>();
+ }
+ Object obj = table->get(index + 1);
+ if (obj.IsSharedFunctionInfo()) {
+ return handle(SharedFunctionInfo::cast(obj), native_context->GetIsolate());
+ }
+ return MaybeHandle<SharedFunctionInfo>();
+}
+
+InfoCellPair CompilationCacheTable::LookupEval(
+ Handle<CompilationCacheTable> table, Handle<String> src,
+ Handle<SharedFunctionInfo> outer_info, Handle<Context> native_context,
+ LanguageMode language_mode, int position) {
+ InfoCellPair empty_result;
+ Isolate* isolate = native_context->GetIsolate();
+ src = String::Flatten(isolate, src);
+
+ StringSharedKey key(src, outer_info, language_mode, position);
+ InternalIndex entry = table->FindEntry(isolate, &key);
+ if (entry.is_not_found()) return empty_result;
+
+ int index = EntryToIndex(entry);
+ if (!table->get(index).IsFixedArray()) return empty_result;
+ Object obj = table->get(index + 1);
+ if (!obj.IsSharedFunctionInfo()) return empty_result;
+
+ STATIC_ASSERT(CompilationCacheShape::kEntrySize == 3);
+ FeedbackCell feedback_cell =
+ SearchLiteralsMap(*table, index + 2, *native_context);
+ return InfoCellPair(isolate, SharedFunctionInfo::cast(obj), feedback_cell);
+}
+
+Handle<Object> CompilationCacheTable::LookupRegExp(Handle<String> src,
+ JSRegExp::Flags flags) {
+ Isolate* isolate = GetIsolate();
+ DisallowHeapAllocation no_allocation;
+ RegExpKey key(src, flags);
+ InternalIndex entry = FindEntry(isolate, &key);
+ if (entry.is_not_found()) return isolate->factory()->undefined_value();
+ return Handle<Object>(get(EntryToIndex(entry) + 1), isolate);
+}
+
+MaybeHandle<Code> CompilationCacheTable::LookupCode(
+ Handle<SharedFunctionInfo> key) {
+ Isolate* isolate = GetIsolate();
+ DisallowHeapAllocation no_allocation;
+ CodeKey k(key);
+ InternalIndex entry = FindEntry(isolate, &k);
+ if (entry.is_not_found()) return {};
+ return Handle<Code>(Code::cast(get(EntryToIndex(entry) + 1)), isolate);
+}
+
+Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
+ Handle<CompilationCacheTable> cache, Handle<String> src,
+ Handle<Context> native_context, LanguageMode language_mode,
+ Handle<SharedFunctionInfo> value) {
+ Isolate* isolate = native_context->GetIsolate();
+ // We use the empty function SFI as part of the key. Although the
+ // empty_function is native context dependent, the SFI is de-duped on
+ // snapshot builds by the StartupObjectCache, and so this does not prevent
+ // reuse of scripts in the compilation cache across native contexts.
+ Handle<SharedFunctionInfo> shared(native_context->empty_function().shared(),
+ isolate);
+ src = String::Flatten(isolate, src);
+ StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
+ Handle<Object> k = key.AsHandle(isolate);
+ cache = EnsureCapacity(isolate, cache);
+ InternalIndex entry = cache->FindInsertionEntry(isolate, key.Hash());
+ cache->set(EntryToIndex(entry), *k);
+ cache->set(EntryToIndex(entry) + 1, *value);
+ cache->ElementAdded();
+ return cache;
+}
+
+Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
+ Handle<CompilationCacheTable> cache, Handle<String> src,
+ Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
+ Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
+ int position) {
+ Isolate* isolate = native_context->GetIsolate();
+ src = String::Flatten(isolate, src);
+ StringSharedKey key(src, outer_info, value->language_mode(), position);
+
+ // This block handles 'real' insertions, i.e. the initial dummy insert
+ // (below) has already happened earlier.
+ {
+ Handle<Object> k = key.AsHandle(isolate);
+ InternalIndex entry = cache->FindEntry(isolate, &key);
+ if (entry.is_found()) {
+ cache->set(EntryToIndex(entry), *k);
+ cache->set(EntryToIndex(entry) + 1, *value);
+ // AddToFeedbackCellsMap may allocate a new sub-array to live in the
+ // entry, but it won't change the cache array. Therefore EntryToIndex
+ // and entry remain correct.
+ STATIC_ASSERT(CompilationCacheShape::kEntrySize == 3);
+ AddToFeedbackCellsMap(cache, EntryToIndex(entry) + 2, native_context,
+ feedback_cell);
+ // Add hash again even on cache hit to avoid unnecessary cache delay in
+ // case of hash collisions.
+ }
+ }
+
+ // Create a dummy entry to mark that this key has already been inserted once.
+ cache = EnsureCapacity(isolate, cache);
+ InternalIndex entry = cache->FindInsertionEntry(isolate, key.Hash());
+ Handle<Object> k =
+ isolate->factory()->NewNumber(static_cast<double>(key.Hash()));
+ cache->set(EntryToIndex(entry), *k);
+ cache->set(EntryToIndex(entry) + 1, Smi::FromInt(kHashGenerations));
+ cache->ElementAdded();
+ return cache;
+}
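
The two-probe protocol above can be summarized with a plain map in place of the real hash table (a simplified sketch of the idea, not the actual layout): the first Put records only the key's hash, and a value is cached on the second Put, provided Age() has not retired the placeholder in between.

    #include <cstdint>
    #include <string>
    #include <unordered_map>

    struct EvalCacheSketch {
      static constexpr int kHashGenerations = 10;
      std::unordered_map<uint32_t, int> placeholders;  // hash -> remaining age
      std::unordered_map<uint32_t, std::string> values;

      void Put(uint32_t hash, const std::string& value) {
        if (placeholders.erase(hash) > 0) {
          values[hash] = value;  // second probe: cache for real
        } else if (values.count(hash) == 0) {
          placeholders[hash] = kHashGenerations;  // first probe: placeholder
        }
      }

      void Age() {  // decrement placeholder lifetimes; retire expired ones
        for (auto it = placeholders.begin(); it != placeholders.end();) {
          if (--it->second == 0) {
            it = placeholders.erase(it);
          } else {
            ++it;
          }
        }
      }
    };
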
+
+Handle<CompilationCacheTable> CompilationCacheTable::PutRegExp(
+ Isolate* isolate, Handle<CompilationCacheTable> cache, Handle<String> src,
+ JSRegExp::Flags flags, Handle<FixedArray> value) {
+ RegExpKey key(src, flags);
+ cache = EnsureCapacity(isolate, cache);
+ InternalIndex entry = cache->FindInsertionEntry(isolate, key.Hash());
+ // We store the value in the key slot, and compare the search key
+ // to the stored value with a custom IsMatch function during lookups.
+ cache->set(EntryToIndex(entry), *value);
+ cache->set(EntryToIndex(entry) + 1, *value);
+ cache->ElementAdded();
+ return cache;
+}
+
+Handle<CompilationCacheTable> CompilationCacheTable::PutCode(
+ Isolate* isolate, Handle<CompilationCacheTable> cache,
+ Handle<SharedFunctionInfo> key, Handle<Code> value) {
+ CodeKey k(key);
+
+ {
+ InternalIndex entry = cache->FindEntry(isolate, &k);
+ if (entry.is_found()) {
+ // Update.
+ cache->set(EntryToIndex(entry), *key);
+ cache->set(EntryToIndex(entry) + 1, *value);
+ return cache;
+ }
+ }
+
+ // Insert.
+ cache = EnsureCapacity(isolate, cache);
+ InternalIndex entry = cache->FindInsertionEntry(isolate, k.Hash());
+ cache->set(EntryToIndex(entry), *key);
+ cache->set(EntryToIndex(entry) + 1, *value);
+ cache->ElementAdded();
+ return cache;
+}
+
+void CompilationCacheTable::Age() {
+ DisallowHeapAllocation no_allocation;
+ for (InternalIndex entry : IterateEntries()) {
+ const int entry_index = EntryToIndex(entry);
+ const int value_index = entry_index + 1;
+
+ Object key = get(entry_index);
+ if (key.IsNumber()) {
+ // The ageing mechanism for the initial dummy entry in the eval cache.
+ // The 'key' is the hash represented as a Number. The 'value' is a smi
+ // counting down from kHashGenerations. On reaching zero, the entry is
+ // cleared.
+ // Note: The following static assert only establishes an explicit
+ // connection between initialization- and use-sites of the smi value
+ // field.
+ STATIC_ASSERT(kHashGenerations);
+ const int new_count = Smi::ToInt(get(value_index)) - 1;
+ if (new_count == 0) {
+ RemoveEntry(entry_index);
+ } else {
+ DCHECK_GT(new_count, 0);
+ NoWriteBarrierSet(*this, value_index, Smi::FromInt(new_count));
+ }
+ } else if (key.IsFixedArray()) {
+ // The ageing mechanism for script and eval caches.
+ SharedFunctionInfo info = SharedFunctionInfo::cast(get(value_index));
+ if (info.IsInterpreted() && info.GetBytecodeArray().IsOld()) {
+ RemoveEntry(entry_index);
+ }
+ }
+ }
+}
+
+void CompilationCacheTable::Remove(Object value) {
+ DisallowHeapAllocation no_allocation;
+ for (InternalIndex entry : IterateEntries()) {
+ int entry_index = EntryToIndex(entry);
+ int value_index = entry_index + 1;
+ if (get(value_index) == value) {
+ RemoveEntry(entry_index);
+ }
+ }
+}
+
+void CompilationCacheTable::RemoveEntry(int entry_index) {
+ Object the_hole_value = GetReadOnlyRoots().the_hole_value();
+ for (int i = 0; i < kEntrySize; i++) {
+ NoWriteBarrierSet(*this, entry_index + i, the_hole_value);
+ }
+ ElementRemoved();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/compilation-cache.h b/deps/v8/src/objects/compilation-cache-table.h
index d2665513d2..b624767b8a 100644
--- a/deps/v8/src/objects/compilation-cache.h
+++ b/deps/v8/src/objects/compilation-cache-table.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OBJECTS_COMPILATION_CACHE_H_
-#define V8_OBJECTS_COMPILATION_CACHE_H_
+#ifndef V8_OBJECTS_COMPILATION_CACHE_TABLE_H_
+#define V8_OBJECTS_COMPILATION_CACHE_TABLE_H_
#include "src/objects/feedback-cell.h"
#include "src/objects/hash-table.h"
@@ -37,6 +37,10 @@ class CompilationCacheShape : public BaseShape<HashTableKey*> {
static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
static const int kPrefixSize = 0;
+ // An 'entry' is essentially a grouped collection of slots. Entries are used
+ // in various ways by the different caches; most store the actual key in the
+ // first entry slot, but it may also be used differently.
+ // Why 3 slots? Because the eval cache needs a third slot per entry for
+ // its feedback-cell map (LookupEval/PutEval access index + 2).
static const int kEntrySize = 3;
static const bool kMatchNeedsHoleCheck = true;
};
@@ -74,59 +78,63 @@ class InfoCellPair {
EXTERN_DECLARE_HASH_TABLE(CompilationCacheTable, CompilationCacheShape)
-// This cache is used in multiple different variants.
-//
-// For regexp caching, it simply maps identifying info of the regexp
-// to the cached regexp object.
-//
-// Scripts and eval code only gets cached after a second probe for the
-// code object. To do so, on first "put" only a hash identifying the
-// source is entered into the cache, mapping it to a lifetime count of
-// the hash. On each call to Age all such lifetimes get reduced, and
-// removed once they reach zero. If a second put is called while such
-// a hash is live in the cache, the hash gets replaced by an actual
-// cache entry. Age also removes stale live entries from the cache.
-// Such entries are identified by SharedFunctionInfos pointing to
-// either the recompilation stub, or to "old" code. This avoids memory
-// leaks due to premature caching of scripts and eval strings that are
-// never needed later.
class CompilationCacheTable
: public HashTable<CompilationCacheTable, CompilationCacheShape> {
public:
NEVER_READ_ONLY_SPACE
+
+ // The 'script' cache contains SharedFunctionInfos.
static MaybeHandle<SharedFunctionInfo> LookupScript(
Handle<CompilationCacheTable> table, Handle<String> src,
Handle<Context> native_context, LanguageMode language_mode);
+ static Handle<CompilationCacheTable> PutScript(
+ Handle<CompilationCacheTable> cache, Handle<String> src,
+ Handle<Context> native_context, LanguageMode language_mode,
+ Handle<SharedFunctionInfo> value);
+
+ // Eval code only gets cached after a second probe for the
+ // code object. To do so, on first "put" only a hash identifying the
+ // source is entered into the cache, mapping it to a lifetime count of
+ // the hash. On each call to Age all such lifetimes get reduced, and
+ // removed once they reach zero. If a second put is called while such
+ // a hash is live in the cache, the hash gets replaced by an actual
+ // cache entry. Age also removes stale live entries from the cache.
+ // Such entries are identified by SharedFunctionInfos pointing to
+ // either the recompilation stub, or to "old" code. This avoids memory
+ // leaks due to premature caching of eval strings that are
+ // never needed later.
static InfoCellPair LookupEval(Handle<CompilationCacheTable> table,
Handle<String> src,
Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
LanguageMode language_mode, int position);
- Handle<Object> LookupRegExp(Handle<String> source, JSRegExp::Flags flags);
- MaybeHandle<Code> LookupCode(Handle<SharedFunctionInfo> key);
-
- static Handle<CompilationCacheTable> PutScript(
- Handle<CompilationCacheTable> cache, Handle<String> src,
- Handle<Context> native_context, LanguageMode language_mode,
- Handle<SharedFunctionInfo> value);
static Handle<CompilationCacheTable> PutEval(
Handle<CompilationCacheTable> cache, Handle<String> src,
Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
int position);
+
+ // The RegExp cache contains JSRegExp::data fixed arrays.
+ Handle<Object> LookupRegExp(Handle<String> source, JSRegExp::Flags flags);
static Handle<CompilationCacheTable> PutRegExp(
Isolate* isolate, Handle<CompilationCacheTable> cache, Handle<String> src,
JSRegExp::Flags flags, Handle<FixedArray> value);
+
+ // The Code cache shares native-context-independent (NCI) code between
+ // contexts.
+ MaybeHandle<Code> LookupCode(Handle<SharedFunctionInfo> key);
static Handle<CompilationCacheTable> PutCode(
Isolate* isolate, Handle<CompilationCacheTable> cache,
Handle<SharedFunctionInfo> key, Handle<Code> value);
+
void Remove(Object value);
void Age();
- static const int kHashGenerations = 10;
DECL_CAST(CompilationCacheTable)
private:
+ void RemoveEntry(int entry_index);
+
OBJECT_CONSTRUCTORS(CompilationCacheTable,
HashTable<CompilationCacheTable, CompilationCacheShape>);
};
@@ -136,4 +144,4 @@ class CompilationCacheTable
#include "src/objects/object-macros-undef.h"
-#endif // V8_OBJECTS_COMPILATION_CACHE_H_
+#endif // V8_OBJECTS_COMPILATION_CACHE_TABLE_H_
diff --git a/deps/v8/src/objects/compressed-slots-inl.h b/deps/v8/src/objects/compressed-slots-inl.h
index 81eff427e4..ecb276ce36 100644
--- a/deps/v8/src/objects/compressed-slots-inl.h
+++ b/deps/v8/src/objects/compressed-slots-inl.h
@@ -33,7 +33,7 @@ Object CompressedObjectSlot::operator*() const {
return Object(DecompressTaggedAny(address(), value));
}
-Object CompressedObjectSlot::load(const Isolate* isolate) const {
+Object CompressedObjectSlot::load(IsolateRoot isolate) const {
Tagged_t value = *location();
return Object(DecompressTaggedAny(isolate, value));
}
@@ -52,7 +52,7 @@ Object CompressedObjectSlot::Relaxed_Load() const {
return Object(DecompressTaggedAny(address(), value));
}
-Object CompressedObjectSlot::Relaxed_Load(const Isolate* isolate) const {
+Object CompressedObjectSlot::Relaxed_Load(IsolateRoot isolate) const {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
return Object(DecompressTaggedAny(isolate, value));
}
@@ -85,7 +85,7 @@ MaybeObject CompressedMaybeObjectSlot::operator*() const {
return MaybeObject(DecompressTaggedAny(address(), value));
}
-MaybeObject CompressedMaybeObjectSlot::load(const Isolate* isolate) const {
+MaybeObject CompressedMaybeObjectSlot::load(IsolateRoot isolate) const {
Tagged_t value = *location();
return MaybeObject(DecompressTaggedAny(isolate, value));
}
@@ -99,8 +99,7 @@ MaybeObject CompressedMaybeObjectSlot::Relaxed_Load() const {
return MaybeObject(DecompressTaggedAny(address(), value));
}
-MaybeObject CompressedMaybeObjectSlot::Relaxed_Load(
- const Isolate* isolate) const {
+MaybeObject CompressedMaybeObjectSlot::Relaxed_Load(IsolateRoot isolate) const {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
return MaybeObject(DecompressTaggedAny(isolate, value));
}
@@ -126,8 +125,7 @@ HeapObjectReference CompressedHeapObjectSlot::operator*() const {
return HeapObjectReference(DecompressTaggedPointer(address(), value));
}
-HeapObjectReference CompressedHeapObjectSlot::load(
- const Isolate* isolate) const {
+HeapObjectReference CompressedHeapObjectSlot::load(IsolateRoot isolate) const {
Tagged_t value = *location();
return HeapObjectReference(DecompressTaggedPointer(isolate, value));
}
@@ -150,7 +148,7 @@ void CompressedHeapObjectSlot::StoreHeapObject(HeapObject value) const {
// OffHeapCompressedObjectSlot implementation.
//
-Object OffHeapCompressedObjectSlot::load(const Isolate* isolate) const {
+Object OffHeapCompressedObjectSlot::load(IsolateRoot isolate) const {
Tagged_t value = *location();
return Object(DecompressTaggedAny(isolate, value));
}
@@ -159,12 +157,12 @@ void OffHeapCompressedObjectSlot::store(Object value) const {
*location() = CompressTagged(value.ptr());
}
-Object OffHeapCompressedObjectSlot::Relaxed_Load(const Isolate* isolate) const {
+Object OffHeapCompressedObjectSlot::Relaxed_Load(IsolateRoot isolate) const {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
return Object(DecompressTaggedAny(isolate, value));
}
-Object OffHeapCompressedObjectSlot::Acquire_Load(const Isolate* isolate) const {
+Object OffHeapCompressedObjectSlot::Acquire_Load(IsolateRoot isolate) const {
AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location());
return Object(DecompressTaggedAny(isolate, value));
}
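
Background for the IsolateRoot change: with pointer compression, a slot holds only 32 bits, and a load must rebase them against the isolate's 4 GiB cage. Passing IsolateRoot (rather than a full Isolate*) makes that base available cheaply. A simplified sketch of the rebasing (real decompression also distinguishes Smi, pointer, and "any" cases):

    #include <cstdint>

    using Address = uintptr_t;
    using Tagged_t = uint32_t;

    Address Decompress(Address any_address_in_cage, Tagged_t value) {
      // Round down to the 4 GiB-aligned cage base, then add the offset.
      Address base = any_address_in_cage & ~Address{0xFFFFFFFFu};
      return base + value;
    }
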
diff --git a/deps/v8/src/objects/compressed-slots.h b/deps/v8/src/objects/compressed-slots.h
index b8f3872384..36a6cab596 100644
--- a/deps/v8/src/objects/compressed-slots.h
+++ b/deps/v8/src/objects/compressed-slots.h
@@ -41,12 +41,12 @@ class CompressedObjectSlot : public SlotBase<CompressedObjectSlot, Tagged_t> {
// TODO(leszeks): Consider deprecating the operator* load, and always pass the
// Isolate.
inline Object operator*() const;
- inline Object load(const Isolate* isolate) const;
+ inline Object load(IsolateRoot isolate) const;
inline void store(Object value) const;
inline Object Acquire_Load() const;
inline Object Relaxed_Load() const;
- inline Object Relaxed_Load(const Isolate* isolate) const;
+ inline Object Relaxed_Load(IsolateRoot isolate) const;
inline void Relaxed_Store(Object value) const;
inline void Release_Store(Object value) const;
inline Object Release_CompareAndSwap(Object old, Object target) const;
@@ -77,11 +77,11 @@ class CompressedMaybeObjectSlot
: SlotBase(slot.address()) {}
inline MaybeObject operator*() const;
- inline MaybeObject load(const Isolate* isolate) const;
+ inline MaybeObject load(IsolateRoot isolate) const;
inline void store(MaybeObject value) const;
inline MaybeObject Relaxed_Load() const;
- inline MaybeObject Relaxed_Load(const Isolate* isolate) const;
+ inline MaybeObject Relaxed_Load(IsolateRoot isolate) const;
inline void Relaxed_Store(MaybeObject value) const;
inline void Release_CompareAndSwap(MaybeObject old, MaybeObject target) const;
};
@@ -105,7 +105,7 @@ class CompressedHeapObjectSlot
: SlotBase(slot.address()) {}
inline HeapObjectReference operator*() const;
- inline HeapObjectReference load(const Isolate* isolate) const;
+ inline HeapObjectReference load(IsolateRoot isolate) const;
inline void store(HeapObjectReference value) const;
inline HeapObject ToHeapObject() const;
@@ -131,11 +131,11 @@ class OffHeapCompressedObjectSlot
explicit OffHeapCompressedObjectSlot(const uint32_t* ptr)
: SlotBase(reinterpret_cast<Address>(ptr)) {}
- inline Object load(const Isolate* isolate) const;
+ inline Object load(IsolateRoot isolate) const;
inline void store(Object value) const;
- inline Object Relaxed_Load(const Isolate* isolate) const;
- inline Object Acquire_Load(const Isolate* isolate) const;
+ inline Object Relaxed_Load(IsolateRoot isolate) const;
+ inline Object Acquire_Load(IsolateRoot isolate) const;
inline void Relaxed_Store(Object value) const;
inline void Release_Store(Object value) const;
inline void Release_CompareAndSwap(Object old, Object target) const;
diff --git a/deps/v8/src/objects/contexts-inl.h b/deps/v8/src/objects/contexts-inl.h
index 9bd30530c9..663ce6a965 100644
--- a/deps/v8/src/objects/contexts-inl.h
+++ b/deps/v8/src/objects/contexts-inl.h
@@ -56,11 +56,11 @@ SMI_ACCESSORS(Context, length, kLengthOffset)
CAST_ACCESSOR(NativeContext)
Object Context::get(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return get(isolate, index);
}
-Object Context::get(const Isolate* isolate, int index) const {
+Object Context::get(IsolateRoot isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index),
static_cast<unsigned>(this->length()));
return TaggedField<Object>::Relaxed_Load(isolate, *this,
@@ -88,11 +88,11 @@ void Context::set_scope_info(ScopeInfo scope_info) {
}
Object Context::synchronized_get(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return synchronized_get(isolate, index);
}
-Object Context::synchronized_get(const Isolate* isolate, int index) const {
+Object Context::synchronized_get(IsolateRoot isolate, int index) const {
DCHECK_LT(static_cast<unsigned int>(index),
static_cast<unsigned int>(this->length()));
return ACQUIRE_READ_FIELD(*this, OffsetOfElementAt(index));
@@ -268,17 +268,19 @@ Map Context::GetInitialJSArrayMap(ElementsKind kind) const {
}
DEF_GETTER(NativeContext, microtask_queue, MicrotaskQueue*) {
- ExternalPointer_t encoded_value =
- ReadField<ExternalPointer_t>(kMicrotaskQueueOffset);
- return reinterpret_cast<MicrotaskQueue*>(
- DecodeExternalPointer(isolate, encoded_value));
+ return reinterpret_cast<MicrotaskQueue*>(ReadExternalPointerField(
+ kMicrotaskQueueOffset, isolate, kNativeContextMicrotaskQueueTag));
+}
+
+void NativeContext::AllocateExternalPointerEntries(Isolate* isolate) {
+ InitExternalPointerField(kMicrotaskQueueOffset, isolate);
}
void NativeContext::set_microtask_queue(Isolate* isolate,
MicrotaskQueue* microtask_queue) {
- ExternalPointer_t encoded_value = EncodeExternalPointer(
- isolate, reinterpret_cast<Address>(microtask_queue));
- WriteField<ExternalPointer_t>(kMicrotaskQueueOffset, encoded_value);
+ WriteExternalPointerField(kMicrotaskQueueOffset, isolate,
+ reinterpret_cast<Address>(microtask_queue),
+ kNativeContextMicrotaskQueueTag);
}
void NativeContext::synchronized_set_script_context_table(
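// Toy model of the Read/WriteExternalPointerField indirection introduced
// above: under the heap sandbox, the object field holds only an index into a
// per-isolate table, and entries are XOR'ed with a type tag such as
// kNativeContextMicrotaskQueueTag. A sketch under those assumptions; the
// real table also manages capacity, growth and free entries.
#include <cstdint>
#include <vector>
struct ExternalPointerTableSketch {
  std::vector<uintptr_t> entries;
  uint32_t allocate() {
    entries.push_back(0);
    return static_cast<uint32_t>(entries.size() - 1);
  }
  void set(uint32_t index, uintptr_t value, uintptr_t tag) {
    entries[index] = value ^ tag;  // Tagging makes type confusion visible.
  }
  uintptr_t get(uint32_t index, uintptr_t tag) const {
    return entries[index] ^ tag;  // A wrong tag yields a garbage pointer.
  }
};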
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index e63ed580f4..f62e41c9a8 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -311,6 +311,7 @@ enum ContextLookupFlags {
V(FINALIZATION_REGISTRY_CLEANUP_SOME, JSFunction, \
finalization_registry_cleanup_some) \
V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \
+ V(FUNCTION_TO_STRING_INDEX, JSFunction, function_to_string) \
V(OBJECT_TO_STRING, JSFunction, object_to_string) \
V(OBJECT_VALUE_OF_FUNCTION_INDEX, JSFunction, object_value_of_function) \
V(PROMISE_ALL_INDEX, JSFunction, promise_all) \
@@ -438,13 +439,13 @@ class Context : public HeapObject {
// Setter and getter for elements.
V8_INLINE Object get(int index) const;
- V8_INLINE Object get(const Isolate* isolate, int index) const;
+ V8_INLINE Object get(IsolateRoot isolate, int index) const;
V8_INLINE void set(int index, Object value);
// Setter with explicit barrier mode.
V8_INLINE void set(int index, Object value, WriteBarrierMode mode);
// Setter and getter with synchronization semantics.
V8_INLINE Object synchronized_get(int index) const;
- V8_INLINE Object synchronized_get(const Isolate* isolate, int index) const;
+ V8_INLINE Object synchronized_get(IsolateRoot isolate, int index) const;
V8_INLINE void synchronized_set(int index, Object value);
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
@@ -662,6 +663,8 @@ class NativeContext : public Context {
DECL_CAST(NativeContext)
// TODO(neis): Move some stuff from Context here.
+ inline void AllocateExternalPointerEntries(Isolate* isolate);
+
// [microtask_queue]: pointer to the MicrotaskQueue object.
DECL_GETTER(microtask_queue, MicrotaskQueue*)
inline void set_microtask_queue(Isolate* isolate, MicrotaskQueue* queue);
diff --git a/deps/v8/src/objects/data-handler-inl.h b/deps/v8/src/objects/data-handler-inl.h
index f9496cc342..f18f499294 100644
--- a/deps/v8/src/objects/data-handler-inl.h
+++ b/deps/v8/src/objects/data-handler-inl.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/data-handler-tq-inl.inc"
+
OBJECT_CONSTRUCTORS_IMPL(DataHandler, Struct)
CAST_ACCESSOR(DataHandler)
diff --git a/deps/v8/src/objects/data-handler.h b/deps/v8/src/objects/data-handler.h
index c9c0cf4cbc..e27b5be83f 100644
--- a/deps/v8/src/objects/data-handler.h
+++ b/deps/v8/src/objects/data-handler.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_DATA_HANDLER_H_
#include "src/objects/struct.h"
+#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -13,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/data-handler-tq.inc"
+
// DataHandler is a base class for load and store handlers that can't be
// encoded in one Smi. The kind of a handler can be deduced from its instance
// type.
class DataHandler : public Struct {
diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h
index 886c31583e..a0815d04df 100644
--- a/deps/v8/src/objects/debug-objects-inl.h
+++ b/deps/v8/src/objects/debug-objects-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/debug-objects-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(BreakPoint)
TQ_OBJECT_CONSTRUCTORS_IMPL(BreakPointInfo)
TQ_OBJECT_CONSTRUCTORS_IMPL(CoverageInfo)
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index b9012fd9c1..e0ddaddd23 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -22,6 +22,8 @@ namespace internal {
class BreakPoint;
class BytecodeArray;
+#include "torque-generated/src/objects/debug-objects-tq.inc"
+
// The DebugInfo class holds additional information for a function being
// debugged.
class DebugInfo : public TorqueGeneratedDebugInfo<DebugInfo, Struct> {
diff --git a/deps/v8/src/objects/descriptor-array-inl.h b/deps/v8/src/objects/descriptor-array-inl.h
index a7c6443a05..21f43d292a 100644
--- a/deps/v8/src/objects/descriptor-array-inl.h
+++ b/deps/v8/src/objects/descriptor-array-inl.h
@@ -24,6 +24,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/descriptor-array-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(DescriptorArray)
TQ_OBJECT_CONSTRUCTORS_IMPL(EnumCache)
@@ -104,11 +106,11 @@ ObjectSlot DescriptorArray::GetDescriptorSlot(int descriptor) {
}
Name DescriptorArray::GetKey(InternalIndex descriptor_number) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return GetKey(isolate, descriptor_number);
}
-Name DescriptorArray::GetKey(const Isolate* isolate,
+Name DescriptorArray::GetKey(IsolateRoot isolate,
InternalIndex descriptor_number) const {
DCHECK_LT(descriptor_number.as_int(), number_of_descriptors());
int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int());
@@ -127,12 +129,11 @@ int DescriptorArray::GetSortedKeyIndex(int descriptor_number) {
}
Name DescriptorArray::GetSortedKey(int descriptor_number) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return GetSortedKey(isolate, descriptor_number);
}
-Name DescriptorArray::GetSortedKey(const Isolate* isolate,
- int descriptor_number) {
+Name DescriptorArray::GetSortedKey(IsolateRoot isolate, int descriptor_number) {
return GetKey(isolate, InternalIndex(GetSortedKeyIndex(descriptor_number)));
}
@@ -142,11 +143,11 @@ void DescriptorArray::SetSortedKey(int descriptor_number, int pointer) {
}
Object DescriptorArray::GetStrongValue(InternalIndex descriptor_number) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return GetStrongValue(isolate, descriptor_number);
}
-Object DescriptorArray::GetStrongValue(const Isolate* isolate,
+Object DescriptorArray::GetStrongValue(IsolateRoot isolate,
InternalIndex descriptor_number) {
return GetValue(isolate, descriptor_number).cast<Object>();
}
@@ -160,11 +161,11 @@ void DescriptorArray::SetValue(InternalIndex descriptor_number,
}
MaybeObject DescriptorArray::GetValue(InternalIndex descriptor_number) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return GetValue(isolate, descriptor_number);
}
-MaybeObject DescriptorArray::GetValue(const Isolate* isolate,
+MaybeObject DescriptorArray::GetValue(IsolateRoot isolate,
InternalIndex descriptor_number) {
DCHECK_LT(descriptor_number.as_int(), number_of_descriptors());
int entry_offset = OffsetOfDescriptorAt(descriptor_number.as_int());
@@ -191,11 +192,11 @@ int DescriptorArray::GetFieldIndex(InternalIndex descriptor_number) {
}
FieldType DescriptorArray::GetFieldType(InternalIndex descriptor_number) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return GetFieldType(isolate, descriptor_number);
}
-FieldType DescriptorArray::GetFieldType(const Isolate* isolate,
+FieldType DescriptorArray::GetFieldType(IsolateRoot isolate,
InternalIndex descriptor_number) {
DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
MaybeObject wrapped_type = GetValue(isolate, descriptor_number);
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index 890863d5a0..57f9162c65 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -25,6 +25,8 @@ class Handle;
class Isolate;
+#include "torque-generated/src/objects/descriptor-array-tq.inc"
+
// An EnumCache is a pair used to hold keys and indices caches.
class EnumCache : public TorqueGeneratedEnumCache<EnumCache, Struct> {
public:
@@ -67,22 +69,22 @@ class DescriptorArray
// Accessors for fetching instance descriptor at descriptor number.
inline Name GetKey(InternalIndex descriptor_number) const;
- inline Name GetKey(const Isolate* isolate,
+ inline Name GetKey(IsolateRoot isolate,
InternalIndex descriptor_number) const;
inline Object GetStrongValue(InternalIndex descriptor_number);
- inline Object GetStrongValue(const Isolate* isolate,
+ inline Object GetStrongValue(IsolateRoot isolate,
InternalIndex descriptor_number);
inline MaybeObject GetValue(InternalIndex descriptor_number);
- inline MaybeObject GetValue(const Isolate* isolate,
+ inline MaybeObject GetValue(IsolateRoot isolate,
InternalIndex descriptor_number);
inline PropertyDetails GetDetails(InternalIndex descriptor_number);
inline int GetFieldIndex(InternalIndex descriptor_number);
inline FieldType GetFieldType(InternalIndex descriptor_number);
- inline FieldType GetFieldType(const Isolate* isolate,
+ inline FieldType GetFieldType(IsolateRoot isolate,
InternalIndex descriptor_number);
inline Name GetSortedKey(int descriptor_number);
- inline Name GetSortedKey(const Isolate* isolate, int descriptor_number);
+ inline Name GetSortedKey(IsolateRoot isolate, int descriptor_number);
inline int GetSortedKeyIndex(int descriptor_number);
// Accessor for complete descriptor.
@@ -168,9 +170,7 @@ class DescriptorArray
"Weak fields extend up to the end of the header.");
static_assert(kDescriptorsOffset == kHeaderSize,
"Variable-size array follows header.");
- // We use this visitor to also visitor to also visit the enum_cache, which is
- // the only tagged field in the header, and placed at the end of the header.
- using BodyDescriptor = FlexibleWeakBodyDescriptor<kStartOfStrongFieldsOffset>;
+ class BodyDescriptor;
// Layout of descriptor.
// Naming is consistent with Dictionary classes for easy templating.
diff --git a/deps/v8/src/objects/descriptor-array.tq b/deps/v8/src/objects/descriptor-array.tq
index 0b088b3d73..eb86a3343e 100644
--- a/deps/v8/src/objects/descriptor-array.tq
+++ b/deps/v8/src/objects/descriptor-array.tq
@@ -16,8 +16,9 @@ struct DescriptorEntry {
value: JSAny|Weak<Map>|AccessorInfo|AccessorPair|ClassPositions;
}
-@generateCppClass
-extern class DescriptorArray extends HeapObject {
+@export
+@customCppClass
+class DescriptorArray extends HeapObject {
const number_of_all_descriptors: uint16;
number_of_descriptors: uint16;
raw_number_of_marked_descriptors: uint16;
@@ -25,3 +26,6 @@ extern class DescriptorArray extends HeapObject {
enum_cache: EnumCache;
descriptors[number_of_all_descriptors]: DescriptorEntry;
}
+
+// A descriptor array where all values are held strongly.
+class StrongDescriptorArray extends DescriptorArray {}
diff --git a/deps/v8/src/objects/dictionary-inl.h b/deps/v8/src/objects/dictionary-inl.h
index 97d83eaa55..4df78ac99f 100644
--- a/deps/v8/src/objects/dictionary-inl.h
+++ b/deps/v8/src/objects/dictionary-inl.h
@@ -30,12 +30,12 @@ Dictionary<Derived, Shape>::Dictionary(Address ptr)
template <typename Derived, typename Shape>
Object Dictionary<Derived, Shape>::ValueAt(InternalIndex entry) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return ValueAt(isolate, entry);
}
template <typename Derived, typename Shape>
-Object Dictionary<Derived, Shape>::ValueAt(const Isolate* isolate,
+Object Dictionary<Derived, Shape>::ValueAt(IsolateRoot isolate,
InternalIndex entry) {
return this->get(isolate, DerivedHashTable::EntryToIndex(entry) +
Derived::kEntryValueIndex);
@@ -181,11 +181,11 @@ Handle<Map> GlobalDictionary::GetMap(ReadOnlyRoots roots) {
}
Name NameDictionary::NameAt(InternalIndex entry) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return NameAt(isolate, entry);
}
-Name NameDictionary::NameAt(const Isolate* isolate, InternalIndex entry) {
+Name NameDictionary::NameAt(IsolateRoot isolate, InternalIndex entry) {
return Name::cast(KeyAt(isolate, entry));
}
@@ -194,31 +194,31 @@ Handle<Map> NameDictionary::GetMap(ReadOnlyRoots roots) {
}
PropertyCell GlobalDictionary::CellAt(InternalIndex entry) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return CellAt(isolate, entry);
}
-PropertyCell GlobalDictionary::CellAt(const Isolate* isolate,
+PropertyCell GlobalDictionary::CellAt(IsolateRoot isolate,
InternalIndex entry) {
DCHECK(KeyAt(isolate, entry).IsPropertyCell(isolate));
return PropertyCell::cast(KeyAt(isolate, entry));
}
Name GlobalDictionary::NameAt(InternalIndex entry) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return NameAt(isolate, entry);
}
-Name GlobalDictionary::NameAt(const Isolate* isolate, InternalIndex entry) {
+Name GlobalDictionary::NameAt(IsolateRoot isolate, InternalIndex entry) {
return CellAt(isolate, entry).name(isolate);
}
Object GlobalDictionary::ValueAt(InternalIndex entry) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return ValueAt(isolate, entry);
}
-Object GlobalDictionary::ValueAt(const Isolate* isolate, InternalIndex entry) {
+Object GlobalDictionary::ValueAt(IsolateRoot isolate, InternalIndex entry) {
return CellAt(isolate, entry).value(isolate);
}
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index e8b61dbbb2..d9cc62afc1 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -32,7 +32,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Dictionary
using Key = typename Shape::Key;
// Returns the value at entry.
inline Object ValueAt(InternalIndex entry);
- inline Object ValueAt(const Isolate* isolate, InternalIndex entry);
+ inline Object ValueAt(IsolateRoot isolate, InternalIndex entry);
// Set the value for entry.
inline void ValueAtPut(InternalIndex entry, Object value);
@@ -131,6 +131,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) BaseNameDictionary
static const int kObjectHashIndex = kNextEnumerationIndexIndex + 1;
static const int kEntryValueIndex = 1;
+ static const bool kIsOrderedDictionaryType = false;
+
inline void SetHash(int hash);
inline int Hash() const;
@@ -141,11 +143,6 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) BaseNameDictionary
AllocationType allocation = AllocationType::kYoung,
MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
- // Collect the keys into the given KeyAccumulator, in ascending chronological
- // order of property creation.
- V8_WARN_UNUSED_RESULT static ExceptionStatus CollectKeysTo(
- Handle<Derived> dictionary, KeyAccumulator* keys);
-
// Allocate the next enumeration index. Possibly updates all enumeration
// indices in the table.
static int NextEnumerationIndex(Isolate* isolate, Handle<Derived> dictionary);
@@ -157,13 +154,6 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) BaseNameDictionary
static Handle<FixedArray> IterationIndices(Isolate* isolate,
Handle<Derived> dictionary);
- // Copies enumerable keys to preallocated fixed array.
- // Does not throw for uninitialized exports in module namespace objects, so
- // this has to be checked separately.
- static void CopyEnumKeysTo(Isolate* isolate, Handle<Derived> dictionary,
- Handle<FixedArray> storage, KeyCollectionMode mode,
- KeyAccumulator* accumulator);
-
template <typename LocalIsolate>
V8_WARN_UNUSED_RESULT static Handle<Derived> AddNoUpdateNextEnumerationIndex(
LocalIsolate* isolate, Handle<Derived> dictionary, Key key,
@@ -197,7 +187,7 @@ class V8_EXPORT_PRIVATE NameDictionary
static const int kInitialCapacity = 2;
inline Name NameAt(InternalIndex entry);
- inline Name NameAt(const Isolate* isolate, InternalIndex entry);
+ inline Name NameAt(IsolateRoot isolate, InternalIndex entry);
inline void set_hash(int hash);
inline int hash() const;
@@ -234,14 +224,14 @@ class V8_EXPORT_PRIVATE GlobalDictionary
DECL_CAST(GlobalDictionary)
inline Object ValueAt(InternalIndex entry);
- inline Object ValueAt(const Isolate* isolate, InternalIndex entry);
+ inline Object ValueAt(IsolateRoot isolate, InternalIndex entry);
inline PropertyCell CellAt(InternalIndex entry);
- inline PropertyCell CellAt(const Isolate* isolate, InternalIndex entry);
+ inline PropertyCell CellAt(IsolateRoot isolate, InternalIndex entry);
inline void SetEntry(InternalIndex entry, Object key, Object value,
PropertyDetails details);
inline void ClearEntry(InternalIndex entry);
inline Name NameAt(InternalIndex entry);
- inline Name NameAt(const Isolate* isolate, InternalIndex entry);
+ inline Name NameAt(IsolateRoot isolate, InternalIndex entry);
inline void ValueAtPut(InternalIndex entry, Object value);
OBJECT_CONSTRUCTORS(
@@ -361,6 +351,22 @@ class NumberDictionary
Dictionary<NumberDictionary, NumberDictionaryShape>);
};
+// The comparator is passed two indices |a| and |b|, and it returns true when the
+// property at index |a| comes before the property at index |b| in the
+// enumeration order.
+template <typename Dictionary>
+struct EnumIndexComparator {
+ explicit EnumIndexComparator(Dictionary dict) : dict(dict) {}
+ bool operator()(Tagged_t a, Tagged_t b) {
+ PropertyDetails da(
+ dict.DetailsAt(InternalIndex(Smi(static_cast<Address>(a)).value())));
+ PropertyDetails db(
+ dict.DetailsAt(InternalIndex(Smi(static_cast<Address>(b)).value())));
+ return da.dictionary_index() < db.dictionary_index();
+ }
+ Dictionary dict;
+};
+
} // namespace internal
} // namespace v8
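// Usage sketch for a comparator of the shape added above: std::sort orders
// dictionary entries by their enumeration index, i.e. by property creation
// order. DictSketch and plain int entries stand in for the real dictionary
// and its Smi-encoded indices.
#include <algorithm>
#include <vector>
struct DictSketch {
  std::vector<int> enum_index_of_entry;  // dictionary_index() per entry.
  int DetailsAt(int entry) const { return enum_index_of_entry[entry]; }
};
struct EnumIndexComparatorSketch {
  explicit EnumIndexComparatorSketch(const DictSketch& d) : dict(d) {}
  bool operator()(int a, int b) const {
    return dict.DetailsAt(a) < dict.DetailsAt(b);
  }
  const DictSketch& dict;
};
inline void SortByCreationOrder(std::vector<int>* entries,
                                const DictSketch& dict) {
  std::sort(entries->begin(), entries->end(), EnumIndexComparatorSketch(dict));
}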
diff --git a/deps/v8/src/objects/elements.cc b/deps/v8/src/objects/elements.cc
index 32bd891a74..4dcbb2befc 100644
--- a/deps/v8/src/objects/elements.cc
+++ b/deps/v8/src/objects/elements.cc
@@ -22,8 +22,6 @@
#include "src/objects/slots-atomic-inl.h"
#include "src/objects/slots.h"
#include "src/utils/utils.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions.h"
// Each concrete ElementsAccessor can handle exactly one ElementsKind;
// several abstract ElementsAccessor classes are used to allow sharing
@@ -179,7 +177,7 @@ void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase from_base,
if (raw_copy_size < 0) {
DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size =
- Min(from_base.length() - from_start, to_base.length() - to_start);
+ std::min(from_base.length() - from_start, to_base.length() - to_start);
int start = to_start + copy_size;
int length = to_base.length() - start;
if (length > 0) {
@@ -252,7 +250,7 @@ void CopyDoubleToObjectElements(Isolate* isolate, FixedArrayBase from_base,
DisallowHeapAllocation no_allocation;
DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size =
- Min(from_base.length() - from_start, to_base.length() - to_start);
+ std::min(from_base.length() - from_start, to_base.length() - to_start);
// Also initialize the area that will be copied over since HeapNumber
// allocation below can cause an incremental marking step, requiring all
// existing heap objects to be properly initialized.
@@ -296,7 +294,7 @@ void CopyDoubleToDoubleElements(FixedArrayBase from_base, uint32_t from_start,
if (raw_copy_size < 0) {
DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size =
- Min(from_base.length() - from_start, to_base.length() - to_start);
+ std::min(from_base.length() - from_start, to_base.length() - to_start);
for (int i = to_start + copy_size; i < to_base.length(); ++i) {
FixedDoubleArray::cast(to_base).set_the_hole(i);
}
@@ -542,6 +540,8 @@ template <typename Subclass, typename ElementsTraitsParam>
class ElementsAccessorBase : public InternalElementsAccessor {
public:
ElementsAccessorBase() = default;
+ ElementsAccessorBase(const ElementsAccessorBase&) = delete;
+ ElementsAccessorBase& operator=(const ElementsAccessorBase&) = delete;
using ElementsTraits = ElementsTraitsParam;
using BackingStore = typename ElementsTraitsParam::BackingStore;
@@ -704,7 +704,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
// Check whether the backing store should be shrunk.
uint32_t capacity = backing_store->length();
- old_length = Min(old_length, capacity);
+ old_length = std::min(old_length, capacity);
if (length == 0) {
array->initialize_elements();
} else if (length <= capacity) {
@@ -733,7 +733,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
}
} else {
// Check whether the backing store should be expanded.
- capacity = Max(length, JSObject::NewElementsCapacity(capacity));
+ capacity = std::max(length, JSObject::NewElementsCapacity(capacity));
Subclass::GrowCapacityAndConvertImpl(array, capacity);
}
@@ -1325,9 +1325,6 @@ class ElementsAccessorBase : public InternalElementsAccessor {
uint32_t length) {
UNREACHABLE();
}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ElementsAccessorBase);
};
class DictionaryElementsAccessor
@@ -1423,7 +1420,7 @@ class DictionaryElementsAccessor
DisallowHeapAllocation no_gc;
NumberDictionary dict = NumberDictionary::cast(backing_store);
if (!dict.requires_slow_elements()) return false;
- const Isolate* isolate = GetIsolateForPtrCompr(holder);
+ IsolateRoot isolate = GetIsolateForPtrCompr(holder);
ReadOnlyRoots roots = holder.GetReadOnlyRoots(isolate);
for (InternalIndex i : dict.IterateEntries()) {
Object key = dict.KeyAt(isolate, i);
@@ -1812,7 +1809,7 @@ class DictionaryElementsAccessor
if (k.Number() > NumberDictionary::kRequiresSlowElementsLimit) {
requires_slow_elements = true;
} else {
- max_key = Max(max_key, Smi::ToInt(k));
+ max_key = std::max(max_key, Smi::ToInt(k));
}
}
if (requires_slow_elements) {
diff --git a/deps/v8/src/objects/elements.h b/deps/v8/src/objects/elements.h
index 551183fe6d..4a34e866f2 100644
--- a/deps/v8/src/objects/elements.h
+++ b/deps/v8/src/objects/elements.h
@@ -22,6 +22,8 @@ class ElementsAccessor {
public:
ElementsAccessor() = default;
virtual ~ElementsAccessor() = default;
+ ElementsAccessor(const ElementsAccessor&) = delete;
+ ElementsAccessor& operator=(const ElementsAccessor&) = delete;
// Returns a shared ElementsAccessor for the specified ElementsKind.
static ElementsAccessor* ForKind(ElementsKind elements_kind) {
@@ -202,8 +204,6 @@ class ElementsAccessor {
private:
V8_EXPORT_PRIVATE static ElementsAccessor** elements_accessors_;
-
- DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
};
V8_WARN_UNUSED_RESULT MaybeHandle<Object> ArrayConstructInitializeElements(
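// The idiom this file migrates to, shown in isolation: instead of the
// DISALLOW_COPY_AND_ASSIGN macro in a private section, the copy operations
// are deleted publicly, which produces clearer diagnostics at the call site
// and keeps the class definition self-describing.
class NonCopyableSketch {
 public:
  NonCopyableSketch() = default;
  NonCopyableSketch(const NonCopyableSketch&) = delete;
  NonCopyableSketch& operator=(const NonCopyableSketch&) = delete;
};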
diff --git a/deps/v8/src/objects/embedder-data-array-inl.h b/deps/v8/src/objects/embedder-data-array-inl.h
index 9c514aef89..6eb1076287 100644
--- a/deps/v8/src/objects/embedder-data-array-inl.h
+++ b/deps/v8/src/objects/embedder-data-array-inl.h
@@ -16,15 +16,16 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/embedder-data-array-tq-inl.inc"
TQ_OBJECT_CONSTRUCTORS_IMPL(EmbedderDataArray)
Address EmbedderDataArray::slots_start() {
- return FIELD_ADDR(*this, OffsetOfElementAt(0));
+ return field_address(OffsetOfElementAt(0));
}
Address EmbedderDataArray::slots_end() {
- return FIELD_ADDR(*this, OffsetOfElementAt(length()));
+ return field_address(OffsetOfElementAt(length()));
}
} // namespace internal
diff --git a/deps/v8/src/objects/embedder-data-array.h b/deps/v8/src/objects/embedder-data-array.h
index 728c3cf86a..5c4389c16d 100644
--- a/deps/v8/src/objects/embedder-data-array.h
+++ b/deps/v8/src/objects/embedder-data-array.h
@@ -8,7 +8,6 @@
#include "src/common/globals.h"
#include "src/handles/maybe-handles.h"
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,6 +15,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/embedder-data-array-tq.inc"
+
// This is a storage array for embedder data fields stored in native context.
// It's basically an "array of EmbedderDataSlots".
// Note, if the pointer compression is enabled the embedder data slot also
diff --git a/deps/v8/src/objects/embedder-data-slot-inl.h b/deps/v8/src/objects/embedder-data-slot-inl.h
index 2ede262f80..f9ef6e1e56 100644
--- a/deps/v8/src/objects/embedder-data-slot-inl.h
+++ b/deps/v8/src/objects/embedder-data-slot-inl.h
@@ -27,6 +27,19 @@ EmbedderDataSlot::EmbedderDataSlot(JSObject object, int embedder_field_index)
: SlotBase(FIELD_ADDR(
object, object.GetEmbedderFieldOffset(embedder_field_index))) {}
+void EmbedderDataSlot::AllocateExternalPointerEntry(Isolate* isolate) {
+#ifdef V8_HEAP_SANDBOX
+ // TODO(v8:10391, saelo): Use InitExternalPointerField() once
+ // ExternalPointer_t is 4-bytes.
+ uint32_t index = isolate->external_pointer_table().allocate();
+ // Object slots don't support storing raw values, so we just "reinterpret
+ // cast" the index value to Object.
+ Object index_as_object(index);
+ ObjectSlot(address() + kRawPayloadOffset).Relaxed_Store(index_as_object);
+ ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi::zero());
+#endif
+}
+
Object EmbedderDataSlot::load_tagged() const {
return ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Load();
}
@@ -61,40 +74,75 @@ void EmbedderDataSlot::store_tagged(JSObject object, int embedder_field_index,
.Relaxed_Store(value);
WRITE_BARRIER(object, slot_offset + kTaggedPayloadOffset, value);
#ifdef V8_COMPRESS_POINTERS
- // See gc_safe_store() for the reasons behind two stores.
+ // See gc_safe_store() for the reasons behind two stores and why the second is
+  // only done if !V8_HEAP_SANDBOX_BOOL.
ObjectSlot(FIELD_ADDR(object, slot_offset + kRawPayloadOffset))
.Relaxed_Store(Smi::zero());
#endif
}
-bool EmbedderDataSlot::ToAlignedPointer(const Isolate* isolate,
+bool EmbedderDataSlot::ToAlignedPointer(IsolateRoot isolate_root,
void** out_pointer) const {
// We don't care about atomicity of access here because embedder slots
// are accessed this way only from the main thread via API during "mutator"
// phase which is properly synched with GC (concurrent marker may still look
// at the tagged part of the embedder slot but read-only access is ok).
-#ifdef V8_COMPRESS_POINTERS
- // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
- // fields (external pointers, doubles and BigInt data) are only kTaggedSize
- // aligned so we have to use unaligned pointer friendly way of accessing them
- // in order to avoid undefined behavior in C++ code.
- Address raw_value = base::ReadUnalignedValue<Address>(address());
- // We currently have to treat zero as nullptr in embedder slots.
- if (raw_value) raw_value = DecodeExternalPointer(isolate, raw_value);
+ Address raw_value;
+#ifdef V8_HEAP_SANDBOX
+ uint32_t index = base::Memory<uint32_t>(address() + kRawPayloadOffset);
+ const Isolate* isolate = Isolate::FromRootAddress(isolate_root.address());
+ raw_value = isolate->external_pointer_table().get(index) ^
+ kEmbedderDataSlotPayloadTag;
#else
- Address raw_value = *location();
+ if (COMPRESS_POINTERS_BOOL) {
+ // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
+ // fields (external pointers, doubles and BigInt data) are only kTaggedSize
+ // aligned so we have to use unaligned pointer friendly way of accessing
+ // them in order to avoid undefined behavior in C++ code.
+ raw_value = base::ReadUnalignedValue<Address>(address());
+ } else {
+ raw_value = *location();
+ }
#endif
*out_pointer = reinterpret_cast<void*>(raw_value);
return HAS_SMI_TAG(raw_value);
}
+bool EmbedderDataSlot::ToAlignedPointerSafe(IsolateRoot isolate_root,
+ void** out_pointer) const {
+#ifdef V8_HEAP_SANDBOX
+ uint32_t index = base::Memory<uint32_t>(address() + kRawPayloadOffset);
+ Address raw_value;
+ const Isolate* isolate = Isolate::FromRootAddress(isolate_root.address());
+ if (isolate->external_pointer_table().is_valid_index(index)) {
+ raw_value = isolate->external_pointer_table().get(index) ^
+ kEmbedderDataSlotPayloadTag;
+ *out_pointer = reinterpret_cast<void*>(raw_value);
+ return true;
+ }
+ return false;
+#else
+ return ToAlignedPointer(isolate_root, out_pointer);
+#endif // V8_HEAP_SANDBOX
+}
+
bool EmbedderDataSlot::store_aligned_pointer(Isolate* isolate, void* ptr) {
Address value = reinterpret_cast<Address>(ptr);
if (!HAS_SMI_TAG(value)) return false;
- // We currently have to treat zero as nullptr in embedder slots.
- if (value) value = EncodeExternalPointer(isolate, value);
- DCHECK(HAS_SMI_TAG(value));
- gc_safe_store(value);
+#ifdef V8_HEAP_SANDBOX
+ if (V8_HEAP_SANDBOX_BOOL) {
+ AllocateExternalPointerEntry(isolate);
+    // Raw payload contains the table index. Object slots don't support
+    // loading raw values, so we just "reinterpret cast" the Object value to
+    // an index.
+ Object index_as_object =
+ ObjectSlot(address() + kRawPayloadOffset).Relaxed_Load();
+ uint32_t index = static_cast<uint32_t>(index_as_object.ptr());
+ isolate->external_pointer_table().set(index,
+ value ^ kEmbedderDataSlotPayloadTag);
+ return true;
+ }
+#endif
+ gc_safe_store(isolate, value);
return true;
}
@@ -109,10 +157,7 @@ EmbedderDataSlot::RawData EmbedderDataSlot::load_raw(
// fields (external pointers, doubles and BigInt data) are only kTaggedSize
// aligned so we have to use unaligned pointer friendly way of accessing them
// in order to avoid undefined behavior in C++ code.
- Address value = base::ReadUnalignedValue<Address>(address());
- // We currently have to treat zero as nullptr in embedder slots.
- if (value) return DecodeExternalPointer(isolate, value);
- return value;
+ return base::ReadUnalignedValue<EmbedderDataSlot::RawData>(address());
#else
return *location();
#endif
@@ -121,16 +166,15 @@ EmbedderDataSlot::RawData EmbedderDataSlot::load_raw(
void EmbedderDataSlot::store_raw(Isolate* isolate,
EmbedderDataSlot::RawData data,
const DisallowGarbageCollection& no_gc) {
- // We currently have to treat zero as nullptr in embedder slots.
- if (data) data = EncodeExternalPointer(isolate, data);
- gc_safe_store(data);
+ gc_safe_store(isolate, data);
}
-void EmbedderDataSlot::gc_safe_store(Address value) {
+void EmbedderDataSlot::gc_safe_store(Isolate* isolate, Address value) {
#ifdef V8_COMPRESS_POINTERS
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(SmiValuesAre31Bits());
STATIC_ASSERT(kTaggedSize == kInt32Size);
+
// We have to do two 32-bit stores here because
// 1) tagged part modifications must be atomic to be properly synchronized
// with the concurrent marker.
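// Sketch of the two-store scheme gc_safe_store() describes, assuming the
// slot splits into two 32-bit halves under pointer compression: only the
// tagged half must be written atomically, so a concurrent marker never
// observes a torn tagged value; the raw half is read by the mutator alone.
#include <atomic>
#include <cstdint>
inline void GcSafeStoreSketch(std::atomic<uint32_t>* tagged_half,
                              uint32_t* raw_half, uint32_t tagged_value,
                              uint32_t raw_value) {
  tagged_half->store(tagged_value, std::memory_order_relaxed);
  *raw_half = raw_value;  // Plain store; the marker ignores this half.
}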
diff --git a/deps/v8/src/objects/embedder-data-slot.h b/deps/v8/src/objects/embedder-data-slot.h
index 68d71c0177..8f4fcc8af2 100644
--- a/deps/v8/src/objects/embedder-data-slot.h
+++ b/deps/v8/src/objects/embedder-data-slot.h
@@ -43,7 +43,11 @@ class EmbedderDataSlot
#endif
#ifdef V8_COMPRESS_POINTERS
- // The raw payload is located in the other tagged part of the full pointer.
+ // The raw payload is located in the other "tagged" part of the full pointer
+  // and contains the upper part of an aligned address. The raw part is not
+  // expected to look like a tagged value.
+  // When V8_HEAP_SANDBOX is defined, the raw payload contains an index into
+  // the external pointer table.
static constexpr int kRawPayloadOffset = kTaggedSize - kTaggedPayloadOffset;
#endif
static constexpr int kRequiredPtrAlignment = kSmiTagSize;
@@ -51,6 +55,8 @@ class EmbedderDataSlot
// Opaque type used for storing raw embedder data.
using RawData = Address;
+ V8_INLINE void AllocateExternalPointerEntry(Isolate* isolate);
+
V8_INLINE Object load_tagged() const;
V8_INLINE void store_smi(Smi value);
@@ -66,8 +72,22 @@ class EmbedderDataSlot
// the pointer-like value. Note, that some Smis could still look like an
// aligned pointers.
// Returns true on success.
- V8_INLINE bool ToAlignedPointer(const Isolate* isolate,
- void** out_result) const;
+ // When V8 heap sandbox is enabled, calling this method when the raw part of
+  // the slot does not contain a valid external pointer table index is
+  // undefined behaviour and will most likely result in crashes.
+ V8_INLINE bool ToAlignedPointer(IsolateRoot isolate, void** out_result) const;
+
+ // Same as ToAlignedPointer() but with a workaround for V8 heap sandbox.
+ // When V8 heap sandbox is enabled, this method doesn't crash when the raw
+ // part of the slot contains "undefined" instead of a correct external table
+ // entry index (see Factory::InitializeJSObjectBody() for details).
+  // Returns true when the external pointer table index points to a valid
+  // entry, otherwise false.
+  //
+  // Call this function if you are not sure whether the slot contains a
+  // valid external pointer.
+ V8_INLINE bool ToAlignedPointerSafe(IsolateRoot isolate,
+ void** out_result) const;
// Returns true if the pointer was successfully stored or false if the pointer
// was improperly aligned.
@@ -82,7 +102,7 @@ class EmbedderDataSlot
private:
// Stores given value to the embedder data slot in a concurrent-marker
// friendly manner (tagged part of the slot is written atomically).
- V8_INLINE void gc_safe_store(Address value);
+ V8_INLINE void gc_safe_store(Isolate* isolate, Address value);
};
} // namespace internal
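// Sketch of the alignment contract behind store_aligned_pointer(): a raw
// pointer is accepted only when its low bit is clear, i.e. it meets
// kRequiredPtrAlignment (== kSmiTagSize) and cannot be confused with a heap
// reference. Assumes the common 1-bit Smi tag.
#include <cstdint>
constexpr uintptr_t kSmiTagMaskSketch = 1;
inline bool HasSmiTagSketch(const void* ptr) {
  return (reinterpret_cast<uintptr_t>(ptr) & kSmiTagMaskSketch) == 0;
}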
diff --git a/deps/v8/src/objects/feedback-cell-inl.h b/deps/v8/src/objects/feedback-cell-inl.h
index 36d9bc8569..494a951ce4 100644
--- a/deps/v8/src/objects/feedback-cell-inl.h
+++ b/deps/v8/src/objects/feedback-cell-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/feedback-cell-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(FeedbackCell)
void FeedbackCell::clear_padding() {
@@ -55,6 +57,17 @@ void FeedbackCell::SetInterruptBudget() {
set_interrupt_budget(FLAG_interrupt_budget);
}
+void FeedbackCell::IncrementClosureCount(Isolate* isolate) {
+ ReadOnlyRoots r(isolate);
+ if (map() == r.no_closures_cell_map()) {
+ set_map(r.one_closure_cell_map());
+ } else if (map() == r.one_closure_cell_map()) {
+ set_map(r.many_closures_cell_map());
+ } else {
+ DCHECK(map() == r.many_closures_cell_map());
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/feedback-cell.h b/deps/v8/src/objects/feedback-cell.h
index 9728f8e8c0..19f1075e62 100644
--- a/deps/v8/src/objects/feedback-cell.h
+++ b/deps/v8/src/objects/feedback-cell.h
@@ -13,6 +13,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/feedback-cell-tq.inc"
+
// This is a special cell used to maintain both the link between a
// closure and its feedback vector, and a way to count the
// number of closures created for a certain function per native
@@ -34,6 +36,11 @@ class FeedbackCell : public TorqueGeneratedFeedbackCell<FeedbackCell, Struct> {
inline void SetInitialInterruptBudget();
inline void SetInterruptBudget();
+ // The closure count is encoded in the cell's map, which distinguishes
+ // between zero, one, or many closures. This function records a new closure
+ // creation by updating the map.
+ inline void IncrementClosureCount(Isolate* isolate);
+
using BodyDescriptor =
FixedBodyDescriptor<kValueOffset, kInterruptBudgetOffset, kAlignedSize>;
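// Sketch of the map-encoded closure counter described above: the count
// saturates at "many", so only three states exist and no integer field is
// needed. The enum stands in for the no/one/many closure cell maps.
enum class ClosureCountSketch { kNone, kOne, kMany };
inline void IncrementClosureCountSketch(ClosureCountSketch* state) {
  switch (*state) {
    case ClosureCountSketch::kNone:
      *state = ClosureCountSketch::kOne;
      break;
    case ClosureCountSketch::kOne:
      *state = ClosureCountSketch::kMany;
      break;
    case ClosureCountSketch::kMany:
      break;  // Saturates: further closures keep the "many" map.
  }
}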
diff --git a/deps/v8/src/objects/feedback-vector-inl.h b/deps/v8/src/objects/feedback-vector-inl.h
index 6db9230ebb..2e23c35b5f 100644
--- a/deps/v8/src/objects/feedback-vector-inl.h
+++ b/deps/v8/src/objects/feedback-vector-inl.h
@@ -21,6 +21,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/feedback-vector-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(FeedbackVector)
OBJECT_CONSTRUCTORS_IMPL(FeedbackMetadata, HeapObject)
OBJECT_CONSTRUCTORS_IMPL(ClosureFeedbackCellArray, FixedArray)
@@ -33,12 +35,12 @@ CAST_ACCESSOR(ClosureFeedbackCellArray)
INT32_ACCESSORS(FeedbackMetadata, slot_count, kSlotCountOffset)
-INT32_ACCESSORS(FeedbackMetadata, closure_feedback_cell_count,
- kFeedbackCellCountOffset)
+INT32_ACCESSORS(FeedbackMetadata, create_closure_slot_count,
+ kCreateClosureSlotCountOffset)
int32_t FeedbackMetadata::synchronized_slot_count() const {
- return base::Acquire_Load(reinterpret_cast<const base::Atomic32*>(
- FIELD_ADDR(*this, kSlotCountOffset)));
+ return base::Acquire_Load(
+ reinterpret_cast<const base::Atomic32*>(field_address(kSlotCountOffset)));
}
int32_t FeedbackMetadata::get(int index) const {
@@ -98,8 +100,6 @@ Handle<FeedbackCell> ClosureFeedbackCellArray::GetFeedbackCell(int index) {
return handle(FeedbackCell::cast(get(index)), GetIsolate());
}
-void FeedbackVector::clear_padding() { set_padding(0); }
-
bool FeedbackVector::is_empty() const { return length() == 0; }
FeedbackMetadata FeedbackVector::metadata() const {
@@ -109,17 +109,30 @@ FeedbackMetadata FeedbackVector::metadata() const {
void FeedbackVector::clear_invocation_count() { set_invocation_count(0); }
Code FeedbackVector::optimized_code() const {
- MaybeObject slot = optimized_code_weak_or_smi();
- DCHECK(slot->IsSmi() || slot->IsWeakOrCleared());
+ MaybeObject slot = maybe_optimized_code();
+ DCHECK(slot->IsWeakOrCleared());
HeapObject heap_object;
- return slot->GetHeapObject(&heap_object) ? Code::cast(heap_object) : Code();
+ Code code =
+ slot->GetHeapObject(&heap_object) ? Code::cast(heap_object) : Code();
+ // It is possible that the maybe_optimized_code slot is cleared but the
+ // optimization tier hasn't been updated yet. We update the tier when we
+ // execute the function next time / when we create new closure.
+  // optimization tier hasn't been updated yet. We update the tier the next
+  // time we execute the function or when we create a new closure.
+ return code;
}
OptimizationMarker FeedbackVector::optimization_marker() const {
- MaybeObject slot = optimized_code_weak_or_smi();
- Smi value;
- if (!slot->ToSmi(&value)) return OptimizationMarker::kNone;
- return static_cast<OptimizationMarker>(value.value());
+ return OptimizationMarkerBits::decode(flags());
+}
+
+OptimizationTier FeedbackVector::optimization_tier() const {
+ OptimizationTier tier = OptimizationTierBits::decode(flags());
+  // It is possible that the optimization tier bits weren't updated when the
+  // code was cleared due to a GC.
+ DCHECK_IMPLIES(tier == OptimizationTier::kNone,
+ maybe_optimized_code()->IsCleared());
+ return tier;
}
bool FeedbackVector::has_optimized_code() const {
@@ -139,13 +152,28 @@ FeedbackSlot FeedbackVector::ToSlot(intptr_t index) {
return FeedbackSlot(static_cast<int>(index));
}
+#ifdef DEBUG
+// Instead of FixedArray, the Feedback and the Extra should contain
+// WeakFixedArrays. The only allowed FixedArray subtype is HashTable.
+bool FeedbackVector::IsOfLegacyType(MaybeObject value) {
+ HeapObject heap_object;
+ if (value->GetHeapObject(&heap_object)) {
+ return heap_object.IsFixedArray() && !heap_object.IsHashTable();
+ }
+ return false;
+}
+#endif // DEBUG
+
MaybeObject FeedbackVector::Get(FeedbackSlot slot) const {
- return raw_feedback_slots(GetIndex(slot));
+ MaybeObject value = raw_feedback_slots(GetIndex(slot));
+ DCHECK(!IsOfLegacyType(value));
+ return value;
}
-MaybeObject FeedbackVector::Get(const Isolate* isolate,
- FeedbackSlot slot) const {
- return raw_feedback_slots(isolate, GetIndex(slot));
+MaybeObject FeedbackVector::Get(IsolateRoot isolate, FeedbackSlot slot) const {
+ MaybeObject value = raw_feedback_slots(isolate, GetIndex(slot));
+ DCHECK(!IsOfLegacyType(value));
+ return value;
}
Handle<FeedbackCell> FeedbackVector::GetClosureFeedbackCell(int index) const {
@@ -155,14 +183,41 @@ Handle<FeedbackCell> FeedbackVector::GetClosureFeedbackCell(int index) const {
return cell_array.GetFeedbackCell(index);
}
+MaybeObject FeedbackVector::SynchronizedGet(FeedbackSlot slot) const {
+ const int i = slot.ToInt();
+ DCHECK_LT(static_cast<unsigned>(i), static_cast<unsigned>(this->length()));
+ const int offset = kRawFeedbackSlotsOffset + i * kTaggedSize;
+ MaybeObject value = TaggedField<MaybeObject>::Acquire_Load(*this, offset);
+ DCHECK(!IsOfLegacyType(value));
+ return value;
+}
+
+void FeedbackVector::SynchronizedSet(FeedbackSlot slot, MaybeObject value,
+ WriteBarrierMode mode) {
+ DCHECK(!IsOfLegacyType(value));
+ const int i = slot.ToInt();
+ DCHECK_LT(static_cast<unsigned>(i), static_cast<unsigned>(this->length()));
+ const int offset = kRawFeedbackSlotsOffset + i * kTaggedSize;
+ TaggedField<MaybeObject>::Release_Store(*this, offset, value);
+ CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode);
+}
+
+void FeedbackVector::SynchronizedSet(FeedbackSlot slot, Object value,
+ WriteBarrierMode mode) {
+ SynchronizedSet(slot, MaybeObject::FromObject(value), mode);
+}
+
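// Sketch of the release/acquire pairing used by SynchronizedSet and
// SynchronizedGet above: the writer publishes with release semantics, the
// reader observes with acquire semantics, so anything initialized before the
// store is visible after the load. std::atomic stands in for the TaggedField
// accessors.
#include <atomic>
inline void PublishSketch(std::atomic<int>* slot, int value) {
  slot->store(value, std::memory_order_release);
}
inline int ObserveSketch(const std::atomic<int>* slot) {
  return slot->load(std::memory_order_acquire);
}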
void FeedbackVector::Set(FeedbackSlot slot, MaybeObject value,
WriteBarrierMode mode) {
+ DCHECK(!IsOfLegacyType(value));
set_raw_feedback_slots(GetIndex(slot), value, mode);
}
void FeedbackVector::Set(FeedbackSlot slot, Object value,
WriteBarrierMode mode) {
- set_raw_feedback_slots(GetIndex(slot), MaybeObject::FromObject(value), mode);
+ MaybeObject maybe_value = MaybeObject::FromObject(value);
+ DCHECK(!IsOfLegacyType(maybe_value));
+ set_raw_feedback_slots(GetIndex(slot), maybe_value, mode);
}
inline MaybeObjectSlot FeedbackVector::slots_start() {
@@ -237,7 +292,7 @@ CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
}
// Helper function to transform the feedback to ForInHint.
-ForInHint ForInHintFromFeedback(int type_feedback) {
+ForInHint ForInHintFromFeedback(ForInFeedback type_feedback) {
switch (type_feedback) {
case ForInFeedback::kNone:
return ForInHint::kNone;
@@ -255,10 +310,6 @@ Handle<Symbol> FeedbackVector::UninitializedSentinel(Isolate* isolate) {
return isolate->factory()->uninitialized_symbol();
}
-Handle<Symbol> FeedbackVector::GenericSentinel(Isolate* isolate) {
- return isolate->factory()->generic_symbol();
-}
-
Handle<Symbol> FeedbackVector::MegamorphicSentinel(Isolate* isolate) {
return isolate->factory()->megamorphic_symbol();
}
@@ -283,46 +334,91 @@ int FeedbackMetadataIterator::entry_size() const {
return FeedbackMetadata::GetSlotSize(kind());
}
-MaybeObject FeedbackNexus::GetFeedback() const {
- MaybeObject feedback = vector().Get(slot());
- FeedbackVector::AssertNoLegacyTypes(feedback);
- return feedback;
+MaybeObject NexusConfig::GetFeedback(FeedbackVector vector,
+ FeedbackSlot slot) const {
+ return vector.SynchronizedGet(slot);
}
-MaybeObject FeedbackNexus::GetFeedbackExtra() const {
-#ifdef DEBUG
- FeedbackSlotKind kind = vector().GetKind(slot());
- DCHECK_LT(1, FeedbackMetadata::GetSlotSize(kind));
-#endif
- return vector().Get(slot().WithOffset(1));
+void NexusConfig::SetFeedback(FeedbackVector vector, FeedbackSlot slot,
+ MaybeObject feedback,
+ WriteBarrierMode mode) const {
+ DCHECK(can_write());
+ vector.SynchronizedSet(slot, feedback, mode);
}
-void FeedbackNexus::SetFeedback(Object feedback, WriteBarrierMode mode) {
- SetFeedback(MaybeObject::FromObject(feedback));
+MaybeObject FeedbackNexus::UninitializedSentinel() const {
+ return MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(GetIsolate()));
}
-void FeedbackNexus::SetFeedback(MaybeObject feedback, WriteBarrierMode mode) {
- FeedbackVector::AssertNoLegacyTypes(feedback);
- vector().Set(slot(), feedback, mode);
+MaybeObject FeedbackNexus::MegamorphicSentinel() const {
+ return MaybeObject::FromObject(
+ *FeedbackVector::MegamorphicSentinel(GetIsolate()));
}
-void FeedbackNexus::SetFeedbackExtra(Object feedback_extra,
- WriteBarrierMode mode) {
-#ifdef DEBUG
- FeedbackSlotKind kind = vector().GetKind(slot());
- DCHECK_LT(1, FeedbackMetadata::GetSlotSize(kind));
- FeedbackVector::AssertNoLegacyTypes(MaybeObject::FromObject(feedback_extra));
-#endif
- vector().Set(slot().WithOffset(1), MaybeObject::FromObject(feedback_extra),
- mode);
+MaybeObject FeedbackNexus::FromHandle(MaybeObjectHandle slot) const {
+ return slot.is_null() ? HeapObjectReference::ClearedValue(config()->isolate())
+ : *slot;
}
-void FeedbackNexus::SetFeedbackExtra(MaybeObject feedback_extra,
- WriteBarrierMode mode) {
-#ifdef DEBUG
- FeedbackVector::AssertNoLegacyTypes(feedback_extra);
-#endif
- vector().Set(slot().WithOffset(1), feedback_extra, mode);
+MaybeObjectHandle FeedbackNexus::ToHandle(MaybeObject value) const {
+ return value.IsCleared() ? MaybeObjectHandle()
+ : MaybeObjectHandle(config()->NewHandle(value));
+}
+
+MaybeObject FeedbackNexus::GetFeedback() const {
+ auto pair = GetFeedbackPair();
+ return pair.first;
+}
+
+MaybeObject FeedbackNexus::GetFeedbackExtra() const {
+ auto pair = GetFeedbackPair();
+ return pair.second;
+}
+
+std::pair<MaybeObject, MaybeObject> FeedbackNexus::GetFeedbackPair() const {
+ if (config()->mode() == NexusConfig::BackgroundThread &&
+ feedback_cache_.has_value()) {
+ return std::make_pair(FromHandle(feedback_cache_->first),
+ FromHandle(feedback_cache_->second));
+ }
+ auto pair = FeedbackMetadata::GetSlotSize(kind()) == 2
+ ? config()->GetFeedbackPair(vector(), slot())
+ : std::make_pair(config()->GetFeedback(vector(), slot()),
+ MaybeObject());
+ if (config()->mode() == NexusConfig::BackgroundThread &&
+ !feedback_cache_.has_value()) {
+ feedback_cache_ =
+ std::make_pair(ToHandle(pair.first), ToHandle(pair.second));
+ }
+ return pair;
+}
+
+template <typename T>
+struct IsValidFeedbackType
+ : public std::integral_constant<bool,
+ std::is_base_of<MaybeObject, T>::value ||
+ std::is_base_of<Object, T>::value> {};
+
+template <typename FeedbackType>
+void FeedbackNexus::SetFeedback(FeedbackType feedback, WriteBarrierMode mode) {
+ static_assert(IsValidFeedbackType<FeedbackType>(),
+ "feedbacks need to be Smi, Object or MaybeObject");
+ MaybeObject fmo = MaybeObject::Create(feedback);
+ config()->SetFeedback(vector(), slot(), fmo, mode);
+}
+
+template <typename FeedbackType, typename FeedbackExtraType>
+void FeedbackNexus::SetFeedback(FeedbackType feedback, WriteBarrierMode mode,
+ FeedbackExtraType feedback_extra,
+ WriteBarrierMode mode_extra) {
+ static_assert(IsValidFeedbackType<FeedbackType>(),
+ "feedbacks need to be Smi, Object or MaybeObject");
+ static_assert(IsValidFeedbackType<FeedbackExtraType>(),
+ "feedbacks need to be Smi, Object or MaybeObject");
+ MaybeObject fmo = MaybeObject::Create(feedback);
+ MaybeObject fmo_extra = MaybeObject::Create(feedback_extra);
+ config()->SetFeedbackPair(vector(), slot(), fmo, mode, fmo_extra, mode_extra);
}
Isolate* FeedbackNexus::GetIsolate() const { return vector().GetIsolate(); }
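// Sketch of the compile-time gate used by the templated SetFeedback() above:
// a trait admits only types derived from one of two bases, and a
// static_assert turns anything else into a readable compiler error. BaseA
// and BaseB are stand-ins for Object and MaybeObject.
#include <type_traits>
struct BaseA {};
struct BaseB {};
template <typename T>
struct IsValidFeedbackSketch
    : std::integral_constant<bool, std::is_base_of<BaseA, T>::value ||
                                       std::is_base_of<BaseB, T>::value> {};
template <typename T>
void SetFeedbackSketch(T) {
  static_assert(IsValidFeedbackSketch<T>::value,
                "feedback must derive from BaseA or BaseB");
}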
diff --git a/deps/v8/src/objects/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc
index d48cd7eb28..7c30e1a045 100644
--- a/deps/v8/src/objects/feedback-vector.cc
+++ b/deps/v8/src/objects/feedback-vector.cc
@@ -5,6 +5,7 @@
#include "src/objects/feedback-vector.h"
#include "src/diagnostics/code-tracer.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/local-factory-inl.h"
#include "src/ic/handler-configuration-inl.h"
#include "src/ic/ic-inl.h"
@@ -19,7 +20,7 @@ namespace v8 {
namespace internal {
FeedbackSlot FeedbackVectorSpec::AddSlot(FeedbackSlotKind kind) {
- int slot = slots();
+ int slot = slot_count();
int entries_per_slot = FeedbackMetadata::GetSlotSize(kind);
append(kind);
for (int i = 1; i < entries_per_slot; i++) {
@@ -38,9 +39,7 @@ FeedbackSlot FeedbackVectorSpec::AddTypeProfileSlot() {
bool FeedbackVectorSpec::HasTypeProfileSlot() const {
FeedbackSlot slot =
FeedbackVector::ToSlot(FeedbackVectorSpec::kTypeProfileSlotIndex);
- if (slots() <= slot.ToInt()) {
- return false;
- }
+ if (slot_count() <= slot.ToInt()) return false;
return GetKind(slot) == FeedbackSlotKind::kTypeProfile;
}
@@ -81,10 +80,10 @@ Handle<FeedbackMetadata> FeedbackMetadata::New(LocalIsolate* isolate,
const FeedbackVectorSpec* spec) {
auto* factory = isolate->factory();
- const int slot_count = spec == nullptr ? 0 : spec->slots();
- const int closure_feedback_cell_count =
- spec == nullptr ? 0 : spec->closure_feedback_cells();
- if (slot_count == 0 && closure_feedback_cell_count == 0) {
+ const int slot_count = spec == nullptr ? 0 : spec->slot_count();
+ const int create_closure_slot_count =
+ spec == nullptr ? 0 : spec->create_closure_slot_count();
+ if (slot_count == 0 && create_closure_slot_count == 0) {
return factory->empty_feedback_metadata();
}
#ifdef DEBUG
@@ -101,7 +100,7 @@ Handle<FeedbackMetadata> FeedbackMetadata::New(LocalIsolate* isolate,
#endif
Handle<FeedbackMetadata> metadata =
- factory->NewFeedbackMetadata(slot_count, closure_feedback_cell_count);
+ factory->NewFeedbackMetadata(slot_count, create_closure_slot_count);
// Initialize the slots. The raw data section has already been pre-zeroed in
// NewFeedbackMetadata.
@@ -122,7 +121,7 @@ template Handle<FeedbackMetadata> FeedbackMetadata::New(
bool FeedbackMetadata::SpecDiffersFrom(
const FeedbackVectorSpec* other_spec) const {
- if (other_spec->slots() != slot_count()) {
+ if (other_spec->slot_count() != slot_count()) {
return true;
}
@@ -220,7 +219,7 @@ Handle<ClosureFeedbackCellArray> ClosureFeedbackCellArray::New(
Factory* factory = isolate->factory();
int num_feedback_cells =
- shared->feedback_metadata().closure_feedback_cell_count();
+ shared->feedback_metadata().create_closure_slot_count();
Handle<ClosureFeedbackCellArray> feedback_cell_array =
factory->NewClosureFeedbackCellArray(num_feedback_cells);
@@ -251,16 +250,18 @@ Handle<FeedbackVector> FeedbackVector::New(
DCHECK_EQ(vector->length(), slot_count);
DCHECK_EQ(vector->shared_function_info(), *shared);
- DCHECK_EQ(
- vector->optimized_code_weak_or_smi(),
- MaybeObject::FromSmi(Smi::FromEnum(
- FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
- : OptimizationMarker::kNone)));
+ DCHECK_EQ(vector->optimization_marker(),
+ FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
+ : OptimizationMarker::kNone);
+ // TODO(mythria): This might change if NCI code is installed on feedback
+ // vector. Update this accordingly.
+ DCHECK_EQ(vector->optimization_tier(), OptimizationTier::kNone);
DCHECK_EQ(vector->invocation_count(), 0);
DCHECK_EQ(vector->profiler_ticks(), 0);
+ DCHECK(vector->maybe_optimized_code()->IsCleared());
// Ensure we can skip the write barrier
- Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
+ Handle<Symbol> uninitialized_sentinel = UninitializedSentinel(isolate);
DCHECK_EQ(ReadOnlyRoots(isolate).uninitialized_symbol(),
*uninitialized_sentinel);
for (int i = 0; i < slot_count;) {
@@ -268,7 +269,7 @@ Handle<FeedbackVector> FeedbackVector::New(
FeedbackSlotKind kind = feedback_metadata->GetKind(slot);
int entry_size = FeedbackMetadata::GetSlotSize(kind);
- Object extra_value = *uninitialized_sentinel;
+ MaybeObject extra_value = MaybeObject::FromObject(*uninitialized_sentinel);
switch (kind) {
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
@@ -287,7 +288,7 @@ Handle<FeedbackVector> FeedbackVector::New(
break;
case FeedbackSlotKind::kCall:
vector->Set(slot, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
- extra_value = Smi::zero();
+ extra_value = MaybeObject::FromObject(Smi::zero());
break;
case FeedbackSlotKind::kCloneObject:
case FeedbackSlotKind::kLoadProperty:
@@ -382,32 +383,62 @@ void FeedbackVector::SaturatingIncrementProfilerTicks() {
void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
Handle<Code> code) {
DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));
- vector->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*code));
+  // We should set optimized code only when there is no valid optimized
+ // code or we are tiering up.
+ DCHECK(!vector->has_optimized_code() ||
+ vector->optimized_code().marked_for_deoptimization() ||
+ (vector->optimized_code().kind() == CodeKind::TURBOPROP &&
+ code->kind() == CodeKind::TURBOFAN));
+ // TODO(mythria): We could see a CompileOptimized marker here either from
+ // tests that use %OptimizeFunctionOnNextCall or because we re-mark the
+ // function for non-concurrent optimization after an OSR. We should avoid
+  // these cases and also check that the marker isn't kCompileOptimized.
+ DCHECK(vector->optimization_marker() !=
+ OptimizationMarker::kCompileOptimizedConcurrent);
+ vector->set_maybe_optimized_code(HeapObjectReference::Weak(*code));
+ int32_t state = vector->flags();
+ state = OptimizationTierBits::update(state, GetTierForCodeKind(code->kind()));
+ state = OptimizationMarkerBits::update(state, OptimizationMarker::kNone);
+ vector->set_flags(state);
}
void FeedbackVector::ClearOptimizedCode() {
DCHECK(has_optimized_code());
- SetOptimizationMarker(OptimizationMarker::kNone);
+ DCHECK_NE(optimization_tier(), OptimizationTier::kNone);
+ set_maybe_optimized_code(HeapObjectReference::ClearedValue(GetIsolate()));
+ ClearOptimizationTier();
}
void FeedbackVector::ClearOptimizationMarker() {
- DCHECK(!has_optimized_code());
SetOptimizationMarker(OptimizationMarker::kNone);
}
void FeedbackVector::SetOptimizationMarker(OptimizationMarker marker) {
- set_optimized_code_weak_or_smi(MaybeObject::FromSmi(Smi::FromEnum(marker)));
+ int32_t state = flags();
+ state = OptimizationMarkerBits::update(state, marker);
+ set_flags(state);
+}
+
+void FeedbackVector::ClearOptimizationTier() {
+ int32_t state = flags();
+ state = OptimizationTierBits::update(state, OptimizationTier::kNone);
+ set_flags(state);
+}
+
+void FeedbackVector::InitializeOptimizationState() {
+ int32_t state = 0;
+ state = OptimizationMarkerBits::update(
+ state, FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
+ : OptimizationMarker::kNone);
+ state = OptimizationTierBits::update(state, OptimizationTier::kNone);
+ set_flags(state);
}
void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
SharedFunctionInfo shared, const char* reason) {
- MaybeObject slot = optimized_code_weak_or_smi();
- if (slot->IsSmi()) {
- return;
- }
-
+ MaybeObject slot = maybe_optimized_code();
if (slot->IsCleared()) {
- ClearOptimizationMarker();
+ ClearOptimizationTier();
return;
}
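// Sketch of the BitField-style flags word manipulated above: the marker and
// the tier occupy disjoint bit ranges of one int32, and update() rewrites
// one range while preserving the other. Shifts and widths here are
// illustrative, not V8's actual layout.
#include <cstdint>
template <int kShift, int kSize>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static constexpr uint32_t update(uint32_t state, uint32_t value) {
    return (state & ~kMask) | ((value << kShift) & kMask);
  }
  static constexpr uint32_t decode(uint32_t state) {
    return (state & kMask) >> kShift;
  }
};
using OptimizationMarkerBitsSketch = BitFieldSketch<0, 3>;  // bits 0..2
using OptimizationTierBitsSketch = BitFieldSketch<3, 2>;    // bits 3..4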
@@ -440,20 +471,77 @@ bool FeedbackVector::ClearSlots(Isolate* isolate) {
return feedback_updated;
}
-void FeedbackVector::AssertNoLegacyTypes(MaybeObject object) {
-#ifdef DEBUG
- HeapObject heap_object;
- if (object->GetHeapObject(&heap_object)) {
- // Instead of FixedArray, the Feedback and the Extra should contain
- // WeakFixedArrays. The only allowed FixedArray subtype is HashTable.
- DCHECK_IMPLIES(heap_object.IsFixedArray(), heap_object.IsHashTable());
+MaybeObjectHandle NexusConfig::NewHandle(MaybeObject object) const {
+ if (mode() == Mode::MainThread) {
+ return handle(object, isolate_);
}
-#endif
+ DCHECK_EQ(mode(), Mode::BackgroundThread);
+ return handle(object, local_heap_);
+}
+
+template <typename T>
+Handle<T> NexusConfig::NewHandle(T object) const {
+ if (mode() == Mode::MainThread) {
+ return handle(object, isolate_);
+ }
+ DCHECK_EQ(mode(), Mode::BackgroundThread);
+ return handle(object, local_heap_);
+}
+
+void NexusConfig::SetFeedbackPair(FeedbackVector vector,
+ FeedbackSlot start_slot, MaybeObject feedback,
+ WriteBarrierMode mode,
+ MaybeObject feedback_extra,
+ WriteBarrierMode mode_extra) const {
+ CHECK(can_write());
+ CHECK_GT(vector.length(), start_slot.WithOffset(1).ToInt());
+ base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
+ isolate()->feedback_vector_access());
+ vector.Set(start_slot, feedback, mode);
+ vector.Set(start_slot.WithOffset(1), feedback_extra, mode_extra);
+}
+
+std::pair<MaybeObject, MaybeObject> NexusConfig::GetFeedbackPair(
+ FeedbackVector vector, FeedbackSlot slot) const {
+ if (mode() == BackgroundThread) {
+ isolate()->feedback_vector_access()->LockShared();
+ }
+ MaybeObject feedback = vector.Get(slot);
+ MaybeObject feedback_extra = vector.Get(slot.WithOffset(1));
+ auto return_value = std::make_pair(feedback, feedback_extra);
+ if (mode() == BackgroundThread) {
+ isolate()->feedback_vector_access()->UnlockShared();
+ }
+ return return_value;
+}
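
The asymmetry between the two functions above is deliberate: SetFeedbackPair
always takes the mutex exclusively because the main thread is the only
writer, while GetFeedbackPair locks shared only in background mode, since a
main-thread read can never race a write. In summary (assuming every write
goes through SetFeedbackPair or the other guarded setters):

// Thread        Read a slot pair              Write a slot pair
// Main          unlocked (sole writer)        SharedMutexGuard<kExclusive>
// Background    LockShared()/UnlockShared()   forbidden: CHECK(can_write())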
+
+FeedbackNexus::FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+ : vector_handle_(vector),
+ slot_(slot),
+ config_(NexusConfig::FromMainThread(
+ vector.is_null() ? nullptr : vector->GetIsolate())) {
+ kind_ = vector.is_null() ? FeedbackSlotKind::kInvalid : vector->GetKind(slot);
+}
+
+FeedbackNexus::FeedbackNexus(FeedbackVector vector, FeedbackSlot slot)
+ : vector_(vector),
+ slot_(slot),
+ config_(NexusConfig::FromMainThread(
+ vector.is_null() ? nullptr : vector.GetIsolate())) {
+ kind_ = vector.is_null() ? FeedbackSlotKind::kInvalid : vector.GetKind(slot);
}
+FeedbackNexus::FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot,
+ const NexusConfig& config)
+ : vector_handle_(vector),
+ slot_(slot),
+ kind_(vector->GetKind(slot)),
+ config_(config) {}
+
Handle<WeakFixedArray> FeedbackNexus::CreateArrayOfSize(int length) {
- Isolate* isolate = GetIsolate();
- Handle<WeakFixedArray> array = isolate->factory()->NewWeakFixedArray(length);
+ DCHECK(config()->can_write());
+ Handle<WeakFixedArray> array =
+ GetIsolate()->factory()->NewWeakFixedArray(length);
return array;
}
@@ -465,21 +553,18 @@ void FeedbackNexus::ConfigureUninitialized() {
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
SetFeedback(HeapObjectReference::ClearedValue(isolate),
+ SKIP_WRITE_BARRIER, UninitializedSentinel(),
SKIP_WRITE_BARRIER);
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
break;
}
case FeedbackSlotKind::kCloneObject:
case FeedbackSlotKind::kCall: {
- SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
+ SetFeedback(UninitializedSentinel(), SKIP_WRITE_BARRIER, Smi::zero(),
SKIP_WRITE_BARRIER);
- SetFeedbackExtra(Smi::zero(), SKIP_WRITE_BARRIER);
break;
}
case FeedbackSlotKind::kInstanceOf: {
- SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
+ SetFeedback(UninitializedSentinel(), SKIP_WRITE_BARRIER);
break;
}
case FeedbackSlotKind::kStoreNamedSloppy:
@@ -492,10 +577,8 @@ void FeedbackNexus::ConfigureUninitialized() {
case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kHasKeyed:
case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
- SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
+ SetFeedback(UninitializedSentinel(), SKIP_WRITE_BARRIER,
+ UninitializedSentinel(), SKIP_WRITE_BARRIER);
break;
}
default:
@@ -555,11 +638,10 @@ bool FeedbackNexus::Clear() {
bool FeedbackNexus::ConfigureMegamorphic() {
DisallowHeapAllocation no_gc;
Isolate* isolate = GetIsolate();
- MaybeObject sentinel =
- MaybeObject::FromObject(*FeedbackVector::MegamorphicSentinel(isolate));
+ MaybeObject sentinel = MegamorphicSentinel();
if (GetFeedback() != sentinel) {
- SetFeedback(sentinel, SKIP_WRITE_BARRIER);
- SetFeedbackExtra(HeapObjectReference::ClearedValue(isolate));
+ SetFeedback(sentinel, SKIP_WRITE_BARRIER,
+ HeapObjectReference::ClearedValue(isolate));
return true;
}
@@ -568,21 +650,17 @@ bool FeedbackNexus::ConfigureMegamorphic() {
bool FeedbackNexus::ConfigureMegamorphic(IcCheckType property_type) {
DisallowHeapAllocation no_gc;
- Isolate* isolate = GetIsolate();
- bool changed = false;
- MaybeObject sentinel =
- MaybeObject::FromObject(*FeedbackVector::MegamorphicSentinel(isolate));
- if (GetFeedback() != sentinel) {
- SetFeedback(sentinel, SKIP_WRITE_BARRIER);
- changed = true;
- }
-
- Smi extra = Smi::FromInt(static_cast<int>(property_type));
- if (changed || GetFeedbackExtra() != MaybeObject::FromSmi(extra)) {
- SetFeedbackExtra(extra, SKIP_WRITE_BARRIER);
- changed = true;
+ MaybeObject sentinel = MegamorphicSentinel();
+ MaybeObject maybe_extra =
+ MaybeObject::FromSmi(Smi::FromInt(static_cast<int>(property_type)));
+
+ auto feedback = GetFeedbackPair();
+ bool update_required =
+ feedback.first != sentinel || feedback.second != maybe_extra;
+ if (update_required) {
+ SetFeedback(sentinel, SKIP_WRITE_BARRIER, maybe_extra, SKIP_WRITE_BARRIER);
}
- return changed;
+ return update_required;
}
Map FeedbackNexus::GetFirstMap() const {
@@ -595,8 +673,8 @@ Map FeedbackNexus::GetFirstMap() const {
}
InlineCacheState FeedbackNexus::ic_state() const {
- Isolate* isolate = GetIsolate();
- MaybeObject feedback = GetFeedback();
+ MaybeObject feedback, extra;
+ std::tie(feedback, extra) = GetFeedbackPair();
switch (kind()) {
case FeedbackSlotKind::kLiteral:
@@ -610,10 +688,7 @@ InlineCacheState FeedbackNexus::ic_state() const {
if (feedback->IsSmi()) return MONOMORPHIC;
DCHECK(feedback->IsWeakOrCleared());
- MaybeObject extra = GetFeedbackExtra();
- if (!feedback->IsCleared() ||
- extra != MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (!feedback->IsCleared() || extra != UninitializedSentinel()) {
return MONOMORPHIC;
}
return UNINITIALIZED;
@@ -628,12 +703,10 @@ InlineCacheState FeedbackNexus::ic_state() const {
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kHasKeyed: {
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
return UNINITIALIZED;
}
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::MegamorphicSentinel(isolate))) {
+ if (feedback == MegamorphicSentinel()) {
return MEGAMORPHIC;
}
if (feedback->IsWeakOrCleared()) {
@@ -650,8 +723,8 @@ InlineCacheState FeedbackNexus::ic_state() const {
if (heap_object.IsName()) {
DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
IsKeyedHasICKind(kind()));
- Object extra = GetFeedbackExtra()->GetHeapObjectAssumeStrong();
- WeakFixedArray extra_array = WeakFixedArray::cast(extra);
+ Object extra_object = extra->GetHeapObjectAssumeStrong();
+ WeakFixedArray extra_array = WeakFixedArray::cast(extra_object);
return extra_array.length() > 2 ? POLYMORPHIC : MONOMORPHIC;
}
}
@@ -659,8 +732,7 @@ InlineCacheState FeedbackNexus::ic_state() const {
}
case FeedbackSlotKind::kCall: {
HeapObject heap_object;
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::MegamorphicSentinel(isolate))) {
+ if (feedback == MegamorphicSentinel()) {
return GENERIC;
} else if (feedback->IsWeakOrCleared()) {
if (feedback->GetHeapObjectIfWeak(&heap_object)) {
@@ -675,8 +747,7 @@ InlineCacheState FeedbackNexus::ic_state() const {
return MONOMORPHIC;
}
- CHECK_EQ(feedback, MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate)));
+ CHECK_EQ(feedback, UninitializedSentinel());
return UNINITIALIZED;
}
case FeedbackSlotKind::kBinaryOp: {
@@ -709,19 +780,15 @@ InlineCacheState FeedbackNexus::ic_state() const {
return MONOMORPHIC;
}
case FeedbackSlotKind::kInstanceOf: {
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
return UNINITIALIZED;
- } else if (feedback ==
- MaybeObject::FromObject(
- *FeedbackVector::MegamorphicSentinel(isolate))) {
+ } else if (feedback == MegamorphicSentinel()) {
return MEGAMORPHIC;
}
return MONOMORPHIC;
}
case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
return UNINITIALIZED;
} else if (feedback->IsWeakOrCleared()) {
// Don't check if the map is cleared.
@@ -731,20 +798,17 @@ InlineCacheState FeedbackNexus::ic_state() const {
return MEGAMORPHIC;
}
case FeedbackSlotKind::kTypeProfile: {
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
return UNINITIALIZED;
}
return MONOMORPHIC;
}
case FeedbackSlotKind::kCloneObject: {
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
return UNINITIALIZED;
}
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::MegamorphicSentinel(isolate))) {
+ if (feedback == MegamorphicSentinel()) {
return MEGAMORPHIC;
}
if (feedback->IsWeakOrCleared()) {
@@ -764,10 +828,8 @@ InlineCacheState FeedbackNexus::ic_state() const {
void FeedbackNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
DCHECK(IsGlobalICKind(kind()));
- Isolate* isolate = GetIsolate();
- SetFeedback(HeapObjectReference::Weak(*cell));
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
+ SetFeedback(HeapObjectReference::Weak(*cell), UPDATE_WRITE_BARRIER,
+ UninitializedSentinel(), SKIP_WRITE_BARRIER);
}
bool FeedbackNexus::ConfigureLexicalVarMode(int script_context_index,
@@ -785,22 +847,21 @@ bool FeedbackNexus::ConfigureLexicalVarMode(int script_context_index,
SlotIndexBits::encode(context_slot_index) |
ImmutabilityBit::encode(immutable);
- SetFeedback(Smi::From31BitPattern(config));
- Isolate* isolate = GetIsolate();
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
- SKIP_WRITE_BARRIER);
+ SetFeedback(Smi::From31BitPattern(config), SKIP_WRITE_BARRIER,
+ UninitializedSentinel(), SKIP_WRITE_BARRIER);
return true;
}
void FeedbackNexus::ConfigureHandlerMode(const MaybeObjectHandle& handler) {
DCHECK(IsGlobalICKind(kind()));
DCHECK(IC::IsHandler(*handler));
- SetFeedback(HeapObjectReference::ClearedValue(GetIsolate()));
- SetFeedbackExtra(*handler);
+ SetFeedback(HeapObjectReference::ClearedValue(GetIsolate()),
+ UPDATE_WRITE_BARRIER, *handler, UPDATE_WRITE_BARRIER);
}
void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
Handle<Map> result_map) {
+ DCHECK(config()->can_write());
Isolate* isolate = GetIsolate();
Handle<HeapObject> feedback;
{
@@ -814,14 +875,14 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
switch (ic_state()) {
case UNINITIALIZED:
// Cache the first map seen which meets the fast case requirements.
- SetFeedback(HeapObjectReference::Weak(*source_map));
- SetFeedbackExtra(*result_map);
+ SetFeedback(HeapObjectReference::Weak(*source_map), UPDATE_WRITE_BARRIER,
+ *result_map);
break;
case MONOMORPHIC:
if (feedback.is_null() || feedback.is_identical_to(source_map) ||
Map::cast(*feedback).is_deprecated()) {
- SetFeedback(HeapObjectReference::Weak(*source_map));
- SetFeedbackExtra(*result_map);
+ SetFeedback(HeapObjectReference::Weak(*source_map),
+ UPDATE_WRITE_BARRIER, *result_map);
} else {
// Transition to POLYMORPHIC.
Handle<WeakFixedArray> array =
@@ -830,8 +891,8 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
array->Set(1, GetFeedbackExtra());
array->Set(2, HeapObjectReference::Weak(*source_map));
array->Set(3, MaybeObject::FromObject(*result_map));
- SetFeedback(*array);
- SetFeedbackExtra(HeapObjectReference::ClearedValue(isolate));
+ SetFeedback(*array, UPDATE_WRITE_BARRIER,
+ HeapObjectReference::ClearedValue(isolate));
}
break;
case POLYMORPHIC: {
@@ -852,10 +913,9 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
if (i >= array->length()) {
if (i == kMaxElements) {
// Transition to MEGAMORPHIC.
- MaybeObject sentinel = MaybeObject::FromObject(
- *FeedbackVector::MegamorphicSentinel(isolate));
- SetFeedback(sentinel, SKIP_WRITE_BARRIER);
- SetFeedbackExtra(HeapObjectReference::ClearedValue(isolate));
+ MaybeObject sentinel = MegamorphicSentinel();
+ SetFeedback(sentinel, SKIP_WRITE_BARRIER,
+ HeapObjectReference::ClearedValue(isolate));
break;
}
@@ -896,7 +956,10 @@ void FeedbackNexus::SetSpeculationMode(SpeculationMode mode) {
uint32_t count = static_cast<uint32_t>(Smi::ToInt(call_count));
uint32_t value = CallCountField::encode(CallCountField::decode(count));
int result = static_cast<int>(value | SpeculationModeField::encode(mode));
- SetFeedbackExtra(Smi::FromInt(result), SKIP_WRITE_BARRIER);
+ MaybeObject feedback = GetFeedback();
+ // We can skip the write barrier for {feedback} because it's not changing.
+ SetFeedback(feedback, SKIP_WRITE_BARRIER, Smi::FromInt(result),
+ SKIP_WRITE_BARRIER);
}
SpeculationMode FeedbackNexus::GetSpeculationMode() {
@@ -924,18 +987,17 @@ void FeedbackNexus::ConfigureMonomorphic(Handle<Name> name,
const MaybeObjectHandle& handler) {
DCHECK(handler.is_null() || IC::IsHandler(*handler));
if (kind() == FeedbackSlotKind::kStoreDataPropertyInLiteral) {
- SetFeedback(HeapObjectReference::Weak(*receiver_map));
- SetFeedbackExtra(*name);
+ SetFeedback(HeapObjectReference::Weak(*receiver_map), UPDATE_WRITE_BARRIER,
+ *name);
} else {
if (name.is_null()) {
- SetFeedback(HeapObjectReference::Weak(*receiver_map));
- SetFeedbackExtra(*handler);
+ SetFeedback(HeapObjectReference::Weak(*receiver_map),
+ UPDATE_WRITE_BARRIER, *handler);
} else {
Handle<WeakFixedArray> array = CreateArrayOfSize(2);
- SetFeedback(*name);
array->Set(0, HeapObjectReference::Weak(*receiver_map));
array->Set(1, *handler);
- SetFeedbackExtra(*array);
+ SetFeedback(*name, UPDATE_WRITE_BARRIER, *array);
}
}
}
@@ -953,22 +1015,20 @@ void FeedbackNexus::ConfigurePolymorphic(
DCHECK(IC::IsHandler(*handler));
array->Set(current * 2 + 1, *handler);
}
+
if (name.is_null()) {
- SetFeedback(*array);
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(GetIsolate()),
- SKIP_WRITE_BARRIER);
+ SetFeedback(*array, UPDATE_WRITE_BARRIER, UninitializedSentinel(),
+ SKIP_WRITE_BARRIER);
} else {
- SetFeedback(*name);
- SetFeedbackExtra(*array);
+ SetFeedback(*name, UPDATE_WRITE_BARRIER, *array);
}
}
int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
DisallowHeapAllocation no_gc;
- Isolate* isolate = GetIsolate();
int found = 0;
for (FeedbackIterator it(this); !it.done(); it.Advance()) {
- maps->push_back(handle(it.map(), isolate));
+ maps->push_back(config()->NewHandle(it.map()));
found++;
}
@@ -978,16 +1038,15 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
int FeedbackNexus::ExtractMapsAndFeedback(
std::vector<MapAndFeedback>* maps_and_feedback) const {
DisallowHeapAllocation no_gc;
- Isolate* isolate = GetIsolate();
int found = 0;
for (FeedbackIterator it(this); !it.done(); it.Advance()) {
- Handle<Map> map = handle(it.map(), isolate);
+ Handle<Map> map = config()->NewHandle(it.map());
MaybeObject maybe_handler = it.handler();
if (!maybe_handler->IsCleared()) {
DCHECK(IC::IsHandler(maybe_handler) ||
IsStoreDataPropertyInLiteralKind(kind()));
- MaybeObjectHandle handler = handle(maybe_handler, isolate);
+ MaybeObjectHandle handler = config()->NewHandle(maybe_handler);
maps_and_feedback->push_back(MapAndHandler(map, handler));
found++;
}
@@ -998,20 +1057,18 @@ int FeedbackNexus::ExtractMapsAndFeedback(
int FeedbackNexus::ExtractMapsAndHandlers(
std::vector<MapAndHandler>* maps_and_handlers,
- bool try_update_deprecated) const {
+ TryUpdateHandler map_handler) const {
DCHECK(!IsStoreDataPropertyInLiteralKind(kind()));
DisallowHeapAllocation no_gc;
- Isolate* isolate = GetIsolate();
int found = 0;
for (FeedbackIterator it(this); !it.done(); it.Advance()) {
- Handle<Map> map = handle(it.map(), isolate);
+ Handle<Map> map = config()->NewHandle(it.map());
MaybeObject maybe_handler = it.handler();
if (!maybe_handler->IsCleared()) {
DCHECK(IC::IsHandler(maybe_handler));
- MaybeObjectHandle handler = handle(maybe_handler, isolate);
- if (try_update_deprecated &&
- !Map::TryUpdate(isolate, map).ToHandle(&map)) {
+ MaybeObjectHandle handler = config()->NewHandle(maybe_handler);
+ if (map_handler && !(map_handler(map).ToHandle(&map))) {
continue;
}
maps_and_handlers->push_back(MapAndHandler(map, handler));
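
With the boolean try_update_deprecated replaced by a TryUpdateHandler
callback, callers now decide how deprecated maps are refreshed. A hedged
sketch of a call site reproducing the old `true` behavior; the nexus and
isolate variables are assumed to be in scope:

std::vector<MapAndHandler> maps_and_handlers;
nexus.ExtractMapsAndHandlers(
    &maps_and_handlers,
    [isolate](Handle<Map> map) { return Map::TryUpdate(isolate, map); });
// Maps whose update fails are skipped, exactly as with the old flag.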
@@ -1027,7 +1084,7 @@ MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
for (FeedbackIterator it(this); !it.done(); it.Advance()) {
if (it.map() == *map && !it.handler()->IsCleared()) {
- return handle(it.handler(), GetIsolate());
+ return config()->NewHandle(it.handler());
}
}
return MaybeObjectHandle();
@@ -1174,14 +1231,14 @@ IcCheckType FeedbackNexus::GetKeyType() const {
DCHECK(IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind()) ||
IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind()) ||
IsStoreDataPropertyInLiteralKind(kind()));
- MaybeObject feedback = GetFeedback();
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::MegamorphicSentinel(GetIsolate()))) {
+ auto pair = GetFeedbackPair();
+ MaybeObject feedback = pair.first;
+ if (feedback == MegamorphicSentinel()) {
return static_cast<IcCheckType>(
- Smi::ToInt(GetFeedbackExtra()->cast<Object>()));
+ Smi::ToInt(pair.second->template cast<Object>()));
}
MaybeObject maybe_name =
- IsStoreDataPropertyInLiteralKind(kind()) ? GetFeedbackExtra() : feedback;
+ IsStoreDataPropertyInLiteralKind(kind()) ? pair.second : feedback;
return IsPropertyNameFeedback(maybe_name) ? PROPERTY : ELEMENT;
}
@@ -1200,16 +1257,15 @@ CompareOperationHint FeedbackNexus::GetCompareOperationFeedback() const {
ForInHint FeedbackNexus::GetForInFeedback() const {
DCHECK_EQ(kind(), FeedbackSlotKind::kForIn);
int feedback = GetFeedback().ToSmi().value();
- return ForInHintFromFeedback(feedback);
+ return ForInHintFromFeedback(static_cast<ForInFeedback>(feedback));
}
MaybeHandle<JSObject> FeedbackNexus::GetConstructorFeedback() const {
DCHECK_EQ(kind(), FeedbackSlotKind::kInstanceOf);
- Isolate* isolate = GetIsolate();
MaybeObject feedback = GetFeedback();
HeapObject heap_object;
if (feedback->GetHeapObjectIfWeak(&heap_object)) {
- return handle(JSObject::cast(heap_object), isolate);
+ return config()->NewHandle(JSObject::cast(heap_object));
}
return MaybeHandle<JSObject>();
}
@@ -1230,6 +1286,7 @@ bool InList(Handle<ArrayList> types, Handle<String> type) {
void FeedbackNexus::Collect(Handle<String> type, int position) {
DCHECK(IsTypeProfileKind(kind()));
DCHECK_GE(position, 0);
+ DCHECK(config()->can_write());
Isolate* isolate = GetIsolate();
MaybeObject const feedback = GetFeedback();
@@ -1237,8 +1294,7 @@ void FeedbackNexus::Collect(Handle<String> type, int position) {
// Map source position to collection of types
Handle<SimpleNumberDictionary> types;
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
types = SimpleNumberDictionary::New(isolate, 1);
} else {
types = handle(
@@ -1274,8 +1330,7 @@ std::vector<int> FeedbackNexus::GetSourcePositions() const {
MaybeObject const feedback = GetFeedback();
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
return source_positions;
}
@@ -1302,8 +1357,7 @@ std::vector<Handle<String>> FeedbackNexus::GetTypesForSourcePositions(
MaybeObject const feedback = GetFeedback();
std::vector<Handle<String>> types_for_position;
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
return types_for_position;
}
@@ -1361,8 +1415,7 @@ JSObject FeedbackNexus::GetTypeProfile() const {
MaybeObject const feedback = GetFeedback();
- if (feedback == MaybeObject::FromObject(
- *FeedbackVector::UninitializedSentinel(isolate))) {
+ if (feedback == UninitializedSentinel()) {
return *isolate->factory()->NewJSObject(isolate->object_function());
}
@@ -1374,7 +1427,7 @@ JSObject FeedbackNexus::GetTypeProfile() const {
void FeedbackNexus::ResetTypeProfile() {
DCHECK(IsTypeProfileKind(kind()));
- SetFeedback(*FeedbackVector::UninitializedSentinel(GetIsolate()));
+ SetFeedback(UninitializedSentinel());
}
FeedbackIterator::FeedbackIterator(const FeedbackNexus* nexus)
@@ -1387,8 +1440,8 @@ FeedbackIterator::FeedbackIterator(const FeedbackNexus* nexus)
IsKeyedHasICKind(nexus->kind()));
DisallowHeapAllocation no_gc;
- Isolate* isolate = nexus->GetIsolate();
- MaybeObject feedback = nexus->GetFeedback();
+ auto pair = nexus->GetFeedbackPair();
+ MaybeObject feedback = pair.first;
bool is_named_feedback = IsPropertyNameFeedback(feedback);
HeapObject heap_object;
@@ -1399,18 +1452,16 @@ FeedbackIterator::FeedbackIterator(const FeedbackNexus* nexus)
state_ = kPolymorphic;
heap_object = feedback->GetHeapObjectAssumeStrong();
if (is_named_feedback) {
- polymorphic_feedback_ =
- handle(WeakFixedArray::cast(
- nexus->GetFeedbackExtra()->GetHeapObjectAssumeStrong()),
- isolate);
+ polymorphic_feedback_ = nexus->config()->NewHandle(
+ WeakFixedArray::cast(pair.second->GetHeapObjectAssumeStrong()));
} else {
polymorphic_feedback_ =
- handle(WeakFixedArray::cast(heap_object), isolate);
+ nexus->config()->NewHandle(WeakFixedArray::cast(heap_object));
}
AdvancePolymorphic();
} else if (feedback->GetHeapObjectIfWeak(&heap_object)) {
state_ = kMonomorphic;
- MaybeObject handler = nexus->GetFeedbackExtra();
+ MaybeObject handler = pair.second;
map_ = Map::cast(heap_object);
handler_ = handler;
} else {
diff --git a/deps/v8/src/objects/feedback-vector.h b/deps/v8/src/objects/feedback-vector.h
index 677ec22457..6c5b9b97ab 100644
--- a/deps/v8/src/objects/feedback-vector.h
+++ b/deps/v8/src/objects/feedback-vector.h
@@ -17,7 +17,6 @@
#include "src/objects/name.h"
#include "src/objects/type-hints.h"
#include "src/zone/zone-containers.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -27,7 +26,7 @@ namespace internal {
class IsCompiledScope;
-enum class FeedbackSlotKind {
+enum class FeedbackSlotKind : uint8_t {
// This kind means that the slot points to the middle of another slot,
// which occupies more than one feedback vector element.
// There must be no such slots in the system.
@@ -153,6 +152,8 @@ using MaybeObjectHandles = std::vector<MaybeObjectHandle>;
class FeedbackMetadata;
+#include "torque-generated/src/objects/feedback-vector-tq.inc"
+
// ClosureFeedbackCellArray is a FixedArray that contains feedback cells used
// when creating closures from a function. This is created once the function is
// compiled and is either held by the feedback vector (if allocated) or by the
@@ -174,6 +175,8 @@ class ClosureFeedbackCellArray : public FixedArray {
OBJECT_CONSTRUCTORS(ClosureFeedbackCellArray, FixedArray);
};
+class NexusConfig;
+
// A FeedbackVector has a fixed header with:
// - shared function info (which includes feedback metadata)
// - invocation count
@@ -185,6 +188,20 @@ class FeedbackVector
: public TorqueGeneratedFeedbackVector<FeedbackVector, HeapObject> {
public:
NEVER_READ_ONLY_SPACE
+ DEFINE_TORQUE_GENERATED_FEEDBACK_VECTOR_FLAGS()
+ STATIC_ASSERT(OptimizationMarker::kLastOptimizationMarker <
+ OptimizationMarkerBits::kMax);
+ STATIC_ASSERT(OptimizationTier::kLastOptimizationTier <
+ OptimizationTierBits::kMax);
+
+ static constexpr uint32_t kHasCompileOptimizedOrLogFirstExecutionMarker =
+ kNoneOrInOptimizationQueueMask << OptimizationMarkerBits::kShift;
+ static constexpr uint32_t kHasNoTopTierCodeOrCompileOptimizedMarkerMask =
+ kNoneOrMidTierMask << OptimizationTierBits::kShift |
+ kHasCompileOptimizedOrLogFirstExecutionMarker;
+ static constexpr uint32_t kHasOptimizedCodeOrCompileOptimizedMarkerMask =
+ OptimizationTierBits::kMask |
+ kHasCompileOptimizedOrLogFirstExecutionMarker;
inline bool is_empty() const;
@@ -193,21 +210,21 @@ class FeedbackVector
// Increment profiler ticks, saturating at the maximal value.
void SaturatingIncrementProfilerTicks();
- // Initialize the padding if necessary.
- inline void clear_padding();
-
inline void clear_invocation_count();
inline Code optimized_code() const;
- inline OptimizationMarker optimization_marker() const;
inline bool has_optimized_code() const;
inline bool has_optimization_marker() const;
+ inline OptimizationMarker optimization_marker() const;
+ inline OptimizationTier optimization_tier() const;
void ClearOptimizedCode();
void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo shared,
const char* reason);
static void SetOptimizedCode(Handle<FeedbackVector> vector,
Handle<Code> code);
void SetOptimizationMarker(OptimizationMarker marker);
+ void ClearOptimizationTier();
+ void InitializeOptimizationState();
// Clears the optimization marker in the feedback vector.
void ClearOptimizationMarker();
@@ -217,12 +234,15 @@ class FeedbackVector
// Conversion from an integer index to the underlying array to a slot.
static inline FeedbackSlot ToSlot(intptr_t index);
+
+ inline MaybeObject SynchronizedGet(FeedbackSlot slot) const;
+ inline void SynchronizedSet(FeedbackSlot slot, MaybeObject value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void SynchronizedSet(FeedbackSlot slot, Object value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
inline MaybeObject Get(FeedbackSlot slot) const;
- inline MaybeObject Get(const Isolate* isolate, FeedbackSlot slot) const;
- inline void Set(FeedbackSlot slot, MaybeObject value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void Set(FeedbackSlot slot, Object value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline MaybeObject Get(IsolateRoot isolate, FeedbackSlot slot) const;
// Returns the feedback cell at |index| that is used to create the
// closure.
@@ -271,8 +291,6 @@ class FeedbackVector
return GetLanguageModeFromSlotKind(GetKind(slot));
}
- V8_EXPORT_PRIVATE static void AssertNoLegacyTypes(MaybeObject object);
-
DECL_PRINTER(FeedbackVector)
void FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot); // NOLINT
@@ -283,9 +301,6 @@ class FeedbackVector
// The object that indicates an uninitialized cache.
static inline Handle<Symbol> UninitializedSentinel(Isolate* isolate);
- // The object that indicates a generic state.
- static inline Handle<Symbol> GenericSentinel(Isolate* isolate);
-
// The object that indicates a megamorphic state.
static inline Handle<Symbol> MegamorphicSentinel(Isolate* isolate);
@@ -308,26 +323,38 @@ class FeedbackVector
static void AddToVectorsForProfilingTools(Isolate* isolate,
Handle<FeedbackVector> vector);
+ // Private for initializing stores in FeedbackVector::New().
+ inline void Set(FeedbackSlot slot, MaybeObject value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void Set(FeedbackSlot slot, Object value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+#ifdef DEBUG
+ // Returns true if value is a non-HashTable FixedArray. We want to
+ // make sure not to store such objects in the vector.
+ inline static bool IsOfLegacyType(MaybeObject value);
+#endif // DEBUG
+
+ // NexusConfig controls setting slots in the vector.
+ friend NexusConfig;
+
// Don't expose the raw feedback slot getter/setter.
using TorqueGeneratedFeedbackVector::raw_feedback_slots;
};
class V8_EXPORT_PRIVATE FeedbackVectorSpec {
public:
- explicit FeedbackVectorSpec(Zone* zone)
- : slot_kinds_(zone), num_closure_feedback_cells_(0) {
+ explicit FeedbackVectorSpec(Zone* zone) : slot_kinds_(zone) {
slot_kinds_.reserve(16);
}
- int slots() const { return static_cast<int>(slot_kinds_.size()); }
- int closure_feedback_cells() const { return num_closure_feedback_cells_; }
+ int slot_count() const { return static_cast<int>(slot_kinds_.size()); }
+ int create_closure_slot_count() const { return create_closure_slot_count_; }
- int AddFeedbackCellForCreateClosure() {
- return num_closure_feedback_cells_++;
- }
+ int AddCreateClosureSlot() { return create_closure_slot_count_++; }
FeedbackSlotKind GetKind(FeedbackSlot slot) const {
- return static_cast<FeedbackSlotKind>(slot_kinds_.at(slot.ToInt()));
+ return slot_kinds_.at(slot.ToInt());
}
bool HasTypeProfileSlot() const;
@@ -428,12 +455,11 @@ class V8_EXPORT_PRIVATE FeedbackVectorSpec {
private:
FeedbackSlot AddSlot(FeedbackSlotKind kind);
- void append(FeedbackSlotKind kind) {
- slot_kinds_.push_back(static_cast<unsigned char>(kind));
- }
+ void append(FeedbackSlotKind kind) { slot_kinds_.push_back(kind); }
- ZoneVector<unsigned char> slot_kinds_;
- unsigned int num_closure_feedback_cells_;
+ STATIC_ASSERT(sizeof(FeedbackSlotKind) == sizeof(uint8_t));
+ ZoneVector<FeedbackSlotKind> slot_kinds_;
+ int create_closure_slot_count_ = 0;
friend class SharedFeedbackSlot;
};
@@ -472,7 +498,7 @@ class FeedbackMetadata : public HeapObject {
// int32.
// TODO(mythria): Consider using 16 bits for this and slot_count so that we
// can save 4 bytes.
- DECL_INT32_ACCESSORS(closure_feedback_cell_count)
+ DECL_INT32_ACCESSORS(create_closure_slot_count)
// Get slot_count using an acquire load.
inline int32_t synchronized_slot_count() const;
@@ -505,9 +531,13 @@ class FeedbackMetadata : public HeapObject {
return OBJECT_POINTER_ALIGN(kHeaderSize + length(slot_count) * kInt32Size);
}
- static const int kSlotCountOffset = HeapObject::kHeaderSize;
- static const int kFeedbackCellCountOffset = kSlotCountOffset + kInt32Size;
- static const int kHeaderSize = kFeedbackCellCountOffset + kInt32Size;
+#define FIELDS(V) \
+ V(kSlotCountOffset, kInt32Size) \
+ V(kCreateClosureSlotCountOffset, kInt32Size) \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, FIELDS)
+#undef FIELDS
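
For reference, the FIELDS macro reproduces the layout of the constants it
replaces, with DEFINE_FIELD_OFFSET_CONSTANTS assigning offsets sequentially:

// kSlotCountOffset              = HeapObject::kHeaderSize
// kCreateClosureSlotCountOffset = kSlotCountOffset + kInt32Size
// kHeaderSize                   = kCreateClosureSlotCountOffset + kInt32Size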
class BodyDescriptor;
@@ -587,20 +617,79 @@ class FeedbackMetadataIterator {
FeedbackSlotKind slot_kind_;
};
-// A FeedbackNexus is the combination of a FeedbackVector and a slot.
-class V8_EXPORT_PRIVATE FeedbackNexus final {
+// NexusConfig adapts the FeedbackNexus to be used on the main thread
+// or a background thread. It controls the actual reads and writes of
+// the underlying feedback vector, manages the creation of handles, and
+// expresses capabilities available in the very different contexts of
+// main and background thread. Here are the differences:
+//
+// Capability:       MainThread            BackgroundThread
+// Write to vector   Allowed               Not allowed
+// Handle creation   Via Isolate           Via LocalHeap
+// Reads of vector   "Live"                Cached after initial read
+// Thread safety     Exclusive write,      Shared read only
+//                   shared read
+class V8_EXPORT_PRIVATE NexusConfig {
public:
- FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : vector_handle_(vector), slot_(slot) {
- kind_ =
- (vector.is_null()) ? FeedbackSlotKind::kInvalid : vector->GetKind(slot);
+ static NexusConfig FromMainThread(Isolate* isolate) {
+ return NexusConfig(isolate);
}
- FeedbackNexus(FeedbackVector vector, FeedbackSlot slot)
- : vector_(vector), slot_(slot) {
- kind_ =
- (vector.is_null()) ? FeedbackSlotKind::kInvalid : vector.GetKind(slot);
+
+ static NexusConfig FromBackgroundThread(Isolate* isolate,
+ LocalHeap* local_heap) {
+ return NexusConfig(isolate, local_heap);
+ }
+
+ enum Mode { MainThread, BackgroundThread };
+
+ Mode mode() const {
+ return local_heap_ == nullptr ? MainThread : BackgroundThread;
}
+ Isolate* isolate() const { return isolate_; }
+
+ MaybeObjectHandle NewHandle(MaybeObject object) const;
+ template <typename T>
+ Handle<T> NewHandle(T object) const;
+
+ bool can_write() const { return mode() == MainThread; }
+
+ inline MaybeObject GetFeedback(FeedbackVector vector,
+ FeedbackSlot slot) const;
+ inline void SetFeedback(FeedbackVector vector, FeedbackSlot slot,
+ MaybeObject object,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER) const;
+
+ std::pair<MaybeObject, MaybeObject> GetFeedbackPair(FeedbackVector vector,
+ FeedbackSlot slot) const;
+ void SetFeedbackPair(FeedbackVector vector, FeedbackSlot start_slot,
+ MaybeObject feedback, WriteBarrierMode mode,
+ MaybeObject feedback_extra,
+ WriteBarrierMode mode_extra) const;
+
+ private:
+ explicit NexusConfig(Isolate* isolate)
+ : isolate_(isolate), local_heap_(nullptr) {}
+ NexusConfig(Isolate* isolate, LocalHeap* local_heap)
+ : isolate_(isolate), local_heap_(local_heap) {}
+
+ Isolate* const isolate_;
+ LocalHeap* const local_heap_;
+};
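
A short usage sketch of the two configurations, using the FeedbackNexus
constructors declared below; the vector, slot, isolate, and local_heap
variables are assumptions standing in for real call-site state:

// Main thread: writes allowed, handles come from the Isolate.
FeedbackNexus main_nexus(vector, slot);  // FromMainThread is implied

// Background thread: read-only view, handles come from the LocalHeap.
NexusConfig config = NexusConfig::FromBackgroundThread(isolate, local_heap);
FeedbackNexus background_nexus(vector, slot, config);
InlineCacheState state = background_nexus.ic_state();  // shared-lock reads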
+
+// A FeedbackNexus is the combination of a FeedbackVector and a slot.
+class V8_EXPORT_PRIVATE FeedbackNexus final {
+ public:
+ // For use on the main thread. A null {vector} is accepted as well.
+ FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot);
+ FeedbackNexus(FeedbackVector vector, FeedbackSlot slot);
+
+ // For use on the main or background thread as configured by {config}.
+ // {vector} must be valid.
+ FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot,
+ const NexusConfig& config);
+
+ const NexusConfig* config() const { return &config_; }
Handle<FeedbackVector> vector_handle() const {
DCHECK(vector_.is_null());
return vector_handle_;
@@ -608,6 +697,7 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
FeedbackVector vector() const {
return vector_handle_.is_null() ? vector_ : *vector_handle_;
}
+
FeedbackSlot slot() const { return slot_; }
FeedbackSlotKind kind() const { return kind_; }
@@ -624,13 +714,14 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
// For map-based ICs (load, keyed-load, store, keyed-store).
Map GetFirstMap() const;
-
int ExtractMaps(MapHandles* maps) const;
// Used to obtain maps and the associated handlers stored in the feedback
- // vector. This should be called when we expect only a handler to be sotred in
- // the extra feedback. This is used by ICs when updting the handlers.
- int ExtractMapsAndHandlers(std::vector<MapAndHandler>* maps_and_handlers,
- bool try_update_deprecated = false) const;
+ // vector. This should be called when we expect only a handler to be stored in
+ // the extra feedback. This is used by ICs when updating the handlers.
+ using TryUpdateHandler = std::function<MaybeHandle<Map>(Handle<Map>)>;
+ int ExtractMapsAndHandlers(
+ std::vector<MapAndHandler>* maps_and_handlers,
+ TryUpdateHandler map_handler = TryUpdateHandler()) const;
MaybeObjectHandle FindHandlerForMap(Handle<Map> map) const;
// Used to obtain maps and the associated feedback stored in the feedback
// vector. The returned feedback need not be always a handler. It could be a
@@ -654,6 +745,7 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
inline MaybeObject GetFeedback() const;
inline MaybeObject GetFeedbackExtra() const;
+ inline std::pair<MaybeObject, MaybeObject> GetFeedbackPair() const;
inline Isolate* GetIsolate() const;
@@ -726,19 +818,25 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
std::vector<int> GetSourcePositions() const;
std::vector<Handle<String>> GetTypesForSourcePositions(uint32_t pos) const;
- inline void SetFeedback(Object feedback,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void SetFeedback(MaybeObject feedback,
+ private:
+ template <typename FeedbackType>
+ inline void SetFeedback(FeedbackType feedback,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void SetFeedbackExtra(Object feedback_extra,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void SetFeedbackExtra(MaybeObject feedback_extra,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ template <typename FeedbackType, typename FeedbackExtraType>
+ inline void SetFeedback(FeedbackType feedback, WriteBarrierMode mode,
+ FeedbackExtraType feedback_extra,
+ WriteBarrierMode mode_extra = UPDATE_WRITE_BARRIER);
+
+ inline MaybeObject UninitializedSentinel() const;
+ inline MaybeObject MegamorphicSentinel() const;
// Create an array. The caller must install it in a feedback vector slot.
Handle<WeakFixedArray> CreateArrayOfSize(int length);
- private:
+ // Helpers to maintain feedback_cache_.
+ inline MaybeObject FromHandle(MaybeObjectHandle slot) const;
+ inline MaybeObjectHandle ToHandle(MaybeObject value) const;
+
// The reason for having a vector handle and a raw pointer is that we can and
// should use handles during IC miss, but not during GC when we clear ICs. If
// you have a handle to the vector that is better because more operations can
@@ -747,6 +845,11 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
FeedbackVector vector_;
FeedbackSlot slot_;
FeedbackSlotKind kind_;
+ // When using the background-thread configuration, a cache is used to
+ // guarantee a consistent view of the feedback to FeedbackNexus methods.
+ mutable base::Optional<std::pair<MaybeObjectHandle, MaybeObjectHandle>>
+ feedback_cache_;
+ NexusConfig config_;
};
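
The feedback_cache_ member is what backs that guarantee: assuming the first
background read fills the cache, later queries on the same nexus observe a
single snapshot even if the main thread rewrites the slot in the meantime.
A sketch under that assumption:

FeedbackNexus nexus(vector, slot,
                    NexusConfig::FromBackgroundThread(isolate, local_heap));
InlineCacheState s1 = nexus.ic_state();  // first read caches the slot pair
InlineCacheState s2 = nexus.ic_state();  // consistent with s1, by design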
class V8_EXPORT_PRIVATE FeedbackIterator final {
@@ -788,7 +891,7 @@ class V8_EXPORT_PRIVATE FeedbackIterator final {
inline BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback);
inline CompareOperationHint CompareOperationHintFromFeedback(int type_feedback);
-inline ForInHint ForInHintFromFeedback(int type_feedback);
+inline ForInHint ForInHintFromFeedback(ForInFeedback type_feedback);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/feedback-vector.tq b/deps/v8/src/objects/feedback-vector.tq
index 794a159ca9..a90d4d363c 100644
--- a/deps/v8/src/objects/feedback-vector.tq
+++ b/deps/v8/src/objects/feedback-vector.tq
@@ -2,18 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+type OptimizationMarker extends uint16 constexpr 'OptimizationMarker';
+type OptimizationTier extends uint16 constexpr 'OptimizationTier';
+
+bitfield struct FeedbackVectorFlags extends uint32 {
+ optimization_marker: OptimizationMarker: 3 bit;
+ optimization_tier: OptimizationTier: 2 bit;
+}
+
@generateBodyDescriptor
@generateCppClass
extern class FeedbackVector extends HeapObject {
const length: int32;
invocation_count: int32;
profiler_ticks: int32;
- // TODO(v8:9287) The padding is not necessary on platforms with 4 bytes
- // tagged pointers, we should make it conditional; however, platform-specific
- // interacts badly with GCMole, so we need to address that first.
- padding: uint32;
+ // TODO(turboprop, v8:11010): This field could be removed by changing the
+ // tier-up checks for Turboprop. If removing this field, also check v8:9287.
+ // Padding was necessary for GCMole.
+ flags: FeedbackVectorFlags;
shared_function_info: SharedFunctionInfo;
- optimized_code_weak_or_smi: Weak<Code>|Smi;
+ maybe_optimized_code: Weak<Code>;
closure_feedback_cell_array: ClosureFeedbackCellArray;
raw_feedback_slots[length]: MaybeObject;
}
diff --git a/deps/v8/src/objects/field-index-inl.h b/deps/v8/src/objects/field-index-inl.h
index ee1f875104..a3b4c23140 100644
--- a/deps/v8/src/objects/field-index-inl.h
+++ b/deps/v8/src/objects/field-index-inl.h
@@ -61,14 +61,14 @@ int FieldIndex::GetLoadByFieldIndex() const {
}
FieldIndex FieldIndex::ForDescriptor(Map map, InternalIndex descriptor_index) {
- const Isolate* isolate = GetIsolateForPtrCompr(map);
+ IsolateRoot isolate = GetIsolateForPtrCompr(map);
return ForDescriptor(isolate, map, descriptor_index);
}
-FieldIndex FieldIndex::ForDescriptor(const Isolate* isolate, Map map,
+FieldIndex FieldIndex::ForDescriptor(IsolateRoot isolate, Map map,
InternalIndex descriptor_index) {
- PropertyDetails details =
- map.instance_descriptors(isolate).GetDetails(descriptor_index);
+ PropertyDetails details = map.instance_descriptors(isolate, kRelaxedLoad)
+ .GetDetails(descriptor_index);
int field_index = details.field_index();
return ForPropertyIndex(map, field_index, details.representation());
}
diff --git a/deps/v8/src/objects/field-index.h b/deps/v8/src/objects/field-index.h
index 930f12bcda..7819c8c06b 100644
--- a/deps/v8/src/objects/field-index.h
+++ b/deps/v8/src/objects/field-index.h
@@ -31,7 +31,7 @@ class FieldIndex final {
static inline FieldIndex ForInObjectOffset(int offset, Encoding encoding);
static inline FieldIndex ForDescriptor(Map map,
InternalIndex descriptor_index);
- static inline FieldIndex ForDescriptor(const Isolate* isolate, Map map,
+ static inline FieldIndex ForDescriptor(IsolateRoot isolate, Map map,
InternalIndex descriptor_index);
inline int GetLoadByFieldIndex() const;
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index ed2d952b96..547e4dc817 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -20,7 +20,6 @@
#include "src/objects/slots.h"
#include "src/roots/roots-inl.h"
#include "src/sanitizer/tsan.h"
-#include "torque-generated/class-definitions-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -28,6 +27,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/fixed-array-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(FixedArrayBase)
FixedArrayBase::FixedArrayBase(Address ptr,
HeapObject::AllowInlineSmiStorage allow_smi)
@@ -69,11 +70,11 @@ bool FixedArray::ContainsOnlySmisOrHoles() {
}
Object FixedArray::get(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return get(isolate, index);
}
-Object FixedArray::get(const Isolate* isolate, int index) const {
+Object FixedArray::get(IsolateRoot isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
return TaggedField<Object>::Relaxed_Load(isolate, *this,
OffsetOfElementAt(index));
@@ -87,7 +88,6 @@ bool FixedArray::is_the_hole(Isolate* isolate, int index) {
return get(isolate, index).IsTheHole(isolate);
}
-#if !defined(_WIN32) || defined(_WIN64)
void FixedArray::set(int index, Smi value) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
@@ -95,7 +95,6 @@ void FixedArray::set(int index, Smi value) {
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
}
-#endif
void FixedArray::set(int index, Object value) {
DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
@@ -125,11 +124,11 @@ void FixedArray::NoWriteBarrierSet(FixedArray array, int index, Object value) {
}
Object FixedArray::synchronized_get(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return synchronized_get(isolate, index);
}
-Object FixedArray::synchronized_get(const Isolate* isolate, int index) const {
+Object FixedArray::synchronized_get(IsolateRoot isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
return ACQUIRE_READ_FIELD(*this, OffsetOfElementAt(index));
}
@@ -396,8 +395,7 @@ void FixedDoubleArray::MoveElements(Isolate* isolate, int dst_index,
int src_index, int len,
WriteBarrierMode mode) {
DCHECK_EQ(SKIP_WRITE_BARRIER, mode);
- double* data_start =
- reinterpret_cast<double*>(FIELD_ADDR(*this, kHeaderSize));
+ double* data_start = reinterpret_cast<double*>(field_address(kHeaderSize));
MemMove(data_start + dst_index, data_start + src_index, len * kDoubleSize);
}
@@ -408,11 +406,11 @@ void FixedDoubleArray::FillWithHoles(int from, int to) {
}
MaybeObject WeakFixedArray::Get(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return Get(isolate, index);
}
-MaybeObject WeakFixedArray::Get(const Isolate* isolate, int index) const {
+MaybeObject WeakFixedArray::Get(IsolateRoot isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
return objects(isolate, index);
}
@@ -443,11 +441,11 @@ void WeakFixedArray::CopyElements(Isolate* isolate, int dst_index,
}
MaybeObject WeakArrayList::Get(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return Get(isolate, index);
}
-MaybeObject WeakArrayList::Get(const Isolate* isolate, int index) const {
+MaybeObject WeakArrayList::Get(IsolateRoot isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(capacity()));
return objects(isolate, index);
}
@@ -498,7 +496,7 @@ Object ArrayList::Get(int index) const {
return FixedArray::cast(*this).get(kFirstIndex + index);
}
-Object ArrayList::Get(const Isolate* isolate, int index) const {
+Object ArrayList::Get(IsolateRoot isolate, int index) const {
return FixedArray::cast(*this).get(isolate, kFirstIndex + index);
}
@@ -531,14 +529,14 @@ void ByteArray::set(int index, byte value) {
void ByteArray::copy_in(int index, const byte* buffer, int length) {
DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
index + length <= this->length());
- Address dst_addr = FIELD_ADDR(*this, kHeaderSize + index * kCharSize);
+ Address dst_addr = field_address(kHeaderSize + index * kCharSize);
memcpy(reinterpret_cast<void*>(dst_addr), buffer, length);
}
void ByteArray::copy_out(int index, byte* buffer, int length) {
DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
index + length <= this->length());
- Address src_addr = FIELD_ADDR(*this, kHeaderSize + index * kCharSize);
+ Address src_addr = field_address(kHeaderSize + index * kCharSize);
memcpy(buffer, reinterpret_cast<void*>(src_addr), length);
}
@@ -623,7 +621,7 @@ Object TemplateList::get(int index) const {
return FixedArray::cast(*this).get(kFirstElementIndex + index);
}
-Object TemplateList::get(const Isolate* isolate, int index) const {
+Object TemplateList::get(IsolateRoot isolate, int index) const {
return FixedArray::cast(*this).get(isolate, kFirstElementIndex + index);
}
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index 13148b2099..97f9297ef7 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -9,7 +9,6 @@
#include "src/objects/instance-type.h"
#include "src/objects/objects.h"
#include "src/objects/smi.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -65,6 +64,8 @@ enum FixedArraySubInstanceType {
LAST_FIXED_ARRAY_SUB_TYPE = WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE
};
+#include "torque-generated/src/objects/fixed-array-tq.inc"
+
// Common superclass for FixedArrays that allow implementations to share
// common accessors and some code paths.
class FixedArrayBase
@@ -100,7 +101,7 @@ class FixedArray
public:
// Setter and getter for elements.
inline Object get(int index) const;
- inline Object get(const Isolate* isolate, int index) const;
+ inline Object get(IsolateRoot isolate, int index) const;
static inline Handle<Object> get(FixedArray array, int index,
Isolate* isolate);
@@ -112,7 +113,7 @@ class FixedArray
// Synchronized setters and getters.
inline Object synchronized_get(int index) const;
- inline Object synchronized_get(const Isolate* isolate, int index) const;
+ inline Object synchronized_get(IsolateRoot isolate, int index) const;
// Currently only Smis are written with release semantics, hence we can avoid
// a write barrier.
inline void synchronized_set(int index, Smi value);
@@ -122,18 +123,7 @@ class FixedArray
inline bool is_the_hole(Isolate* isolate, int index);
// Setter that doesn't need write barrier.
-#if defined(_WIN32) && !defined(_WIN64)
- inline void set(int index, Smi value) {
- DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
- DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- DCHECK(Object(value).IsSmi());
- int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_FIELD(*this, offset, value);
- }
-#else
inline void set(int index, Smi value);
-#endif
-
// Setter with explicit barrier mode.
inline void set(int index, Object value, WriteBarrierMode mode);
@@ -278,7 +268,7 @@ class WeakFixedArray
: public TorqueGeneratedWeakFixedArray<WeakFixedArray, HeapObject> {
public:
inline MaybeObject Get(int index) const;
- inline MaybeObject Get(const Isolate* isolate, int index) const;
+ inline MaybeObject Get(IsolateRoot isolate, int index) const;
inline void Set(
int index, MaybeObject value,
@@ -353,7 +343,7 @@ class WeakArrayList
V8_EXPORT_PRIVATE void Compact(Isolate* isolate);
inline MaybeObject Get(int index) const;
- inline MaybeObject Get(const Isolate* isolate, int index) const;
+ inline MaybeObject Get(IsolateRoot isolate, int index) const;
// Set the element at index to obj. The underlying array must be large enough.
// If you need to grow the WeakArrayList, use the static AddToEnd() method
@@ -366,7 +356,7 @@ class WeakArrayList
}
static constexpr int CapacityForLength(int length) {
- return length + Max(length / 2, 2);
+ return length + std::max(length / 2, 2);
}
// Gives access to raw memory which stores the array's data.
@@ -416,6 +406,8 @@ class WeakArrayList
class WeakArrayList::Iterator {
public:
explicit Iterator(WeakArrayList array) : index_(0), array_(array) {}
+ Iterator(const Iterator&) = delete;
+ Iterator& operator=(const Iterator&) = delete;
inline HeapObject Next();
@@ -425,7 +417,6 @@ class WeakArrayList::Iterator {
#ifdef DEBUG
DisallowHeapAllocation no_gc_;
#endif // DEBUG
- DISALLOW_COPY_AND_ASSIGN(Iterator);
};
// Generic array grows dynamically with O(1) amortized insertion.
@@ -454,7 +445,7 @@ class ArrayList : public TorqueGeneratedArrayList<ArrayList, FixedArray> {
// storage capacity, i.e., length().
inline void SetLength(int length);
inline Object Get(int index) const;
- inline Object Get(const Isolate* isolate, int index) const;
+ inline Object Get(IsolateRoot isolate, int index) const;
inline ObjectSlot Slot(int index);
// Set the element at index to obj. The underlying array must be large enough.
@@ -600,7 +591,7 @@ class TemplateList
static Handle<TemplateList> New(Isolate* isolate, int size);
inline int length() const;
inline Object get(int index) const;
- inline Object get(const Isolate* isolate, int index) const;
+ inline Object get(IsolateRoot isolate, int index) const;
inline void set(int index, Object value);
static Handle<TemplateList> Add(Isolate* isolate, Handle<TemplateList> list,
Handle<Object> value);
diff --git a/deps/v8/src/objects/foreign-inl.h b/deps/v8/src/objects/foreign-inl.h
index d455aede1a..cb3dac91eb 100644
--- a/deps/v8/src/objects/foreign-inl.h
+++ b/deps/v8/src/objects/foreign-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/foreign-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(Foreign)
// static
@@ -27,15 +29,17 @@ bool Foreign::IsNormalized(Object value) {
}
DEF_GETTER(Foreign, foreign_address, Address) {
- ExternalPointer_t encoded_value =
- ReadField<ExternalPointer_t>(kForeignAddressOffset);
- Address value = DecodeExternalPointer(isolate, encoded_value);
- return value;
+ return ReadExternalPointerField(kForeignAddressOffset, isolate,
+ kForeignForeignAddressTag);
+}
+
+void Foreign::AllocateExternalPointerEntries(Isolate* isolate) {
+ InitExternalPointerField(kForeignAddressOffset, isolate);
}
void Foreign::set_foreign_address(Isolate* isolate, Address value) {
- ExternalPointer_t encoded_value = EncodeExternalPointer(isolate, value);
- WriteField<ExternalPointer_t>(kForeignAddressOffset, encoded_value);
+ WriteExternalPointerField(kForeignAddressOffset, isolate, value,
+ kForeignForeignAddressTag);
}
} // namespace internal
diff --git a/deps/v8/src/objects/foreign.h b/deps/v8/src/objects/foreign.h
index 2dd869d5ac..ebb219b153 100644
--- a/deps/v8/src/objects/foreign.h
+++ b/deps/v8/src/objects/foreign.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_FOREIGN_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -14,6 +13,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/foreign-tq.inc"
+
// Foreign describes objects pointing from JavaScript to C structures.
class Foreign : public TorqueGeneratedForeign<Foreign, HeapObject> {
public:
@@ -43,6 +44,8 @@ class Foreign : public TorqueGeneratedForeign<Foreign, HeapObject> {
friend class StartupSerializer;
friend class WasmTypeInfo;
+ inline void AllocateExternalPointerEntries(Isolate* isolate);
+
inline void set_foreign_address(Isolate* isolate, Address value);
TQ_OBJECT_CONSTRUCTORS(Foreign)
diff --git a/deps/v8/src/objects/foreign.tq b/deps/v8/src/objects/foreign.tq
index dcf52b12e3..872da31e3b 100644
--- a/deps/v8/src/objects/foreign.tq
+++ b/deps/v8/src/objects/foreign.tq
@@ -7,3 +7,6 @@
extern class Foreign extends HeapObject {
foreign_address: ExternalPointer;
}
+
+extern operator '.foreign_address_ptr' macro LoadForeignForeignAddressPtr(
+ Foreign): RawPtr;
diff --git a/deps/v8/src/objects/free-space-inl.h b/deps/v8/src/objects/free-space-inl.h
index 0b27b3f433..e8ce1d6350 100644
--- a/deps/v8/src/objects/free-space-inl.h
+++ b/deps/v8/src/objects/free-space-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/free-space-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(FreeSpace)
RELAXED_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
diff --git a/deps/v8/src/objects/free-space.h b/deps/v8/src/objects/free-space.h
index 7bed4a1a7c..76d618cbfd 100644
--- a/deps/v8/src/objects/free-space.h
+++ b/deps/v8/src/objects/free-space.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_FREE_SPACE_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -14,6 +13,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/free-space-tq.inc"
+
// FreeSpace are fixed-size free memory blocks used by the heap and GC.
// They look like heap objects (are heap object tagged and have a map) so that
// the heap remains iterable. They have a size and a next pointer.
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index b16b8a796f..4e4e9b9017 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -110,7 +110,7 @@ int HashTableBase::ComputeCapacity(int at_least_space_for) {
// Must be kept in sync with CodeStubAssembler::HashTableComputeCapacity().
int raw_cap = at_least_space_for + (at_least_space_for >> 1);
int capacity = base::bits::RoundUpToPowerOfTwo32(raw_cap);
- return Max(capacity, kMinCapacity);
+ return std::max({capacity, kMinCapacity});
}
void HashTableBase::SetNumberOfElements(int nof) {
@@ -142,8 +142,7 @@ InternalIndex HashTable<Derived, Shape>::FindEntry(LocalIsolate* isolate,
// Find entry for key otherwise return kNotFound.
template <typename Derived, typename Shape>
-template <typename LocalIsolate>
-InternalIndex HashTable<Derived, Shape>::FindEntry(const LocalIsolate* isolate,
+InternalIndex HashTable<Derived, Shape>::FindEntry(IsolateRoot isolate,
ReadOnlyRoots roots, Key key,
int32_t hash) {
uint32_t capacity = Capacity();
@@ -180,8 +179,8 @@ bool HashTable<Derived, Shape>::ToKey(ReadOnlyRoots roots, InternalIndex entry,
}
template <typename Derived, typename Shape>
-bool HashTable<Derived, Shape>::ToKey(const Isolate* isolate,
- InternalIndex entry, Object* out_k) {
+bool HashTable<Derived, Shape>::ToKey(IsolateRoot isolate, InternalIndex entry,
+ Object* out_k) {
Object k = KeyAt(isolate, entry);
if (!IsKey(GetReadOnlyRoots(isolate), k)) return false;
*out_k = Shape::Unwrap(k);
@@ -190,16 +189,14 @@ bool HashTable<Derived, Shape>::ToKey(const Isolate* isolate,
template <typename Derived, typename Shape>
Object HashTable<Derived, Shape>::KeyAt(InternalIndex entry) {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return KeyAt(isolate, entry);
}
template <typename Derived, typename Shape>
-template <typename LocalIsolate>
-Object HashTable<Derived, Shape>::KeyAt(const LocalIsolate* isolate,
+Object HashTable<Derived, Shape>::KeyAt(IsolateRoot isolate,
InternalIndex entry) {
- return get(GetIsolateForPtrCompr(isolate),
- EntryToIndex(entry) + kEntryKeyIndex);
+ return get(isolate, EntryToIndex(entry) + kEntryKeyIndex);
}
template <typename Derived, typename Shape>
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index c9dd57a45a..39d8e326f6 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -138,26 +138,24 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
void IterateElements(ObjectVisitor* visitor);
// Find entry for key otherwise return kNotFound.
- template <typename LocalIsolate>
- inline InternalIndex FindEntry(const LocalIsolate* isolate,
- ReadOnlyRoots roots, Key key, int32_t hash);
+ inline InternalIndex FindEntry(IsolateRoot isolate, ReadOnlyRoots roots,
+ Key key, int32_t hash);
template <typename LocalIsolate>
inline InternalIndex FindEntry(LocalIsolate* isolate, Key key);
// Rehashes the table in-place.
- void Rehash(const Isolate* isolate);
+ void Rehash(IsolateRoot isolate);
// Returns whether k is a real key. The hole and undefined are not allowed as
// keys and can be used to indicate missing or deleted elements.
static inline bool IsKey(ReadOnlyRoots roots, Object k);
inline bool ToKey(ReadOnlyRoots roots, InternalIndex entry, Object* out_k);
- inline bool ToKey(const Isolate* isolate, InternalIndex entry, Object* out_k);
+ inline bool ToKey(IsolateRoot isolate, InternalIndex entry, Object* out_k);
// Returns the key at entry.
inline Object KeyAt(InternalIndex entry);
- template <typename LocalIsolate>
- inline Object KeyAt(const LocalIsolate* isolate, InternalIndex entry);
+ inline Object KeyAt(IsolateRoot isolate, InternalIndex entry);
static const int kElementsStartIndex = kPrefixStartIndex + Shape::kPrefixSize;
static const int kEntrySize = Shape::kEntrySize;
@@ -219,7 +217,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
// Find the entry at which to insert element with the given key that
// has the given hash value.
- InternalIndex FindInsertionEntry(const Isolate* isolate, ReadOnlyRoots roots,
+ InternalIndex FindInsertionEntry(IsolateRoot isolate, ReadOnlyRoots roots,
uint32_t hash);
InternalIndex FindInsertionEntry(Isolate* isolate, uint32_t hash);
@@ -233,7 +231,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
Isolate* isolate, Handle<Derived> table, int additionalCapacity = 0);
// Rehashes this hash-table into the new table.
- void Rehash(const Isolate* isolate, Derived new_table);
+ void Rehash(IsolateRoot isolate, Derived new_table);
inline void set_key(int index, Object value);
inline void set_key(int index, Object value, WriteBarrierMode mode);
@@ -324,7 +322,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) ObjectHashTableBase
// returned in case the key is not present.
Object Lookup(Handle<Object> key);
Object Lookup(Handle<Object> key, int32_t hash);
- Object Lookup(const Isolate* isolate, Handle<Object> key, int32_t hash);
+ Object Lookup(IsolateRoot isolate, Handle<Object> key, int32_t hash);
// Returns the value at entry.
Object ValueAt(InternalIndex entry);
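
Most of the signature churn in this header family is the same migration: const Isolate* becomes IsolateRoot wherever the callee only needs the heap base address for decompressing compressed tagged values. A loose conceptual sketch, not the real class; the diff itself relies only on IsolateRoot::address(), as visible in the js-array-buffer hunks further down:

#include <cstdint>

using Address = uintptr_t;
using Tagged_t = uint32_t;  // A compressed tagged value.

// Loose sketch of IsolateRoot: the base address of the pointer-compression
// cage, which is all that KeyAt, ToKey, Rehash, and friends need.
class IsolateRoot {
 public:
  explicit IsolateRoot(Address address) : address_(address) {}
  Address address() const { return address_; }
 private:
  Address address_;
};

// Decompression only needs the base address, not a full Isolate.
Address Decompress(IsolateRoot isolate, Tagged_t compressed) {
  return isolate.address() + compressed;
}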
diff --git a/deps/v8/src/objects/heap-number-inl.h b/deps/v8/src/objects/heap-number-inl.h
index 4e40fa70b8..97db52a58c 100644
--- a/deps/v8/src/objects/heap-number-inl.h
+++ b/deps/v8/src/objects/heap-number-inl.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/heap-number-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(HeapNumber)
uint64_t HeapNumber::value_as_bits() const {
diff --git a/deps/v8/src/objects/heap-number.h b/deps/v8/src/objects/heap-number.h
index 4e77a52340..311f1437be 100644
--- a/deps/v8/src/objects/heap-number.h
+++ b/deps/v8/src/objects/heap-number.h
@@ -13,6 +13,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/heap-number-tq.inc"
+
// The HeapNumber class describes heap allocated numbers that cannot be
// represented in a Smi (small integer).
class HeapNumber
diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h
index 2f16236e02..429379d9d3 100644
--- a/deps/v8/src/objects/heap-object.h
+++ b/deps/v8/src/objects/heap-object.h
@@ -6,10 +6,10 @@
#define V8_OBJECTS_HEAP_OBJECT_H_
#include "src/common/globals.h"
-#include "src/roots/roots.h"
-
+#include "src/objects/instance-type.h"
#include "src/objects/objects.h"
#include "src/objects/tagged-field.h"
+#include "src/roots/roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -18,6 +18,7 @@ namespace v8 {
namespace internal {
class Heap;
+class PrimitiveHeapObject;
// HeapObject is the superclass for all classes describing heap allocated
// objects.
@@ -45,8 +46,8 @@ class HeapObject : public Object {
// Compare-and-swaps map word using release store, returns true if the map
// word was actually swapped.
- inline bool synchronized_compare_and_swap_map_word(MapWord old_map_word,
- MapWord new_map_word);
+ inline bool release_compare_and_swap_map_word(MapWord old_map_word,
+ MapWord new_map_word);
// Initialize the map immediately after the object is allocated.
// Do not use this outside Heap.
@@ -68,11 +69,11 @@ class HeapObject : public Object {
inline ReadOnlyRoots GetReadOnlyRoots() const;
// This version is intended to be used for the isolate values produced by
// the i::GetIsolateForPtrCompr(HeapObject) function, which may return nullptr.
- inline ReadOnlyRoots GetReadOnlyRoots(const Isolate* isolate) const;
+ inline ReadOnlyRoots GetReadOnlyRoots(IsolateRoot isolate) const;
#define IS_TYPE_FUNCTION_DECL(Type) \
V8_INLINE bool Is##Type() const; \
- V8_INLINE bool Is##Type(const Isolate* isolate) const;
+ V8_INLINE bool Is##Type(IsolateRoot isolate) const;
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
IS_TYPE_FUNCTION_DECL(HashTableBase)
IS_TYPE_FUNCTION_DECL(SmallOrderedHashTable)
@@ -93,7 +94,7 @@ class HeapObject : public Object {
#define DECL_STRUCT_PREDICATE(NAME, Name, name) \
V8_INLINE bool Is##Name() const; \
- V8_INLINE bool Is##Name(const Isolate* isolate) const;
+ V8_INLINE bool Is##Name(IsolateRoot isolate) const;
STRUCT_LIST(DECL_STRUCT_PREDICATE)
#undef DECL_STRUCT_PREDICATE
@@ -181,6 +182,7 @@ class HeapObject : public Object {
// Whether the object needs rehashing. That is the case if the object's
// content depends on FLAG_hash_seed. When the object is deserialized into
// a heap with a different hash seed, these objects need to adapt.
+ bool NeedsRehashing(InstanceType instance_type) const;
bool NeedsRehashing() const;
// Rehashing support is not implemented for all objects that need rehashing.
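
The rename from synchronized_compare_and_swap_map_word to release_compare_and_swap_map_word makes the ordering contract explicit: the swap publishes the new map word with release semantics rather than implying a full barrier. In portable C++ the equivalent operation looks roughly like this sketch, with std::atomic standing in for V8's map-word slot:

#include <atomic>
#include <cstdint>

using MapWord = uintptr_t;

// Release compare-and-swap on a map word; returns true if the word was
// actually swapped, matching the comment in the hunk above.
bool ReleaseCompareAndSwapMapWord(std::atomic<MapWord>* slot,
                                  MapWord old_map_word,
                                  MapWord new_map_word) {
  MapWord expected = old_map_word;
  return slot->compare_exchange_strong(expected, new_map_word,
                                       std::memory_order_release,
                                       std::memory_order_relaxed);
}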
diff --git a/deps/v8/src/objects/internal-index.h b/deps/v8/src/objects/internal-index.h
index 130d4d1868..a241f3c686 100644
--- a/deps/v8/src/objects/internal-index.h
+++ b/deps/v8/src/objects/internal-index.h
@@ -45,11 +45,15 @@ class InternalIndex {
return static_cast<int>(entry_);
}
- bool operator==(const InternalIndex& other) { return entry_ == other.entry_; }
+ bool operator==(const InternalIndex& other) const {
+ return entry_ == other.entry_;
+ }
// Iteration support.
InternalIndex operator*() { return *this; }
- bool operator!=(const InternalIndex& other) { return entry_ != other.entry_; }
+ bool operator!=(const InternalIndex& other) const {
+ return entry_ != other.entry_;
+ }
InternalIndex& operator++() {
entry_++;
return *this;
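
The internal-index.h change only adds const to the comparison operators, but that is what allows two InternalIndex values to be compared through const references, for example inside const iteration helpers. A minimal illustration with a stripped-down stand-in class:

#include <cstddef>

class Index {
 public:
  explicit Index(size_t entry) : entry_(entry) {}
  // Without const here, the Same() function below fails to compile.
  bool operator==(const Index& other) const { return entry_ == other.entry_; }
  bool operator!=(const Index& other) const { return entry_ != other.entry_; }
 private:
  size_t entry_;
};

bool Same(const Index& a, const Index& b) {
  return a == b;  // Calls operator== on a const object.
}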
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index e1cee90422..4840a83975 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -24,6 +24,7 @@
#include "src/objects/js-number-format-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/property-descriptor.h"
+#include "src/objects/smi.h"
#include "src/objects/string.h"
#include "src/strings/string-case.h"
#include "unicode/basictz.h"
@@ -1275,28 +1276,72 @@ Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions(
// 15. Else If mnfd is not undefined or mxfd is not undefined, then
if (!mnfd_obj->IsUndefined(isolate) || !mxfd_obj->IsUndefined(isolate)) {
- // 15. b. Let mnfd be ? DefaultNumberOption(mnfd, 0, 20, mnfdDefault).
+ Handle<String> mxfd_str = factory->maximumFractionDigits_string();
Handle<String> mnfd_str = factory->minimumFractionDigits_string();
- if (!DefaultNumberOption(isolate, mnfd_obj, 0, 20, mnfd_default, mnfd_str)
- .To(&mnfd)) {
+
+ int specified_mnfd;
+ int specified_mxfd;
+
+ // a. Let _specifiedMnfd_ be ? DefaultNumberOption(_mnfd_, 0, 20,
+ // *undefined*).
+ if (!DefaultNumberOption(isolate, mnfd_obj, 0, 20, -1, mnfd_str)
+ .To(&specified_mnfd)) {
+ return Nothing<NumberFormatDigitOptions>();
+ }
+ Handle<Object> specifiedMnfd_obj;
+ if (specified_mnfd < 0) {
+ specifiedMnfd_obj = factory->undefined_value();
+ } else {
+ specifiedMnfd_obj = handle(Smi::FromInt(specified_mnfd), isolate);
+ }
+
+ // b. Let _specifiedMxfd_ be ? DefaultNumberOption(_mxfd_, 0, 20,
+ // *undefined*).
+ if (!DefaultNumberOption(isolate, mxfd_obj, 0, 20, -1, mxfd_str)
+ .To(&specified_mxfd)) {
return Nothing<NumberFormatDigitOptions>();
}
+ Handle<Object> specifiedMxfd_obj;
+ if (specified_mxfd < 0) {
+ specifiedMxfd_obj = factory->undefined_value();
+ } else {
+ specifiedMxfd_obj = handle(Smi::FromInt(specified_mxfd), isolate);
+ }
- // 15. c. Let mxfdActualDefault be max( mnfd, mxfdDefault ).
- int mxfd_actual_default = std::max(mnfd, mxfd_default);
+ // c. If _specifiedMxfd_ is not *undefined*, set _mnfdDefault_ to
+ // min(_mnfdDefault_, _specifiedMxfd_).
+ if (specified_mxfd >= 0) {
+ mnfd_default = std::min(mnfd_default, specified_mxfd);
+ }
- // 15. d. Let mxfd be ? DefaultNumberOption(mxfd, mnfd, 20,
- // mxfdActualDefault).
- Handle<String> mxfd_str = factory->maximumFractionDigits_string();
- if (!DefaultNumberOption(isolate, mxfd_obj, mnfd, 20, mxfd_actual_default,
- mxfd_str)
+ // d. Set _mnfd_ to ! DefaultNumberOption(_specifiedMnfd_, 0, 20,
+ // _mnfdDefault_).
+ if (!DefaultNumberOption(isolate, specifiedMnfd_obj, 0, 20, mnfd_default,
+ mnfd_str)
+ .To(&mnfd)) {
+ return Nothing<NumberFormatDigitOptions>();
+ }
+
+ // e. Set _mxfd_ to ! DefaultNumberOption(_specifiedMxfd_, 0, 20,
+ // max(_mxfdDefault_, _mnfd_)).
+ if (!DefaultNumberOption(isolate, specifiedMxfd_obj, 0, 20,
+ std::max(mxfd_default, mnfd), mxfd_str)
.To(&mxfd)) {
return Nothing<NumberFormatDigitOptions>();
}
- // 15. e. Set intlObj.[[MinimumFractionDigits]] to mnfd.
+
+ // f. If _mnfd_ is greater than _mxfd_, throw a *RangeError* exception.
+ if (mnfd > mxfd) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kPropertyValueOutOfRange, mxfd_str),
+ Nothing<NumberFormatDigitOptions>());
+ }
+
+ // g. Set intlObj.[[MinimumFractionDigits]] to mnfd.
digit_options.minimum_fraction_digits = mnfd;
- // 15. f. Set intlObj.[[MaximumFractionDigits]] to mxfd.
+ // h. Set intlObj.[[MaximumFractionDigits]] to mxfd.
digit_options.maximum_fraction_digits = mxfd;
// Else If intlObj.[[Notation]] is "compact", then
} else if (notation_is_compact) {
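
The rewritten block above tracks the updated ECMA-402 steps: both options are read up front, an explicit maximumFractionDigits lowers the default minimum, the two values are then resolved against their (possibly adjusted) defaults, and a RangeError is thrown when the minimum exceeds the maximum. A rough standalone model of that control flow, with std::optional standing in for undefined handles, a bool return standing in for Maybe, and the [0, 20] range checks omitted:

#include <algorithm>
#include <optional>

// Returns false to model a thrown RangeError.
bool ResolveFractionDigits(std::optional<int> mnfd_opt,
                           std::optional<int> mxfd_opt,
                           int mnfd_default, int mxfd_default,
                           int* mnfd_out, int* mxfd_out) {
  // Step c: an explicit maximum pulls the default minimum down.
  if (mxfd_opt.has_value()) {
    mnfd_default = std::min(mnfd_default, *mxfd_opt);
  }
  // Steps d and e: fall back to the adjusted defaults.
  int mnfd = mnfd_opt.value_or(mnfd_default);
  int mxfd = mxfd_opt.value_or(std::max(mxfd_default, mnfd));
  // Step f: the pair must stay ordered.
  if (mnfd > mxfd) return false;
  *mnfd_out = mnfd;
  *mxfd_out = mxfd;
  return true;
}
// With defaults 2/2 and only mxfd = 0 supplied, the old flow would have
// thrown (mxfd below the resolved minimum of 2); the new flow yields 0/0.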
diff --git a/deps/v8/src/objects/intl-objects.tq b/deps/v8/src/objects/intl-objects.tq
index 88714f2bee..61f85facfd 100644
--- a/deps/v8/src/objects/intl-objects.tq
+++ b/deps/v8/src/objects/intl-objects.tq
@@ -2,157 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include 'src/objects/js-break-iterator.h'
-#include 'src/objects/js-collator.h'
-#include 'src/objects/js-date-time-format.h'
-#include 'src/objects/js-display-names.h'
-#include 'src/objects/js-list-format.h'
-#include 'src/objects/js-locale.h'
-#include 'src/objects/js-number-format.h'
#include 'src/objects/js-objects.h'
-#include 'src/objects/js-plural-rules.h'
-#include 'src/objects/js-relative-time-format.h'
-#include 'src/objects/js-segment-iterator.h'
-#include 'src/objects/js-segmenter.h'
-#include 'src/objects/js-segments.h'
-
-type DateTimeStyle extends int32 constexpr 'JSDateTimeFormat::DateTimeStyle';
-type HourCycle extends int32 constexpr 'JSDateTimeFormat::HourCycle';
-bitfield struct JSDateTimeFormatFlags extends uint31 {
- hour_cycle: HourCycle: 3 bit;
- date_style: DateTimeStyle: 3 bit;
- time_style: DateTimeStyle: 3 bit;
-}
-
-@generateCppClass
-extern class JSDateTimeFormat extends JSObject {
- locale: String;
- icu_locale: Foreign; // Managed<icu::Locale>
- icu_simple_date_format: Foreign; // Managed<icu::SimpleDateFormat>
- icu_date_interval_format: Foreign; // Managed<icu::DateIntervalFormat>
- bound_format: JSFunction|Undefined;
- flags: SmiTagged<JSDateTimeFormatFlags>;
-}
-
-type JSDisplayNamesStyle extends int32 constexpr 'JSDisplayNames::Style';
-type JSDisplayNamesFallback extends int32
-constexpr 'JSDisplayNames::Fallback';
-bitfield struct JSDisplayNamesFlags extends uint31 {
- style: JSDisplayNamesStyle: 2 bit;
- fallback: JSDisplayNamesFallback: 1 bit;
-}
-
-@generateCppClass
-extern class JSDisplayNames extends JSObject {
- internal: Foreign; // Managed<DisplayNamesInternal>
- flags: SmiTagged<JSDisplayNamesFlags>;
-}
-
-type JSListFormatStyle extends int32 constexpr 'JSListFormat::Style';
-type JSListFormatType extends int32 constexpr 'JSListFormat::Type';
-bitfield struct JSListFormatFlags extends uint31 {
- style: JSListFormatStyle: 2 bit;
- Type: JSListFormatType: 2 bit; // "type" is a reserved word.
-}
-
-@generateCppClass
-extern class JSListFormat extends JSObject {
- locale: String;
- icu_formatter: Foreign; // Managed<icu::ListFormatter>
- flags: SmiTagged<JSListFormatFlags>;
-}
-
-@generateCppClass
-extern class JSNumberFormat extends JSObject {
- locale: String;
- icu_number_formatter:
- Foreign; // Managed<icu::number::LocalizedNumberFormatter>
- bound_format: JSFunction|Undefined;
-}
-
-type JSPluralRulesType extends int32 constexpr 'JSPluralRules::Type';
-bitfield struct JSPluralRulesFlags extends uint31 {
- Type: JSPluralRulesType: 1 bit; // "type" is a reserved word.
-}
-
-@generateCppClass
-extern class JSPluralRules extends JSObject {
- locale: String;
- flags: SmiTagged<JSPluralRulesFlags>;
- icu_plural_rules: Foreign; // Managed<icu::PluralRules>
- icu_number_formatter:
- Foreign; // Managed<icu::number::LocalizedNumberFormatter>
-}
-
-type JSRelativeTimeFormatNumeric extends int32
-constexpr 'JSRelativeTimeFormat::Numeric';
-bitfield struct JSRelativeTimeFormatFlags extends uint31 {
- numeric: JSRelativeTimeFormatNumeric: 1 bit;
-}
-
-@generateCppClass
-extern class JSRelativeTimeFormat extends JSObject {
- locale: String;
- numberingSystem: String;
- icu_formatter: Foreign; // Managed<icu::RelativeDateTimeFormatter>
- flags: SmiTagged<JSRelativeTimeFormatFlags>;
-}
-
-@generateCppClass
-extern class JSLocale extends JSObject {
- icu_locale: Foreign; // Managed<icu::Locale>
-}
-
-type JSSegmenterGranularity extends int32
-constexpr 'JSSegmenter::Granularity';
-bitfield struct JSSegmenterFlags extends uint31 {
- granularity: JSSegmenterGranularity: 2 bit;
-}
-
-@generateCppClass
-extern class JSSegmenter extends JSObject {
- locale: String;
- icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
- flags: SmiTagged<JSSegmenterFlags>;
-}
-
-bitfield struct JSSegmentsFlags extends uint31 {
- granularity: JSSegmenterGranularity: 2 bit;
-}
-
-@generateCppClass
-extern class JSSegments extends JSObject {
- icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
- unicode_string: Foreign; // Managed<icu::UnicodeString>
- flags: SmiTagged<JSSegmentsFlags>;
-}
-
-bitfield struct JSSegmentIteratorFlags extends uint31 {
- granularity: JSSegmenterGranularity: 2 bit;
-}
-
-@generateCppClass
-extern class JSSegmentIterator extends JSObject {
- icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
- unicode_string: Foreign; // Managed<icu::UnicodeString>
- flags: SmiTagged<JSSegmentIteratorFlags>;
-}
-
-@generateCppClass
-extern class JSV8BreakIterator extends JSObject {
- locale: String;
- break_iterator: Foreign; // Managed<icu::BreakIterator>;
- unicode_string: Foreign; // Managed<icu::UnicodeString>;
- bound_adopt_text: Undefined|JSFunction;
- bound_first: Undefined|JSFunction;
- bound_next: Undefined|JSFunction;
- bound_current: Undefined|JSFunction;
- bound_break_type: Undefined|JSFunction;
-}
-
-@generateCppClass
-extern class JSCollator extends JSObject {
- icu_collator: Foreign; // Managed<icu::Collator>
- bound_compare: Undefined|JSFunction;
- locale: String;
-}
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 1a9a89695c..9f2046382f 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -20,11 +20,17 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-array-buffer-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSArrayBuffer)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSArrayBufferView)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSTypedArray)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSDataView)
+void JSArrayBuffer::AllocateExternalPointerEntries(Isolate* isolate) {
+ InitExternalPointerField(kBackingStoreOffset, isolate);
+}
+
size_t JSArrayBuffer::byte_length() const {
return ReadField<size_t>(kByteLengthOffset);
}
@@ -34,26 +40,25 @@ void JSArrayBuffer::set_byte_length(size_t value) {
}
DEF_GETTER(JSArrayBuffer, backing_store, void*) {
- ExternalPointer_t encoded_value =
- ReadField<ExternalPointer_t>(kBackingStoreOffset);
- return reinterpret_cast<void*>(DecodeExternalPointer(isolate, encoded_value));
+ Address value = ReadExternalPointerField(kBackingStoreOffset, isolate,
+ kArrayBufferBackingStoreTag);
+ return reinterpret_cast<void*>(value);
}
void JSArrayBuffer::set_backing_store(Isolate* isolate, void* value) {
- ExternalPointer_t encoded_value =
- EncodeExternalPointer(isolate, reinterpret_cast<Address>(value));
- WriteField<ExternalPointer_t>(kBackingStoreOffset, encoded_value);
+ WriteExternalPointerField(kBackingStoreOffset, isolate,
+ reinterpret_cast<Address>(value),
+ kArrayBufferBackingStoreTag);
}
uint32_t JSArrayBuffer::GetBackingStoreRefForDeserialization() const {
- ExternalPointer_t encoded_value =
- ReadField<ExternalPointer_t>(kBackingStoreOffset);
- return static_cast<uint32_t>(encoded_value);
+ return static_cast<uint32_t>(
+ ReadField<ExternalPointer_t>(kBackingStoreOffset));
}
void JSArrayBuffer::SetBackingStoreRefForSerialization(uint32_t ref) {
- ExternalPointer_t encoded_value = ref;
- WriteField<ExternalPointer_t>(kBackingStoreOffset, encoded_value);
+ WriteField<ExternalPointer_t>(kBackingStoreOffset,
+ static_cast<ExternalPointer_t>(ref));
}
ArrayBufferExtension* JSArrayBuffer::extension() const {
@@ -160,7 +165,6 @@ BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_asmjs_memory,
BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_shared,
JSArrayBuffer::IsSharedBit)
-
size_t JSArrayBufferView::byte_offset() const {
return ReadField<size_t>(kByteOffsetOffset);
}
@@ -181,6 +185,10 @@ bool JSArrayBufferView::WasDetached() const {
return JSArrayBuffer::cast(buffer()).was_detached();
}
+void JSTypedArray::AllocateExternalPointerEntries(Isolate* isolate) {
+ InitExternalPointerField(kExternalPointerOffset, isolate);
+}
+
size_t JSTypedArray::length() const { return ReadField<size_t>(kLengthOffset); }
void JSTypedArray::set_length(size_t value) {
@@ -188,20 +196,23 @@ void JSTypedArray::set_length(size_t value) {
}
DEF_GETTER(JSTypedArray, external_pointer, Address) {
- ExternalPointer_t encoded_value =
- ReadField<ExternalPointer_t>(kExternalPointerOffset);
- return DecodeExternalPointer(isolate, encoded_value);
+ return ReadExternalPointerField(kExternalPointerOffset, isolate,
+ kTypedArrayExternalPointerTag);
+}
+
+DEF_GETTER(JSTypedArray, external_pointer_raw, ExternalPointer_t) {
+ return ReadField<ExternalPointer_t>(kExternalPointerOffset);
}
void JSTypedArray::set_external_pointer(Isolate* isolate, Address value) {
- ExternalPointer_t encoded_value = EncodeExternalPointer(isolate, value);
- WriteField<ExternalPointer_t>(kExternalPointerOffset, encoded_value);
+ WriteExternalPointerField(kExternalPointerOffset, isolate, value,
+ kTypedArrayExternalPointerTag);
}
Address JSTypedArray::ExternalPointerCompensationForOnHeapArray(
- const Isolate* isolate) {
+ IsolateRoot isolate) {
#ifdef V8_COMPRESS_POINTERS
- return GetIsolateRoot(isolate);
+ return isolate.address();
#else
return 0;
#endif
@@ -209,15 +220,14 @@ Address JSTypedArray::ExternalPointerCompensationForOnHeapArray(
uint32_t JSTypedArray::GetExternalBackingStoreRefForDeserialization() const {
DCHECK(!is_on_heap());
- ExternalPointer_t encoded_value =
- ReadField<ExternalPointer_t>(kExternalPointerOffset);
- return static_cast<uint32_t>(encoded_value);
+ return static_cast<uint32_t>(
+ ReadField<ExternalPointer_t>(kExternalPointerOffset));
}
void JSTypedArray::SetExternalBackingStoreRefForSerialization(uint32_t ref) {
DCHECK(!is_on_heap());
- ExternalPointer_t encoded_value = ref;
- WriteField<ExternalPointer_t>(kExternalPointerOffset, encoded_value);
+ WriteField<ExternalPointer_t>(kExternalPointerOffset,
+ static_cast<ExternalPointer_t>(ref));
}
void JSTypedArray::RemoveExternalPointerCompensationForSerialization(
@@ -227,9 +237,15 @@ void JSTypedArray::RemoveExternalPointerCompensationForSerialization(
// compensation by replacing external_pointer and base_pointer fields
// with one data_pointer field which can point to either external data
// backing store or into on-heap backing store.
- set_external_pointer(
- isolate,
- external_pointer() - ExternalPointerCompensationForOnHeapArray(isolate));
+ Address offset =
+ external_pointer() - ExternalPointerCompensationForOnHeapArray(isolate);
+#ifdef V8_HEAP_SANDBOX
+ // Write the decompensated offset directly to the external pointer field,
+ // thus allowing the offset to survive serialization and deserialization.
+ WriteField<ExternalPointer_t>(kExternalPointerOffset, offset);
+#else
+ set_external_pointer(isolate, offset);
+#endif
}
void* JSTypedArray::DataPtr() {
@@ -287,15 +303,18 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
}
DEF_GETTER(JSDataView, data_pointer, void*) {
- ExternalPointer_t encoded_value =
- ReadField<ExternalPointer_t>(kDataPointerOffset);
- return reinterpret_cast<void*>(DecodeExternalPointer(isolate, encoded_value));
+ return reinterpret_cast<void*>(ReadExternalPointerField(
+ kDataPointerOffset, isolate, kDataViewDataPointerTag));
+}
+
+void JSDataView::AllocateExternalPointerEntries(Isolate* isolate) {
+ InitExternalPointerField(kDataPointerOffset, isolate);
}
void JSDataView::set_data_pointer(Isolate* isolate, void* value) {
- WriteField<ExternalPointer_t>(
- kDataPointerOffset,
- EncodeExternalPointer(isolate, reinterpret_cast<Address>(value)));
+ WriteExternalPointerField(kDataPointerOffset, isolate,
+ reinterpret_cast<Address>(value),
+ kDataViewDataPointerTag);
}
} // namespace internal
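
These hunks replace the open-coded EncodeExternalPointer/DecodeExternalPointer pairs with tagged ReadExternalPointerField/WriteExternalPointerField accessors, and add AllocateExternalPointerEntries() so that table slots exist before the first write. A much-simplified model of the sandboxed scheme these accessors abstract over; the table, tags, and layout here are illustrative stand-ins, not V8's implementation:

#include <cstdint>
#include <vector>

using Address = uintptr_t;
using ExternalPointer_t = uint32_t;  // Index into a per-isolate table.

struct Isolate {
  std::vector<Address> external_pointer_table;
};

// Counterpart of AllocateExternalPointerEntries(): reserve the table slot
// at setup time so later accesses are plain indexed loads and stores.
ExternalPointer_t InitExternalPointerField(Isolate* isolate) {
  isolate->external_pointer_table.push_back(0);
  return static_cast<ExternalPointer_t>(
      isolate->external_pointer_table.size() - 1);
}

void WriteExternalPointerField(Isolate* isolate, ExternalPointer_t field,
                               Address value) {
  isolate->external_pointer_table[field] = value;  // Tag mixing elided.
}

Address ReadExternalPointerField(const Isolate* isolate,
                                 ExternalPointer_t field) {
  return isolate->external_pointer_table[field];
}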
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index c480e77041..72dfde896e 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -44,6 +44,7 @@ void JSArrayBuffer::Setup(SharedFlag shared,
SetEmbedderField(i, Smi::zero());
}
set_extension(nullptr);
+ AllocateExternalPointerEntries(GetIsolate());
if (!backing_store) {
set_backing_store(GetIsolate(), nullptr);
set_byte_length(0);
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index 543cbc1a34..6a61ce4385 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -17,6 +17,8 @@ namespace internal {
class ArrayBufferExtension;
+#include "torque-generated/src/objects/js-array-buffer-tq.inc"
+
class JSArrayBuffer
: public TorqueGeneratedJSArrayBuffer<JSArrayBuffer, JSObject> {
public:
@@ -30,6 +32,12 @@ class JSArrayBuffer
static constexpr size_t kMaxByteLength = kMaxSafeInteger;
#endif
+ // When the soft sandbox is enabled, creates entries in the external pointer
+ // table for all JSArrayBuffer fields that require soft sandbox protection
+ // (backing store pointer, backing store length, etc.).
+ // When the sandbox is not enabled, this is a no-op.
+ inline void AllocateExternalPointerEntries(Isolate* isolate);
+
// [byte_length]: length in bytes
DECL_PRIMITIVE_ACCESSORS(byte_length, size_t)
@@ -258,6 +266,12 @@ class JSTypedArray
V8_EXPORT_PRIVATE Handle<JSArrayBuffer> GetBuffer();
+ // When the soft sandbox is enabled, creates entries in the external pointer
+ // table for all JSTypedArray fields that require soft sandbox protection
+ // (external pointer, offset, length, etc.).
+ // When the sandbox is not enabled, this is a no-op.
+ inline void AllocateExternalPointerEntries(Isolate* isolate);
+
// Use with care: returns raw pointer into heap.
inline void* DataPtr();
@@ -278,7 +292,7 @@ class JSTypedArray
// as Tagged_t value and an |external_pointer| value.
// For full-pointer mode the compensation value is zero.
static inline Address ExternalPointerCompensationForOnHeapArray(
- const Isolate* isolate);
+ IsolateRoot isolate);
//
// Serializer/deserializer support.
@@ -324,6 +338,8 @@ class JSTypedArray
// [external_pointer]: TODO(v8:4153)
DECL_GETTER(external_pointer, Address)
+ DECL_GETTER(external_pointer_raw, ExternalPointer_t)
+
inline void set_external_pointer(Isolate* isolate, Address value);
TQ_OBJECT_CONSTRUCTORS(JSTypedArray)
@@ -336,6 +352,12 @@ class JSDataView
DECL_GETTER(data_pointer, void*)
inline void set_data_pointer(Isolate* isolate, void* value);
+ // When the soft sandbox is enabled, creates entries in the external pointer
+ // table for all JSDataView fields that require soft sandbox protection
+ // (data pointer, offset, length, etc.).
+ // When the sandbox is not enabled, this is a no-op.
+ inline void AllocateExternalPointerEntries(Isolate* isolate);
+
// Dispatched behavior.
DECL_PRINTER(JSDataView)
DECL_VERIFIER(JSDataView)
diff --git a/deps/v8/src/objects/js-array-buffer.tq b/deps/v8/src/objects/js-array-buffer.tq
index b7b547a1db..6dcf03bd05 100644
--- a/deps/v8/src/objects/js-array-buffer.tq
+++ b/deps/v8/src/objects/js-array-buffer.tq
@@ -49,6 +49,11 @@ extern class JSTypedArray extends JSArrayBufferView {
base_pointer: ByteArray|Smi;
}
+extern operator '.external_pointer_ptr' macro
+LoadJSTypedArrayExternalPointerPtr(JSTypedArray): RawPtr;
+extern operator '.external_pointer_ptr=' macro
+StoreJSTypedArrayExternalPointerPtr(JSTypedArray, RawPtr);
+
@generateCppClass
extern class JSDataView extends JSArrayBufferView {
data_pointer: ExternalPointer;
diff --git a/deps/v8/src/objects/js-break-iterator-inl.h b/deps/v8/src/objects/js-break-iterator-inl.h
index 729aff90af..dc85efe3fe 100644
--- a/deps/v8/src/objects/js-break-iterator-inl.h
+++ b/deps/v8/src/objects/js-break-iterator-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-break-iterator-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSV8BreakIterator)
ACCESSORS(JSV8BreakIterator, break_iterator, Managed<icu::BreakIterator>,
diff --git a/deps/v8/src/objects/js-break-iterator.h b/deps/v8/src/objects/js-break-iterator.h
index e06b7b42b0..92104084ad 100644
--- a/deps/v8/src/objects/js-break-iterator.h
+++ b/deps/v8/src/objects/js-break-iterator.h
@@ -27,6 +27,8 @@ class BreakIterator;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-break-iterator-tq.inc"
+
class JSV8BreakIterator
: public TorqueGeneratedJSV8BreakIterator<JSV8BreakIterator, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-break-iterator.tq b/deps/v8/src/objects/js-break-iterator.tq
new file mode 100644
index 0000000000..08d121520a
--- /dev/null
+++ b/deps/v8/src/objects/js-break-iterator.tq
@@ -0,0 +1,17 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-break-iterator.h'
+
+@generateCppClass
+extern class JSV8BreakIterator extends JSObject {
+ locale: String;
+ break_iterator: Foreign; // Managed<icu::BreakIterator>;
+ unicode_string: Foreign; // Managed<icu::UnicodeString>;
+ bound_adopt_text: Undefined|JSFunction;
+ bound_first: Undefined|JSFunction;
+ bound_next: Undefined|JSFunction;
+ bound_current: Undefined|JSFunction;
+ bound_break_type: Undefined|JSFunction;
+}
diff --git a/deps/v8/src/objects/js-collator-inl.h b/deps/v8/src/objects/js-collator-inl.h
index 30660f2e14..81ee95326a 100644
--- a/deps/v8/src/objects/js-collator-inl.h
+++ b/deps/v8/src/objects/js-collator-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-collator-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSCollator)
ACCESSORS(JSCollator, icu_collator, Managed<icu::Collator>, kIcuCollatorOffset)
diff --git a/deps/v8/src/objects/js-collator.h b/deps/v8/src/objects/js-collator.h
index 7e3cbc35c9..eaeac21d59 100644
--- a/deps/v8/src/objects/js-collator.h
+++ b/deps/v8/src/objects/js-collator.h
@@ -29,6 +29,8 @@ class Collator;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-collator-tq.inc"
+
class JSCollator : public TorqueGeneratedJSCollator<JSCollator, JSObject> {
public:
// ecma402/#sec-initializecollator
diff --git a/deps/v8/src/objects/js-collator.tq b/deps/v8/src/objects/js-collator.tq
new file mode 100644
index 0000000000..2e1c847534
--- /dev/null
+++ b/deps/v8/src/objects/js-collator.tq
@@ -0,0 +1,12 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-collator.h'
+
+@generateCppClass
+extern class JSCollator extends JSObject {
+ icu_collator: Foreign; // Managed<icu::Collator>
+ bound_compare: Undefined|JSFunction;
+ locale: String;
+}
diff --git a/deps/v8/src/objects/js-collection-inl.h b/deps/v8/src/objects/js-collection-inl.h
index 6bbaa9bc1f..f2471175aa 100644
--- a/deps/v8/src/objects/js-collection-inl.h
+++ b/deps/v8/src/objects/js-collection-inl.h
@@ -5,10 +5,10 @@
#ifndef V8_OBJECTS_JS_COLLECTION_INL_H_
#define V8_OBJECTS_JS_COLLECTION_INL_H_
-#include "src/objects/js-collection.h"
-
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/heap-object-inl.h"
+#include "src/objects/js-collection-iterator-inl.h"
+#include "src/objects/js-collection.h"
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/roots/roots-inl.h"
@@ -19,6 +19,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-collection-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSCollection)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSMap)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSSet)
@@ -26,10 +28,6 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakCollection)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakMap)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakSet)
-// TODO(jkummerow): Move JSCollectionIterator to js-collection.h?
-// TODO(jkummerow): Introduce IsJSCollectionIterator() check? Or unchecked
-// version of OBJECT_CONSTRUCTORS_IMPL macro?
-TQ_OBJECT_CONSTRUCTORS_IMPL(JSCollectionIterator)
template <class Derived, class TableType>
OrderedHashTableIterator<Derived, TableType>::OrderedHashTableIterator(
Address ptr)
@@ -51,7 +49,9 @@ CAST_ACCESSOR(JSMapIterator)
Object JSMapIterator::CurrentValue() {
OrderedHashMap table = OrderedHashMap::cast(this->table());
int index = Smi::ToInt(this->index());
- Object value = table.ValueAt(index);
+ DCHECK_GE(index, 0);
+ InternalIndex entry(index);
+ Object value = table.ValueAt(entry);
DCHECK(!value.IsTheHole());
return value;
}
diff --git a/deps/v8/src/objects/js-collection-iterator-inl.h b/deps/v8/src/objects/js-collection-iterator-inl.h
new file mode 100644
index 0000000000..d5354e76b7
--- /dev/null
+++ b/deps/v8/src/objects/js-collection-iterator-inl.h
@@ -0,0 +1,26 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_COLLECTION_ITERATOR_INL_H_
+#define V8_OBJECTS_JS_COLLECTION_ITERATOR_INL_H_
+
+#include "src/objects/js-collection-iterator.h"
+#include "src/objects/objects-inl.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/js-collection-iterator-tq-inl.inc"
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSCollectionIterator)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_COLLECTION_ITERATOR_INL_H_
diff --git a/deps/v8/src/objects/js-collection-iterator.h b/deps/v8/src/objects/js-collection-iterator.h
index b193aa84cd..feb3da37fa 100644
--- a/deps/v8/src/objects/js-collection-iterator.h
+++ b/deps/v8/src/objects/js-collection-iterator.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-collection-iterator-tq.inc"
+
class JSCollectionIterator
: public TorqueGeneratedJSCollectionIterator<JSCollectionIterator,
JSObject> {
diff --git a/deps/v8/src/objects/js-collection.h b/deps/v8/src/objects/js-collection.h
index a0350726c0..9b3e9d0541 100644
--- a/deps/v8/src/objects/js-collection.h
+++ b/deps/v8/src/objects/js-collection.h
@@ -17,6 +17,8 @@ namespace internal {
class OrderedHashSet;
class OrderedHashMap;
+#include "torque-generated/src/objects/js-collection-tq.inc"
+
class JSCollection
: public TorqueGeneratedJSCollection<JSCollection, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-date-time-format-inl.h b/deps/v8/src/objects/js-date-time-format-inl.h
index 56d44cacf9..fefe081f8f 100644
--- a/deps/v8/src/objects/js-date-time-format-inl.h
+++ b/deps/v8/src/objects/js-date-time-format-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-date-time-format-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSDateTimeFormat)
ACCESSORS(JSDateTimeFormat, icu_locale, Managed<icu::Locale>, kIcuLocaleOffset)
diff --git a/deps/v8/src/objects/js-date-time-format.h b/deps/v8/src/objects/js-date-time-format.h
index 64c89eeaeb..52815f9e86 100644
--- a/deps/v8/src/objects/js-date-time-format.h
+++ b/deps/v8/src/objects/js-date-time-format.h
@@ -31,6 +31,8 @@ class SimpleDateFormat;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-date-time-format-tq.inc"
+
class JSDateTimeFormat
: public TorqueGeneratedJSDateTimeFormat<JSDateTimeFormat, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-date-time-format.tq b/deps/v8/src/objects/js-date-time-format.tq
new file mode 100644
index 0000000000..f45db187eb
--- /dev/null
+++ b/deps/v8/src/objects/js-date-time-format.tq
@@ -0,0 +1,23 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-date-time-format.h'
+
+type DateTimeStyle extends int32 constexpr 'JSDateTimeFormat::DateTimeStyle';
+type HourCycle extends int32 constexpr 'JSDateTimeFormat::HourCycle';
+bitfield struct JSDateTimeFormatFlags extends uint31 {
+ hour_cycle: HourCycle: 3 bit;
+ date_style: DateTimeStyle: 3 bit;
+ time_style: DateTimeStyle: 3 bit;
+}
+
+@generateCppClass
+extern class JSDateTimeFormat extends JSObject {
+ locale: String;
+ icu_locale: Foreign; // Managed<icu::Locale>
+ icu_simple_date_format: Foreign; // Managed<icu::SimpleDateFormat>
+ icu_date_interval_format: Foreign; // Managed<icu::DateIntervalFormat>
+ bound_format: JSFunction|Undefined;
+ flags: SmiTagged<JSDateTimeFormatFlags>;
+}
diff --git a/deps/v8/src/objects/js-display-names-inl.h b/deps/v8/src/objects/js-display-names-inl.h
index 40bea22c97..5cc5b0b066 100644
--- a/deps/v8/src/objects/js-display-names-inl.h
+++ b/deps/v8/src/objects/js-display-names-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-display-names-tq-inl.inc"
+
ACCESSORS(JSDisplayNames, internal, Managed<DisplayNamesInternal>,
kInternalOffset)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSDisplayNames)
diff --git a/deps/v8/src/objects/js-display-names.h b/deps/v8/src/objects/js-display-names.h
index cd3ca1ea47..837184d1de 100644
--- a/deps/v8/src/objects/js-display-names.h
+++ b/deps/v8/src/objects/js-display-names.h
@@ -25,6 +25,8 @@ namespace internal {
class DisplayNamesInternal;
+#include "torque-generated/src/objects/js-display-names-tq.inc"
+
class JSDisplayNames
: public TorqueGeneratedJSDisplayNames<JSDisplayNames, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-display-names.tq b/deps/v8/src/objects/js-display-names.tq
new file mode 100644
index 0000000000..d2edf228d0
--- /dev/null
+++ b/deps/v8/src/objects/js-display-names.tq
@@ -0,0 +1,19 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-display-names.h'
+
+type JSDisplayNamesStyle extends int32 constexpr 'JSDisplayNames::Style';
+type JSDisplayNamesFallback extends int32
+constexpr 'JSDisplayNames::Fallback';
+bitfield struct JSDisplayNamesFlags extends uint31 {
+ style: JSDisplayNamesStyle: 2 bit;
+ fallback: JSDisplayNamesFallback: 1 bit;
+}
+
+@generateCppClass
+extern class JSDisplayNames extends JSObject {
+ internal: Foreign; // Managed<DisplayNamesInternal>
+ flags: SmiTagged<JSDisplayNamesFlags>;
+}
diff --git a/deps/v8/src/objects/js-function-inl.h b/deps/v8/src/objects/js-function-inl.h
index 606deb290a..c937f02311 100644
--- a/deps/v8/src/objects/js-function-inl.h
+++ b/deps/v8/src/objects/js-function-inl.h
@@ -20,6 +20,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-function-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSFunctionOrBoundFunction)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSBoundFunction)
OBJECT_CONSTRUCTORS_IMPL(JSFunction, JSFunctionOrBoundFunction)
@@ -69,7 +71,8 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
mode = ConcurrencyMode::kNotConcurrent;
}
- DCHECK(!is_compiled() || ActiveTierIsIgnition() || ActiveTierIsNCI());
+ DCHECK(!is_compiled() || ActiveTierIsIgnition() || ActiveTierIsNCI() ||
+ ActiveTierIsMidtierTurboprop());
DCHECK(!ActiveTierIsTurbofan());
DCHECK(shared().IsInterpreted());
DCHECK(shared().allows_lazy_compilation() ||
@@ -97,8 +100,8 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
}
bool JSFunction::IsInOptimizationQueue() {
- return has_feedback_vector() && feedback_vector().optimization_marker() ==
- OptimizationMarker::kInOptimizationQueue;
+ if (!has_feedback_vector()) return false;
+ return IsInOptimizationQueueMarker(feedback_vector().optimization_marker());
}
void JSFunction::CompleteInobjectSlackTrackingIfActive() {
@@ -147,20 +150,6 @@ void JSFunction::set_shared(SharedFunctionInfo value, WriteBarrierMode mode) {
CONDITIONAL_WRITE_BARRIER(*this, kSharedFunctionInfoOffset, value, mode);
}
-void JSFunction::ClearOptimizedCodeSlot(const char* reason) {
- if (has_feedback_vector() && feedback_vector().has_optimized_code()) {
- if (FLAG_trace_opt) {
- CodeTracer::Scope scope(GetIsolate()->GetCodeTracer());
- PrintF(scope.file(),
- "[evicting entry from optimizing code feedback slot (%s) for ",
- reason);
- ShortPrint(scope.file());
- PrintF(scope.file(), "]\n");
- }
- feedback_vector().ClearOptimizedCode();
- }
-}
-
void JSFunction::SetOptimizationMarker(OptimizationMarker marker) {
DCHECK(has_feedback_vector());
DCHECK(ChecksOptimizationMarker());
diff --git a/deps/v8/src/objects/js-function.cc b/deps/v8/src/objects/js-function.cc
index 6e83273e8f..6bb3665963 100644
--- a/deps/v8/src/objects/js-function.cc
+++ b/deps/v8/src/objects/js-function.cc
@@ -87,8 +87,11 @@ namespace {
// otherwise returns true and sets highest_tier.
bool HighestTierOf(CodeKinds kinds, CodeKind* highest_tier) {
DCHECK_EQ((kinds & ~kJSFunctionCodeKindsMask), 0);
- if ((kinds & CodeKindFlag::OPTIMIZED_FUNCTION) != 0) {
- *highest_tier = CodeKind::OPTIMIZED_FUNCTION;
+ if ((kinds & CodeKindFlag::TURBOFAN) != 0) {
+ *highest_tier = CodeKind::TURBOFAN;
+ return true;
+ } else if ((kinds & CodeKindFlag::TURBOPROP) != 0) {
+ *highest_tier = CodeKind::TURBOPROP;
return true;
} else if ((kinds & CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT) != 0) {
*highest_tier = CodeKind::NATIVE_CONTEXT_INDEPENDENT;
@@ -119,7 +122,7 @@ bool JSFunction::ActiveTierIsIgnition() const {
bool JSFunction::ActiveTierIsTurbofan() const {
CodeKind highest_tier;
if (!HighestTierOf(GetAvailableCodeKinds(), &highest_tier)) return false;
- return highest_tier == CodeKind::OPTIMIZED_FUNCTION;
+ return highest_tier == CodeKind::TURBOFAN;
}
bool JSFunction::ActiveTierIsNCI() const {
@@ -128,10 +131,30 @@ bool JSFunction::ActiveTierIsNCI() const {
return highest_tier == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
}
+bool JSFunction::ActiveTierIsToptierTurboprop() const {
+ CodeKind highest_tier;
+ if (!FLAG_turboprop) return false;
+ if (!HighestTierOf(GetAvailableCodeKinds(), &highest_tier)) return false;
+ return highest_tier == CodeKind::TURBOPROP && !FLAG_turboprop_as_midtier;
+}
+
+bool JSFunction::ActiveTierIsMidtierTurboprop() const {
+ CodeKind highest_tier;
+ if (!FLAG_turboprop_as_midtier) return false;
+ if (!HighestTierOf(GetAvailableCodeKinds(), &highest_tier)) return false;
+ return highest_tier == CodeKind::TURBOPROP && FLAG_turboprop_as_midtier;
+}
+
CodeKind JSFunction::NextTier() const {
- return (FLAG_turbo_nci_as_midtier && ActiveTierIsIgnition())
- ? CodeKind::NATIVE_CONTEXT_INDEPENDENT
- : CodeKind::OPTIMIZED_FUNCTION;
+ if (V8_UNLIKELY(FLAG_turbo_nci_as_midtier && ActiveTierIsIgnition())) {
+ return CodeKind::NATIVE_CONTEXT_INDEPENDENT;
+ } else if (V8_UNLIKELY(FLAG_turboprop) && ActiveTierIsMidtierTurboprop()) {
+ return CodeKind::TURBOFAN;
+ } else if (V8_UNLIKELY(FLAG_turboprop)) {
+ DCHECK(ActiveTierIsIgnition());
+ return CodeKind::TURBOPROP;
+ }
+ return CodeKind::TURBOFAN;
}
bool JSFunction::CanDiscardCompiled() const {
@@ -144,7 +167,7 @@ bool JSFunction::CanDiscardCompiled() const {
//
// Note that when the function has not yet been compiled we also return
// false; that's fine, since nothing must be discarded in that case.
- if (code().kind() == CodeKind::OPTIMIZED_FUNCTION) return true;
+ if (CodeKindIsOptimizedJSFunction(code().kind())) return true;
CodeKinds result = GetAvailableCodeKinds();
return (result & kJSFunctionCodeKindsMask) != 0;
}
@@ -205,7 +228,7 @@ Maybe<int> JSBoundFunction::GetLength(Isolate* isolate,
isolate);
int target_length = target->length();
- int length = Max(0, target_length - nof_bound_arguments);
+ int length = std::max(0, target_length - nof_bound_arguments);
return Just(length);
}
@@ -411,8 +434,9 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
Handle<HeapObject> prototype) {
- if (map->prototype() != *prototype)
+ if (map->prototype() != *prototype) {
Map::SetPrototype(function->GetIsolate(), map, prototype);
+ }
function->set_prototype_or_initial_map(*map);
map->SetConstructor(*function);
if (FLAG_trace_maps) {
@@ -604,9 +628,9 @@ bool FastInitializeDerivedMap(Isolate* isolate, Handle<JSFunction> new_target,
// 2) the prototype chain is modified during iteration, or 3) compilation
// failures occur during prototype chain iteration.
// So we take the maximum of the two values.
- int expected_nof_properties =
- Max(static_cast<int>(constructor->shared().expected_nof_properties()),
- JSFunction::CalculateExpectedNofProperties(isolate, new_target));
+ int expected_nof_properties = std::max(
+ static_cast<int>(constructor->shared().expected_nof_properties()),
+ JSFunction::CalculateExpectedNofProperties(isolate, new_target));
JSFunction::CalculateInstanceSizeHelper(
instance_type, true, embedder_fields, expected_nof_properties,
&instance_size, &in_object_properties);
@@ -894,8 +918,8 @@ void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
CHECK_LE(max_nof_fields, JSObject::kMaxInObjectProperties);
CHECK_LE(static_cast<unsigned>(requested_embedder_fields),
static_cast<unsigned>(max_nof_fields));
- *in_object_properties = Min(requested_in_object_properties,
- max_nof_fields - requested_embedder_fields);
+ *in_object_properties = std::min(requested_in_object_properties,
+ max_nof_fields - requested_embedder_fields);
*instance_size =
header_size +
((requested_embedder_fields + *in_object_properties) << kTaggedSizeLog2);
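
With OPTIMIZED_FUNCTION split into TURBOFAN and TURBOPROP, the tier-up decision in NextTier() now depends on three flags. Its logic reduces to roughly the following restatement; flag names are kept from the diff, everything else is simplified:

enum class CodeKind {
  IGNITION,
  NATIVE_CONTEXT_INDEPENDENT,
  TURBOPROP,
  TURBOFAN,
};

// Simplified restatement of JSFunction::NextTier() after this patch.
CodeKind NextTier(bool turbo_nci_as_midtier, bool turboprop,
                  bool turboprop_as_midtier, CodeKind active_tier) {
  if (turbo_nci_as_midtier && active_tier == CodeKind::IGNITION) {
    return CodeKind::NATIVE_CONTEXT_INDEPENDENT;
  }
  if (turboprop && turboprop_as_midtier &&
      active_tier == CodeKind::TURBOPROP) {
    return CodeKind::TURBOFAN;  // Mid-tier Turboprop tiers up to Turbofan.
  }
  if (turboprop) {
    return CodeKind::TURBOPROP;  // Top-tier Turboprop stops here.
  }
  return CodeKind::TURBOFAN;
}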
diff --git a/deps/v8/src/objects/js-function.h b/deps/v8/src/objects/js-function.h
index e7f2c0caf3..9fab6bd6c7 100644
--- a/deps/v8/src/objects/js-function.h
+++ b/deps/v8/src/objects/js-function.h
@@ -7,7 +7,6 @@
#include "src/objects/code-kind.h"
#include "src/objects/js-objects.h"
-#include "torque-generated/class-definitions.h"
#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
@@ -16,6 +15,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-function-tq.inc"
+
// An abstract superclass for classes representing JavaScript function values.
// It doesn't carry any functionality but allows function classes to be
// identified in the type system.
@@ -115,6 +116,8 @@ class JSFunction : public JSFunctionOrBoundFunction {
V8_EXPORT_PRIVATE bool ActiveTierIsIgnition() const;
bool ActiveTierIsTurbofan() const;
bool ActiveTierIsNCI() const;
+ bool ActiveTierIsMidtierTurboprop() const;
+ bool ActiveTierIsToptierTurboprop() const;
CodeKind NextTier() const;
@@ -141,9 +144,6 @@ class JSFunction : public JSFunctionOrBoundFunction {
// Tells whether or not the function is on the concurrent recompilation queue.
inline bool IsInOptimizationQueue();
- // Clears the optimized code slot in the function's feedback vector.
- inline void ClearOptimizedCodeSlot(const char* reason);
-
// Sets the optimization marker in the function's feedback vector.
inline void SetOptimizationMarker(OptimizationMarker marker);
diff --git a/deps/v8/src/objects/js-function.tq b/deps/v8/src/objects/js-function.tq
new file mode 100644
index 0000000000..b2e8aa6be2
--- /dev/null
+++ b/deps/v8/src/objects/js-function.tq
@@ -0,0 +1,34 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+@abstract
+@generateCppClass
+@highestInstanceTypeWithinParentClassRange
+extern class JSFunctionOrBoundFunction extends JSObject {
+}
+
+@generateCppClass
+extern class JSBoundFunction extends JSFunctionOrBoundFunction {
+ // The wrapped function object.
+ bound_target_function: Callable;
+ // The value that is always passed as the this value when calling the wrapped
+ // function.
+ bound_this: JSAny|SourceTextModule;
+ // A list of values whose elements are used as the first arguments to any call
+ // to the wrapped function.
+ bound_arguments: FixedArray;
+}
+
+@highestInstanceTypeWithinParentClassRange
+extern class JSFunction extends JSFunctionOrBoundFunction {
+ shared_function_info: SharedFunctionInfo;
+ context: Context;
+ feedback_cell: FeedbackCell;
+ weak code: Code;
+
+ // Space for the following field may or may not be allocated.
+ @noVerifier weak prototype_or_initial_map: JSReceiver|Map;
+}
+
+type JSFunctionWithPrototypeSlot extends JSFunction;
diff --git a/deps/v8/src/objects/js-generator-inl.h b/deps/v8/src/objects/js-generator-inl.h
index 2d5e9fe03e..4e93938710 100644
--- a/deps/v8/src/objects/js-generator-inl.h
+++ b/deps/v8/src/objects/js-generator-inl.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-generator-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSGeneratorObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSAsyncFunctionObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSAsyncGeneratorObject)
diff --git a/deps/v8/src/objects/js-generator.h b/deps/v8/src/objects/js-generator.h
index bf35595fdd..99f05abcbc 100644
--- a/deps/v8/src/objects/js-generator.h
+++ b/deps/v8/src/objects/js-generator.h
@@ -17,6 +17,8 @@ namespace internal {
// Forward declarations.
class JSPromise;
+#include "torque-generated/src/objects/js-generator-tq.inc"
+
class JSGeneratorObject
: public TorqueGeneratedJSGeneratorObject<JSGeneratorObject, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-list-format-inl.h b/deps/v8/src/objects/js-list-format-inl.h
index 5cf95db4d5..e7e0384c99 100644
--- a/deps/v8/src/objects/js-list-format-inl.h
+++ b/deps/v8/src/objects/js-list-format-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-list-format-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSListFormat)
// Base list format accessors.
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
index b17d38c43f..e48a387be5 100644
--- a/deps/v8/src/objects/js-list-format.cc
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -29,46 +29,27 @@ namespace v8 {
namespace internal {
namespace {
-const char* kStandard = "standard";
-const char* kOr = "or";
-const char* kUnit = "unit";
-const char* kStandardShort = "standard-short";
-const char* kOrShort = "or-short";
-const char* kUnitShort = "unit-short";
-const char* kStandardNarrow = "standard-narrow";
-const char* kOrNarrow = "or-narrow";
-const char* kUnitNarrow = "unit-narrow";
-
-const char* GetIcuStyleString(JSListFormat::Style style,
- JSListFormat::Type type) {
+
+UListFormatterWidth GetIcuWidth(JSListFormat::Style style) {
+ switch (style) {
+ case JSListFormat::Style::LONG:
+ return ULISTFMT_WIDTH_WIDE;
+ case JSListFormat::Style::SHORT:
+ return ULISTFMT_WIDTH_SHORT;
+ case JSListFormat::Style::NARROW:
+ return ULISTFMT_WIDTH_NARROW;
+ }
+ UNREACHABLE();
+}
+
+UListFormatterType GetIcuType(JSListFormat::Type type) {
switch (type) {
case JSListFormat::Type::CONJUNCTION:
- switch (style) {
- case JSListFormat::Style::LONG:
- return kStandard;
- case JSListFormat::Style::SHORT:
- return kStandardShort;
- case JSListFormat::Style::NARROW:
- return kStandardNarrow;
- }
+ return ULISTFMT_TYPE_AND;
case JSListFormat::Type::DISJUNCTION:
- switch (style) {
- case JSListFormat::Style::LONG:
- return kOr;
- case JSListFormat::Style::SHORT:
- return kOrShort;
- case JSListFormat::Style::NARROW:
- return kOrNarrow;
- }
+ return ULISTFMT_TYPE_OR;
case JSListFormat::Type::UNIT:
- switch (style) {
- case JSListFormat::Style::LONG:
- return kUnit;
- case JSListFormat::Style::SHORT:
- return kUnitShort;
- case JSListFormat::Style::NARROW:
- return kUnitNarrow;
- }
+ return ULISTFMT_TYPE_UNITS;
}
UNREACHABLE();
}
@@ -143,7 +124,7 @@ MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
icu::Locale icu_locale = r.icu_locale;
UErrorCode status = U_ZERO_ERROR;
icu::ListFormatter* formatter = icu::ListFormatter::createInstance(
- icu_locale, GetIcuStyleString(style_enum, type_enum), status);
+ icu_locale, GetIcuType(type_enum), GetIcuWidth(style_enum), status);
if (U_FAILURE(status) || formatter == nullptr) {
delete formatter;
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
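
ICU 68 retires the string-based style names ("standard-short", "or-narrow", and so on) in favor of the typed UListFormatterType/UListFormatterWidth enums used above, hence the new GetIcuType/GetIcuWidth helpers. A condensed usage sketch mirroring the createInstance call in this hunk, with error handling trimmed:

#include <unicode/listformatter.h>
#include <unicode/ulistformatter.h>

icu::ListFormatter* MakeShortConjunctionFormatter(const icu::Locale& locale) {
  UErrorCode status = U_ZERO_ERROR;
  // Typed enums replace the old concatenated style strings.
  icu::ListFormatter* formatter = icu::ListFormatter::createInstance(
      locale, ULISTFMT_TYPE_AND, ULISTFMT_WIDTH_SHORT, status);
  if (U_FAILURE(status) || formatter == nullptr) {
    delete formatter;
    return nullptr;
  }
  return formatter;
}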
diff --git a/deps/v8/src/objects/js-list-format.h b/deps/v8/src/objects/js-list-format.h
index 34878b5661..123f9e459e 100644
--- a/deps/v8/src/objects/js-list-format.h
+++ b/deps/v8/src/objects/js-list-format.h
@@ -29,6 +29,8 @@ class ListFormatter;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-list-format-tq.inc"
+
class JSListFormat
: public TorqueGeneratedJSListFormat<JSListFormat, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-list-format.tq b/deps/v8/src/objects/js-list-format.tq
new file mode 100644
index 0000000000..95d80ea96d
--- /dev/null
+++ b/deps/v8/src/objects/js-list-format.tq
@@ -0,0 +1,19 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-list-format.h'
+
+type JSListFormatStyle extends int32 constexpr 'JSListFormat::Style';
+type JSListFormatType extends int32 constexpr 'JSListFormat::Type';
+bitfield struct JSListFormatFlags extends uint31 {
+ style: JSListFormatStyle: 2 bit;
+ Type: JSListFormatType: 2 bit; // "type" is a reserved word.
+}
+
+@generateCppClass
+extern class JSListFormat extends JSObject {
+ locale: String;
+ icu_formatter: Foreign; // Managed<icu::ListFormatter>
+ flags: SmiTagged<JSListFormatFlags>;
+}
diff --git a/deps/v8/src/objects/js-locale-inl.h b/deps/v8/src/objects/js-locale-inl.h
index cbd62b9a93..49c4dc7b4f 100644
--- a/deps/v8/src/objects/js-locale-inl.h
+++ b/deps/v8/src/objects/js-locale-inl.h
@@ -19,6 +19,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-locale-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSLocale)
ACCESSORS(JSLocale, icu_locale, Managed<icu::Locale>, kIcuLocaleOffset)
diff --git a/deps/v8/src/objects/js-locale.h b/deps/v8/src/objects/js-locale.h
index 62dceac85d..d864c8272f 100644
--- a/deps/v8/src/objects/js-locale.h
+++ b/deps/v8/src/objects/js-locale.h
@@ -25,6 +25,8 @@ class Locale;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-locale-tq.inc"
+
class JSLocale : public TorqueGeneratedJSLocale<JSLocale, JSObject> {
public:
// Creates locale object with properties derived from input locale string
diff --git a/deps/v8/src/objects/js-locale.tq b/deps/v8/src/objects/js-locale.tq
new file mode 100644
index 0000000000..55c80f926f
--- /dev/null
+++ b/deps/v8/src/objects/js-locale.tq
@@ -0,0 +1,10 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-locale.h'
+
+@generateCppClass
+extern class JSLocale extends JSObject {
+ icu_locale: Foreign; // Managed<icu::Locale>
+}
diff --git a/deps/v8/src/objects/js-number-format-inl.h b/deps/v8/src/objects/js-number-format-inl.h
index 035eaf57a3..cddc93afd2 100644
--- a/deps/v8/src/objects/js-number-format-inl.h
+++ b/deps/v8/src/objects/js-number-format-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-number-format-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSNumberFormat)
ACCESSORS(JSNumberFormat, icu_number_formatter,
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index 45b0eab2db..daedb2a23a 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -389,17 +389,17 @@ Handle<String> CurrencySignString(Isolate* isolate,
Handle<String> UnitDisplayString(Isolate* isolate,
const icu::UnicodeString& skeleton) {
// Ex: skeleton as
- // "measure-unit/length-meter .### rounding-mode-half-up unit-width-full-name"
+ // "unit/length-meter .### rounding-mode-half-up unit-width-full-name"
if (skeleton.indexOf("unit-width-full-name") >= 0) {
return ReadOnlyRoots(isolate).long_string_handle();
}
// Ex: skeleton as
- // "measure-unit/length-meter .### rounding-mode-half-up unit-width-narrow".
+ // "unit/length-meter .### rounding-mode-half-up unit-width-narrow".
if (skeleton.indexOf("unit-width-narrow") >= 0) {
return ReadOnlyRoots(isolate).narrow_string_handle();
}
// Ex: skeleton as
- // "measure-unit/length-foot .### rounding-mode-half-up"
+ // "unit/length-foot .### rounding-mode-half-up"
return ReadOnlyRoots(isolate).short_string_handle();
}
@@ -422,7 +422,7 @@ Notation NotationFromSkeleton(const icu::UnicodeString& skeleton) {
return Notation::COMPACT;
}
// Ex: skeleton as
- // "measure-unit/length-foot .### rounding-mode-half-up"
+ // "unit/length-foot .### rounding-mode-half-up"
return Notation::STANDARD;
}
@@ -562,14 +562,14 @@ namespace {
// Ex: percent .### rounding-mode-half-up
// Special case for "percent"
-// Ex: "measure-unit/length-kilometer per-measure-unit/duration-hour .###
-// rounding-mode-half-up" should return "kilometer-per-unit".
-// Ex: "measure-unit/duration-year .### rounding-mode-half-up" should return
+// Ex: "unit/milliliter-per-acre .### rounding-mode-half-up"
+// should return "milliliter-per-acre".
+// Ex: "unit/year .### rounding-mode-half-up" should return
// "year".
std::string UnitFromSkeleton(const icu::UnicodeString& skeleton) {
std::string str;
str = skeleton.toUTF8String<std::string>(str);
- std::string search("measure-unit/");
+ std::string search("unit/");
size_t begin = str.find(search);
if (begin == str.npos) {
// Special case for "percent".
@@ -578,64 +578,44 @@ std::string UnitFromSkeleton(const icu::UnicodeString& skeleton) {
}
return "";
}
- // Skip the type (ex: "length").
- // "measure-unit/length-kilometer per-measure-unit/duration-hour"
- // b
- begin = str.find("-", begin + search.size());
+ // Ex:
+ // "unit/acre .### rounding-mode-half-up"
+ // b
+ // Ex:
+ // "unit/milliliter-per-acre .### rounding-mode-half-up"
+ // b
+ begin += search.size();
if (begin == str.npos) {
return "";
}
- begin++; // Skip the '-'.
// Find the end of the subtype.
size_t end = str.find(" ", begin);
- // "measure-unit/length-kilometer per-measure-unit/duration-hour"
- // b e
+ // Ex:
+ // "unit/acre .### rounding-mode-half-up"
+ // b e
+ // Ex:
+ // "unit/milliliter-per-acre .### rounding-mode-half-up"
+ // b e
if (end == str.npos) {
end = str.size();
- return str.substr(begin, end - begin);
- }
- // "measure-unit/length-kilometer per-measure-unit/duration-hour"
- // b e
- // [result ]
- std::string result = str.substr(begin, end - begin);
- begin = end + 1;
- // "measure-unit/length-kilometer per-measure-unit/duration-hour"
- // [result ]eb
- std::string search_per("per-measure-unit/");
- begin = str.find(search_per, begin);
- // "measure-unit/length-kilometer per-measure-unit/duration-hour"
- // [result ]e b
- if (begin == str.npos) {
- return result;
- }
- // Skip the type (ex: "duration").
- begin = str.find("-", begin + search_per.size());
- // "measure-unit/length-kilometer per-measure-unit/duration-hour"
- // [result ]e b
- if (begin == str.npos) {
- return result;
}
- begin++; // Skip the '-'.
- // "measure-unit/length-kilometer per-measure-unit/duration-hour"
- // [result ]e b
- end = str.find(" ", begin);
- if (end == str.npos) {
- end = str.size();
- }
- // "measure-unit/length-kilometer per-measure-unit/duration-hour"
- // [result ] b e
- return result + "-per-" + str.substr(begin, end - begin);
+ return str.substr(begin, end - begin);
}
Style StyleFromSkeleton(const icu::UnicodeString& skeleton) {
if (skeleton.indexOf("currency/") >= 0) {
return Style::CURRENCY;
}
- if (skeleton.indexOf("measure-unit/") >= 0) {
- if (skeleton.indexOf("scale/100") >= 0 &&
- skeleton.indexOf("measure-unit/concentr-percent") >= 0) {
+ if (skeleton.indexOf("percent") >= 0) {
+ // percent precision-integer rounding-mode-half-up scale/100
+ if (skeleton.indexOf("scale/100") >= 0) {
return Style::PERCENT;
+ } else {
+ return Style::UNIT;
}
+ }
+ // Before ICU 68: "measure-unit/"; since ICU 68: "unit/".
+ if (skeleton.indexOf("unit/") >= 0) {
return Style::UNIT;
}
return Style::DECIMAL;
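For illustration, a minimal standalone sketch of the new ICU 68 parsing path in UnitFromSkeleton above, using std::string in place of icu::UnicodeString; the function name and the main() driver are illustrative, not part of this patch:

#include <iostream>
#include <string>

// Extracts the unit identifier from a post-ICU68 number skeleton.
// Ex: "unit/milliliter-per-acre .### rounding-mode-half-up"
//  -> "milliliter-per-acre"
std::string UnitFromSkeletonSketch(const std::string& skeleton) {
  const std::string search = "unit/";
  size_t begin = skeleton.find(search);
  if (begin == std::string::npos) {
    // Special case: plain "percent" skeletons carry no "unit/" token.
    if (skeleton.find("percent") != std::string::npos) return "percent";
    return "";
  }
  begin += search.size();
  // The unit identifier runs until the next space (or the end of string);
  // "-per-" compounds are kept verbatim, which is what makes the new code
  // so much shorter than the old per-measure-unit stitching.
  size_t end = skeleton.find(' ', begin);
  if (end == std::string::npos) end = skeleton.size();
  return skeleton.substr(begin, end - begin);
}

int main() {
  std::cout << UnitFromSkeletonSketch(
                   "unit/milliliter-per-acre .### rounding-mode-half-up")
            << "\n";  // milliliter-per-acre
  std::cout << UnitFromSkeletonSketch("unit/year .### rounding-mode-half-up")
            << "\n";  // year
}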
diff --git a/deps/v8/src/objects/js-number-format.h b/deps/v8/src/objects/js-number-format.h
index 062f3e07a3..38710131d6 100644
--- a/deps/v8/src/objects/js-number-format.h
+++ b/deps/v8/src/objects/js-number-format.h
@@ -32,6 +32,8 @@ class LocalizedNumberFormatter;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-number-format-tq.inc"
+
class JSNumberFormat
: public TorqueGeneratedJSNumberFormat<JSNumberFormat, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-number-format.tq b/deps/v8/src/objects/js-number-format.tq
new file mode 100644
index 0000000000..b1b63016f1
--- /dev/null
+++ b/deps/v8/src/objects/js-number-format.tq
@@ -0,0 +1,13 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-number-format.h'
+
+@generateCppClass
+extern class JSNumberFormat extends JSObject {
+ locale: String;
+ icu_number_formatter:
+ Foreign; // Managed<icu::number::LocalizedNumberFormatter>
+ bound_format: JSFunction|Undefined;
+}
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index 9fcd183b89..65a50d3417 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_JS_OBJECTS_INL_H_
#define V8_OBJECTS_JS_OBJECTS_INL_H_
+#include "src/common/globals.h"
#include "src/heap/heap-write-barrier.h"
#include "src/objects/elements.h"
#include "src/objects/embedder-data-slot-inl.h"
@@ -27,6 +28,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-objects-tq-inl.inc"
+
OBJECT_CONSTRUCTORS_IMPL(JSReceiver, HeapObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSCustomElementsObject)
@@ -283,6 +286,10 @@ int JSObject::GetEmbedderFieldOffset(int index) {
return GetEmbedderFieldsStartOffset() + (kEmbedderDataSlotSize * index);
}
+void JSObject::InitializeEmbedderField(Isolate* isolate, int index) {
+ EmbedderDataSlot(*this, index).AllocateExternalPointerEntry(isolate);
+}
+
Object JSObject::GetEmbedderField(int index) {
return EmbedderDataSlot(*this, index).load_tagged();
}
@@ -296,11 +303,11 @@ void JSObject::SetEmbedderField(int index, Smi value) {
}
bool JSObject::IsUnboxedDoubleField(FieldIndex index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return IsUnboxedDoubleField(isolate, index);
}
-bool JSObject::IsUnboxedDoubleField(const Isolate* isolate,
+bool JSObject::IsUnboxedDoubleField(IsolateRoot isolate,
FieldIndex index) const {
if (!FLAG_unbox_double_fields) return false;
return map(isolate).IsUnboxedDoubleField(isolate, index);
@@ -310,11 +317,11 @@ bool JSObject::IsUnboxedDoubleField(const Isolate* isolate,
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
Object JSObject::RawFastPropertyAt(FieldIndex index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return RawFastPropertyAt(isolate, index);
}
-Object JSObject::RawFastPropertyAt(const Isolate* isolate,
+Object JSObject::RawFastPropertyAt(IsolateRoot isolate,
FieldIndex index) const {
DCHECK(!IsUnboxedDoubleField(isolate, index));
if (index.is_inobject()) {
@@ -357,7 +364,7 @@ void JSObject::RawFastDoublePropertyAsBitsAtPut(FieldIndex index,
// Double unboxing is enabled only on 64-bit platforms without pointer
// compression.
DCHECK_EQ(kDoubleSize, kTaggedSize);
- Address field_addr = FIELD_ADDR(*this, index.offset());
+ Address field_addr = field_address(index.offset());
base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(field_addr),
static_cast<base::AtomicWord>(bits));
}
@@ -633,9 +640,15 @@ void JSReceiver::initialize_properties(Isolate* isolate) {
ReadOnlyRoots roots(isolate);
DCHECK(!ObjectInYoungGeneration(roots.empty_fixed_array()));
DCHECK(!ObjectInYoungGeneration(roots.empty_property_dictionary()));
+ DCHECK(!ObjectInYoungGeneration(roots.empty_ordered_property_dictionary()));
if (map(isolate).is_dictionary_map()) {
- WRITE_FIELD(*this, kPropertiesOrHashOffset,
- roots.empty_property_dictionary());
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ WRITE_FIELD(*this, kPropertiesOrHashOffset,
+ roots.empty_ordered_property_dictionary());
+ } else {
+ WRITE_FIELD(*this, kPropertiesOrHashOffset,
+ roots.empty_property_dictionary());
+ }
} else {
WRITE_FIELD(*this, kPropertiesOrHashOffset, roots.empty_fixed_array());
}
@@ -644,7 +657,8 @@ void JSReceiver::initialize_properties(Isolate* isolate) {
DEF_GETTER(JSReceiver, HasFastProperties, bool) {
DCHECK(raw_properties_or_hash(isolate).IsSmi() ||
((raw_properties_or_hash(isolate).IsGlobalDictionary(isolate) ||
- raw_properties_or_hash(isolate).IsNameDictionary(isolate)) ==
+ raw_properties_or_hash(isolate).IsNameDictionary(isolate) ||
+ raw_properties_or_hash(isolate).IsOrderedNameDictionary(isolate)) ==
map(isolate).is_dictionary_map()));
return !map(isolate).is_dictionary_map();
}
@@ -652,6 +666,8 @@ DEF_GETTER(JSReceiver, HasFastProperties, bool) {
DEF_GETTER(JSReceiver, property_dictionary, NameDictionary) {
DCHECK(!IsJSGlobalObject(isolate));
DCHECK(!HasFastProperties(isolate));
+ DCHECK(!V8_DICT_MODE_PROTOTYPES_BOOL);
+
// Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
// i::GetIsolateForPtrCompr(HeapObject).
Object prop = raw_properties_or_hash(isolate);
@@ -661,6 +677,20 @@ DEF_GETTER(JSReceiver, property_dictionary, NameDictionary) {
return NameDictionary::cast(prop);
}
+DEF_GETTER(JSReceiver, property_dictionary_ordered, OrderedNameDictionary) {
+ DCHECK(!IsJSGlobalObject(isolate));
+ DCHECK(!HasFastProperties(isolate));
+ DCHECK(V8_DICT_MODE_PROTOTYPES_BOOL);
+
+ // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
+ // i::GetIsolateForPtrCompr(HeapObject).
+ Object prop = raw_properties_or_hash(isolate);
+ if (prop.IsSmi()) {
+ return GetReadOnlyRoots(isolate).empty_ordered_property_dictionary();
+ }
+ return OrderedNameDictionary::cast(prop);
+}
+
// TODO(gsathya): Pass isolate directly to this function and access
// the heap from this.
DEF_GETTER(JSReceiver, property_array, PropertyArray) {
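The hunks above repeat one pattern: every consumer of slow properties now branches on V8_DICT_MODE_PROTOTYPES_BOOL between NameDictionary and the insertion-ordered OrderedNameDictionary. A toy sketch of that compile-time selection, with stand-in types rather than V8's classes:

#include <cstdio>

// Stand-in for the build-time flag V8_DICT_MODE_PROTOTYPES_BOOL.
constexpr bool kDictModePrototypes = false;

// Stand-ins for NameDictionary and OrderedNameDictionary.
struct NameDict { static const char* Name() { return "NameDictionary"; } };
struct OrderedNameDict {
  static const char* Name() { return "OrderedNameDictionary"; }
};

// Branch once on the constant flag and use the matching store; because the
// condition is a compile-time constant, the untaken branch is trivially
// dead and can be dropped by the compiler.
void DescribeSlowProperties() {
  if (kDictModePrototypes) {
    std::printf("slow properties: %s\n", OrderedNameDict::Name());
  } else {
    std::printf("slow properties: %s\n", NameDict::Name());
  }
}

int main() { DescribeSlowProperties(); }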
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index 2d095d1743..f889c43499 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -5,6 +5,7 @@
#include "src/objects/js-objects.h"
#include "src/api/api-arguments-inl.h"
+#include "src/common/globals.h"
#include "src/date/date.h"
#include "src/execution/arguments.h"
#include "src/execution/frames.h"
@@ -70,8 +71,6 @@
#include "src/strings/string-stream.h"
#include "src/utils/ostreams.h"
#include "src/wasm/wasm-objects.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions.h"
namespace v8 {
namespace internal {
@@ -221,7 +220,8 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
return Just(false);
}
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate);
bool stable = true;
@@ -233,7 +233,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
// Directly decode from the descriptor array if |from| did not change shape.
if (stable) {
DCHECK_EQ(from->map(), *map);
- DCHECK_EQ(*descriptors, map->instance_descriptors());
+ DCHECK_EQ(*descriptors, map->instance_descriptors(kRelaxedLoad));
PropertyDetails details = descriptors->GetDetails(i);
if (!details.IsEnumerable()) continue;
@@ -252,7 +252,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
stable = from->map() == *map;
- descriptors.PatchValue(map->instance_descriptors());
+ descriptors.PatchValue(map->instance_descriptors(kRelaxedLoad));
}
} else {
// If the map did change, do a slower lookup. We are still guaranteed that
@@ -278,7 +278,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
if (result.IsNothing()) return result;
if (stable) {
stable = from->map() == *map;
- descriptors.PatchValue(map->instance_descriptors());
+ descriptors.PatchValue(map->instance_descriptors(kRelaxedLoad));
}
} else {
if (excluded_properties != nullptr &&
@@ -318,15 +318,24 @@ Maybe<bool> JSReceiver::SetOrCopyDataProperties(
GetKeysConversion::kKeepNumbers),
Nothing<bool>());
- if (!from->HasFastProperties() && target->HasFastProperties()) {
+ if (!from->HasFastProperties() && target->HasFastProperties() &&
+ !target->IsJSGlobalProxy()) {
+ // JSProxy is always in slow-mode.
+ DCHECK(!target->IsJSProxy());
// Convert to slow properties if we're guaranteed to overflow the number of
// descriptors.
- int source_length =
- from->IsJSGlobalObject()
- ? JSGlobalObject::cast(*from)
- .global_dictionary()
- .NumberOfEnumerableProperties()
- : from->property_dictionary().NumberOfEnumerableProperties();
+ int source_length;
+ if (from->IsJSGlobalObject()) {
+ source_length = JSGlobalObject::cast(*from)
+ .global_dictionary()
+ .NumberOfEnumerableProperties();
+ } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ source_length =
+ from->property_dictionary_ordered().NumberOfEnumerableProperties();
+ } else {
+ source_length =
+ from->property_dictionary().NumberOfEnumerableProperties();
+ }
if (source_length > kMaxNumberOfDescriptors) {
JSObject::NormalizeProperties(isolate, Handle<JSObject>::cast(target),
CLEAR_INOBJECT_PROPERTIES, source_length,
@@ -606,7 +615,8 @@ Object SetHashAndUpdateProperties(HeapObject properties, int hash) {
ReadOnlyRoots roots = properties.GetReadOnlyRoots();
if (properties == roots.empty_fixed_array() ||
properties == roots.empty_property_array() ||
- properties == roots.empty_property_dictionary()) {
+ properties == roots.empty_property_dictionary() ||
+ properties == roots.empty_ordered_property_dictionary()) {
return Smi::FromInt(hash);
}
@@ -621,8 +631,13 @@ Object SetHashAndUpdateProperties(HeapObject properties, int hash) {
return properties;
}
- DCHECK(properties.IsNameDictionary());
- NameDictionary::cast(properties).SetHash(hash);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ DCHECK(properties.IsOrderedNameDictionary());
+ OrderedNameDictionary::cast(properties).SetHash(hash);
+ } else {
+ DCHECK(properties.IsNameDictionary());
+ NameDictionary::cast(properties).SetHash(hash);
+ }
return properties;
}
@@ -636,8 +651,12 @@ int GetIdentityHashHelper(JSReceiver object) {
if (properties.IsPropertyArray()) {
return PropertyArray::cast(properties).Hash();
}
+ if (V8_DICT_MODE_PROTOTYPES_BOOL && properties.IsOrderedNameDictionary()) {
+ return OrderedNameDictionary::cast(properties).Hash();
+ }
if (properties.IsNameDictionary()) {
+ DCHECK(!V8_DICT_MODE_PROTOTYPES_BOOL);
return NameDictionary::cast(properties).Hash();
}
@@ -648,7 +667,8 @@ int GetIdentityHashHelper(JSReceiver object) {
#ifdef DEBUG
ReadOnlyRoots roots = object.GetReadOnlyRoots();
DCHECK(properties == roots.empty_fixed_array() ||
- properties == roots.empty_property_dictionary());
+ properties == roots.empty_property_dictionary() ||
+ properties == roots.empty_ordered_property_dictionary());
#endif
return PropertyArray::kNoHashSentinel;
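SetHashAndUpdateProperties and GetIdentityHashHelper above work because the identity hash piggybacks on whatever currently occupies the properties slot, so each backing-store kind needs its own accessor. A compact model of that dispatch, with std::variant standing in for V8's tagged values:

#include <iostream>
#include <type_traits>
#include <variant>

struct PropertyArray { int hash; };
struct NameDict { int hash; };
struct OrderedNameDict { int hash; };

// The slot holds either a bare Smi-like hash or one of the backing stores.
using PropertiesSlot =
    std::variant<int, PropertyArray, NameDict, OrderedNameDict>;

int GetIdentityHash(const PropertiesSlot& slot) {
  // One branch per backing-store kind, mirroring GetIdentityHashHelper
  // (which now also knows about OrderedNameDictionary).
  return std::visit(
      [](const auto& s) -> int {
        if constexpr (std::is_same_v<std::decay_t<decltype(s)>, int>) {
          return s;  // hash stored directly in the slot
        } else {
          return s.hash;  // hash stored inside the backing store
        }
      },
      slot);
}

int main() {
  PropertiesSlot slot = OrderedNameDict{42};
  std::cout << GetIdentityHash(slot) << "\n";  // 42
}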
@@ -734,10 +754,19 @@ void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
cell->ClearAndInvalidate(ReadOnlyRoots(isolate));
} else {
- Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ Handle<OrderedNameDictionary> dictionary(
+ object->property_dictionary_ordered(), isolate);
- dictionary = NameDictionary::DeleteEntry(isolate, dictionary, entry);
- object->SetProperties(*dictionary);
+ dictionary =
+ OrderedNameDictionary::DeleteEntry(isolate, dictionary, entry);
+ object->SetProperties(*dictionary);
+ } else {
+ Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
+
+ dictionary = NameDictionary::DeleteEntry(isolate, dictionary, entry);
+ object->SetProperties(*dictionary);
+ }
}
if (object->map().is_prototype_map()) {
// Invalidate prototype validity cell as this may invalidate transitioning
@@ -1855,7 +1884,8 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
if (!map->OnlyHasSimpleProperties()) return Just(false);
Handle<JSObject> object(JSObject::cast(*receiver), isolate);
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
size_t number_of_own_elements =
@@ -1883,7 +1913,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
// side-effects.
bool stable = *map == object->map();
if (stable) {
- descriptors.PatchValue(map->instance_descriptors());
+ descriptors.PatchValue(map->instance_descriptors(kRelaxedLoad));
}
for (InternalIndex index : InternalIndex::Range(number_of_own_descriptors)) {
@@ -1896,7 +1926,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
// Directly decode from the descriptor array if |from| did not change shape.
if (stable) {
DCHECK_EQ(object->map(), *map);
- DCHECK_EQ(*descriptors, map->instance_descriptors());
+ DCHECK_EQ(*descriptors, map->instance_descriptors(kRelaxedLoad));
PropertyDetails details = descriptors->GetDetails(index);
if (!details.IsEnumerable()) continue;
@@ -1917,7 +1947,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
stable = object->map() == *map;
- descriptors.PatchValue(map->instance_descriptors());
+ descriptors.PatchValue(map->instance_descriptors(kRelaxedLoad));
}
} else {
// If the map did change, do a slower lookup. We are still guaranteed that
@@ -2038,6 +2068,21 @@ bool JSReceiver::HasProxyInPrototype(Isolate* isolate) {
return false;
}
+bool JSReceiver::IsCodeLike(Isolate* isolate) const {
+ DisallowGarbageCollection no_gc;
+ Object maybe_constructor = map().GetConstructor();
+ if (!maybe_constructor.IsJSFunction()) return false;
+ if (!JSFunction::cast(maybe_constructor).shared().IsApiFunction()) {
+ return false;
+ }
+ Object instance_template = JSFunction::cast(maybe_constructor)
+ .shared()
+ .get_api_func_data()
+ .GetInstanceTemplate();
+ if (instance_template.IsUndefined(isolate)) return false;
+ return ObjectTemplateInfo::cast(instance_template).code_like();
+}
+
// static
MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
Handle<JSReceiver> new_target,
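IsCodeLike() above implements the TC39 "Dynamic Code Brand Checks" walk: object -> constructor -> API function data -> instance template -> code_like flag, failing closed at every missing link. A simplified model with plain pointers standing in for V8's internal types:

#include <iostream>

struct ObjectTemplateInfo { bool code_like; };
struct FunctionTemplateInfo { ObjectTemplateInfo* instance_template; };
struct JSFunction { FunctionTemplateInfo* api_func_data; };
struct JSReceiver { JSFunction* constructor; };

// Returns true only when every link exists and the template is marked
// code-like; a missing constructor, a non-API function, or an undefined
// template all mean "not code like".
bool IsCodeLikeSketch(const JSReceiver& obj) {
  if (obj.constructor == nullptr) return false;
  if (obj.constructor->api_func_data == nullptr) return false;
  ObjectTemplateInfo* tmpl = obj.constructor->api_func_data->instance_template;
  if (tmpl == nullptr) return false;
  return tmpl->code_like;
}

int main() {
  ObjectTemplateInfo tmpl{true};
  FunctionTemplateInfo fti{&tmpl};
  JSFunction ctor{&fti};
  JSReceiver obj{&ctor};
  std::cout << std::boolalpha << IsCodeLikeSketch(obj) << "\n";  // true
}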
@@ -2057,9 +2102,11 @@ MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, initial_map,
JSFunction::GetDerivedMap(isolate, constructor, new_target), JSObject);
+ int initial_capacity = V8_DICT_MODE_PROTOTYPES_BOOL
+ ? OrderedNameDictionary::kInitialCapacity
+ : NameDictionary::kInitialCapacity;
Handle<JSObject> result = isolate->factory()->NewFastOrSlowJSObjectFromMap(
- initial_map, NameDictionary::kInitialCapacity, AllocationType::kYoung,
- site);
+ initial_map, initial_capacity, AllocationType::kYoung, site);
isolate->counters()->constructed_objects()->Increment();
isolate->counters()->constructed_objects_runtime()->Increment();
return result;
@@ -2368,21 +2415,36 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name,
cell->set_value(*value);
}
} else {
- Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
-
- InternalIndex entry = dictionary->FindEntry(isolate, name);
- if (entry.is_not_found()) {
- DCHECK_IMPLIES(object->map().is_prototype_map(),
- Map::IsPrototypeChainInvalidated(object->map()));
- dictionary =
- NameDictionary::Add(isolate, dictionary, name, value, details);
- object->SetProperties(*dictionary);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ Handle<OrderedNameDictionary> dictionary(
+ object->property_dictionary_ordered(), isolate);
+ InternalIndex entry = dictionary->FindEntry(isolate, *name);
+ if (entry.is_not_found()) {
+ DCHECK_IMPLIES(object->map().is_prototype_map(),
+ Map::IsPrototypeChainInvalidated(object->map()));
+ dictionary = OrderedNameDictionary::Add(isolate, dictionary, name,
+ value, details)
+ .ToHandleChecked();
+ object->SetProperties(*dictionary);
+ } else {
+ dictionary->SetEntry(entry, *name, *value, details);
+ }
} else {
- PropertyDetails original_details = dictionary->DetailsAt(entry);
- int enumeration_index = original_details.dictionary_index();
- DCHECK_GT(enumeration_index, 0);
- details = details.set_index(enumeration_index);
- dictionary->SetEntry(entry, *name, *value, details);
+ Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
+ InternalIndex entry = dictionary->FindEntry(isolate, name);
+ if (entry.is_not_found()) {
+ DCHECK_IMPLIES(object->map().is_prototype_map(),
+ Map::IsPrototypeChainInvalidated(object->map()));
+ dictionary =
+ NameDictionary::Add(isolate, dictionary, name, value, details);
+ object->SetProperties(*dictionary);
+ } else {
+ PropertyDetails original_details = dictionary->DetailsAt(entry);
+ int enumeration_index = original_details.dictionary_index();
+ DCHECK_GT(enumeration_index, 0);
+ details = details.set_index(enumeration_index);
+ dictionary->SetEntry(entry, *name, *value, details);
+ }
}
}
}
@@ -2539,8 +2601,8 @@ void JSObject::PrintInstanceMigration(FILE* file, Map original_map,
return;
}
PrintF(file, "[migrating]");
- DescriptorArray o = original_map.instance_descriptors();
- DescriptorArray n = new_map.instance_descriptors();
+ DescriptorArray o = original_map.instance_descriptors(kRelaxedLoad);
+ DescriptorArray n = new_map.instance_descriptors(kRelaxedLoad);
for (InternalIndex i : original_map.IterateOwnDescriptors()) {
Representation o_r = o.GetDetails(i).representation();
Representation n_r = n.GetDetails(i).representation();
@@ -2728,9 +2790,9 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
isolate->factory()->NewFixedArray(inobject);
Handle<DescriptorArray> old_descriptors(
- old_map->instance_descriptors(isolate), isolate);
+ old_map->instance_descriptors(isolate, kRelaxedLoad), isolate);
Handle<DescriptorArray> new_descriptors(
- new_map->instance_descriptors(isolate), isolate);
+ new_map->instance_descriptors(isolate, kRelaxedLoad), isolate);
int old_nof = old_map->NumberOfOwnDescriptors();
int new_nof = new_map->NumberOfOwnDescriptors();
@@ -2818,7 +2880,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
// Copy (real) inobject properties. If necessary, stop at number_of_fields to
// avoid overwriting |one_pointer_filler_map|.
- int limit = Min(inobject, number_of_fields);
+ int limit = std::min(inobject, number_of_fields);
for (int i = 0; i < limit; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
Object value = inobject_props->get(isolate, i);
@@ -2883,12 +2945,23 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
property_count += expected_additional_properties;
} else {
// Make space for two more properties.
- property_count += NameDictionary::kInitialCapacity;
+ int initial_capacity = V8_DICT_MODE_PROTOTYPES_BOOL
+ ? OrderedNameDictionary::kInitialCapacity
+ : NameDictionary::kInitialCapacity;
+ property_count += initial_capacity;
}
- Handle<NameDictionary> dictionary =
- NameDictionary::New(isolate, property_count);
- Handle<DescriptorArray> descs(map->instance_descriptors(isolate), isolate);
+ Handle<NameDictionary> dictionary;
+ Handle<OrderedNameDictionary> ord_dictionary;
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ ord_dictionary =
+ isolate->factory()->NewOrderedNameDictionary(property_count);
+ } else {
+ dictionary = isolate->factory()->NewNameDictionary(property_count);
+ }
+
+ Handle<DescriptorArray> descs(
+ map->instance_descriptors(isolate, kRelaxedLoad), isolate);
for (InternalIndex i : InternalIndex::Range(real_size)) {
PropertyDetails details = descs->GetDetails(i);
Handle<Name> key(descs->GetKey(isolate, i), isolate);
@@ -2919,11 +2992,19 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
DCHECK(!value.is_null());
PropertyDetails d(details.kind(), details.attributes(),
PropertyCellType::kNoCell);
- dictionary = NameDictionary::Add(isolate, dictionary, key, value, d);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ ord_dictionary =
+ OrderedNameDictionary::Add(isolate, ord_dictionary, key, value, d)
+ .ToHandleChecked();
+ } else {
+ dictionary = NameDictionary::Add(isolate, dictionary, key, value, d);
+ }
}
- // Copy the next enumeration index from instance descriptor.
- dictionary->set_next_enumeration_index(real_size + 1);
+ if (!V8_DICT_MODE_PROTOTYPES_BOOL) {
+ // Copy the next enumeration index from instance descriptor.
+ dictionary->set_next_enumeration_index(real_size + 1);
+ }
// From here on we cannot fail and we shouldn't GC anymore.
DisallowHeapAllocation no_allocation;
@@ -2951,7 +3032,11 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
// the left-over space to avoid races with the sweeper thread.
object->synchronized_set_map(*new_map);
- object->SetProperties(*dictionary);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ object->SetProperties(*ord_dictionary);
+ } else {
+ object->SetProperties(*dictionary);
+ }
// Ensure that in-object space of slow-mode object does not contain random
// garbage.
@@ -3079,7 +3164,8 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
if (!FLAG_unbox_double_fields || external > 0) {
Isolate* isolate = object->GetIsolate();
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate);
Handle<FixedArray> storage;
if (!FLAG_unbox_double_fields) {
storage = isolate->factory()->NewFixedArray(inobject);
@@ -3332,26 +3418,52 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
DCHECK(!object->IsJSGlobalObject());
Isolate* isolate = object->GetIsolate();
Factory* factory = isolate->factory();
- Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
+
+ Handle<NameDictionary> dictionary;
+ Handle<OrderedNameDictionary> ord_dictionary;
+ int number_of_elements;
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ ord_dictionary = handle(object->property_dictionary_ordered(), isolate);
+ number_of_elements = ord_dictionary->NumberOfElements();
+ } else {
+ dictionary = handle(object->property_dictionary(), isolate);
+ number_of_elements = dictionary->NumberOfElements();
+ }
// Make sure we preserve dictionary representation if there are too many
// descriptors.
- int number_of_elements = dictionary->NumberOfElements();
if (number_of_elements > kMaxNumberOfDescriptors) return;
- Handle<FixedArray> iteration_order =
- NameDictionary::IterationIndices(isolate, dictionary);
+ Handle<FixedArray> iteration_order;
+ int iteration_length;
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ // |iteration_order| remains an empty handle; we don't need it.
+ iteration_length = ord_dictionary->UsedCapacity();
+ } else {
+ iteration_order = NameDictionary::IterationIndices(isolate, dictionary);
+ iteration_length = dictionary->NumberOfElements();
+ }
- int instance_descriptor_length = iteration_order->length();
int number_of_fields = 0;
// Compute the length of the instance descriptor.
ReadOnlyRoots roots(isolate);
- for (int i = 0; i < instance_descriptor_length; i++) {
- InternalIndex index(Smi::ToInt(iteration_order->get(i)));
- DCHECK(dictionary->IsKey(roots, dictionary->KeyAt(isolate, index)));
+ for (int i = 0; i < iteration_length; i++) {
+ PropertyKind kind;
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ InternalIndex index(i);
+ Object key = ord_dictionary->KeyAt(index);
+ if (!OrderedNameDictionary::IsKey(roots, key)) {
+ // Ignore deleted entries.
+ continue;
+ }
+ kind = ord_dictionary->DetailsAt(index).kind();
+ } else {
+ InternalIndex index(Smi::ToInt(iteration_order->get(i)));
+ DCHECK(dictionary->IsKey(roots, dictionary->KeyAt(isolate, index)));
+ kind = dictionary->DetailsAt(index).kind();
+ }
- PropertyKind kind = dictionary->DetailsAt(index).kind();
if (kind == kData) {
number_of_fields += 1;
}
@@ -3371,7 +3483,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
NotifyMapChange(old_map, new_map, isolate);
- if (instance_descriptor_length == 0) {
+ if (number_of_elements == 0) {
DisallowHeapAllocation no_gc;
DCHECK_LE(unused_property_fields, inobject_props);
// Transform the object.
@@ -3388,7 +3500,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
// Allocate the instance descriptor.
Handle<DescriptorArray> descriptors =
- DescriptorArray::Allocate(isolate, instance_descriptor_length, 0);
+ DescriptorArray::Allocate(isolate, number_of_elements, 0);
int number_of_allocated_fields =
number_of_fields + unused_property_fields - inobject_props;
@@ -3407,9 +3519,30 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
// Fill in the instance descriptor and the fields.
int current_offset = 0;
- for (int i = 0; i < instance_descriptor_length; i++) {
- InternalIndex index(Smi::ToInt(iteration_order->get(i)));
- Name k = dictionary->NameAt(index);
+ int descriptor_index = 0;
+ for (int i = 0; i < iteration_length; i++) {
+ Name k;
+ Object value;
+ PropertyDetails details = PropertyDetails::Empty();
+
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ InternalIndex index(i);
+ Object key_obj = ord_dictionary->KeyAt(index);
+ if (!OrderedNameDictionary::IsKey(roots, key_obj)) {
+ continue;
+ }
+ k = Name::cast(key_obj);
+
+ value = ord_dictionary->ValueAt(index);
+ details = ord_dictionary->DetailsAt(index);
+ } else {
+ InternalIndex index(Smi::ToInt(iteration_order->get(i)));
+ k = dictionary->NameAt(index);
+
+ value = dictionary->ValueAt(index);
+ details = dictionary->DetailsAt(index);
+ }
+
// Dictionary keys are internalized upon insertion.
// TODO(jkummerow): Turn this into a DCHECK if it's not hit in the wild.
CHECK(k.IsUniqueName());
@@ -3420,9 +3553,6 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
new_map->set_may_have_interesting_symbols(true);
}
- Object value = dictionary->ValueAt(index);
-
- PropertyDetails details = dictionary->DetailsAt(index);
DCHECK_EQ(kField, details.location());
DCHECK_EQ(PropertyConstness::kMutable, details.constness());
@@ -3453,9 +3583,10 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
}
current_offset += details.field_width_in_words();
}
- descriptors->Set(InternalIndex(i), &d);
+ descriptors->Set(InternalIndex(descriptor_index++), &d);
}
- DCHECK(current_offset == number_of_fields);
+ DCHECK_EQ(current_offset, number_of_fields);
+ DCHECK_EQ(descriptor_index, number_of_elements);
descriptors->Sort();
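The rewritten loop above iterates the ordered dictionary's used capacity, skips deleted entries, and writes survivors at a separately tracked dense descriptor_index; the new DCHECK_EQ(descriptor_index, number_of_elements) asserts the counts reconcile. The skip-and-compact pattern in isolation, with std::optional standing in for the hole sentinel:

#include <cassert>
#include <optional>
#include <string>
#include <vector>

int main() {
  // Ordered dictionary slots with one deleted entry (the nullopt hole).
  std::vector<std::optional<std::string>> slots = {"a", std::nullopt, "b"};

  std::vector<std::string> descriptors(2);
  int descriptor_index = 0;
  for (size_t i = 0; i < slots.size(); i++) {
    if (!slots[i].has_value()) continue;  // ignore deleted entries
    descriptors[descriptor_index++] = *slots[i];
  }
  // Mirrors DCHECK_EQ(descriptor_index, number_of_elements) above.
  assert(descriptor_index == 2);
  assert(descriptors[0] == "a" && descriptors[1] == "b");
}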
@@ -3646,7 +3777,7 @@ bool TestFastPropertiesIntegrityLevel(Map map, PropertyAttributes level) {
DCHECK(!map.IsCustomElementsReceiverMap());
DCHECK(!map.is_dictionary_map());
- DescriptorArray descriptors = map.instance_descriptors();
+ DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
for (InternalIndex i : map.IterateOwnDescriptors()) {
if (descriptors.GetKey(i).IsPrivate()) continue;
PropertyDetails details = descriptors.GetDetails(i);
@@ -3665,8 +3796,13 @@ bool TestPropertiesIntegrityLevel(JSObject object, PropertyAttributes level) {
return TestFastPropertiesIntegrityLevel(object.map(), level);
}
- return TestDictionaryPropertiesIntegrityLevel(
- object.property_dictionary(), object.GetReadOnlyRoots(), level);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ return TestDictionaryPropertiesIntegrityLevel(
+ object.property_dictionary_ordered(), object.GetReadOnlyRoots(), level);
+ } else {
+ return TestDictionaryPropertiesIntegrityLevel(
+ object.property_dictionary(), object.GetReadOnlyRoots(), level);
+ }
}
bool TestElementsIntegrityLevel(JSObject object, PropertyAttributes level) {
@@ -3964,6 +4100,11 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
JSGlobalObject::cast(*object).global_dictionary(), isolate);
JSObject::ApplyAttributesToDictionary(isolate, roots, dictionary,
attrs);
+ } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ Handle<OrderedNameDictionary> dictionary(
+ object->property_dictionary_ordered(), isolate);
+ JSObject::ApplyAttributesToDictionary(isolate, roots, dictionary,
+ attrs);
} else {
Handle<NameDictionary> dictionary(object->property_dictionary(),
isolate);
@@ -4181,7 +4322,7 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
Object JSObject::SlowReverseLookup(Object value) {
if (HasFastProperties()) {
- DescriptorArray descs = map().instance_descriptors();
+ DescriptorArray descs = map().instance_descriptors(kRelaxedLoad);
bool value_is_number = value.IsNumber();
for (InternalIndex i : map().IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
@@ -4219,6 +4360,8 @@ Object JSObject::SlowReverseLookup(Object value) {
} else if (IsJSGlobalObject()) {
return JSGlobalObject::cast(*this).global_dictionary().SlowReverseLookup(
value);
+ } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ return property_dictionary_ordered().SlowReverseLookup(GetIsolate(), value);
} else {
return property_dictionary().SlowReverseLookup(value);
}
@@ -4620,7 +4763,7 @@ static bool ShouldConvertToFastElements(JSObject object,
} else {
*new_capacity = dictionary.max_number_key() + 1;
}
- *new_capacity = Max(index + 1, *new_capacity);
+ *new_capacity = std::max(index + 1, *new_capacity);
uint32_t dictionary_size = static_cast<uint32_t>(dictionary.Capacity()) *
NumberDictionary::kEntrySize;
@@ -4881,6 +5024,10 @@ bool JSObject::IsDroppableApiWrapper() {
instance_type == JS_SPECIAL_API_OBJECT_TYPE;
}
+bool JSGlobalProxy::IsDetached() const {
+ return native_context().IsNull(GetIsolate());
+}
+
void JSGlobalObject::InvalidatePropertyCell(Handle<JSGlobalObject> global,
Handle<Name> name) {
// Regardless of whether the property is there or not invalidate
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index 11e8273fcb..cc9cc0f1dc 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -10,7 +10,6 @@
#include "src/objects/internal-index.h"
#include "src/objects/objects.h"
#include "src/objects/property-array.h"
-#include "torque-generated/class-definitions.h"
#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
@@ -29,6 +28,8 @@ class JSGlobalProxy;
class NativeContext;
class IsCompiledScope;
+#include "torque-generated/src/objects/js-objects-tq.inc"
+
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
class JSReceiver : public HeapObject {
@@ -43,9 +44,14 @@ class JSReceiver : public HeapObject {
// map.
DECL_GETTER(property_array, PropertyArray)
- // Gets slow properties for non-global objects.
+ // Gets slow properties for non-global objects (if v8_dict_mode_prototypes is
+ // not set).
DECL_GETTER(property_dictionary, NameDictionary)
+ // Gets slow properties for non-global objects (if v8_dict_mode_prototypes is
+ // set).
+ DECL_GETTER(property_dictionary_ordered, OrderedNameDictionary)
+
// Sets the properties backing store and makes sure any existing hash is moved
// to the new properties store. To clear out the properties store, pass in the
// empty_fixed_array(), the hash will be maintained in this case as well.
@@ -279,6 +285,9 @@ class JSReceiver : public HeapObject {
TORQUE_GENERATED_JS_RECEIVER_FIELDS)
bool HasProxyInPrototype(Isolate* isolate);
+ // TC39 "Dynamic Code Brand Checks"
+ bool IsCodeLike(Isolate* isolate) const;
+
OBJECT_CONSTRUCTORS(JSReceiver, HeapObject);
};
@@ -567,6 +576,7 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
static inline int GetEmbedderFieldCount(Map map);
inline int GetEmbedderFieldCount() const;
inline int GetEmbedderFieldOffset(int index);
+ inline void InitializeEmbedderField(Isolate* isolate, int index);
inline Object GetEmbedderField(int index);
inline void SetEmbedderField(int index, Object value);
inline void SetEmbedderField(int index, Smi value);
@@ -620,16 +630,14 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
const char* reason);
inline bool IsUnboxedDoubleField(FieldIndex index) const;
- inline bool IsUnboxedDoubleField(const Isolate* isolate,
- FieldIndex index) const;
+ inline bool IsUnboxedDoubleField(IsolateRoot isolate, FieldIndex index) const;
// Access fast-case object properties at index.
static Handle<Object> FastPropertyAt(Handle<JSObject> object,
Representation representation,
FieldIndex index);
inline Object RawFastPropertyAt(FieldIndex index) const;
- inline Object RawFastPropertyAt(const Isolate* isolate,
- FieldIndex index) const;
+ inline Object RawFastPropertyAt(IsolateRoot isolate, FieldIndex index) const;
inline double RawFastDoublePropertyAt(FieldIndex index) const;
inline uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const;
@@ -724,7 +732,7 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
// If a GC was caused while constructing this object, the elements pointer
// may point to a one pointer filler map. The object won't be rooted, but
// our heap verification code could stumble across it.
- V8_EXPORT_PRIVATE bool ElementsAreSafeToExamine(const Isolate* isolate) const;
+ V8_EXPORT_PRIVATE bool ElementsAreSafeToExamine(IsolateRoot isolate) const;
#endif
Object SlowReverseLookup(Object value);
@@ -938,6 +946,7 @@ class JSGlobalProxy
: public TorqueGeneratedJSGlobalProxy<JSGlobalProxy, JSSpecialObject> {
public:
inline bool IsDetachedFrom(JSGlobalObject global) const;
+ V8_EXPORT_PRIVATE bool IsDetached() const;
static int SizeWithEmbedderFields(int embedder_field_count);
diff --git a/deps/v8/src/objects/js-objects.tq b/deps/v8/src/objects/js-objects.tq
index 1139deeb3d..8dbe1dce03 100644
--- a/deps/v8/src/objects/js-objects.tq
+++ b/deps/v8/src/objects/js-objects.tq
@@ -53,19 +53,6 @@ extern class JSCustomElementsObject extends JSObject {
extern class JSSpecialObject extends JSCustomElementsObject {
}
-@highestInstanceTypeWithinParentClassRange
-extern class JSFunction extends JSFunctionOrBoundFunction {
- shared_function_info: SharedFunctionInfo;
- context: Context;
- feedback_cell: FeedbackCell;
- weak code: Code;
-
- // Space for the following field may or may not be allocated.
- @noVerifier weak prototype_or_initial_map: JSReceiver|Map;
-}
-
-type JSFunctionWithPrototypeSlot extends JSFunction;
-
macro GetDerivedMap(implicit context: Context)(
target: JSFunction, newTarget: JSReceiver): Map {
try {
@@ -128,24 +115,6 @@ extern class JSMessageObject extends JSObject {
error_level: Smi;
}
-@abstract
-@generateCppClass
-@highestInstanceTypeWithinParentClassRange
-extern class JSFunctionOrBoundFunction extends JSObject {
-}
-
-@generateCppClass
-extern class JSBoundFunction extends JSFunctionOrBoundFunction {
- // The wrapped function object.
- bound_target_function: Callable;
- // The value that is always passed as the this value when calling the wrapped
- // function.
- bound_this: JSAny|SourceTextModule;
- // A list of values whose elements are used as the first arguments to any call
- // to the wrapped function.
- bound_arguments: FixedArray;
-}
-
@generateCppClass
extern class JSDate extends JSObject {
// If one component is NaN, all of them are, indicating a NaN time value.
diff --git a/deps/v8/src/objects/js-plural-rules-inl.h b/deps/v8/src/objects/js-plural-rules-inl.h
index 60340931fe..fb4a97e476 100644
--- a/deps/v8/src/objects/js-plural-rules-inl.h
+++ b/deps/v8/src/objects/js-plural-rules-inl.h
@@ -19,6 +19,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-plural-rules-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSPluralRules)
ACCESSORS(JSPluralRules, icu_plural_rules, Managed<icu::PluralRules>,
diff --git a/deps/v8/src/objects/js-plural-rules.h b/deps/v8/src/objects/js-plural-rules.h
index eac9d5e92a..bd0bfe65f7 100644
--- a/deps/v8/src/objects/js-plural-rules.h
+++ b/deps/v8/src/objects/js-plural-rules.h
@@ -32,6 +32,8 @@ class LocalizedNumberFormatter;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-plural-rules-tq.inc"
+
class JSPluralRules
: public TorqueGeneratedJSPluralRules<JSPluralRules, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-plural-rules.tq b/deps/v8/src/objects/js-plural-rules.tq
new file mode 100644
index 0000000000..818cff5787
--- /dev/null
+++ b/deps/v8/src/objects/js-plural-rules.tq
@@ -0,0 +1,19 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-plural-rules.h'
+
+type JSPluralRulesType extends int32 constexpr 'JSPluralRules::Type';
+bitfield struct JSPluralRulesFlags extends uint31 {
+ Type: JSPluralRulesType: 1 bit; // "type" is a reserved word.
+}
+
+@generateCppClass
+extern class JSPluralRules extends JSObject {
+ locale: String;
+ flags: SmiTagged<JSPluralRulesFlags>;
+ icu_plural_rules: Foreign; // Managed<icu::PluralRules>
+ icu_number_formatter:
+ Foreign; // Managed<icu::number::LocalizedNumberFormatter>
+}
diff --git a/deps/v8/src/objects/js-promise-inl.h b/deps/v8/src/objects/js-promise-inl.h
index 601de6612b..6f1c316c48 100644
--- a/deps/v8/src/objects/js-promise-inl.h
+++ b/deps/v8/src/objects/js-promise-inl.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-promise-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSPromise)
BOOL_ACCESSORS(JSPromise, flags, has_handler, HasHandlerBit::kShift)
diff --git a/deps/v8/src/objects/js-promise.h b/deps/v8/src/objects/js-promise.h
index 2028bc3f8b..8ef663bb39 100644
--- a/deps/v8/src/objects/js-promise.h
+++ b/deps/v8/src/objects/js-promise.h
@@ -15,6 +15,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-promise-tq.inc"
+
// Representation of promise objects in the specification. Our layout of
// JSPromise differs a bit from the layout in the specification, for example
// there's only a single list of PromiseReaction objects, instead of separate
diff --git a/deps/v8/src/objects/js-proxy-inl.h b/deps/v8/src/objects/js-proxy-inl.h
index 0683cfeec8..9abe4c08d1 100644
--- a/deps/v8/src/objects/js-proxy-inl.h
+++ b/deps/v8/src/objects/js-proxy-inl.h
@@ -15,6 +15,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-proxy-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSProxy)
bool JSProxy::IsRevoked() const { return !handler().IsJSReceiver(); }
diff --git a/deps/v8/src/objects/js-proxy.h b/deps/v8/src/objects/js-proxy.h
index 1161f71486..28da615da5 100644
--- a/deps/v8/src/objects/js-proxy.h
+++ b/deps/v8/src/objects/js-proxy.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-proxy-tq.inc"
+
// The JSProxy describes EcmaScript Harmony proxies
class JSProxy : public TorqueGeneratedJSProxy<JSProxy, JSReceiver> {
public:
diff --git a/deps/v8/src/objects/js-regexp-inl.h b/deps/v8/src/objects/js-regexp-inl.h
index 48fe911ff5..8b99aa7c4c 100644
--- a/deps/v8/src/objects/js-regexp-inl.h
+++ b/deps/v8/src/objects/js-regexp-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-regexp-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExp)
OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER(JSRegExpResult, JSArray)
OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER(JSRegExpResultIndices, JSArray)
diff --git a/deps/v8/src/objects/js-regexp-string-iterator-inl.h b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
index b0d8e4c5ec..acd724de5c 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator-inl.h
+++ b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
@@ -15,6 +15,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-regexp-string-iterator-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExpStringIterator)
BOOL_ACCESSORS(JSRegExpStringIterator, flags, done, DoneBit::kShift)
diff --git a/deps/v8/src/objects/js-regexp-string-iterator.h b/deps/v8/src/objects/js-regexp-string-iterator.h
index c5f2e33421..8991db82b5 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator.h
+++ b/deps/v8/src/objects/js-regexp-string-iterator.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-regexp-string-iterator-tq.inc"
+
class JSRegExpStringIterator
: public TorqueGeneratedJSRegExpStringIterator<JSRegExpStringIterator,
JSObject> {
diff --git a/deps/v8/src/objects/js-regexp.cc b/deps/v8/src/objects/js-regexp.cc
index f0317a23f5..eb2bb1c432 100644
--- a/deps/v8/src/objects/js-regexp.cc
+++ b/deps/v8/src/objects/js-regexp.cc
@@ -171,13 +171,6 @@ uint32_t JSRegExp::BacktrackLimit() const {
// static
JSRegExp::Flags JSRegExp::FlagsFromString(Isolate* isolate,
Handle<String> flags, bool* success) {
- STATIC_ASSERT(*JSRegExp::FlagFromChar('g') == JSRegExp::kGlobal);
- STATIC_ASSERT(*JSRegExp::FlagFromChar('i') == JSRegExp::kIgnoreCase);
- STATIC_ASSERT(*JSRegExp::FlagFromChar('m') == JSRegExp::kMultiline);
- STATIC_ASSERT(*JSRegExp::FlagFromChar('s') == JSRegExp::kDotAll);
- STATIC_ASSERT(*JSRegExp::FlagFromChar('u') == JSRegExp::kUnicode);
- STATIC_ASSERT(*JSRegExp::FlagFromChar('y') == JSRegExp::kSticky);
-
int length = flags->length();
if (length == 0) {
*success = true;
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index f9618e5266..b1d1399eab 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-regexp-tq.inc"
+
// Regular expressions
// The regular expression holds a single reference to a FixedArray in
// the kDataOffset field.
@@ -40,8 +42,8 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
enum Type { NOT_COMPILED, ATOM, IRREGEXP, EXPERIMENTAL };
DEFINE_TORQUE_GENERATED_JS_REG_EXP_FLAGS()
- static constexpr base::Optional<Flag> FlagFromChar(char c) {
- STATIC_ASSERT(kFlagCount == 6);
+ static base::Optional<Flag> FlagFromChar(char c) {
+ STATIC_ASSERT(kFlagCount == 7);
// clang-format off
return c == 'g' ? base::Optional<Flag>(kGlobal)
: c == 'i' ? base::Optional<Flag>(kIgnoreCase)
@@ -49,6 +51,8 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
: c == 'y' ? base::Optional<Flag>(kSticky)
: c == 'u' ? base::Optional<Flag>(kUnicode)
: c == 's' ? base::Optional<Flag>(kDotAll)
+ : (FLAG_enable_experimental_regexp_engine && c == 'l')
+ ? base::Optional<Flag>(kLinear)
: base::Optional<Flag>();
// clang-format on
}
@@ -60,6 +64,7 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
STATIC_ASSERT(static_cast<int>(kSticky) == v8::RegExp::kSticky);
STATIC_ASSERT(static_cast<int>(kUnicode) == v8::RegExp::kUnicode);
STATIC_ASSERT(static_cast<int>(kDotAll) == v8::RegExp::kDotAll);
+ STATIC_ASSERT(static_cast<int>(kLinear) == v8::RegExp::kLinear);
STATIC_ASSERT(kFlagCount == v8::RegExp::kFlagCount);
DECL_ACCESSORS(last_index, Object)
diff --git a/deps/v8/src/objects/js-regexp.tq b/deps/v8/src/objects/js-regexp.tq
index 35e77114ba..6d3fc113cd 100644
--- a/deps/v8/src/objects/js-regexp.tq
+++ b/deps/v8/src/objects/js-regexp.tq
@@ -9,6 +9,7 @@ bitfield struct JSRegExpFlags extends uint31 {
sticky: bool: 1 bit;
unicode: bool: 1 bit;
dot_all: bool: 1 bit;
+ linear: bool: 1 bit;
}
@generateCppClass
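FlagFromChar above now maps 'l' to kLinear, but only when FLAG_enable_experimental_regexp_engine is set (which is also why the function can no longer be constexpr). A hedged sketch of how a flag string folds through it; the parse loop and bit values are illustrative, not the elided FlagsFromString body:

#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

enum Flag : uint32_t {
  kGlobal = 1 << 0, kIgnoreCase = 1 << 1, kMultiline = 1 << 2,
  kSticky = 1 << 3, kUnicode = 1 << 4, kDotAll = 1 << 5, kLinear = 1 << 6,
};

// Stand-in for the runtime flag FLAG_enable_experimental_regexp_engine.
bool enable_experimental_regexp_engine = true;

std::optional<Flag> FlagFromChar(char c) {
  switch (c) {
    case 'g': return kGlobal;
    case 'i': return kIgnoreCase;
    case 'm': return kMultiline;
    case 'y': return kSticky;
    case 'u': return kUnicode;
    case 's': return kDotAll;
    case 'l':
      // 'l' is only a valid flag while the experimental engine is enabled.
      if (enable_experimental_regexp_engine) return kLinear;
      return std::nullopt;
    default:
      return std::nullopt;
  }
}

int main() {
  uint32_t flags = 0;
  for (char c : std::string("gml")) {
    std::optional<Flag> f = FlagFromChar(c);
    if (!f) { std::cout << "invalid flag: " << c << "\n"; return 1; }
    flags |= *f;
  }
  std::cout << "flag bits: " << flags << "\n";  // kGlobal | kMultiline | kLinear
}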
diff --git a/deps/v8/src/objects/js-relative-time-format-inl.h b/deps/v8/src/objects/js-relative-time-format-inl.h
index 52d9d12261..4afdaa3088 100644
--- a/deps/v8/src/objects/js-relative-time-format-inl.h
+++ b/deps/v8/src/objects/js-relative-time-format-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-relative-time-format-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSRelativeTimeFormat)
// Base relative time format accessors.
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
index 267343aaae..a2fab9ddc8 100644
--- a/deps/v8/src/objects/js-relative-time-format.cc
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -195,9 +195,12 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
}
}
- icu::DecimalFormat* decimal_format =
- static_cast<icu::DecimalFormat*>(number_format);
- decimal_format->setMinimumGroupingDigits(-2);
+ if (number_format->getDynamicClassID() ==
+ icu::DecimalFormat::getStaticClassID()) {
+ icu::DecimalFormat* decimal_format =
+ static_cast<icu::DecimalFormat*>(number_format);
+ decimal_format->setMinimumGroupingDigits(-2);
+ }
// Change UDISPCTX_CAPITALIZATION_NONE to other values if
// ECMA402 later include option to change capitalization.
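The hunk above replaces an unconditional static_cast with ICU's own RTTI check (getDynamicClassID() against DecimalFormat::getStaticClassID()), so the DecimalFormat-only setting is skipped for other NumberFormat subclasses. The same guard-then-downcast shape, modeled with standard C++ dynamic_cast so it compiles without ICU:

#include <iostream>

struct NumberFormat { virtual ~NumberFormat() = default; };
struct DecimalFormat : NumberFormat {
  void setMinimumGroupingDigits(int n) {
    std::cout << "min grouping digits = " << n << "\n";
  }
};

void ConfigureGrouping(NumberFormat* number_format) {
  // Apply the DecimalFormat-specific setting only when the object really
  // is a DecimalFormat; any other subclass is left untouched.
  if (auto* decimal = dynamic_cast<DecimalFormat*>(number_format)) {
    decimal->setMinimumGroupingDigits(-2);
  }
}

int main() {
  DecimalFormat df;
  ConfigureGrouping(&df);  // prints; a plain NumberFormat would be skipped
}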
diff --git a/deps/v8/src/objects/js-relative-time-format.h b/deps/v8/src/objects/js-relative-time-format.h
index 79e079b05b..444082cf0e 100644
--- a/deps/v8/src/objects/js-relative-time-format.h
+++ b/deps/v8/src/objects/js-relative-time-format.h
@@ -29,6 +29,8 @@ class RelativeDateTimeFormatter;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-relative-time-format-tq.inc"
+
class JSRelativeTimeFormat
: public TorqueGeneratedJSRelativeTimeFormat<JSRelativeTimeFormat,
JSObject> {
diff --git a/deps/v8/src/objects/js-relative-time-format.tq b/deps/v8/src/objects/js-relative-time-format.tq
new file mode 100644
index 0000000000..70b5e82245
--- /dev/null
+++ b/deps/v8/src/objects/js-relative-time-format.tq
@@ -0,0 +1,19 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-relative-time-format.h'
+
+type JSRelativeTimeFormatNumeric extends int32
+constexpr 'JSRelativeTimeFormat::Numeric';
+bitfield struct JSRelativeTimeFormatFlags extends uint31 {
+ numeric: JSRelativeTimeFormatNumeric: 1 bit;
+}
+
+@generateCppClass
+extern class JSRelativeTimeFormat extends JSObject {
+ locale: String;
+ numberingSystem: String;
+ icu_formatter: Foreign; // Managed<icu::RelativeDateTimeFormatter>
+ flags: SmiTagged<JSRelativeTimeFormatFlags>;
+}
diff --git a/deps/v8/src/objects/js-segment-iterator-inl.h b/deps/v8/src/objects/js-segment-iterator-inl.h
index e6a1c4a53d..979a1c796b 100644
--- a/deps/v8/src/objects/js-segment-iterator-inl.h
+++ b/deps/v8/src/objects/js-segment-iterator-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-segment-iterator-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSSegmentIterator)
// Base segment iterator accessors.
diff --git a/deps/v8/src/objects/js-segment-iterator.h b/deps/v8/src/objects/js-segment-iterator.h
index 45e03c06fa..bcbc22df37 100644
--- a/deps/v8/src/objects/js-segment-iterator.h
+++ b/deps/v8/src/objects/js-segment-iterator.h
@@ -27,6 +27,8 @@ class UnicodeString;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-segment-iterator-tq.inc"
+
class JSSegmentIterator
: public TorqueGeneratedJSSegmentIterator<JSSegmentIterator, JSObject> {
public:
diff --git a/deps/v8/src/objects/js-segment-iterator.tq b/deps/v8/src/objects/js-segment-iterator.tq
new file mode 100644
index 0000000000..502070cefd
--- /dev/null
+++ b/deps/v8/src/objects/js-segment-iterator.tq
@@ -0,0 +1,16 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-segment-iterator.h'
+
+bitfield struct JSSegmentIteratorFlags extends uint31 {
+ granularity: JSSegmenterGranularity: 2 bit;
+}
+
+@generateCppClass
+extern class JSSegmentIterator extends JSObject {
+ icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
+ unicode_string: Foreign; // Managed<icu::UnicodeString>
+ flags: SmiTagged<JSSegmentIteratorFlags>;
+}
diff --git a/deps/v8/src/objects/js-segmenter-inl.h b/deps/v8/src/objects/js-segmenter-inl.h
index 98bc2e863b..e6744268c4 100644
--- a/deps/v8/src/objects/js-segmenter-inl.h
+++ b/deps/v8/src/objects/js-segmenter-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-segmenter-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSSegmenter)
// Base segmenter accessors.
diff --git a/deps/v8/src/objects/js-segmenter.h b/deps/v8/src/objects/js-segmenter.h
index e462042711..512625d204 100644
--- a/deps/v8/src/objects/js-segmenter.h
+++ b/deps/v8/src/objects/js-segmenter.h
@@ -28,6 +28,8 @@ class BreakIterator;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-segmenter-tq.inc"
+
class JSSegmenter : public TorqueGeneratedJSSegmenter<JSSegmenter, JSObject> {
public:
// Creates segmenter object with properties derived from input locales and
diff --git a/deps/v8/src/objects/js-segmenter.tq b/deps/v8/src/objects/js-segmenter.tq
new file mode 100644
index 0000000000..fdd888b428
--- /dev/null
+++ b/deps/v8/src/objects/js-segmenter.tq
@@ -0,0 +1,18 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-segmenter.h'
+
+type JSSegmenterGranularity extends int32
+constexpr 'JSSegmenter::Granularity';
+bitfield struct JSSegmenterFlags extends uint31 {
+ granularity: JSSegmenterGranularity: 2 bit;
+}
+
+@generateCppClass
+extern class JSSegmenter extends JSObject {
+ locale: String;
+ icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
+ flags: SmiTagged<JSSegmenterFlags>;
+}
diff --git a/deps/v8/src/objects/js-segments-inl.h b/deps/v8/src/objects/js-segments-inl.h
index ceabd6741d..37fc4964e0 100644
--- a/deps/v8/src/objects/js-segments-inl.h
+++ b/deps/v8/src/objects/js-segments-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-segments-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(JSSegments)
// Base segments accessors.
diff --git a/deps/v8/src/objects/js-segments.h b/deps/v8/src/objects/js-segments.h
index b33323d6f9..30c387fea6 100644
--- a/deps/v8/src/objects/js-segments.h
+++ b/deps/v8/src/objects/js-segments.h
@@ -27,6 +27,8 @@ class UnicodeString;
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-segments-tq.inc"
+
class JSSegments : public TorqueGeneratedJSSegments<JSSegments, JSObject> {
public:
// ecma402 #sec-createsegmentsobject
diff --git a/deps/v8/src/objects/js-segments.tq b/deps/v8/src/objects/js-segments.tq
new file mode 100644
index 0000000000..f891e26ca0
--- /dev/null
+++ b/deps/v8/src/objects/js-segments.tq
@@ -0,0 +1,16 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-segments.h'
+
+bitfield struct JSSegmentsFlags extends uint31 {
+ granularity: JSSegmenterGranularity: 2 bit;
+}
+
+@generateCppClass
+extern class JSSegments extends JSObject {
+ icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
+ unicode_string: Foreign; // Managed<icu::UnicodeString>
+ flags: SmiTagged<JSSegmentsFlags>;
+}
diff --git a/deps/v8/src/objects/js-weak-refs-inl.h b/deps/v8/src/objects/js-weak-refs-inl.h
index 8b1bb6eaec..193544e1c2 100644
--- a/deps/v8/src/objects/js-weak-refs-inl.h
+++ b/deps/v8/src/objects/js-weak-refs-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-weak-refs-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(WeakCell)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakRef)
OBJECT_CONSTRUCTORS_IMPL(JSFinalizationRegistry, JSObject)
diff --git a/deps/v8/src/objects/js-weak-refs.h b/deps/v8/src/objects/js-weak-refs.h
index 2aa0a4ff2d..300673381a 100644
--- a/deps/v8/src/objects/js-weak-refs.h
+++ b/deps/v8/src/objects/js-weak-refs.h
@@ -17,6 +17,8 @@ namespace internal {
class NativeContext;
class WeakCell;
+#include "torque-generated/src/objects/js-weak-refs-tq.inc"
+
// FinalizationRegistry object from the JS Weak Refs spec proposal:
// https://github.com/tc39/proposal-weakrefs
class JSFinalizationRegistry : public JSObject {
diff --git a/deps/v8/src/objects/keys.cc b/deps/v8/src/objects/keys.cc
index ff6ea1fb57..ba5fa9c928 100644
--- a/deps/v8/src/objects/keys.cc
+++ b/deps/v8/src/objects/keys.cc
@@ -5,6 +5,7 @@
#include "src/objects/keys.h"
#include "src/api/api-arguments-inl.h"
+#include "src/common/globals.h"
#include "src/execution/isolate-inl.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
@@ -17,6 +18,7 @@
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/property-descriptor.h"
#include "src/objects/prototype.h"
+#include "src/objects/slots-atomic-inl.h"
#include "src/utils/identity-map.h"
#include "src/zone/zone-hashmap.h"
@@ -67,7 +69,8 @@ static Handle<FixedArray> CombineKeys(Isolate* isolate,
int nof_descriptors = map.NumberOfOwnDescriptors();
if (nof_descriptors == 0 && !may_have_elements) return prototype_chain_keys;
- Handle<DescriptorArray> descs(map.instance_descriptors(), isolate);
+ Handle<DescriptorArray> descs(map.instance_descriptors(kRelaxedLoad),
+ isolate);
int own_keys_length = own_keys.is_null() ? 0 : own_keys->length();
Handle<FixedArray> combined_keys = isolate->factory()->NewFixedArray(
own_keys_length + prototype_chain_keys_length);
@@ -369,8 +372,8 @@ Handle<FixedArray> ReduceFixedArrayTo(Isolate* isolate,
Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
Handle<JSObject> object) {
Handle<Map> map(object->map(), isolate);
- Handle<FixedArray> keys(map->instance_descriptors().enum_cache().keys(),
- isolate);
+ Handle<FixedArray> keys(
+ map->instance_descriptors(kRelaxedLoad).enum_cache().keys(), isolate);
// Check if the {map} has a valid enum length, which implies that it
// must have a valid enum cache as well.
@@ -395,7 +398,7 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
}
Handle<DescriptorArray> descriptors =
- Handle<DescriptorArray>(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray>(map->instance_descriptors(kRelaxedLoad), isolate);
isolate->counters()->enum_cache_misses()->Increment();
// Create the keys array.
@@ -651,14 +654,11 @@ bool FastKeyAccumulator::TryPrototypeInfoCache(Handle<JSReceiver> receiver) {
return true;
}
-namespace {
-
-enum IndexedOrNamed { kIndexed, kNamed };
-
-V8_WARN_UNUSED_RESULT ExceptionStatus FilterForEnumerableProperties(
+V8_WARN_UNUSED_RESULT ExceptionStatus
+KeyAccumulator::FilterForEnumerableProperties(
Handle<JSReceiver> receiver, Handle<JSObject> object,
- Handle<InterceptorInfo> interceptor, KeyAccumulator* accumulator,
- Handle<JSObject> result, IndexedOrNamed type) {
+ Handle<InterceptorInfo> interceptor, Handle<JSObject> result,
+ IndexedOrNamed type) {
DCHECK(result->IsJSArray() || result->HasSloppyArgumentsElements());
ElementsAccessor* accessor = result->GetElementsAccessor();
@@ -667,8 +667,8 @@ V8_WARN_UNUSED_RESULT ExceptionStatus FilterForEnumerableProperties(
if (!accessor->HasEntry(*result, entry)) continue;
// args are invalid after args.Call(), create a new one in every iteration.
- PropertyCallbackArguments args(accumulator->isolate(), interceptor->data(),
- *receiver, *object, Just(kDontThrow));
+ PropertyCallbackArguments args(isolate_, interceptor->data(), *receiver,
+ *object, Just(kDontThrow));
Handle<Object> element = accessor->Get(result, entry);
Handle<Object> attributes;
@@ -686,8 +686,7 @@ V8_WARN_UNUSED_RESULT ExceptionStatus FilterForEnumerableProperties(
int32_t value;
CHECK(attributes->ToInt32(&value));
if ((value & DONT_ENUM) == 0) {
- RETURN_FAILURE_IF_NOT_SUCCESSFUL(
- accumulator->AddKey(element, DO_NOT_CONVERT));
+ RETURN_FAILURE_IF_NOT_SUCCESSFUL(AddKey(element, DO_NOT_CONVERT));
}
}
}
@@ -695,17 +694,14 @@ V8_WARN_UNUSED_RESULT ExceptionStatus FilterForEnumerableProperties(
}
// Returns |true| on success, |nothing| on exception.
-Maybe<bool> CollectInterceptorKeysInternal(Handle<JSReceiver> receiver,
- Handle<JSObject> object,
- Handle<InterceptorInfo> interceptor,
- KeyAccumulator* accumulator,
- IndexedOrNamed type) {
- Isolate* isolate = accumulator->isolate();
- PropertyCallbackArguments enum_args(isolate, interceptor->data(), *receiver,
+Maybe<bool> KeyAccumulator::CollectInterceptorKeysInternal(
+ Handle<JSReceiver> receiver, Handle<JSObject> object,
+ Handle<InterceptorInfo> interceptor, IndexedOrNamed type) {
+ PropertyCallbackArguments enum_args(isolate_, interceptor->data(), *receiver,
*object, Just(kDontThrow));
Handle<JSObject> result;
- if (!interceptor->enumerator().IsUndefined(isolate)) {
+ if (!interceptor->enumerator().IsUndefined(isolate_)) {
if (type == kIndexed) {
result = enum_args.CallIndexedEnumerator(interceptor);
} else {
@@ -713,25 +709,23 @@ Maybe<bool> CollectInterceptorKeysInternal(Handle<JSReceiver> receiver,
result = enum_args.CallNamedEnumerator(interceptor);
}
}
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate_, Nothing<bool>());
if (result.is_null()) return Just(true);
- if ((accumulator->filter() & ONLY_ENUMERABLE) &&
- !interceptor->query().IsUndefined(isolate)) {
+ if ((filter_ & ONLY_ENUMERABLE) &&
+ !interceptor->query().IsUndefined(isolate_)) {
RETURN_NOTHING_IF_NOT_SUCCESSFUL(FilterForEnumerableProperties(
- receiver, object, interceptor, accumulator, result, type));
+ receiver, object, interceptor, result, type));
} else {
- RETURN_NOTHING_IF_NOT_SUCCESSFUL(accumulator->AddKeys(
+ RETURN_NOTHING_IF_NOT_SUCCESSFUL(AddKeys(
result, type == kIndexed ? CONVERT_TO_ARRAY_INDEX : DO_NOT_CONVERT));
}
return Just(true);
}
-Maybe<bool> CollectInterceptorKeys(Handle<JSReceiver> receiver,
- Handle<JSObject> object,
- KeyAccumulator* accumulator,
- IndexedOrNamed type) {
- Isolate* isolate = accumulator->isolate();
+Maybe<bool> KeyAccumulator::CollectInterceptorKeys(Handle<JSReceiver> receiver,
+ Handle<JSObject> object,
+ IndexedOrNamed type) {
if (type == kIndexed) {
if (!object->HasIndexedInterceptor()) return Just(true);
} else {
@@ -740,17 +734,13 @@ Maybe<bool> CollectInterceptorKeys(Handle<JSReceiver> receiver,
Handle<InterceptorInfo> interceptor(type == kIndexed
? object->GetIndexedInterceptor()
: object->GetNamedInterceptor(),
- isolate);
- if ((accumulator->filter() & ONLY_ALL_CAN_READ) &&
- !interceptor->all_can_read()) {
+ isolate_);
+ if ((filter() & ONLY_ALL_CAN_READ) && !interceptor->all_can_read()) {
return Just(true);
}
- return CollectInterceptorKeysInternal(receiver, object, interceptor,
- accumulator, type);
+ return CollectInterceptorKeysInternal(receiver, object, interceptor, type);
}
-} // namespace
-
Maybe<bool> KeyAccumulator::CollectOwnElementIndices(
Handle<JSReceiver> receiver, Handle<JSObject> object) {
if (filter_ & SKIP_STRINGS || skip_indices_) return Just(true);
@@ -758,7 +748,7 @@ Maybe<bool> KeyAccumulator::CollectOwnElementIndices(
ElementsAccessor* accessor = object->GetElementsAccessor();
RETURN_NOTHING_IF_NOT_SUCCESSFUL(
accessor->CollectElementIndices(object, this));
- return CollectInterceptorKeys(receiver, object, this, kIndexed);
+ return CollectInterceptorKeys(receiver, object, kIndexed);
}
namespace {
@@ -810,6 +800,93 @@ base::Optional<int> CollectOwnPropertyNamesInternal(
return first_skipped;
}
+// Logic shared between different specializations of CopyEnumKeysTo.
+template <typename Dictionary>
+void CommonCopyEnumKeysTo(Isolate* isolate, Handle<Dictionary> dictionary,
+ Handle<FixedArray> storage, KeyCollectionMode mode,
+ KeyAccumulator* accumulator) {
+ DCHECK_IMPLIES(mode != KeyCollectionMode::kOwnOnly, accumulator != nullptr);
+ int length = storage->length();
+ int properties = 0;
+ ReadOnlyRoots roots(isolate);
+
+ AllowHeapAllocation allow_gc;
+ for (InternalIndex i : dictionary->IterateEntries()) {
+ Object key;
+ if (!dictionary->ToKey(roots, i, &key)) continue;
+ bool is_shadowing_key = false;
+ if (key.IsSymbol()) continue;
+ PropertyDetails details = dictionary->DetailsAt(i);
+ if (details.IsDontEnum()) {
+ if (mode == KeyCollectionMode::kIncludePrototypes) {
+ is_shadowing_key = true;
+ } else {
+ continue;
+ }
+ }
+ if (is_shadowing_key) {
+ // This might allocate, but {key} is not used afterwards.
+ accumulator->AddShadowingKey(key, &allow_gc);
+ continue;
+ } else {
+ if (Dictionary::kIsOrderedDictionaryType) {
+ storage->set(properties, dictionary->ValueAt(i));
+ } else {
+ // If the dictionary does not store elements in enumeration order,
+ // we need to sort it afterwards in CopyEnumKeysTo. To enable this we
+ // need to store indices at this point, rather than the values at the
+ // given indices.
+ storage->set(properties, Smi::FromInt(i.as_int()));
+ }
+ }
+ properties++;
+ if (mode == KeyCollectionMode::kOwnOnly && properties == length) break;
+ }
+
+ CHECK_EQ(length, properties);
+}
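
[Editor's note] The AllowHeapAllocation / DisallowHeapAllocation objects threaded through CommonCopyEnumKeysTo are RAII assertion scopes: they do not allocate or collect anything themselves, but mark regions where GC is (or is not) permitted so allocation sites can assert the invariant. A minimal sketch of the pattern, using hypothetical names rather than V8's actual scope classes:

#include <cassert>
#include <cstddef>
#include <new>

// Editor's sketch, not V8 code: a thread-local depth counter maintained by
// RAII scopes, in the spirit of DisallowHeapAllocation/AllowHeapAllocation.
thread_local int disallow_depth = 0;

struct DisallowAlloc {
  DisallowAlloc() { ++disallow_depth; }
  ~DisallowAlloc() { --disallow_depth; }
};

// Re-enables allocation inside an enclosing DisallowAlloc region, like the
// AllowHeapAllocation reference passed to AddShadowingKey above.
struct AllowAlloc {
  AllowAlloc() : saved_(disallow_depth) { disallow_depth = 0; }
  ~AllowAlloc() { disallow_depth = saved_; }
  int saved_;
};

void* Allocate(std::size_t bytes) {
  assert(disallow_depth == 0 && "allocation inside a no-GC scope");
  return ::operator new(bytes);
}
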
+
+// Copies enumerable keys to preallocated fixed array.
+// Does not throw for uninitialized exports in module namespace objects, so
+// this has to be checked separately.
+template <typename Dictionary>
+void CopyEnumKeysTo(Isolate* isolate, Handle<Dictionary> dictionary,
+ Handle<FixedArray> storage, KeyCollectionMode mode,
+ KeyAccumulator* accumulator) {
+ STATIC_ASSERT(!Dictionary::kIsOrderedDictionaryType);
+
+ CommonCopyEnumKeysTo<Dictionary>(isolate, dictionary, storage, mode,
+ accumulator);
+
+ int length = storage->length();
+
+ DisallowHeapAllocation no_gc;
+ Dictionary raw_dictionary = *dictionary;
+ FixedArray raw_storage = *storage;
+ EnumIndexComparator<Dictionary> cmp(raw_dictionary);
+ // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
+ // store operations that are safe for concurrent marking.
+ AtomicSlot start(storage->GetFirstElementAddress());
+ std::sort(start, start + length, cmp);
+ for (int i = 0; i < length; i++) {
+ InternalIndex index(Smi::ToInt(raw_storage.get(i)));
+ raw_storage.set(i, raw_dictionary.NameAt(index));
+ }
+}
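
[Editor's note] CopyEnumKeysTo for unordered dictionaries is a two-phase algorithm: it first stores Smi-encoded entry indices into |storage|, sorts them by enumeration index (through AtomicSlot, so the in-place std::sort stays safe under concurrent marking), and only then overwrites each slot with the name at that entry. A self-contained sketch of the same shape, with a hypothetical PropertyEntry record and without the atomic-slot machinery:

#include <algorithm>
#include <cstddef>
#include <string>
#include <vector>

// Editor's sketch, not V8 code.
struct PropertyEntry {
  std::string name;
  int enum_index;  // order of property creation
};

std::vector<std::string> EnumKeysInCreationOrder(
    const std::vector<PropertyEntry>& dict) {
  // Phase 1: record entry indices and sort them by enumeration index.
  std::vector<int> storage(dict.size());
  for (std::size_t i = 0; i < dict.size(); ++i)
    storage[i] = static_cast<int>(i);
  std::sort(storage.begin(), storage.end(), [&](int a, int b) {
    return dict[a].enum_index < dict[b].enum_index;
  });
  // Phase 2: replace each stored index with the key it refers to.
  std::vector<std::string> keys(storage.size());
  for (std::size_t i = 0; i < storage.size(); ++i)
    keys[i] = dict[storage[i]].name;
  return keys;
}
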
+
+template <>
+void CopyEnumKeysTo(Isolate* isolate, Handle<OrderedNameDictionary> dictionary,
+ Handle<FixedArray> storage, KeyCollectionMode mode,
+ KeyAccumulator* accumulator) {
+ CommonCopyEnumKeysTo<OrderedNameDictionary>(isolate, dictionary, storage,
+ mode, accumulator);
+
+  // No need to sort: CommonCopyEnumKeysTo on OrderedNameDictionary adds
+  // entries to |storage| in the dictionary's insertion order. Further,
+  // because OrderedNameDictionary::kIsOrderedDictionaryType is true,
+  // |storage| contains the actual values from |dictionary| rather than
+  // indices.
+}
+
template <class T>
Handle<FixedArray> GetOwnEnumPropertyDictionaryKeys(Isolate* isolate,
KeyCollectionMode mode,
@@ -822,9 +899,83 @@ Handle<FixedArray> GetOwnEnumPropertyDictionaryKeys(Isolate* isolate,
}
int length = dictionary->NumberOfEnumerableProperties();
Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
- T::CopyEnumKeysTo(isolate, dictionary, storage, mode, accumulator);
+ CopyEnumKeysTo(isolate, dictionary, storage, mode, accumulator);
return storage;
}
+
+// Collect the keys from |dictionary| into |keys|, in ascending chronological
+// order of property creation.
+template <typename Dictionary>
+ExceptionStatus CollectKeysFromDictionary(Handle<Dictionary> dictionary,
+ KeyAccumulator* keys) {
+ Isolate* isolate = keys->isolate();
+ ReadOnlyRoots roots(isolate);
+ // TODO(jkummerow): Consider using a std::unique_ptr<InternalIndex[]> instead.
+ Handle<FixedArray> array =
+ isolate->factory()->NewFixedArray(dictionary->NumberOfElements());
+ int array_size = 0;
+ PropertyFilter filter = keys->filter();
+ // Handle enumerable strings in CopyEnumKeysTo.
+ DCHECK_NE(keys->filter(), ENUMERABLE_STRINGS);
+ {
+ DisallowHeapAllocation no_gc;
+ for (InternalIndex i : dictionary->IterateEntries()) {
+ Object key;
+ Dictionary raw_dictionary = *dictionary;
+ if (!raw_dictionary.ToKey(roots, i, &key)) continue;
+ if (key.FilterKey(filter)) continue;
+ PropertyDetails details = raw_dictionary.DetailsAt(i);
+ if ((details.attributes() & filter) != 0) {
+ AllowHeapAllocation gc;
+ // This might allocate, but {key} is not used afterwards.
+ keys->AddShadowingKey(key, &gc);
+ continue;
+ }
+ if (filter & ONLY_ALL_CAN_READ) {
+ if (details.kind() != kAccessor) continue;
+ Object accessors = raw_dictionary.ValueAt(i);
+ if (!accessors.IsAccessorInfo()) continue;
+ if (!AccessorInfo::cast(accessors).all_can_read()) continue;
+ }
+ // TODO(emrich): consider storing keys instead of indices into the array
+ // in case of ordered dictionary type.
+ array->set(array_size++, Smi::FromInt(i.as_int()));
+ }
+ if (!Dictionary::kIsOrderedDictionaryType) {
+      // Sorting is only needed for an unordered dictionary;
+      // otherwise we traversed the elements in insertion order.
+
+ EnumIndexComparator<Dictionary> cmp(*dictionary);
+ // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
+ // store operations that are safe for concurrent marking.
+ AtomicSlot start(array->GetFirstElementAddress());
+ std::sort(start, start + array_size, cmp);
+ }
+ }
+
+ bool has_seen_symbol = false;
+ for (int i = 0; i < array_size; i++) {
+ InternalIndex index(Smi::ToInt(array->get(i)));
+ Object key = dictionary->NameAt(index);
+ if (key.IsSymbol()) {
+ has_seen_symbol = true;
+ continue;
+ }
+ ExceptionStatus status = keys->AddKey(key, DO_NOT_CONVERT);
+ if (!status) return status;
+ }
+ if (has_seen_symbol) {
+ for (int i = 0; i < array_size; i++) {
+ InternalIndex index(Smi::ToInt(array->get(i)));
+ Object key = dictionary->NameAt(index);
+ if (!key.IsSymbol()) continue;
+ ExceptionStatus status = keys->AddKey(key, DO_NOT_CONVERT);
+ if (!status) return status;
+ }
+ }
+ return ExceptionStatus::kSuccess;
+}
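
[Editor's note] CollectKeysFromDictionary's tail is worth calling out: after sorting, it emits string-named keys in a first pass and defers symbols to a second pass, which yields the strings-before-symbols order that [[OwnPropertyKeys]] requires. A sketch of the two-pass split over a hypothetical Key record:

#include <string>
#include <vector>

// Editor's sketch, not V8 code.
struct Key {
  std::string name;
  bool is_symbol;
};

std::vector<Key> OrderStringsBeforeSymbols(const std::vector<Key>& sorted) {
  std::vector<Key> out;
  out.reserve(sorted.size());
  bool has_seen_symbol = false;
  for (const Key& k : sorted) {  // Pass 1: string-named keys only.
    if (k.is_symbol) {
      has_seen_symbol = true;
      continue;
    }
    out.push_back(k);
  }
  if (has_seen_symbol) {
    for (const Key& k : sorted) {  // Pass 2: symbols only.
      if (k.is_symbol) out.push_back(k);
    }
  }
  return out;
}
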
+
} // namespace
Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
@@ -840,8 +991,8 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
if (enum_keys->length() != nof_descriptors) {
if (map.prototype(isolate_) != ReadOnlyRoots(isolate_).null_value()) {
AllowHeapAllocation allow_gc;
- Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(map.instance_descriptors(), isolate_);
+ Handle<DescriptorArray> descs = Handle<DescriptorArray>(
+ map.instance_descriptors(kRelaxedLoad), isolate_);
for (InternalIndex i : InternalIndex::Range(nof_descriptors)) {
PropertyDetails details = descs->GetDetails(i);
if (!details.IsDontEnum()) continue;
@@ -853,6 +1004,9 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
enum_keys = GetOwnEnumPropertyDictionaryKeys(
isolate_, mode_, this, object,
JSGlobalObject::cast(*object).global_dictionary());
+ } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ enum_keys = GetOwnEnumPropertyDictionaryKeys(
+ isolate_, mode_, this, object, object->property_dictionary_ordered());
} else {
enum_keys = GetOwnEnumPropertyDictionaryKeys(
isolate_, mode_, this, object, object->property_dictionary());
@@ -873,8 +1027,8 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
} else {
if (object->HasFastProperties()) {
int limit = object->map().NumberOfOwnDescriptors();
- Handle<DescriptorArray> descs(object->map().instance_descriptors(),
- isolate_);
+ Handle<DescriptorArray> descs(
+ object->map().instance_descriptors(kRelaxedLoad), isolate_);
// First collect the strings,
base::Optional<int> first_symbol =
CollectOwnPropertyNamesInternal<true>(object, this, descs, 0, limit);
@@ -885,16 +1039,19 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
object, this, descs, first_symbol.value(), limit));
}
} else if (object->IsJSGlobalObject()) {
- RETURN_NOTHING_IF_NOT_SUCCESSFUL(GlobalDictionary::CollectKeysTo(
+ RETURN_NOTHING_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
handle(JSGlobalObject::cast(*object).global_dictionary(), isolate_),
this));
+ } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ RETURN_NOTHING_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
+ handle(object->property_dictionary_ordered(), isolate_), this));
} else {
- RETURN_NOTHING_IF_NOT_SUCCESSFUL(NameDictionary::CollectKeysTo(
+ RETURN_NOTHING_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
handle(object->property_dictionary(), isolate_), this));
}
}
// Add the property keys from the interceptor.
- return CollectInterceptorKeys(receiver, object, this, kNamed);
+ return CollectInterceptorKeys(receiver, object, kNamed);
}
ExceptionStatus KeyAccumulator::CollectPrivateNames(Handle<JSReceiver> receiver,
@@ -902,15 +1059,18 @@ ExceptionStatus KeyAccumulator::CollectPrivateNames(Handle<JSReceiver> receiver,
DCHECK_EQ(mode_, KeyCollectionMode::kOwnOnly);
if (object->HasFastProperties()) {
int limit = object->map().NumberOfOwnDescriptors();
- Handle<DescriptorArray> descs(object->map().instance_descriptors(),
- isolate_);
+ Handle<DescriptorArray> descs(
+ object->map().instance_descriptors(kRelaxedLoad), isolate_);
CollectOwnPropertyNamesInternal<false>(object, this, descs, 0, limit);
} else if (object->IsJSGlobalObject()) {
- RETURN_FAILURE_IF_NOT_SUCCESSFUL(GlobalDictionary::CollectKeysTo(
+ RETURN_FAILURE_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
handle(JSGlobalObject::cast(*object).global_dictionary(), isolate_),
this));
+ } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ RETURN_FAILURE_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
+ handle(object->property_dictionary_ordered(), isolate_), this));
} else {
- RETURN_FAILURE_IF_NOT_SUCCESSFUL(NameDictionary::CollectKeysTo(
+ RETURN_FAILURE_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
handle(object->property_dictionary(), isolate_), this));
}
return ExceptionStatus::kSuccess;
@@ -925,7 +1085,7 @@ Maybe<bool> KeyAccumulator::CollectAccessCheckInterceptorKeys(
handle(InterceptorInfo::cast(
access_check_info->indexed_interceptor()),
isolate_),
- this, kIndexed)),
+ kIndexed)),
Nothing<bool>());
}
MAYBE_RETURN(
@@ -933,7 +1093,7 @@ Maybe<bool> KeyAccumulator::CollectAccessCheckInterceptorKeys(
receiver, object,
handle(InterceptorInfo::cast(access_check_info->named_interceptor()),
isolate_),
- this, kNamed)),
+ kNamed)),
Nothing<bool>());
return Just(true);
}
@@ -991,6 +1151,10 @@ Handle<FixedArray> KeyAccumulator::GetOwnEnumPropertyKeys(
return GetOwnEnumPropertyDictionaryKeys(
isolate, KeyCollectionMode::kOwnOnly, nullptr, object,
JSGlobalObject::cast(*object).global_dictionary());
+ } else if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ return GetOwnEnumPropertyDictionaryKeys(
+ isolate, KeyCollectionMode::kOwnOnly, nullptr, object,
+ object->property_dictionary_ordered());
} else {
return GetOwnEnumPropertyDictionaryKeys(
isolate, KeyCollectionMode::kOwnOnly, nullptr, object,
@@ -1021,8 +1185,13 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
Handle<JSProxy> proxy) {
STACK_CHECK(isolate_, Nothing<bool>());
if (filter_ == PRIVATE_NAMES_ONLY) {
- RETURN_NOTHING_IF_NOT_SUCCESSFUL(NameDictionary::CollectKeysTo(
- handle(proxy->property_dictionary(), isolate_), this));
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ RETURN_NOTHING_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
+ handle(proxy->property_dictionary_ordered(), isolate_), this));
+ } else {
+ RETURN_NOTHING_IF_NOT_SUCCESSFUL(CollectKeysFromDictionary(
+ handle(proxy->property_dictionary(), isolate_), this));
+ }
return Just(true);
}
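
[Editor's note] A pattern that recurs throughout this patch: call sites branch on V8_DICT_MODE_PROTOTYPES_BOOL between NameDictionary (unordered, with an explicit enumeration index per entry) and OrderedNameDictionary (iteration order is insertion order). A sketch of that dispatch shape under a hypothetical stand-in flag; since the flag is a compile-time constant, the dead branch folds away:

// Editor's sketch, not V8 code.
constexpr bool kDictModePrototypes = false;  // stand-in for the V8 flag

template <typename Unordered, typename Ordered, typename Fn>
void WithPropertyDictionary(Unordered& unordered, Ordered& ordered, Fn fn) {
  if constexpr (kDictModePrototypes) {
    fn(ordered);    // insertion-ordered representation
  } else {
    fn(unordered);  // unordered, enumeration index stored per entry
  }
}
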
diff --git a/deps/v8/src/objects/keys.h b/deps/v8/src/objects/keys.h
index d0c27b2a4d..92b1fd783e 100644
--- a/deps/v8/src/objects/keys.h
+++ b/deps/v8/src/objects/keys.h
@@ -13,6 +13,7 @@ namespace v8 {
namespace internal {
class JSProxy;
+class FastKeyAccumulator;
enum AddKeyConversion { DO_NOT_CONVERT, CONVERT_TO_ARRAY_INDEX };
@@ -38,6 +39,8 @@ class KeyAccumulator final {
PropertyFilter filter)
: isolate_(isolate), mode_(mode), filter_(filter) {}
~KeyAccumulator() = default;
+ KeyAccumulator(const KeyAccumulator&) = delete;
+ KeyAccumulator& operator=(const KeyAccumulator&) = delete;
static MaybeHandle<FixedArray> GetKeys(
Handle<JSReceiver> object, KeyCollectionMode mode, PropertyFilter filter,
@@ -48,15 +51,6 @@ class KeyAccumulator final {
GetKeysConversion convert = GetKeysConversion::kKeepNumbers);
Maybe<bool> CollectKeys(Handle<JSReceiver> receiver,
Handle<JSReceiver> object);
- Maybe<bool> CollectOwnElementIndices(Handle<JSReceiver> receiver,
- Handle<JSObject> object);
- Maybe<bool> CollectOwnPropertyNames(Handle<JSReceiver> receiver,
- Handle<JSObject> object);
- V8_WARN_UNUSED_RESULT ExceptionStatus
- CollectPrivateNames(Handle<JSReceiver> receiver, Handle<JSObject> object);
- Maybe<bool> CollectAccessCheckInterceptorKeys(
- Handle<AccessCheckInfo> access_check_info, Handle<JSReceiver> receiver,
- Handle<JSObject> object);
// Might return directly the object's enum_cache, copy the result before using
// as an elements backing store for a JSObject.
@@ -69,10 +63,6 @@ class KeyAccumulator final {
AddKey(Object key, AddKeyConversion convert = DO_NOT_CONVERT);
V8_WARN_UNUSED_RESULT ExceptionStatus
AddKey(Handle<Object> key, AddKeyConversion convert = DO_NOT_CONVERT);
- V8_WARN_UNUSED_RESULT ExceptionStatus AddKeys(Handle<FixedArray> array,
- AddKeyConversion convert);
- V8_WARN_UNUSED_RESULT ExceptionStatus AddKeys(Handle<JSObject> array_like,
- AddKeyConversion convert);
// Jump to the next level, pushing the current |levelLength_| to
// |levelLengths_| and adding a new list to |elements_|.
@@ -82,43 +72,74 @@ class KeyAccumulator final {
// The collection mode defines whether we collect the keys from the prototype
// chain or only look at the receiver.
KeyCollectionMode mode() { return mode_; }
- // In case of for-in loops we have to treat JSProxy keys differently and
- // deduplicate them. Additionally we convert JSProxy keys back to array
- // indices.
- void set_is_for_in(bool value) { is_for_in_ = value; }
void set_skip_indices(bool value) { skip_indices_ = value; }
- void set_first_prototype_map(Handle<Map> value) {
- first_prototype_map_ = value;
- }
- void set_try_prototype_info_cache(bool value) {
- try_prototype_info_cache_ = value;
- }
- void set_receiver(Handle<JSReceiver> object) { receiver_ = object; }
- // The last_non_empty_prototype is used to limit the prototypes for which
- // we have to keep track of non-enumerable keys that can shadow keys
- // repeated on the prototype chain.
- void set_last_non_empty_prototype(Handle<JSReceiver> object) {
- last_non_empty_prototype_ = object;
- }
- void set_may_have_elements(bool value) { may_have_elements_ = value; }
// Shadowing keys are used to filter keys. This happens when non-enumerable
// keys appear again on the prototype chain.
void AddShadowingKey(Object key, AllowHeapAllocation* allow_gc);
void AddShadowingKey(Handle<Object> key);
private:
+ enum IndexedOrNamed { kIndexed, kNamed };
+
+ V8_WARN_UNUSED_RESULT ExceptionStatus
+ CollectPrivateNames(Handle<JSReceiver> receiver, Handle<JSObject> object);
+ Maybe<bool> CollectAccessCheckInterceptorKeys(
+ Handle<AccessCheckInfo> access_check_info, Handle<JSReceiver> receiver,
+ Handle<JSObject> object);
+
+ Maybe<bool> CollectInterceptorKeysInternal(
+ Handle<JSReceiver> receiver, Handle<JSObject> object,
+ Handle<InterceptorInfo> interceptor, IndexedOrNamed type);
+ Maybe<bool> CollectInterceptorKeys(Handle<JSReceiver> receiver,
+ Handle<JSObject> object,
+ IndexedOrNamed type);
+
+ Maybe<bool> CollectOwnElementIndices(Handle<JSReceiver> receiver,
+ Handle<JSObject> object);
+ Maybe<bool> CollectOwnPropertyNames(Handle<JSReceiver> receiver,
+ Handle<JSObject> object);
Maybe<bool> CollectOwnKeys(Handle<JSReceiver> receiver,
Handle<JSObject> object);
Maybe<bool> CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
Handle<JSProxy> proxy);
Maybe<bool> CollectOwnJSProxyTargetKeys(Handle<JSProxy> proxy,
Handle<JSReceiver> target);
+
+ V8_WARN_UNUSED_RESULT ExceptionStatus FilterForEnumerableProperties(
+ Handle<JSReceiver> receiver, Handle<JSObject> object,
+ Handle<InterceptorInfo> interceptor, Handle<JSObject> result,
+ IndexedOrNamed type);
+
Maybe<bool> AddKeysFromJSProxy(Handle<JSProxy> proxy,
Handle<FixedArray> keys);
+ V8_WARN_UNUSED_RESULT ExceptionStatus AddKeys(Handle<FixedArray> array,
+ AddKeyConversion convert);
+ V8_WARN_UNUSED_RESULT ExceptionStatus AddKeys(Handle<JSObject> array_like,
+ AddKeyConversion convert);
+
bool IsShadowed(Handle<Object> key);
bool HasShadowingKeys();
Handle<OrderedHashSet> keys();
+ // In case of for-in loops we have to treat JSProxy keys differently and
+ // deduplicate them. Additionally we convert JSProxy keys back to array
+ // indices.
+ void set_is_for_in(bool value) { is_for_in_ = value; }
+ void set_first_prototype_map(Handle<Map> value) {
+ first_prototype_map_ = value;
+ }
+ void set_try_prototype_info_cache(bool value) {
+ try_prototype_info_cache_ = value;
+ }
+ void set_receiver(Handle<JSReceiver> object) { receiver_ = object; }
+ // The last_non_empty_prototype is used to limit the prototypes for which
+ // we have to keep track of non-enumerable keys that can shadow keys
+ // repeated on the prototype chain.
+ void set_last_non_empty_prototype(Handle<JSReceiver> object) {
+ last_non_empty_prototype_ = object;
+ }
+ void set_may_have_elements(bool value) { may_have_elements_ = value; }
+
Isolate* isolate_;
// keys_ is either an Handle<OrderedHashSet> or in the case of own JSProxy
// keys a Handle<FixedArray>. The OrderedHashSet is in-place converted to the
@@ -138,7 +159,7 @@ class KeyAccumulator final {
bool may_have_elements_ = true;
bool try_prototype_info_cache_ = false;
- DISALLOW_COPY_AND_ASSIGN(KeyAccumulator);
+ friend FastKeyAccumulator;
};
// The FastKeyAccumulator handles the cases where there are no elements on the
@@ -158,6 +179,8 @@ class FastKeyAccumulator {
skip_indices_(skip_indices) {
Prepare();
}
+ FastKeyAccumulator(const FastKeyAccumulator&) = delete;
+ FastKeyAccumulator& operator=(const FastKeyAccumulator&) = delete;
bool is_receiver_simple_enum() { return is_receiver_simple_enum_; }
bool has_empty_prototype() { return has_empty_prototype_; }
@@ -193,8 +216,6 @@ class FastKeyAccumulator {
bool has_prototype_info_cache_ = false;
bool try_prototype_info_cache_ = false;
bool only_own_has_simple_elements_ = false;
-
- DISALLOW_COPY_AND_ASSIGN(FastKeyAccumulator);
};
} // namespace internal
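
[Editor's note] The keys.h change from DISALLOW_COPY_AND_ASSIGN to explicitly deleted members is part of a V8-wide migration: deleted copy operations live in the public section, need no macro header, and produce a clearer "use of deleted function" diagnostic than the old private-declaration trick. A minimal before/after sketch:

// Editor's sketch, not V8 code.
class WithMacro {
 public:
  WithMacro() = default;

 private:
  // Old style: the macro expanded to private, unimplemented copy
  // operations, conventionally placed at the end of the class.
  WithMacro(const WithMacro&);
  WithMacro& operator=(const WithMacro&);
};

class WithDeleted {
 public:
  WithDeleted() = default;
  // New style: publicly deleted special members. Overload resolution
  // still finds them, so any copy attempt fails with an explicit
  // "use of deleted function" error.
  WithDeleted(const WithDeleted&) = delete;
  WithDeleted& operator=(const WithDeleted&) = delete;
};
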
diff --git a/deps/v8/src/objects/layout-descriptor-inl.h b/deps/v8/src/objects/layout-descriptor-inl.h
index 561e79505e..76dd3f618b 100644
--- a/deps/v8/src/objects/layout-descriptor-inl.h
+++ b/deps/v8/src/objects/layout-descriptor-inl.h
@@ -175,11 +175,12 @@ int LayoutDescriptor::CalculateCapacity(Map map, DescriptorArray descriptors,
if (!InobjectUnboxedField(inobject_properties, details)) continue;
int field_index = details.field_index();
int field_width_in_words = details.field_width_in_words();
- layout_descriptor_length =
- Max(layout_descriptor_length, field_index + field_width_in_words);
+ layout_descriptor_length = std::max(layout_descriptor_length,
+ field_index + field_width_in_words);
}
}
- layout_descriptor_length = Min(layout_descriptor_length, inobject_properties);
+ layout_descriptor_length =
+ std::min(layout_descriptor_length, inobject_properties);
return layout_descriptor_length;
}
diff --git a/deps/v8/src/objects/layout-descriptor.cc b/deps/v8/src/objects/layout-descriptor.cc
index 2b588a58bf..034680e297 100644
--- a/deps/v8/src/objects/layout-descriptor.cc
+++ b/deps/v8/src/objects/layout-descriptor.cc
@@ -65,7 +65,7 @@ Handle<LayoutDescriptor> LayoutDescriptor::AppendIfFastOrUseFull(
Isolate* isolate, Handle<Map> map, PropertyDetails details,
Handle<LayoutDescriptor> full_layout_descriptor) {
DisallowHeapAllocation no_allocation;
- LayoutDescriptor layout_descriptor = map->layout_descriptor();
+ LayoutDescriptor layout_descriptor = map->layout_descriptor(kAcquireLoad);
if (layout_descriptor.IsSlowLayout()) {
return full_layout_descriptor;
}
@@ -164,8 +164,8 @@ bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
}
}
} else { // Fast layout.
- sequence_length = Min(base::bits::CountTrailingZeros(value),
- static_cast<unsigned>(kBitsInSmiLayout)) -
+ sequence_length = std::min(base::bits::CountTrailingZeros(value),
+ static_cast<unsigned>(kBitsInSmiLayout)) -
layout_bit_index;
if (is_tagged && (field_index + sequence_length == capacity())) {
// The contiguous sequence of tagged fields lasts till the end of the
@@ -174,7 +174,7 @@ bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
sequence_length = std::numeric_limits<int>::max();
}
}
- *out_sequence_length = Min(sequence_length, max_sequence_length);
+ *out_sequence_length = std::min(sequence_length, max_sequence_length);
return is_tagged;
}
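
[Editor's note] IsTagged measures the run length with count-trailing-zeros: a set bit in the layout word marks an untagged (unboxed double) field, so the number of trailing zeros of the word shifted down to the current bit index is exactly the number of consecutive tagged fields. A sketch using C++20's std::countr_zero in place of V8's base::bits::CountTrailingZeros:

#include <algorithm>
#include <bit>
#include <cstdint>

// Editor's sketch, not V8 code: length of the run of tagged fields
// starting at |bit_index|, capped at |max_len|.
int TaggedRunLength(uint32_t layout, int bit_index, int max_len) {
  uint32_t rest = layout >> bit_index;
  // If no bit is set, the run extends to the end of the 32-bit word;
  // otherwise countr_zero gives the distance to the first untagged field.
  int run = rest == 0 ? 32 - bit_index : std::countr_zero(rest);
  return std::min(run, max_len);
}
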
@@ -200,7 +200,7 @@ bool LayoutDescriptorHelper::IsTagged(
return true;
}
int max_sequence_length = (end_offset - offset_in_bytes) / kTaggedSize;
- int field_index = Max(0, (offset_in_bytes - header_size_) / kTaggedSize);
+ int field_index = std::max(0, (offset_in_bytes - header_size_) / kTaggedSize);
int sequence_length;
bool tagged = layout_descriptor_.IsTagged(field_index, max_sequence_length,
&sequence_length);
@@ -257,7 +257,7 @@ LayoutDescriptor LayoutDescriptor::Trim(Heap* heap, Map map,
bool LayoutDescriptor::IsConsistentWithMap(Map map, bool check_tail) {
if (FLAG_unbox_double_fields) {
- DescriptorArray descriptors = map.instance_descriptors();
+ DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
int last_field_index = 0;
for (InternalIndex i : map.IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
@@ -271,8 +271,8 @@ bool LayoutDescriptor::IsConsistentWithMap(Map map, bool check_tail) {
if (tagged_actual != tagged_expected) return false;
}
last_field_index =
- Max(last_field_index,
- details.field_index() + details.field_width_in_words());
+ std::max(last_field_index,
+ details.field_index() + details.field_width_in_words());
}
if (check_tail) {
int n = capacity();
diff --git a/deps/v8/src/objects/literal-objects-inl.h b/deps/v8/src/objects/literal-objects-inl.h
index d7b0185f7b..8b08dedb72 100644
--- a/deps/v8/src/objects/literal-objects-inl.h
+++ b/deps/v8/src/objects/literal-objects-inl.h
@@ -15,6 +15,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/literal-objects-tq-inl.inc"
+
//
// ObjectBoilerplateDescription
//
@@ -27,11 +29,11 @@ SMI_ACCESSORS(ObjectBoilerplateDescription, flags,
FixedArray::OffsetOfElementAt(kLiteralTypeOffset))
Object ObjectBoilerplateDescription::name(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return name(isolate, index);
}
-Object ObjectBoilerplateDescription::name(const Isolate* isolate,
+Object ObjectBoilerplateDescription::name(IsolateRoot isolate,
int index) const {
// get() already checks for out of bounds access, but we do not want to allow
// access to the last element, if it is the number of properties.
@@ -40,11 +42,11 @@ Object ObjectBoilerplateDescription::name(const Isolate* isolate,
}
Object ObjectBoilerplateDescription::value(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return value(isolate, index);
}
-Object ObjectBoilerplateDescription::value(const Isolate* isolate,
+Object ObjectBoilerplateDescription::value(IsolateRoot isolate,
int index) const {
return get(isolate, 2 * index + 1 + kDescriptionStartIndex);
}
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index b5cdfd2795..365eb6ba9e 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -119,8 +119,9 @@ constexpr int ComputeEnumerationIndex(int value_index) {
// We "shift" value indices to ensure that the enumeration index for the value
// will not overlap with minimum properties set for both class and prototype
// objects.
- return value_index + Max(ClassBoilerplate::kMinimumClassPropertiesCount,
- ClassBoilerplate::kMinimumPrototypePropertiesCount);
+ return value_index +
+ std::max({ClassBoilerplate::kMinimumClassPropertiesCount,
+ ClassBoilerplate::kMinimumPrototypePropertiesCount});
}
inline int GetExistingValueIndex(Object value) {
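
[Editor's note] Note the braces in std::max({...}) here and in map-inl.h below: both operands are static constexpr class constants, and the initializer-list overload of std::max takes its arguments by value, so (unlike the const-reference overload) it does not ODR-use the constants, presumably avoiding the out-of-line definitions they would otherwise need under pre-C++17 rules. A sketch with illustrative values:

#include <algorithm>

// Editor's sketch, not V8 code; the constant values are illustrative.
struct Limits {
  static constexpr int kMinimumClassPropertiesCount = 4;
  static constexpr int kMinimumPrototypePropertiesCount = 1;
};

int Shifted(int value_index) {
  // std::max(a, b) binds const references and ODR-uses the constants;
  // the initializer_list overload copies the values instead.
  return value_index + std::max({Limits::kMinimumClassPropertiesCount,
                                 Limits::kMinimumPrototypePropertiesCount});
}
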
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index 6603a9fad8..2ea5a521c5 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -17,6 +17,8 @@ namespace internal {
class ClassLiteral;
+#include "torque-generated/src/objects/literal-objects-tq.inc"
+
// ObjectBoilerplateDescription is a list of properties consisting of name value
// pairs. In addition to the properties, it provides the projected number
// of properties in the backing store. This number includes properties with
@@ -26,10 +28,10 @@ class ClassLiteral;
class ObjectBoilerplateDescription : public FixedArray {
public:
inline Object name(int index) const;
- inline Object name(const Isolate* isolate, int index) const;
+ inline Object name(IsolateRoot isolate, int index) const;
inline Object value(int index) const;
- inline Object value(const Isolate* isolate, int index) const;
+ inline Object value(IsolateRoot isolate, int index) const;
inline void set_key_value(int index, Object key, Object value);
diff --git a/deps/v8/src/objects/lookup-cache.h b/deps/v8/src/objects/lookup-cache.h
index a2016d23df..4aa3c5a588 100644
--- a/deps/v8/src/objects/lookup-cache.h
+++ b/deps/v8/src/objects/lookup-cache.h
@@ -18,6 +18,8 @@ namespace internal {
// Cleared at startup and prior to any gc.
class DescriptorLookupCache {
public:
+ DescriptorLookupCache(const DescriptorLookupCache&) = delete;
+ DescriptorLookupCache& operator=(const DescriptorLookupCache&) = delete;
// Lookup descriptor index for (map, name).
// If absent, kAbsent is returned.
inline int Lookup(Map source, Name name);
@@ -51,7 +53,6 @@ class DescriptorLookupCache {
int results_[kLength];
friend class Isolate;
- DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
};
} // namespace internal
diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc
index 25f2d254df..da7a4740ae 100644
--- a/deps/v8/src/objects/lookup.cc
+++ b/deps/v8/src/objects/lookup.cc
@@ -4,18 +4,19 @@
#include "src/objects/lookup.h"
+#include "src/common/globals.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/protectors-inl.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
+#include "src/objects/arguments-inl.h"
#include "src/objects/elements.h"
#include "src/objects/field-type.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/ordered-hash-table.h"
#include "src/objects/struct-inl.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions.h"
namespace v8 {
namespace internal {
@@ -437,8 +438,9 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
if (old_map.is_identical_to(new_map)) {
// Update the property details if the representation was None.
if (constness() != new_constness || representation().IsNone()) {
- property_details_ = new_map->instance_descriptors(isolate_).GetDetails(
- descriptor_number());
+ property_details_ =
+ new_map->instance_descriptors(isolate_, kRelaxedLoad)
+ .GetDetails(descriptor_number());
}
return;
}
@@ -510,15 +512,24 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
cell->set_value(*value);
property_details_ = cell->property_details();
} else {
- Handle<NameDictionary> dictionary(
- holder_obj->property_dictionary(isolate_), isolate());
- PropertyDetails original_details =
- dictionary->DetailsAt(dictionary_entry());
- int enumeration_index = original_details.dictionary_index();
- DCHECK_GT(enumeration_index, 0);
- details = details.set_index(enumeration_index);
- dictionary->SetEntry(dictionary_entry(), *name(), *value, details);
- property_details_ = details;
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ Handle<OrderedNameDictionary> dictionary(
+ holder_obj->property_dictionary_ordered(isolate_), isolate());
+ dictionary->SetEntry(dictionary_entry(), *name(), *value, details);
+ DCHECK_EQ(details.AsSmi(),
+ dictionary->DetailsAt(dictionary_entry()).AsSmi());
+ property_details_ = details;
+ } else {
+ Handle<NameDictionary> dictionary(
+ holder_obj->property_dictionary(isolate_), isolate());
+ PropertyDetails original_details =
+ dictionary->DetailsAt(dictionary_entry());
+ int enumeration_index = original_details.dictionary_index();
+ DCHECK_GT(enumeration_index, 0);
+ details = details.set_index(enumeration_index);
+ dictionary->SetEntry(dictionary_entry(), *name(), *value, details);
+ property_details_ = details;
+ }
}
state_ = DATA;
}
@@ -641,18 +652,35 @@ void LookupIterator::ApplyTransitionToDataProperty(
property_details_ = transition->GetLastDescriptorDetails(isolate_);
state_ = DATA;
} else if (receiver->map(isolate_).is_dictionary_map()) {
- Handle<NameDictionary> dictionary(receiver->property_dictionary(isolate_),
- isolate_);
if (receiver->map(isolate_).is_prototype_map() &&
receiver->IsJSObject(isolate_)) {
JSObject::InvalidatePrototypeChains(receiver->map(isolate_));
}
- dictionary = NameDictionary::Add(isolate(), dictionary, name(),
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ Handle<OrderedNameDictionary> dictionary(
+ receiver->property_dictionary_ordered(isolate_), isolate_);
+
+ dictionary =
+ OrderedNameDictionary::Add(isolate(), dictionary, name(),
isolate_->factory()->uninitialized_value(),
- property_details_, &number_);
- receiver->SetProperties(*dictionary);
- // Reload details containing proper enumeration index value.
- property_details_ = dictionary->DetailsAt(number_);
+ property_details_)
+ .ToHandleChecked();
+
+      // Set |number_| to the last used entry: Add appends new entries.
+ number_ = InternalIndex(dictionary->UsedCapacity() - 1);
+ receiver->SetProperties(*dictionary);
+ } else {
+ Handle<NameDictionary> dictionary(receiver->property_dictionary(isolate_),
+ isolate_);
+
+ dictionary =
+ NameDictionary::Add(isolate(), dictionary, name(),
+ isolate_->factory()->uninitialized_value(),
+ property_details_, &number_);
+ receiver->SetProperties(*dictionary);
+ // Reload details containing proper enumeration index value.
+ property_details_ = dictionary->DetailsAt(number_);
+ }
has_property_ = true;
state_ = DATA;

@@ -837,8 +865,13 @@ Handle<Object> LookupIterator::FetchValue(
result = holder->global_dictionary(isolate_).ValueAt(isolate_,
dictionary_entry());
} else if (!holder_->HasFastProperties(isolate_)) {
- result = holder_->property_dictionary(isolate_).ValueAt(isolate_,
- dictionary_entry());
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ result = holder_->property_dictionary_ordered(isolate_).ValueAt(
+ dictionary_entry());
+ } else {
+ result = holder_->property_dictionary(isolate_).ValueAt(
+ isolate_, dictionary_entry());
+ }
} else if (property_details_.location() == kField) {
DCHECK_EQ(kData, property_details_.kind());
Handle<JSObject> holder = GetHolder<JSObject>();
@@ -851,9 +884,9 @@ Handle<Object> LookupIterator::FetchValue(
return JSObject::FastPropertyAt(holder, property_details_.representation(),
field_index);
} else {
- result =
- holder_->map(isolate_).instance_descriptors(isolate_).GetStrongValue(
- isolate_, descriptor_number());
+ result = holder_->map(isolate_)
+ .instance_descriptors(isolate_, kRelaxedLoad)
+ .GetStrongValue(isolate_, descriptor_number());
}
return handle(result, isolate_);
}
@@ -941,10 +974,10 @@ Handle<FieldType> LookupIterator::GetFieldType() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties(isolate_));
DCHECK_EQ(kField, property_details_.location());
- return handle(
- holder_->map(isolate_).instance_descriptors(isolate_).GetFieldType(
- isolate_, descriptor_number()),
- isolate_);
+ return handle(holder_->map(isolate_)
+ .instance_descriptors(isolate_, kRelaxedLoad)
+ .GetFieldType(isolate_, descriptor_number()),
+ isolate_);
}
Handle<PropertyCell> LookupIterator::GetPropertyCell() const {
@@ -994,8 +1027,14 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
dictionary.CellAt(isolate_, dictionary_entry()).set_value(*value);
} else {
DCHECK_IMPLIES(holder->IsJSProxy(isolate_), name()->IsPrivate(isolate_));
- NameDictionary dictionary = holder->property_dictionary(isolate_);
- dictionary.ValueAtPut(dictionary_entry(), *value);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ OrderedNameDictionary dictionary =
+ holder->property_dictionary_ordered(isolate_);
+ dictionary.ValueAtPut(dictionary_entry(), *value);
+ } else {
+ NameDictionary dictionary = holder->property_dictionary(isolate_);
+ dictionary.ValueAtPut(dictionary_entry(), *value);
+ }
}
}
@@ -1131,16 +1170,24 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
property_details_ = property_details_.CopyAddAttributes(SEALED);
}
} else if (!map.is_dictionary_map()) {
- DescriptorArray descriptors = map.instance_descriptors(isolate_);
+ DescriptorArray descriptors =
+ map.instance_descriptors(isolate_, kRelaxedLoad);
number_ = descriptors.SearchWithCache(isolate_, *name_, map);
if (number_.is_not_found()) return NotFound(holder);
property_details_ = descriptors.GetDetails(number_);
} else {
DCHECK_IMPLIES(holder.IsJSProxy(isolate_), name()->IsPrivate(isolate_));
- NameDictionary dict = holder.property_dictionary(isolate_);
- number_ = dict.FindEntry(isolate(), name_);
- if (number_.is_not_found()) return NotFound(holder);
- property_details_ = dict.DetailsAt(number_);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ OrderedNameDictionary dict = holder.property_dictionary_ordered(isolate_);
+ number_ = dict.FindEntry(isolate(), *name_);
+ if (number_.is_not_found()) return NotFound(holder);
+ property_details_ = dict.DetailsAt(number_);
+ } else {
+ NameDictionary dict = holder.property_dictionary(isolate_);
+ number_ = dict.FindEntry(isolate(), name_);
+ if (number_.is_not_found()) return NotFound(holder);
+ property_details_ = dict.DetailsAt(number_);
+ }
}
has_property_ = true;
switch (property_details_.kind()) {
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 01beb50652..9529ea234c 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -20,6 +20,7 @@
#include "src/objects/shared-function-info.h"
#include "src/objects/templates-inl.h"
#include "src/objects/transitions-inl.h"
+#include "src/objects/transitions.h"
#include "src/wasm/wasm-objects-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -28,24 +29,23 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/map-tq-inl.inc"
+
OBJECT_CONSTRUCTORS_IMPL(Map, HeapObject)
CAST_ACCESSOR(Map)
-DEF_GETTER(Map, instance_descriptors, DescriptorArray) {
- return TaggedField<DescriptorArray, kInstanceDescriptorsOffset>::load(isolate,
- *this);
-}
-
-SYNCHRONIZED_ACCESSORS(Map, synchronized_instance_descriptors, DescriptorArray,
- kInstanceDescriptorsOffset)
+RELAXED_ACCESSORS(Map, instance_descriptors, DescriptorArray,
+ kInstanceDescriptorsOffset)
+RELEASE_ACQUIRE_ACCESSORS(Map, instance_descriptors, DescriptorArray,
+ kInstanceDescriptorsOffset)
// A freshly allocated layout descriptor can be set on an existing map.
// We need to use release-store and acquire-load accessor pairs to ensure
// that the concurrent marking thread observes initializing stores of the
// layout descriptor.
-SYNCHRONIZED_ACCESSORS_CHECKED(Map, layout_descriptor, LayoutDescriptor,
- kLayoutDescriptorOffset,
- FLAG_unbox_double_fields)
+RELEASE_ACQUIRE_ACCESSORS_CHECKED(Map, layout_descriptor, LayoutDescriptor,
+ kLayoutDescriptorOffset,
+ FLAG_unbox_double_fields)
SYNCHRONIZED_WEAK_ACCESSORS(Map, raw_transitions,
kTransitionsOrPrototypeInfoOffset)
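
[Editor's note] The comment above is the whole story behind the SYNCHRONIZED_ to RELEASE_ACQUIRE_ rename: a freshly allocated layout descriptor must be fully initialized before a concurrent marking thread can observe the pointer to it. A sketch of the same publication protocol with std::atomic:

#include <atomic>

// Editor's sketch, not V8 code: publishing a freshly initialized object
// to a concurrent reader.
struct Descriptor {
  int payload;
};

std::atomic<Descriptor*> slot{nullptr};

void Publish(Descriptor* desc) {
  desc->payload = 42;                           // initializing store
  slot.store(desc, std::memory_order_release);  // release: publish
}

void MarkerThread() {
  Descriptor* d = slot.load(std::memory_order_acquire);  // acquire: observe
  if (d != nullptr) {
    // Safe: the acquire load synchronizes with the release store, so the
    // initializing store to d->payload happens-before this read.
    (void)d->payload;
  }
}
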
@@ -157,21 +157,22 @@ bool Map::EquivalentToForNormalization(const Map other,
}
bool Map::IsUnboxedDoubleField(FieldIndex index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return IsUnboxedDoubleField(isolate, index);
}
-bool Map::IsUnboxedDoubleField(const Isolate* isolate, FieldIndex index) const {
+bool Map::IsUnboxedDoubleField(IsolateRoot isolate, FieldIndex index) const {
if (!FLAG_unbox_double_fields) return false;
if (!index.is_inobject()) return false;
- return !layout_descriptor(isolate).IsTagged(index.property_index());
+ return !layout_descriptor(isolate, kAcquireLoad)
+ .IsTagged(index.property_index());
}
bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
if (UnusedPropertyFields() != 0) return false;
if (is_prototype_map()) return false;
if (store_origin == StoreOrigin::kNamed) {
- int limit = Max(kMaxFastProperties, GetInObjectProperties());
+ int limit = std::max({kMaxFastProperties, GetInObjectProperties()});
FieldCounts counts = GetFieldCounts();
// Only count mutable fields so that objects with large numbers of
// constant functions do not go to dictionary mode. That would be bad
@@ -179,14 +180,14 @@ bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
int external = counts.mutable_count() - GetInObjectProperties();
return external > limit || counts.GetTotal() > kMaxNumberOfDescriptors;
} else {
- int limit = Max(kFastPropertiesSoftLimit, GetInObjectProperties());
+ int limit = std::max({kFastPropertiesSoftLimit, GetInObjectProperties()});
int external = NumberOfFields() - GetInObjectProperties();
return external > limit;
}
}
PropertyDetails Map::GetLastDescriptorDetails(Isolate* isolate) const {
- return instance_descriptors(isolate).GetDetails(LastAdded());
+ return instance_descriptors(isolate, kRelaxedLoad).GetDetails(LastAdded());
}
InternalIndex Map::LastAdded() const {
@@ -200,7 +201,7 @@ int Map::NumberOfOwnDescriptors() const {
}
void Map::SetNumberOfOwnDescriptors(int number) {
- DCHECK_LE(number, instance_descriptors().number_of_descriptors());
+ DCHECK_LE(number, instance_descriptors(kRelaxedLoad).number_of_descriptors());
CHECK_LE(static_cast<unsigned>(number),
static_cast<unsigned>(kMaxNumberOfDescriptors));
set_bit_field3(
@@ -563,7 +564,7 @@ bool Map::is_stable() const {
bool Map::CanBeDeprecated() const {
for (InternalIndex i : IterateOwnDescriptors()) {
- PropertyDetails details = instance_descriptors().GetDetails(i);
+ PropertyDetails details = instance_descriptors(kRelaxedLoad).GetDetails(i);
if (details.representation().IsNone()) return true;
if (details.representation().IsSmi()) return true;
if (details.representation().IsDouble() && FLAG_unbox_double_fields)
@@ -633,17 +634,17 @@ void Map::UpdateDescriptors(Isolate* isolate, DescriptorArray descriptors,
int number_of_own_descriptors) {
SetInstanceDescriptors(isolate, descriptors, number_of_own_descriptors);
if (FLAG_unbox_double_fields) {
- if (layout_descriptor().IsSlowLayout()) {
- set_layout_descriptor(layout_desc);
+ if (layout_descriptor(kAcquireLoad).IsSlowLayout()) {
+ set_layout_descriptor(layout_desc, kReleaseStore);
}
#ifdef VERIFY_HEAP
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
- CHECK(layout_descriptor().IsConsistentWithMap(*this));
+ CHECK(layout_descriptor(kAcquireLoad).IsConsistentWithMap(*this));
CHECK_EQ(Map::GetVisitorId(*this), visitor_id());
}
#else
- SLOW_DCHECK(layout_descriptor().IsConsistentWithMap(*this));
+ SLOW_DCHECK(layout_descriptor(kAcquireLoad).IsConsistentWithMap(*this));
DCHECK(visitor_id() == Map::GetVisitorId(*this));
#endif
}
@@ -655,14 +656,14 @@ void Map::InitializeDescriptors(Isolate* isolate, DescriptorArray descriptors,
descriptors.number_of_descriptors());
if (FLAG_unbox_double_fields) {
- set_layout_descriptor(layout_desc);
+ set_layout_descriptor(layout_desc, kReleaseStore);
#ifdef VERIFY_HEAP
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
- CHECK(layout_descriptor().IsConsistentWithMap(*this));
+ CHECK(layout_descriptor(kAcquireLoad).IsConsistentWithMap(*this));
}
#else
- SLOW_DCHECK(layout_descriptor().IsConsistentWithMap(*this));
+ SLOW_DCHECK(layout_descriptor(kAcquireLoad).IsConsistentWithMap(*this));
#endif
set_visitor_id(Map::GetVisitorId(*this));
}
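
[Editor's note] These accessors illustrate the new tag-dispatch convention: kRelaxedLoad, kAcquireLoad, and kReleaseStore are empty tag types that select an accessor overload, so the memory-ordering contract is spelled out at every call site instead of being buried in a name like the old synchronized_instance_descriptors. A sketch of the mechanism on a hypothetical MapLike class:

#include <atomic>

// Editor's sketch, not V8 code.
struct RelaxedLoadTag {};
struct AcquireLoadTag {};
inline constexpr RelaxedLoadTag kRelaxedLoad{};
inline constexpr AcquireLoadTag kAcquireLoad{};

class MapLike {
 public:
  int* instance_descriptors(RelaxedLoadTag) const {
    return descriptors_.load(std::memory_order_relaxed);
  }
  int* instance_descriptors(AcquireLoadTag) const {
    return descriptors_.load(std::memory_order_acquire);
  }

 private:
  std::atomic<int*> descriptors_{nullptr};
};

// Usage: map.instance_descriptors(kRelaxedLoad) documents that the caller
// does not rely on ordering with the descriptor array's own initialization.
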
@@ -684,12 +685,12 @@ void Map::clear_padding() {
}
LayoutDescriptor Map::GetLayoutDescriptor() const {
- return FLAG_unbox_double_fields ? layout_descriptor()
+ return FLAG_unbox_double_fields ? layout_descriptor(kAcquireLoad)
: LayoutDescriptor::FastPointerLayout();
}
void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
- DescriptorArray descriptors = instance_descriptors();
+ DescriptorArray descriptors = instance_descriptors(kRelaxedLoad);
int number_of_own_descriptors = NumberOfOwnDescriptors();
DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
{
@@ -832,7 +833,7 @@ int Map::SlackForArraySize(int old_size, int size_limit) {
DCHECK_LE(1, max_slack);
return 1;
}
- return Min(max_slack, old_size / 4);
+ return std::min(max_slack, old_size / 4);
}
int Map::InstanceSizeFromSlack(int slack) const {
diff --git a/deps/v8/src/objects/map-updater.cc b/deps/v8/src/objects/map-updater.cc
index b4b1587493..36d5da85e8 100644
--- a/deps/v8/src/objects/map-updater.cc
+++ b/deps/v8/src/objects/map-updater.cc
@@ -28,7 +28,7 @@ inline bool EqualImmutableValues(Object obj1, Object obj2) {
MapUpdater::MapUpdater(Isolate* isolate, Handle<Map> old_map)
: isolate_(isolate),
old_map_(old_map),
- old_descriptors_(old_map->instance_descriptors(), isolate_),
+ old_descriptors_(old_map->instance_descriptors(kRelaxedLoad), isolate_),
old_nof_(old_map_->NumberOfOwnDescriptors()),
new_elements_kind_(old_map_->elements_kind()),
is_transitionable_fast_elements_kind_(
@@ -197,8 +197,9 @@ void MapUpdater::GeneralizeField(Handle<Map> map, InternalIndex modify_index,
Map::GeneralizeField(isolate_, map, modify_index, new_constness,
new_representation, new_field_type);
- DCHECK(*old_descriptors_ == old_map_->instance_descriptors() ||
- *old_descriptors_ == integrity_source_map_->instance_descriptors());
+ DCHECK(*old_descriptors_ == old_map_->instance_descriptors(kRelaxedLoad) ||
+ *old_descriptors_ ==
+ integrity_source_map_->instance_descriptors(kRelaxedLoad));
}
MapUpdater::State MapUpdater::Normalize(const char* reason) {
@@ -284,8 +285,8 @@ bool MapUpdater::TrySaveIntegrityLevelTransitions() {
integrity_source_map_->NumberOfOwnDescriptors());
has_integrity_level_transition_ = true;
- old_descriptors_ =
- handle(integrity_source_map_->instance_descriptors(), isolate_);
+ old_descriptors_ = handle(
+ integrity_source_map_->instance_descriptors(kRelaxedLoad), isolate_);
return true;
}
@@ -380,8 +381,8 @@ MapUpdater::State MapUpdater::FindTargetMap() {
if (transition.is_null()) break;
Handle<Map> tmp_map(transition, isolate_);
- Handle<DescriptorArray> tmp_descriptors(tmp_map->instance_descriptors(),
- isolate_);
+ Handle<DescriptorArray> tmp_descriptors(
+ tmp_map->instance_descriptors(kRelaxedLoad), isolate_);
// Check if target map is incompatible.
PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
@@ -428,7 +429,8 @@ MapUpdater::State MapUpdater::FindTargetMap() {
if (target_nof == old_nof_) {
#ifdef DEBUG
if (modified_descriptor_.is_found()) {
- DescriptorArray target_descriptors = target_map_->instance_descriptors();
+ DescriptorArray target_descriptors =
+ target_map_->instance_descriptors(kRelaxedLoad);
PropertyDetails details =
target_descriptors.GetDetails(modified_descriptor_);
DCHECK_EQ(new_kind_, details.kind());
@@ -476,8 +478,8 @@ MapUpdater::State MapUpdater::FindTargetMap() {
old_details.attributes());
if (transition.is_null()) break;
Handle<Map> tmp_map(transition, isolate_);
- Handle<DescriptorArray> tmp_descriptors(tmp_map->instance_descriptors(),
- isolate_);
+ Handle<DescriptorArray> tmp_descriptors(
+ tmp_map->instance_descriptors(kRelaxedLoad), isolate_);
#ifdef DEBUG
// Check that target map is compatible.
PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
@@ -501,7 +503,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
InstanceType instance_type = old_map_->instance_type();
int target_nof = target_map_->NumberOfOwnDescriptors();
Handle<DescriptorArray> target_descriptors(
- target_map_->instance_descriptors(), isolate_);
+ target_map_->instance_descriptors(kRelaxedLoad), isolate_);
// Allocate a new descriptor array large enough to hold the required
// descriptors, with minimally the exact same size as the old descriptor
@@ -676,7 +678,7 @@ Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
TransitionsAccessor(isolate_, current, &no_allocation)
.SearchTransition(name, details.kind(), details.attributes());
if (next.is_null()) break;
- DescriptorArray next_descriptors = next.instance_descriptors();
+ DescriptorArray next_descriptors = next.instance_descriptors(kRelaxedLoad);
PropertyDetails next_details = next_descriptors.GetDetails(i);
DCHECK_EQ(details.kind(), next_details.kind());
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index d1370aeaf4..535ec82d63 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -12,6 +12,7 @@
#include "src/init/bootstrapper.h"
#include "src/logging/counters-inl.h"
#include "src/logging/log.h"
+#include "src/objects/arguments-inl.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/elements-kind.h"
#include "src/objects/field-type.h"
@@ -25,8 +26,6 @@
#include "src/roots/roots.h"
#include "src/utils/ostreams.h"
#include "src/zone/zone-containers.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions.h"
#include "torque-generated/field-offsets.h"
namespace v8 {
@@ -66,7 +65,7 @@ void Map::PrintReconfiguration(Isolate* isolate, FILE* file,
PropertyAttributes attributes) {
OFStream os(file);
os << "[reconfiguring]";
- Name name = instance_descriptors().GetKey(modify_index);
+ Name name = instance_descriptors(kRelaxedLoad).GetKey(modify_index);
if (name.IsString()) {
String::cast(name).PrintOn(file);
} else {
@@ -188,9 +187,6 @@ VisitorId Map::GetVisitorId(Map map) {
case FEEDBACK_METADATA_TYPE:
return kVisitFeedbackMetadata;
- case ODDBALL_TYPE:
- return kVisitOddball;
-
case MAP_TYPE:
return kVisitMap;
@@ -203,9 +199,6 @@ VisitorId Map::GetVisitorId(Map map) {
case PROPERTY_CELL_TYPE:
return kVisitPropertyCell;
- case DESCRIPTOR_ARRAY_TYPE:
- return kVisitDescriptorArray;
-
case TRANSITION_ARRAY_TYPE:
return kVisitTransitionArray;
@@ -389,7 +382,7 @@ void Map::PrintGeneralization(
MaybeHandle<Object> new_value) {
OFStream os(file);
os << "[generalizing]";
- Name name = instance_descriptors().GetKey(modify_index);
+ Name name = instance_descriptors(kRelaxedLoad).GetKey(modify_index);
if (name.IsString()) {
String::cast(name).PrintOn(file);
} else {
@@ -450,7 +443,7 @@ MaybeHandle<Map> Map::CopyWithField(Isolate* isolate, Handle<Map> map,
PropertyConstness constness,
Representation representation,
TransitionFlag flag) {
- DCHECK(map->instance_descriptors()
+ DCHECK(map->instance_descriptors(kRelaxedLoad)
.Search(*name, map->NumberOfOwnDescriptors())
.is_not_found());
@@ -509,7 +502,7 @@ bool Map::TransitionRemovesTaggedField(Map target) const {
bool Map::TransitionChangesTaggedFieldToUntaggedField(Map target) const {
int inobject = NumberOfFields();
int target_inobject = target.NumberOfFields();
- int limit = Min(inobject, target_inobject);
+ int limit = std::min(inobject, target_inobject);
for (int i = 0; i < limit; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(target, i);
if (!IsUnboxedDoubleField(index) && target.IsUnboxedDoubleField(index)) {
@@ -544,8 +537,8 @@ bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
if (target_number_of_fields != *old_number_of_fields) return true;
// If smi descriptors were replaced by double descriptors, rewrite.
- DescriptorArray old_desc = instance_descriptors();
- DescriptorArray new_desc = target.instance_descriptors();
+ DescriptorArray old_desc = instance_descriptors(kRelaxedLoad);
+ DescriptorArray new_desc = target.instance_descriptors(kRelaxedLoad);
for (InternalIndex i : IterateOwnDescriptors()) {
if (new_desc.GetDetails(i).representation().IsDouble() !=
old_desc.GetDetails(i).representation().IsDouble()) {
@@ -569,7 +562,7 @@ bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
}
int Map::NumberOfFields() const {
- DescriptorArray descriptors = instance_descriptors();
+ DescriptorArray descriptors = instance_descriptors(kRelaxedLoad);
int result = 0;
for (InternalIndex i : IterateOwnDescriptors()) {
if (descriptors.GetDetails(i).location() == kField) result++;
@@ -578,7 +571,7 @@ int Map::NumberOfFields() const {
}
Map::FieldCounts Map::GetFieldCounts() const {
- DescriptorArray descriptors = instance_descriptors();
+ DescriptorArray descriptors = instance_descriptors(kRelaxedLoad);
int mutable_count = 0;
int const_count = 0;
for (InternalIndex i : IterateOwnDescriptors()) {
@@ -630,7 +623,7 @@ void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
return;
}
- DescriptorArray to_replace = instance_descriptors();
+ DescriptorArray to_replace = instance_descriptors(kRelaxedLoad);
// Replace descriptors by new_descriptors in all maps that share it. The old
// descriptors will not be trimmed in the mark-compactor, we need to mark
// all its elements.
@@ -638,7 +631,7 @@ void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
#ifndef V8_DISABLE_WRITE_BARRIERS
WriteBarrier::Marking(to_replace, to_replace.number_of_descriptors());
#endif
- while (current.instance_descriptors(isolate) == to_replace) {
+ while (current.instance_descriptors(isolate, kRelaxedLoad) == to_replace) {
Object next = current.GetBackPointer(isolate);
if (next.IsUndefined(isolate)) break; // Stop overwriting at initial map.
current.SetEnumLength(kInvalidEnumCacheSentinel);
@@ -656,8 +649,9 @@ Map Map::FindRootMap(Isolate* isolate) const {
if (back.IsUndefined(isolate)) {
// Initial map must not contain descriptors in the descriptors array
// that do not belong to the map.
- DCHECK_LE(result.NumberOfOwnDescriptors(),
- result.instance_descriptors().number_of_descriptors());
+ DCHECK_LE(
+ result.NumberOfOwnDescriptors(),
+ result.instance_descriptors(kRelaxedLoad).number_of_descriptors());
return result;
}
result = Map::cast(back);
@@ -666,8 +660,9 @@ Map Map::FindRootMap(Isolate* isolate) const {
Map Map::FindFieldOwner(Isolate* isolate, InternalIndex descriptor) const {
DisallowHeapAllocation no_allocation;
- DCHECK_EQ(kField,
- instance_descriptors(isolate).GetDetails(descriptor).location());
+ DCHECK_EQ(kField, instance_descriptors(isolate, kRelaxedLoad)
+ .GetDetails(descriptor)
+ .location());
Map result = *this;
while (true) {
Object back = result.GetBackPointer(isolate);
@@ -686,7 +681,8 @@ void Map::UpdateFieldType(Isolate* isolate, InternalIndex descriptor,
DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeak());
// We store raw pointers in the queue, so no allocations are allowed.
DisallowHeapAllocation no_allocation;
- PropertyDetails details = instance_descriptors().GetDetails(descriptor);
+ PropertyDetails details =
+ instance_descriptors(kRelaxedLoad).GetDetails(descriptor);
if (details.location() != kField) return;
DCHECK_EQ(kData, details.kind());
@@ -708,7 +704,7 @@ void Map::UpdateFieldType(Isolate* isolate, InternalIndex descriptor,
Map target = transitions.GetTarget(i);
backlog.push(target);
}
- DescriptorArray descriptors = current.instance_descriptors();
+ DescriptorArray descriptors = current.instance_descriptors(kRelaxedLoad);
PropertyDetails details = descriptors.GetDetails(descriptor);
// It is allowed to change representation here only from None
@@ -756,7 +752,8 @@ void Map::GeneralizeField(Isolate* isolate, Handle<Map> map,
Representation new_representation,
Handle<FieldType> new_field_type) {
// Check if we actually need to generalize the field type at all.
- Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> old_descriptors(
+ map->instance_descriptors(kRelaxedLoad), isolate);
PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
PropertyConstness old_constness = old_details.constness();
Representation old_representation = old_details.representation();
@@ -779,8 +776,8 @@ void Map::GeneralizeField(Isolate* isolate, Handle<Map> map,
// Determine the field owner.
Handle<Map> field_owner(map->FindFieldOwner(isolate, modify_index), isolate);
- Handle<DescriptorArray> descriptors(field_owner->instance_descriptors(),
- isolate);
+ Handle<DescriptorArray> descriptors(
+ field_owner->instance_descriptors(kRelaxedLoad), isolate);
DCHECK_EQ(*old_field_type, descriptors->GetFieldType(modify_index));
new_field_type =
@@ -866,7 +863,7 @@ Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
// types instead of old_map's types.
// Go to slow map updating if the old_map has fast properties with cleared
// field types.
- DescriptorArray old_descriptors = old_map.instance_descriptors();
+ DescriptorArray old_descriptors = old_map.instance_descriptors(kRelaxedLoad);
for (InternalIndex i : old_map.IterateOwnDescriptors()) {
PropertyDetails old_details = old_descriptors.GetDetails(i);
if (old_details.location() == kField && old_details.kind() == kData) {
@@ -1029,7 +1026,7 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map) {
int root_nof = NumberOfOwnDescriptors();
int old_nof = old_map.NumberOfOwnDescriptors();
- DescriptorArray old_descriptors = old_map.instance_descriptors();
+ DescriptorArray old_descriptors = old_map.instance_descriptors(kRelaxedLoad);
Map new_map = *this;
for (InternalIndex i : InternalIndex::Range(root_nof, old_nof)) {
@@ -1040,7 +1037,8 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map) {
old_details.attributes());
if (transition.is_null()) return Map();
new_map = transition;
- DescriptorArray new_descriptors = new_map.instance_descriptors();
+ DescriptorArray new_descriptors =
+ new_map.instance_descriptors(kRelaxedLoad);
PropertyDetails new_details = new_descriptors.GetDetails(i);
DCHECK_EQ(old_details.kind(), new_details.kind());
@@ -1105,7 +1103,8 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
// Only supports adding slack to owned descriptors.
DCHECK(map->owns_descriptors());
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate);
int old_size = map->NumberOfOwnDescriptors();
if (slack <= descriptors->number_of_slack_descriptors()) return;
@@ -1137,7 +1136,7 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
#endif
Map current = *map;
- while (current.instance_descriptors() == *descriptors) {
+ while (current.instance_descriptors(kRelaxedLoad) == *descriptors) {
Object next = current.GetBackPointer();
if (next.IsUndefined(isolate)) break; // Stop overwriting at initial map.
current.UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
@@ -1388,7 +1387,7 @@ Handle<Map> Map::AsElementsKind(Isolate* isolate, Handle<Map> map,
int Map::NumberOfEnumerableProperties() const {
int result = 0;
- DescriptorArray descs = instance_descriptors();
+ DescriptorArray descs = instance_descriptors(kRelaxedLoad);
for (InternalIndex i : IterateOwnDescriptors()) {
if ((descs.GetDetails(i).attributes() & ONLY_ENUMERABLE) == 0 &&
!descs.GetKey(i).FilterKey(ENUMERABLE_STRINGS)) {
@@ -1400,7 +1399,7 @@ int Map::NumberOfEnumerableProperties() const {
int Map::NextFreePropertyIndex() const {
int number_of_own_descriptors = NumberOfOwnDescriptors();
- DescriptorArray descs = instance_descriptors();
+ DescriptorArray descs = instance_descriptors(kRelaxedLoad);
// Search properties backwards to find the last field.
for (int i = number_of_own_descriptors - 1; i >= 0; --i) {
PropertyDetails details = descs.GetDetails(InternalIndex(i));
@@ -1587,18 +1586,20 @@ Handle<Map> Map::TransitionToImmutableProto(Isolate* isolate, Handle<Map> map) {
namespace {
void EnsureInitialMap(Isolate* isolate, Handle<Map> map) {
#ifdef DEBUG
- // Strict function maps have Function as a constructor but the
- // Function's initial map is a sloppy function map. Same holds for
- // GeneratorFunction / AsyncFunction and its initial map.
- Object constructor = map->GetConstructor();
- DCHECK(constructor.IsJSFunction());
- DCHECK(*map == JSFunction::cast(constructor).initial_map() ||
+ Object maybe_constructor = map->GetConstructor();
+ DCHECK((maybe_constructor.IsJSFunction() &&
+ *map == JSFunction::cast(maybe_constructor).initial_map()) ||
+ // Below are the exceptions to the check above.
+ // Strict function maps have Function as a constructor but the
+ // Function's initial map is a sloppy function map.
*map == *isolate->strict_function_map() ||
*map == *isolate->strict_function_with_name_map() ||
+ // Same holds for GeneratorFunction and its initial map.
*map == *isolate->generator_function_map() ||
*map == *isolate->generator_function_with_name_map() ||
*map == *isolate->generator_function_with_home_object_map() ||
*map == *isolate->generator_function_with_name_and_home_object_map() ||
+ // AsyncFunction has Null as a constructor.
*map == *isolate->async_function_map() ||
*map == *isolate->async_function_with_name_map() ||
*map == *isolate->async_function_with_home_object_map() ||
@@ -1607,7 +1608,7 @@ void EnsureInitialMap(Isolate* isolate, Handle<Map> map) {
// Initial maps must not contain descriptors in the descriptors array
// that do not belong to the map.
DCHECK_EQ(map->NumberOfOwnDescriptors(),
- map->instance_descriptors().number_of_descriptors());
+ map->instance_descriptors(kRelaxedLoad).number_of_descriptors());
}
} // namespace
@@ -1623,10 +1624,6 @@ Handle<Map> Map::CopyInitialMap(Isolate* isolate, Handle<Map> map,
int instance_size, int inobject_properties,
int unused_property_fields) {
EnsureInitialMap(isolate, map);
- // Initial map must not contain descriptors in the descriptors array
- // that do not belong to the map.
- DCHECK_EQ(map->NumberOfOwnDescriptors(),
- map->instance_descriptors().number_of_descriptors());
Handle<Map> result =
RawCopy(isolate, map, instance_size, inobject_properties);
@@ -1637,7 +1634,7 @@ Handle<Map> Map::CopyInitialMap(Isolate* isolate, Handle<Map> map,
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
if (number_of_own_descriptors > 0) {
// The copy will use the same descriptors array without ownership.
- DescriptorArray descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors(kRelaxedLoad);
result->set_owns_descriptors(false);
result->UpdateDescriptors(isolate, descriptors, map->GetLayoutDescriptor(),
number_of_own_descriptors);
@@ -1669,7 +1666,7 @@ Handle<Map> Map::ShareDescriptor(Isolate* isolate, Handle<Map> map,
// array, implying that its NumberOfOwnDescriptors equals the number of
// descriptors in the descriptor array.
DCHECK_EQ(map->NumberOfOwnDescriptors(),
- map->instance_descriptors().number_of_descriptors());
+ map->instance_descriptors(kRelaxedLoad).number_of_descriptors());
Handle<Map> result = CopyDropDescriptors(isolate, map);
Handle<Name> name = descriptor->GetKey();
@@ -1687,7 +1684,7 @@ Handle<Map> Map::ShareDescriptor(Isolate* isolate, Handle<Map> map,
} else {
int slack = SlackForArraySize(old_size, kMaxNumberOfDescriptors);
EnsureDescriptorSlack(isolate, map, slack);
- descriptors = handle(map->instance_descriptors(), isolate);
+ descriptors = handle(map->instance_descriptors(kRelaxedLoad), isolate);
}
}
@@ -1721,8 +1718,9 @@ void Map::ConnectTransition(Isolate* isolate, Handle<Map> parent,
} else if (!parent->IsDetached(isolate)) {
// |parent| is an initial map and it must not contain descriptors in the
// descriptors array that do not belong to the map.
- DCHECK_EQ(parent->NumberOfOwnDescriptors(),
- parent->instance_descriptors().number_of_descriptors());
+ DCHECK_EQ(
+ parent->NumberOfOwnDescriptors(),
+ parent->instance_descriptors(kRelaxedLoad).number_of_descriptors());
}
if (parent->IsDetached(isolate)) {
DCHECK(child->IsDetached(isolate));
@@ -1846,14 +1844,15 @@ void Map::InstallDescriptors(Isolate* isolate, Handle<Map> parent,
Handle<LayoutDescriptor> layout_descriptor =
LayoutDescriptor::AppendIfFastOrUseFull(isolate, parent, details,
full_layout_descriptor);
- child->set_layout_descriptor(*layout_descriptor);
+ child->set_layout_descriptor(*layout_descriptor, kReleaseStore);
#ifdef VERIFY_HEAP
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
- CHECK(child->layout_descriptor().IsConsistentWithMap(*child));
+ CHECK(child->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*child));
}
#else
- SLOW_DCHECK(child->layout_descriptor().IsConsistentWithMap(*child));
+ SLOW_DCHECK(
+ child->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*child));
#endif
child->set_visitor_id(Map::GetVisitorId(*child));
}
@@ -1959,12 +1958,14 @@ Handle<Map> Map::CopyForElementsTransition(Isolate* isolate, Handle<Map> map) {
// transfer ownership to the new map.
// The properties did not change, so reuse descriptors.
map->set_owns_descriptors(false);
- new_map->InitializeDescriptors(isolate, map->instance_descriptors(),
+ new_map->InitializeDescriptors(isolate,
+ map->instance_descriptors(kRelaxedLoad),
map->GetLayoutDescriptor());
} else {
// In case the map did not own its own descriptors, a split is forced by
// copying the map, creating a new descriptor array cell.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
isolate, descriptors, number_of_own_descriptors);
@@ -1977,7 +1978,8 @@ Handle<Map> Map::CopyForElementsTransition(Isolate* isolate, Handle<Map> map) {
}
Handle<Map> Map::Copy(Isolate* isolate, Handle<Map> map, const char* reason) {
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
isolate, descriptors, number_of_own_descriptors);
@@ -2018,8 +2020,8 @@ Handle<Map> Map::CopyForPreventExtensions(
bool old_map_is_dictionary_elements_kind) {
int num_descriptors = map->NumberOfOwnDescriptors();
Handle<DescriptorArray> new_desc = DescriptorArray::CopyUpToAddAttributes(
- isolate, handle(map->instance_descriptors(), isolate), num_descriptors,
- attrs_to_add);
+ isolate, handle(map->instance_descriptors(kRelaxedLoad), isolate),
+ num_descriptors, attrs_to_add);
Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
isolate);
// Do not track transitions during bootstrapping.
@@ -2115,13 +2117,14 @@ Handle<Map> UpdateDescriptorForValue(Isolate* isolate, Handle<Map> map,
InternalIndex descriptor,
PropertyConstness constness,
Handle<Object> value) {
- if (CanHoldValue(map->instance_descriptors(), descriptor, constness,
- *value)) {
+ if (CanHoldValue(map->instance_descriptors(kRelaxedLoad), descriptor,
+ constness, *value)) {
return map;
}
- PropertyAttributes attributes =
- map->instance_descriptors().GetDetails(descriptor).attributes();
+ PropertyAttributes attributes = map->instance_descriptors(kRelaxedLoad)
+ .GetDetails(descriptor)
+ .attributes();
Representation representation = value->OptimalRepresentation(isolate);
Handle<FieldType> type = value->OptimalType(isolate, representation);
@@ -2168,9 +2171,9 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
Handle<Map> transition(maybe_transition, isolate);
InternalIndex descriptor = transition->LastAdded();
- DCHECK_EQ(
- attributes,
- transition->instance_descriptors().GetDetails(descriptor).attributes());
+ DCHECK_EQ(attributes, transition->instance_descriptors(kRelaxedLoad)
+ .GetDetails(descriptor)
+ .attributes());
return UpdateDescriptorForValue(isolate, transition, descriptor, constness,
value);
@@ -2288,7 +2291,8 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
.SearchTransition(*name, kAccessor, attributes);
if (!maybe_transition.is_null()) {
Handle<Map> transition(maybe_transition, isolate);
- DescriptorArray descriptors = transition->instance_descriptors();
+ DescriptorArray descriptors =
+ transition->instance_descriptors(kRelaxedLoad);
InternalIndex descriptor = transition->LastAdded();
DCHECK(descriptors.GetKey(descriptor).Equals(*name));
@@ -2311,7 +2315,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
}
Handle<AccessorPair> pair;
- DescriptorArray old_descriptors = map->instance_descriptors();
+ DescriptorArray old_descriptors = map->instance_descriptors(kRelaxedLoad);
if (descriptor.is_found()) {
if (descriptor != map->LastAdded()) {
return Map::Normalize(isolate, map, mode, "AccessorsOverwritingNonLast");
@@ -2372,7 +2376,8 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
Handle<Map> Map::CopyAddDescriptor(Isolate* isolate, Handle<Map> map,
Descriptor* descriptor,
TransitionFlag flag) {
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ isolate);
// Share descriptors only if the map owns descriptors and it is not an initial map.
if (flag == INSERT_TRANSITION && map->owns_descriptors() &&
@@ -2399,7 +2404,8 @@ Handle<Map> Map::CopyAddDescriptor(Isolate* isolate, Handle<Map> map,
Handle<Map> Map::CopyInsertDescriptor(Isolate* isolate, Handle<Map> map,
Descriptor* descriptor,
TransitionFlag flag) {
- Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> old_descriptors(
+ map->instance_descriptors(kRelaxedLoad), isolate);
// We replace the key if it is already present.
InternalIndex index =
@@ -2479,9 +2485,10 @@ bool Map::EquivalentToForTransition(const Map other) const {
if (instance_type() == JS_FUNCTION_TYPE) {
// JSFunctions require more checks to ensure that sloppy function is
// not equivalent to strict function.
- int nof = Min(NumberOfOwnDescriptors(), other.NumberOfOwnDescriptors());
- return instance_descriptors().IsEqualUpTo(other.instance_descriptors(),
- nof);
+ int nof =
+ std::min(NumberOfOwnDescriptors(), other.NumberOfOwnDescriptors());
+ return instance_descriptors(kRelaxedLoad)
+ .IsEqualUpTo(other.instance_descriptors(kRelaxedLoad), nof);
}
return true;
}
@@ -2492,7 +2499,7 @@ bool Map::EquivalentToForElementsKindTransition(const Map other) const {
// Ensure that we don't try to generate elements kind transitions from maps
// with fields that may be generalized in-place. This must already be handled
// during addition of a new field.
- DescriptorArray descriptors = instance_descriptors();
+ DescriptorArray descriptors = instance_descriptors(kRelaxedLoad);
for (InternalIndex i : IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
if (details.location() == kField) {
@@ -2573,7 +2580,7 @@ void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
int number_of_own_descriptors) {
- set_synchronized_instance_descriptors(descriptors);
+ set_instance_descriptors(descriptors, kReleaseStore);
SetNumberOfOwnDescriptors(number_of_own_descriptors);
#ifndef V8_DISABLE_WRITE_BARRIERS
WriteBarrier::Marking(descriptors, number_of_own_descriptors);
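// [Editorial note, not part of the upstream patch.] The recurring change in
// this file threads an explicit synchronization tag through every read of a
// map's descriptor array, making the memory-ordering contract visible at the
// call site instead of hiding it behind a "synchronized_" name. A minimal
// before/after sketch of a hypothetical caller:
//
//   DescriptorArray descs = map.instance_descriptors();             // before
//   DescriptorArray descs = map.instance_descriptors(kRelaxedLoad); // after
//
// Writers pair with this via set_instance_descriptors(descs, kReleaseStore),
// as SetInstanceDescriptors() above now does.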
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 007dd77d6e..f55b39acd2 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -20,6 +20,8 @@
namespace v8 {
namespace internal {
+class WasmTypeInfo;
+
enum InstanceType : uint16_t;
#define DATA_ONLY_VISITOR_ID_LIST(V) \
@@ -38,7 +40,6 @@ enum InstanceType : uint16_t;
V(CodeDataContainer) \
V(Context) \
V(DataHandler) \
- V(DescriptorArray) \
V(EmbedderDataArray) \
V(EphemeronHashTable) \
V(FeedbackCell) \
@@ -54,7 +55,6 @@ enum InstanceType : uint16_t;
V(JSWeakCollection) \
V(Map) \
V(NativeContext) \
- V(Oddball) \
V(PreparseData) \
V(PropertyArray) \
V(PropertyCell) \
@@ -71,7 +71,6 @@ enum InstanceType : uint16_t;
V(TransitionArray) \
V(UncompiledDataWithoutPreparseData) \
V(UncompiledDataWithPreparseData) \
- V(WasmCapiFunctionData) \
V(WasmIndirectFunctionTable) \
V(WasmInstanceObject) \
V(WasmArray) \
@@ -105,6 +104,8 @@ enum class ObjectFields {
using MapHandles = std::vector<Handle<Map>>;
+#include "torque-generated/src/objects/map-tq.inc"
+
// All heap objects have a Map that describes their structure.
// A Map contains information about:
// - Size information about the object
@@ -594,14 +595,14 @@ class Map : public HeapObject {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// [instance descriptors]: describes the object.
- DECL_GETTER(synchronized_instance_descriptors, DescriptorArray)
- DECL_GETTER(instance_descriptors, DescriptorArray)
+ DECL_RELAXED_ACCESSORS(instance_descriptors, DescriptorArray)
+ DECL_ACQUIRE_GETTER(instance_descriptors, DescriptorArray)
V8_EXPORT_PRIVATE void SetInstanceDescriptors(Isolate* isolate,
DescriptorArray descriptors,
int number_of_own_descriptors);
// [layout descriptor]: describes the object layout.
- DECL_ACCESSORS(layout_descriptor, LayoutDescriptor)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(layout_descriptor, LayoutDescriptor)
// |layout descriptor| accessor which can be used from GC.
inline LayoutDescriptor layout_descriptor_gc_safe() const;
inline bool HasFastPointerLayout() const;
@@ -862,8 +863,7 @@ class Map : public HeapObject {
// Returns true if given field is unboxed double.
inline bool IsUnboxedDoubleField(FieldIndex index) const;
- inline bool IsUnboxedDoubleField(const Isolate* isolate,
- FieldIndex index) const;
+ inline bool IsUnboxedDoubleField(IsolateRoot isolate, FieldIndex index) const;
void PrintMapDetails(std::ostream& os);
@@ -977,8 +977,7 @@ class Map : public HeapObject {
MaybeHandle<Object> new_value);
// Use the high-level instance_descriptors/SetInstanceDescriptors instead.
- inline void set_synchronized_instance_descriptors(
- DescriptorArray value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ DECL_RELEASE_SETTER(instance_descriptors, DescriptorArray)
static const int kFastPropertiesSoftLimit = 12;
static const int kMaxFastProperties = 128;
@@ -1007,7 +1006,7 @@ class NormalizedMapCache : public WeakFixedArray {
DECL_VERIFIER(NormalizedMapCache)
private:
- friend bool HeapObject::IsNormalizedMapCache(const Isolate* isolate) const;
+ friend bool HeapObject::IsNormalizedMapCache(IsolateRoot isolate) const;
static const int kEntries = 64;
diff --git a/deps/v8/src/objects/maybe-object-inl.h b/deps/v8/src/objects/maybe-object-inl.h
index afb3a93123..6cabc52312 100644
--- a/deps/v8/src/objects/maybe-object-inl.h
+++ b/deps/v8/src/objects/maybe-object-inl.h
@@ -34,6 +34,15 @@ MaybeObject MaybeObject::MakeWeak(MaybeObject object) {
return MaybeObject(object.ptr() | kWeakHeapObjectMask);
}
+// static
+MaybeObject MaybeObject::Create(MaybeObject o) { return o; }
+
+// static
+MaybeObject MaybeObject::Create(Object o) { return FromObject(o); }
+
+// static
+MaybeObject MaybeObject::Create(Smi smi) { return FromSmi(smi); }
+
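// [Editorial note, not part of the upstream patch.] The three Create()
// overloads above give generic code one spelling for "wrap whatever tagged
// value I have as a MaybeObject". A hypothetical template illustrating the
// intent:
//
//   template <typename T>
//   MaybeObject Wrap(T value) {
//     return MaybeObject::Create(value);  // T: MaybeObject, Object, or Smi
//   }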
//
// HeapObjectReference implementation.
//
@@ -69,7 +78,7 @@ HeapObjectReference HeapObjectReference::From(Object object,
}
// static
-HeapObjectReference HeapObjectReference::ClearedValue(const Isolate* isolate) {
+HeapObjectReference HeapObjectReference::ClearedValue(IsolateRoot isolate) {
// Construct cleared weak ref value.
#ifdef V8_COMPRESS_POINTERS
// This is necessary to make pointer decompression computation also
diff --git a/deps/v8/src/objects/maybe-object.h b/deps/v8/src/objects/maybe-object.h
index fd1363498e..3fe69ee5ec 100644
--- a/deps/v8/src/objects/maybe-object.h
+++ b/deps/v8/src/objects/maybe-object.h
@@ -27,6 +27,10 @@ class MaybeObject : public TaggedImpl<HeapObjectReferenceType::WEAK, Address> {
V8_INLINE static MaybeObject MakeWeak(MaybeObject object);
+ V8_INLINE static MaybeObject Create(MaybeObject o);
+ V8_INLINE static MaybeObject Create(Object o);
+ V8_INLINE static MaybeObject Create(Smi smi);
+
#ifdef VERIFY_HEAP
static void VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject p);
#endif
@@ -50,7 +54,7 @@ class HeapObjectReference : public MaybeObject {
V8_INLINE static HeapObjectReference From(Object object,
HeapObjectReferenceType type);
- V8_INLINE static HeapObjectReference ClearedValue(const Isolate* isolate);
+ V8_INLINE static HeapObjectReference ClearedValue(IsolateRoot isolate);
template <typename THeapObjectSlot>
V8_INLINE static void Update(THeapObjectSlot slot, HeapObject value);
diff --git a/deps/v8/src/objects/microtask-inl.h b/deps/v8/src/objects/microtask-inl.h
index 613ee096c5..c9432817e5 100644
--- a/deps/v8/src/objects/microtask-inl.h
+++ b/deps/v8/src/objects/microtask-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/microtask-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(Microtask)
TQ_OBJECT_CONSTRUCTORS_IMPL(CallbackTask)
TQ_OBJECT_CONSTRUCTORS_IMPL(CallableTask)
diff --git a/deps/v8/src/objects/microtask.h b/deps/v8/src/objects/microtask.h
index cd8a71f58c..f2869eadc7 100644
--- a/deps/v8/src/objects/microtask.h
+++ b/deps/v8/src/objects/microtask.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/microtask-tq.inc"
+
// Abstract base class for all microtasks that can be scheduled on the
// microtask queue. This class merely serves the purpose of a marker
// interface.
diff --git a/deps/v8/src/objects/module-inl.h b/deps/v8/src/objects/module-inl.h
index e627aedf18..c72cf2ad0c 100644
--- a/deps/v8/src/objects/module-inl.h
+++ b/deps/v8/src/objects/module-inl.h
@@ -6,12 +6,12 @@
#define V8_OBJECTS_MODULE_INL_H_
#include "src/objects/module.h"
-#include "src/objects/source-text-module.h"
-#include "src/objects/synthetic-module.h"
-
#include "src/objects/objects-inl.h" // Needed for write barriers
#include "src/objects/scope-info.h"
+#include "src/objects/source-text-module-inl.h"
+#include "src/objects/source-text-module.h"
#include "src/objects/string-inl.h"
+#include "src/objects/synthetic-module.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -19,13 +19,13 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/module-tq-inl.inc"
+
OBJECT_CONSTRUCTORS_IMPL(Module, HeapObject)
-TQ_OBJECT_CONSTRUCTORS_IMPL(SourceTextModule)
-TQ_OBJECT_CONSTRUCTORS_IMPL(SourceTextModuleInfoEntry)
-TQ_OBJECT_CONSTRUCTORS_IMPL(SyntheticModule)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSModuleNamespace)
NEVER_READ_ONLY_SPACE_IMPL(Module)
+NEVER_READ_ONLY_SPACE_IMPL(ModuleRequest)
NEVER_READ_ONLY_SPACE_IMPL(SourceTextModule)
NEVER_READ_ONLY_SPACE_IMPL(SyntheticModule)
@@ -44,6 +44,12 @@ ACCESSORS(SourceTextModule, async_parent_modules, ArrayList,
ACCESSORS(SourceTextModule, top_level_capability, HeapObject,
kTopLevelCapabilityOffset)
+struct Module::Hash {
+ V8_INLINE size_t operator()(Module const& module) const {
+ return module.hash();
+ }
+};
+
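// [Editorial note, not part of the upstream patch.] Module::Hash adapts the
// module's cached hash() so that Module can serve as a key in hashed zone
// containers, e.g. the ZoneUnorderedSet<Module, Module::Hash> visited set
// used by Module::IsGraphAsync() in module.cc below.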
SourceTextModuleInfo SourceTextModule::info() const {
return status() == kErrored
? SourceTextModuleInfo::cast(code())
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index e35870e953..f4c23ae5c4 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/objects/module.h"
+
#include <unordered_map>
#include <unordered_set>
-#include "src/objects/module.h"
-
#include "src/api/api-inl.h"
#include "src/ast/modules.h"
#include "src/builtins/accessors.h"
@@ -16,6 +16,7 @@
#include "src/objects/js-generator-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/synthetic-module-inl.h"
#include "src/utils/ostreams.h"
namespace v8 {
@@ -371,5 +372,38 @@ Maybe<PropertyAttributes> JSModuleNamespace::GetPropertyAttributes(
return Just(it->property_attributes());
}
+bool Module::IsGraphAsync(Isolate* isolate) const {
+ DisallowGarbageCollection no_gc;
+
+ // Only SourceTextModules may be async.
+ if (!IsSourceTextModule()) return false;
+ SourceTextModule root = SourceTextModule::cast(*this);
+
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ const size_t bucket_count = 2;
+ ZoneUnorderedSet<Module, Module::Hash> visited(&zone, bucket_count);
+ ZoneVector<SourceTextModule> worklist(&zone);
+ visited.insert(root);
+ worklist.push_back(root);
+
+ do {
+ SourceTextModule current = worklist.back();
+ worklist.pop_back();
+ DCHECK_GE(current.status(), kInstantiated);
+
+ if (current.async()) return true;
+ FixedArray requested_modules = current.requested_modules();
+ for (int i = 0, length = requested_modules.length(); i < length; ++i) {
+ Module descendant = Module::cast(requested_modules.get(i));
+ if (descendant.IsSourceTextModule()) {
+ const bool cycle = !visited.insert(descendant).second;
+ if (!cycle) worklist.push_back(SourceTextModule::cast(descendant));
+ }
+ }
+ } while (!worklist.empty());
+
+ return false;
+}
+
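// [Editorial note, not part of the upstream patch.] IsGraphAsync() is a
// plain worklist traversal: it starts at this module, follows
// requested_modules() edges through SourceTextModules only, and relies on
// the zone-allocated visited set to terminate on cycles. A hedged usage
// sketch, assuming an instantiated Handle<Module> module:
//
//   if (module->IsGraphAsync(isolate)) {
//     // Some module in the graph uses top-level await.
//   }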
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index f58454fac2..20be042f82 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -27,6 +27,8 @@ class SourceTextModuleInfoEntry;
class String;
class Zone;
+#include "torque-generated/src/objects/module-tq.inc"
+
// Module is the base class for ECMAScript module types, roughly corresponding
// to Abstract Module Record.
// https://tc39.github.io/ecma262/#sec-abstract-module-records
@@ -63,6 +65,10 @@ class Module : public HeapObject {
Object GetException();
DECL_ACCESSORS(exception, Object)
+ // Returns whether this module or any transitively requested module is [[Async]],
+ // i.e. has a top-level await.
+ V8_WARN_UNUSED_RESULT bool IsGraphAsync(Isolate* isolate) const;
+
// Implementation of spec operation ModuleDeclarationInstantiation.
// Returns false if an exception occurred during instantiation, true
// otherwise. (In the case where the callback throws an exception, that
@@ -87,6 +93,8 @@ class Module : public HeapObject {
using BodyDescriptor =
FixedBodyDescriptor<kExportsOffset, kHeaderSize, kHeaderSize>;
+ struct Hash;
+
protected:
friend class Factory;
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index ffcd287fd3..55f5915319 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/name-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(Name)
TQ_OBJECT_CONSTRUCTORS_IMPL(Symbol)
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index 264cb3698e..fc0927083e 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/name-tq.inc"
+
// The Name abstract class captures anything that can be used as a property
// name, i.e., strings and symbols. All names store a hash value.
class Name : public TorqueGeneratedName<Name, PrimitiveHeapObject> {
diff --git a/deps/v8/src/objects/object-list-macros.h b/deps/v8/src/objects/object-list-macros.h
index 9eef5c0dbf..a189f00d27 100644
--- a/deps/v8/src/objects/object-list-macros.h
+++ b/deps/v8/src/objects/object-list-macros.h
@@ -104,7 +104,6 @@ class ZoneForwardList;
V(DataHandler) \
V(DeoptimizationData) \
V(DependentCode) \
- V(DescriptorArray) \
V(EmbedderDataArray) \
V(EphemeronHashTable) \
V(ExternalOneByteString) \
@@ -181,7 +180,6 @@ class ZoneForwardList;
V(NumberWrapper) \
V(ObjectHashSet) \
V(ObjectHashTable) \
- V(Oddball) \
V(OrderedHashMap) \
V(OrderedHashSet) \
V(OrderedNameDictionary) \
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index b96c03c00f..82b4f36251 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -32,9 +32,9 @@
#undef ACCESSORS_CHECKED2
#undef ACCESSORS_CHECKED
#undef ACCESSORS
-#undef SYNCHRONIZED_ACCESSORS_CHECKED2
-#undef SYNCHRONIZED_ACCESSORS_CHECKED
-#undef SYNCHRONIZED_ACCESSORS
+#undef RELEASE_ACQUIRE_ACCESSORS_CHECKED2
+#undef RELEASE_ACQUIRE_ACCESSORS_CHECKED
+#undef RELEASE_ACQUIRE_ACCESSORS
#undef WEAK_ACCESSORS_CHECKED2
#undef WEAK_ACCESSORS_CHECKED
#undef WEAK_ACCESSORS
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index b4fc7717fe..3aa56bfbde 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -82,25 +82,47 @@
// parameter.
#define DECL_GETTER(name, type) \
inline type name() const; \
- inline type name(const Isolate* isolate) const;
+ inline type name(IsolateRoot isolate) const;
-#define DEF_GETTER(holder, name, type) \
- type holder::name() const { \
- const Isolate* isolate = GetIsolateForPtrCompr(*this); \
- return holder::name(isolate); \
- } \
- type holder::name(const Isolate* isolate) const
+#define DEF_GETTER(holder, name, type) \
+ type holder::name() const { \
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this); \
+ return holder::name(isolate); \
+ } \
+ type holder::name(IsolateRoot isolate) const
#define DECL_ACCESSORS(name, type) \
DECL_GETTER(name, type) \
inline void set_##name(type value, \
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-// TODO(solanes, neis): Unify naming for synchronized accessor uses.
-#define DECL_SYNCHRONIZED_ACCESSORS(name, type) \
- DECL_GETTER(synchronized_##name, type) \
- inline void set_synchronized_##name( \
- type value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+#define DECL_ACCESSORS_LOAD_TAG(name, type, tag_type) \
+ inline type name(tag_type tag) const; \
+ inline type name(IsolateRoot isolate, tag_type) const;
+
+#define DECL_ACCESSORS_STORE_TAG(name, type, tag_type) \
+ inline void set_##name(type value, tag_type, \
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+#define DECL_RELAXED_GETTER(name, type) \
+ DECL_ACCESSORS_LOAD_TAG(name, type, RelaxedLoadTag)
+
+#define DECL_RELAXED_SETTER(name, type) \
+ DECL_ACCESSORS_STORE_TAG(name, type, RelaxedStoreTag)
+
+#define DECL_RELAXED_ACCESSORS(name, type) \
+ DECL_RELAXED_GETTER(name, type) \
+ DECL_RELAXED_SETTER(name, type)
+
+#define DECL_ACQUIRE_GETTER(name, type) \
+ DECL_ACCESSORS_LOAD_TAG(name, type, AcquireLoadTag)
+
+#define DECL_RELEASE_SETTER(name, type) \
+ DECL_ACCESSORS_STORE_TAG(name, type, ReleaseStoreTag)
+
+#define DECL_RELEASE_ACQUIRE_ACCESSORS(name, type) \
+ DECL_ACQUIRE_GETTER(name, type) \
+ DECL_RELEASE_SETTER(name, type)
#define DECL_CAST(Type) \
V8_INLINE static Type cast(Object object); \
@@ -162,25 +184,55 @@
#define ACCESSORS(holder, name, type, offset) \
ACCESSORS_CHECKED(holder, name, type, offset, true)
-#define SYNCHRONIZED_ACCESSORS_CHECKED2(holder, name, type, offset, \
- get_condition, set_condition) \
- DEF_GETTER(holder, name, type) { \
+#define RELAXED_ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
+ set_condition) \
+ type holder::name(RelaxedLoadTag tag) const { \
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this); \
+ return holder::name(isolate, tag); \
+ } \
+ type holder::name(IsolateRoot isolate, RelaxedLoadTag) const { \
+ type value = TaggedField<type, offset>::load(isolate, *this); \
+ DCHECK(get_condition); \
+ return value; \
+ } \
+ void holder::set_##name(type value, RelaxedStoreTag, \
+ WriteBarrierMode mode) { \
+ DCHECK(set_condition); \
+ TaggedField<type, offset>::store(*this, value); \
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
+ }
+
+#define RELAXED_ACCESSORS_CHECKED(holder, name, type, offset, condition) \
+ RELAXED_ACCESSORS_CHECKED2(holder, name, type, offset, condition, condition)
+
+#define RELAXED_ACCESSORS(holder, name, type, offset) \
+ RELAXED_ACCESSORS_CHECKED(holder, name, type, offset, true)
+
+#define RELEASE_ACQUIRE_ACCESSORS_CHECKED2(holder, name, type, offset, \
+ get_condition, set_condition) \
+ type holder::name(AcquireLoadTag tag) const { \
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this); \
+ return holder::name(isolate, tag); \
+ } \
+ type holder::name(IsolateRoot isolate, AcquireLoadTag) const { \
type value = TaggedField<type, offset>::Acquire_Load(isolate, *this); \
DCHECK(get_condition); \
return value; \
} \
- void holder::set_##name(type value, WriteBarrierMode mode) { \
+ void holder::set_##name(type value, ReleaseStoreTag, \
+ WriteBarrierMode mode) { \
DCHECK(set_condition); \
TaggedField<type, offset>::Release_Store(*this, value); \
CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
}
-#define SYNCHRONIZED_ACCESSORS_CHECKED(holder, name, type, offset, condition) \
- SYNCHRONIZED_ACCESSORS_CHECKED2(holder, name, type, offset, condition, \
- condition)
+#define RELEASE_ACQUIRE_ACCESSORS_CHECKED(holder, name, type, offset, \
+ condition) \
+ RELEASE_ACQUIRE_ACCESSORS_CHECKED2(holder, name, type, offset, condition, \
+ condition)
-#define SYNCHRONIZED_ACCESSORS(holder, name, type, offset) \
- SYNCHRONIZED_ACCESSORS_CHECKED(holder, name, type, offset, true)
+#define RELEASE_ACQUIRE_ACCESSORS(holder, name, type, offset) \
+ RELEASE_ACQUIRE_ACCESSORS_CHECKED(holder, name, type, offset, true)
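// [Editorial note, not part of the upstream patch.] For orientation, the
// declaration-side counterpart used in map.h,
//
//   DECL_RELAXED_ACCESSORS(instance_descriptors, DescriptorArray)
//
// expands, per the DECL_* macros earlier in this file, to roughly:
//
//   inline DescriptorArray instance_descriptors(RelaxedLoadTag) const;
//   inline DescriptorArray instance_descriptors(IsolateRoot,
//                                               RelaxedLoadTag) const;
//   inline void set_instance_descriptors(
//       DescriptorArray value, RelaxedStoreTag,
//       WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
//
// with the RELAXED_/RELEASE_ACQUIRE_ACCESSORS macros above supplying the
// matching definitions.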
#define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition, \
set_condition) \
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index 275ac9a9e6..a7571ae288 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -8,6 +8,7 @@
#include <algorithm>
#include "src/codegen/reloc-info.h"
+#include "src/objects/arguments-inl.h"
#include "src/objects/cell.h"
#include "src/objects/data-handler.h"
#include "src/objects/foreign-inl.h"
@@ -19,11 +20,9 @@
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/source-text-module.h"
#include "src/objects/synthetic-module.h"
+#include "src/objects/torque-defined-classes-inl.h"
#include "src/objects/transitions.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "torque-generated/class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/internal-class-definitions-inl.h"
namespace v8 {
namespace internal {
@@ -946,9 +945,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
p4);
case PROPERTY_ARRAY_TYPE:
return Op::template apply<PropertyArray::BodyDescriptor>(p1, p2, p3, p4);
- case DESCRIPTOR_ARRAY_TYPE:
- return Op::template apply<DescriptorArray::BodyDescriptor>(p1, p2, p3,
- p4);
case TRANSITION_ARRAY_TYPE:
return Op::template apply<TransitionArray::BodyDescriptor>(p1, p2, p3,
p4);
@@ -1032,8 +1028,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
return Op::template apply<WeakCell::BodyDescriptor>(p1, p2, p3, p4);
case JS_WEAK_REF_TYPE:
return Op::template apply<JSWeakRef::BodyDescriptor>(p1, p2, p3, p4);
- case ODDBALL_TYPE:
- return Op::template apply<Oddball::BodyDescriptor>(p1, p2, p3, p4);
case JS_PROXY_TYPE:
return Op::template apply<JSProxy::BodyDescriptor>(p1, p2, p3, p4);
case FOREIGN_TYPE:
diff --git a/deps/v8/src/objects/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
index 30d5bb6ec4..54fab1107a 100644
--- a/deps/v8/src/objects/objects-definitions.h
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -135,6 +135,7 @@ namespace internal {
function_template_rare_data) \
V(_, INTERCEPTOR_INFO_TYPE, InterceptorInfo, interceptor_info) \
V(_, INTERPRETER_DATA_TYPE, InterpreterData, interpreter_data) \
+ V(_, MODULE_REQUEST_TYPE, ModuleRequest, module_request) \
V(_, PROMISE_CAPABILITY_TYPE, PromiseCapability, promise_capability) \
V(_, PROMISE_REACTION_TYPE, PromiseReaction, promise_reaction) \
V(_, PROPERTY_DESCRIPTOR_OBJECT_TYPE, PropertyDescriptorObject, \
@@ -148,8 +149,6 @@ namespace internal {
V(_, TEMPLATE_OBJECT_DESCRIPTION_TYPE, TemplateObjectDescription, \
template_object_description) \
V(_, TUPLE2_TYPE, Tuple2, tuple2) \
- V(_, WASM_CAPI_FUNCTION_DATA_TYPE, WasmCapiFunctionData, \
- wasm_capi_function_data) \
V(_, WASM_EXCEPTION_TAG_TYPE, WasmExceptionTag, wasm_exception_tag) \
V(_, WASM_EXPORTED_FUNCTION_DATA_TYPE, WasmExportedFunctionData, \
wasm_exported_function_data) \
diff --git a/deps/v8/src/objects/objects-inl.h b/deps/v8/src/objects/objects-inl.h
index c8ceea8f9a..65ac811e44 100644
--- a/deps/v8/src/objects/objects-inl.h
+++ b/deps/v8/src/objects/objects-inl.h
@@ -15,6 +15,7 @@
#include "src/base/bits.h"
#include "src/base/memory.h"
#include "src/builtins/builtins.h"
+#include "src/common/external-pointer-inl.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -42,7 +43,6 @@
#include "src/objects/tagged-index.h"
#include "src/objects/templates.h"
#include "src/sanitizer/tsan.h"
-#include "torque-generated/class-definitions-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -78,7 +78,7 @@ bool Object::IsTaggedIndex() const {
bool Object::Is##type_() const { \
return IsHeapObject() && HeapObject::cast(*this).Is##type_(); \
} \
- bool Object::Is##type_(const Isolate* isolate) const { \
+ bool Object::Is##type_(IsolateRoot isolate) const { \
return IsHeapObject() && HeapObject::cast(*this).Is##type_(isolate); \
}
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF)
@@ -232,23 +232,23 @@ DEF_GETTER(HeapObject, IsExternalTwoByteString, bool) {
bool Object::IsNumber() const {
if (IsSmi()) return true;
HeapObject this_heap_object = HeapObject::cast(*this);
- const Isolate* isolate = GetIsolateForPtrCompr(this_heap_object);
+ IsolateRoot isolate = GetIsolateForPtrCompr(this_heap_object);
return this_heap_object.IsHeapNumber(isolate);
}
-bool Object::IsNumber(const Isolate* isolate) const {
+bool Object::IsNumber(IsolateRoot isolate) const {
return IsSmi() || IsHeapNumber(isolate);
}
bool Object::IsNumeric() const {
if (IsSmi()) return true;
HeapObject this_heap_object = HeapObject::cast(*this);
- const Isolate* isolate = GetIsolateForPtrCompr(this_heap_object);
+ IsolateRoot isolate = GetIsolateForPtrCompr(this_heap_object);
return this_heap_object.IsHeapNumber(isolate) ||
this_heap_object.IsBigInt(isolate);
}
-bool Object::IsNumeric(const Isolate* isolate) const {
+bool Object::IsNumeric(IsolateRoot isolate) const {
return IsNumber(isolate) || IsBigInt(isolate);
}
@@ -276,11 +276,11 @@ DEF_GETTER(HeapObject, IsRegExpMatchInfo, bool) {
bool Object::IsLayoutDescriptor() const {
if (IsSmi()) return true;
HeapObject this_heap_object = HeapObject::cast(*this);
- const Isolate* isolate = GetIsolateForPtrCompr(this_heap_object);
+ IsolateRoot isolate = GetIsolateForPtrCompr(this_heap_object);
return this_heap_object.IsByteArray(isolate);
}
-bool Object::IsLayoutDescriptor(const Isolate* isolate) const {
+bool Object::IsLayoutDescriptor(IsolateRoot isolate) const {
return IsSmi() || IsByteArray(isolate);
}
@@ -385,11 +385,11 @@ DEF_GETTER(HeapObject, IsWasmExceptionPackage, bool) {
bool Object::IsPrimitive() const {
if (IsSmi()) return true;
HeapObject this_heap_object = HeapObject::cast(*this);
- const Isolate* isolate = GetIsolateForPtrCompr(this_heap_object);
+ IsolateRoot isolate = GetIsolateForPtrCompr(this_heap_object);
return this_heap_object.map(isolate).IsPrimitiveMap();
}
-bool Object::IsPrimitive(const Isolate* isolate) const {
+bool Object::IsPrimitive(IsolateRoot isolate) const {
return IsSmi() || HeapObject::cast(*this).map(isolate).IsPrimitiveMap();
}
@@ -419,7 +419,7 @@ DEF_GETTER(HeapObject, IsAccessCheckNeeded, bool) {
bool Object::Is##Name() const { \
return IsHeapObject() && HeapObject::cast(*this).Is##Name(); \
} \
- bool Object::Is##Name(const Isolate* isolate) const { \
+ bool Object::Is##Name(IsolateRoot isolate) const { \
return IsHeapObject() && HeapObject::cast(*this).Is##Name(isolate); \
}
STRUCT_LIST(MAKE_STRUCT_PREDICATE)
@@ -485,7 +485,7 @@ bool Object::FilterKey(PropertyFilter filter) {
return false;
}
-Representation Object::OptimalRepresentation(const Isolate* isolate) const {
+Representation Object::OptimalRepresentation(IsolateRoot isolate) const {
if (!FLAG_track_fields) return Representation::Tagged();
if (IsSmi()) {
return Representation::Smi();
@@ -504,7 +504,7 @@ Representation Object::OptimalRepresentation(const Isolate* isolate) const {
}
}
-ElementsKind Object::OptimalElementsKind(const Isolate* isolate) const {
+ElementsKind Object::OptimalElementsKind(IsolateRoot isolate) const {
if (IsSmi()) return PACKED_SMI_ELEMENTS;
if (IsNumber(isolate)) return PACKED_DOUBLE_ELEMENTS;
return PACKED_ELEMENTS;
@@ -640,12 +640,31 @@ MaybeHandle<Object> Object::SetElement(Isolate* isolate, Handle<Object> object,
return value;
}
+void Object::InitExternalPointerField(size_t offset, Isolate* isolate) {
+ i::InitExternalPointerField(field_address(offset), isolate);
+}
+
+void Object::InitExternalPointerField(size_t offset, Isolate* isolate,
+ Address value, ExternalPointerTag tag) {
+ i::InitExternalPointerField(field_address(offset), isolate, value, tag);
+}
+
+Address Object::ReadExternalPointerField(size_t offset, IsolateRoot isolate,
+ ExternalPointerTag tag) const {
+ return i::ReadExternalPointerField(field_address(offset), isolate, tag);
+}
+
+void Object::WriteExternalPointerField(size_t offset, Isolate* isolate,
+ Address value, ExternalPointerTag tag) {
+ i::WriteExternalPointerField(field_address(offset), isolate, value, tag);
+}
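// [Editorial note, not part of the upstream patch.] These four wrappers give
// Object a uniform API over the i::*ExternalPointerField helpers from
// src/common/external-pointer-inl.h (newly included above): they initialize,
// read, and write an off-heap pointer stored at a raw field offset, with the
// ExternalPointerTag identifying the pointer's kind.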
+
ObjectSlot HeapObject::RawField(int byte_offset) const {
- return ObjectSlot(FIELD_ADDR(*this, byte_offset));
+ return ObjectSlot(field_address(byte_offset));
}
MaybeObjectSlot HeapObject::RawMaybeWeakField(int byte_offset) const {
- return MaybeObjectSlot(FIELD_ADDR(*this, byte_offset));
+ return MaybeObjectSlot(field_address(byte_offset));
}
MapWord MapWord::FromMap(const Map map) { return MapWord(map.ptr()); }
@@ -686,10 +705,10 @@ ReadOnlyRoots HeapObject::GetReadOnlyRoots() const {
return ReadOnlyHeap::GetReadOnlyRoots(*this);
}
-ReadOnlyRoots HeapObject::GetReadOnlyRoots(const Isolate* isolate) const {
+ReadOnlyRoots HeapObject::GetReadOnlyRoots(IsolateRoot isolate) const {
#ifdef V8_COMPRESS_POINTERS
- DCHECK_NOT_NULL(isolate);
- return ReadOnlyRoots(const_cast<Isolate*>(isolate));
+ DCHECK_NE(isolate.address(), 0);
+ return ReadOnlyRoots(Isolate::FromRootAddress(isolate.address()));
#else
return GetReadOnlyRoots();
#endif
@@ -775,8 +794,8 @@ void HeapObject::synchronized_set_map_word(MapWord map_word) {
MapField::Release_Store(*this, map_word);
}
-bool HeapObject::synchronized_compare_and_swap_map_word(MapWord old_map_word,
- MapWord new_map_word) {
+bool HeapObject::release_compare_and_swap_map_word(MapWord old_map_word,
+ MapWord new_map_word) {
Tagged_t result =
MapField::Release_CompareAndSwap(*this, old_map_word, new_map_word);
return result == static_cast<Tagged_t>(old_map_word.ptr());
@@ -905,7 +924,7 @@ AllocationAlignment HeapObject::RequiredAlignment(Map map) {
}
Address HeapObject::GetFieldAddress(int field_offset) const {
- return FIELD_ADDR(*this, field_offset);
+ return field_address(field_offset);
}
// static
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index bb33b5d097..5c67fa388f 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -51,7 +51,7 @@
#include "src/objects/bigint.h"
#include "src/objects/cell-inl.h"
#include "src/objects/code-inl.h"
-#include "src/objects/compilation-cache-inl.h"
+#include "src/objects/compilation-cache-table-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/elements.h"
#include "src/objects/embedder-data-array-inl.h"
@@ -63,6 +63,7 @@
#include "src/objects/free-space-inl.h"
#include "src/objects/function-kind.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/instance-type.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/keys.h"
#include "src/objects/lookup-inl.h"
@@ -126,9 +127,6 @@
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
#include "src/zone/zone.h"
-#include "torque-generated/class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/internal-class-definitions-inl.h"
namespace v8 {
namespace internal {
@@ -1304,7 +1302,7 @@ bool FunctionTemplateInfo::IsTemplateFor(Map map) {
Object type;
if (cons_obj.IsJSFunction()) {
JSFunction fun = JSFunction::cast(cons_obj);
- type = fun.shared().function_data();
+ type = fun.shared().function_data(kAcquireLoad);
} else if (cons_obj.IsFunctionTemplateInfo()) {
type = FunctionTemplateInfo::cast(cons_obj);
} else {
@@ -1459,7 +1457,7 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
if (info->replace_on_access() && receiver->IsJSReceiver()) {
RETURN_ON_EXCEPTION(isolate,
Accessors::ReplaceAccessorWithDataProperty(
- receiver, holder, name, result),
+ isolate, receiver, holder, name, result),
Object);
}
return reboxed_result;
@@ -1811,6 +1809,11 @@ bool Object::IterationHasObservableEffects() {
return true;
}
+bool Object::IsCodeLike(Isolate* isolate) const {
+ DisallowGarbageCollection no_gc;
+ return IsJSReceiver() && JSReceiver::cast(*this).IsCodeLike(isolate);
+}
+
void Object::ShortPrint(FILE* out) const {
OFStream os(out);
os << Brief(*this);
@@ -2241,7 +2244,8 @@ int HeapObject::SizeFromMap(Map map) const {
return FeedbackMetadata::SizeFor(
FeedbackMetadata::unchecked_cast(*this).synchronized_slot_count());
}
- if (instance_type == DESCRIPTOR_ARRAY_TYPE) {
+ if (base::IsInRange(instance_type, FIRST_DESCRIPTOR_ARRAY_TYPE,
+ LAST_DESCRIPTOR_ARRAY_TYPE)) {
return DescriptorArray::SizeFor(
DescriptorArray::unchecked_cast(*this).number_of_all_descriptors());
}
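// [Editorial note, not part of the upstream patch.] Replacing the equality
// check with a range check lets SizeFromMap handle both descriptor-array
// instance types; the STRONG_DESCRIPTOR_ARRAY_TYPE cases added to
// NeedsRehashing() and CanBeRehashed() below name the second member of that
// range.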
@@ -2304,8 +2308,14 @@ int HeapObject::SizeFromMap(Map map) const {
}
bool HeapObject::NeedsRehashing() const {
- switch (map().instance_type()) {
+ return NeedsRehashing(map().instance_type());
+}
+
+bool HeapObject::NeedsRehashing(InstanceType instance_type) const {
+ DCHECK_EQ(instance_type, map().instance_type());
+ switch (instance_type) {
case DESCRIPTOR_ARRAY_TYPE:
+ case STRONG_DESCRIPTOR_ARRAY_TYPE:
return DescriptorArray::cast(*this).number_of_descriptors() > 1;
case TRANSITION_ARRAY_TYPE:
return TransitionArray::cast(*this).number_of_entries() > 1;
@@ -2345,6 +2355,7 @@ bool HeapObject::CanBeRehashed() const {
case SIMPLE_NUMBER_DICTIONARY_TYPE:
return true;
case DESCRIPTOR_ARRAY_TYPE:
+ case STRONG_DESCRIPTOR_ARRAY_TYPE:
return true;
case TRANSITION_ARRAY_TYPE:
return true;
@@ -3508,11 +3519,20 @@ Maybe<bool> JSProxy::SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
return Just(true);
}
- Handle<NameDictionary> dict(proxy->property_dictionary(), isolate);
PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell);
- Handle<NameDictionary> result =
- NameDictionary::Add(isolate, dict, private_name, value, details);
- if (!dict.is_identical_to(result)) proxy->SetProperties(*result);
+ if (V8_DICT_MODE_PROTOTYPES_BOOL) {
+ Handle<OrderedNameDictionary> dict(proxy->property_dictionary_ordered(),
+ isolate);
+ Handle<OrderedNameDictionary> result =
+ OrderedNameDictionary::Add(isolate, dict, private_name, value, details)
+ .ToHandleChecked();
+ if (!dict.is_identical_to(result)) proxy->SetProperties(*result);
+ } else {
+ Handle<NameDictionary> dict(proxy->property_dictionary(), isolate);
+ Handle<NameDictionary> result =
+ NameDictionary::Add(isolate, dict, private_name, value, details);
+ if (!dict.is_identical_to(result)) proxy->SetProperties(*result);
+ }
return Just(true);
}
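// [Editorial note, not part of the upstream patch.] The new branch mirrors
// the same add-and-swap sequence for the two possible slow-property backing
// stores: OrderedNameDictionary when the V8_DICT_MODE_PROTOTYPES_BOOL build
// flag is set, NameDictionary otherwise. Only the container type differs; in
// both cases the property is added and SetProperties() installs the
// (possibly reallocated) dictionary.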
@@ -3946,7 +3966,7 @@ Handle<FixedArray> EnsureSpaceInFixedArray(Isolate* isolate,
int capacity = array->length();
if (capacity < length) {
int new_capacity = length;
- new_capacity = new_capacity + Max(new_capacity / 2, 2);
+ new_capacity = new_capacity + std::max(new_capacity / 2, 2);
int grow_by = new_capacity - capacity;
array = isolate->factory()->CopyFixedArrayAndGrow(array, grow_by);
}
@@ -4781,30 +4801,43 @@ bool Script::ContainsAsmModule() {
}
namespace {
-bool GetPositionInfoSlow(const Script script, int position,
- Script::PositionInfo* info) {
- if (!script.source().IsString()) return false;
- if (position < 0) position = 0;
- String source_string = String::cast(script.source());
+template <typename Char>
+bool GetPositionInfoSlowImpl(const Vector<Char>& source, int position,
+ Script::PositionInfo* info) {
+ if (position < 0) {
+ position = 0;
+ }
int line = 0;
- int line_start = 0;
- int len = source_string.length();
- for (int pos = 0; pos <= len; ++pos) {
- if (pos == len || source_string.Get(pos) == '\n') {
- if (position <= pos) {
- info->line = line;
- info->column = position - line_start;
- info->line_start = line_start;
- info->line_end = pos;
- return true;
- }
- line++;
- line_start = pos + 1;
+ const auto begin = std::cbegin(source);
+ const auto end = std::cend(source);
+ for (auto line_begin = begin; line_begin < end;) {
+ const auto line_end = std::find(line_begin, end, '\n');
+ if (position <= (line_end - begin)) {
+ info->line = line;
+ info->column = static_cast<int>((begin + position) - line_begin);
+ info->line_start = static_cast<int>(line_begin - begin);
+ info->line_end = static_cast<int>(line_end - begin);
+ return true;
}
+ ++line;
+ line_begin = line_end + 1;
}
return false;
}
+bool GetPositionInfoSlow(const Script script, int position,
+ const DisallowHeapAllocation& no_gc,
+ Script::PositionInfo* info) {
+ if (!script.source().IsString()) {
+ return false;
+ }
+ auto source = String::cast(script.source());
+ const auto flat = source.GetFlatContent(no_gc);
+ return flat.IsOneByte()
+ ? GetPositionInfoSlowImpl(flat.ToOneByteVector(), position, info)
+ : GetPositionInfoSlowImpl(flat.ToUC16Vector(), position, info);
+}
+
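// [Editorial note, not part of the upstream patch.] A worked example of the
// rewritten slow path: for the flattened source "ab\ncd" and position 4
// (the 'd'), the loop sees line 0 = [0,2) and line 1 = [3,5), and fills in
// info->line = 1, info->column = 1, info->line_start = 3, info->line_end = 5.
// Negative positions are clamped to 0, and a position past the end of the
// source returns false.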
} // namespace
bool Script::GetPositionInfo(int position, PositionInfo* info,
@@ -4826,7 +4859,9 @@ bool Script::GetPositionInfo(int position, PositionInfo* info,
if (line_ends().IsUndefined()) {
// Slow mode: we do not have line_ends. We have to iterate through source.
- if (!GetPositionInfoSlow(*this, position, info)) return false;
+ if (!GetPositionInfoSlow(*this, position, no_allocation, info)) {
+ return false;
+ }
} else {
DCHECK(line_ends().IsFixedArray());
FixedArray ends = FixedArray::cast(line_ends());
@@ -5109,9 +5144,11 @@ bool JSArray::MayHaveReadOnlyLength(Map js_array_map) {
// dictionary properties. Since it's not configurable, it's guaranteed to be
// the first in the descriptor array.
InternalIndex first(0);
- DCHECK(js_array_map.instance_descriptors().GetKey(first) ==
+ DCHECK(js_array_map.instance_descriptors(kRelaxedLoad).GetKey(first) ==
js_array_map.GetReadOnlyRoots().length_string());
- return js_array_map.instance_descriptors().GetDetails(first).IsReadOnly();
+ return js_array_map.instance_descriptors(kRelaxedLoad)
+ .GetDetails(first)
+ .IsReadOnly();
}
bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
@@ -5144,7 +5181,7 @@ bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array, uint32_t index) {
template <typename Derived, typename Shape>
void Dictionary<Derived, Shape>::Print(std::ostream& os) {
DisallowHeapAllocation no_gc;
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
ReadOnlyRoots roots = this->GetReadOnlyRoots(isolate);
Derived dictionary = Derived::cast(*this);
for (InternalIndex i : dictionary.IterateEntries()) {
@@ -5198,65 +5235,6 @@ void Symbol::SymbolShortPrint(std::ostream& os) {
os << ">";
}
-// StringSharedKeys are used as keys in the eval cache.
-class StringSharedKey : public HashTableKey {
- public:
- // This tuple unambiguously identifies calls to eval() or
- // CreateDynamicFunction() (such as through the Function() constructor).
- // * source is the string passed into eval(). For dynamic functions, this is
- // the effective source for the function, some of which is implicitly
- // generated.
- // * shared is the shared function info for the function containing the call
- // to eval(). for dynamic functions, shared is the native context closure.
- // * When positive, position is the position in the source where eval is
- // called. When negative, position is the negation of the position in the
- // dynamic function's effective source where the ')' ends the parameters.
- StringSharedKey(Handle<String> source, Handle<SharedFunctionInfo> shared,
- LanguageMode language_mode, int position)
- : HashTableKey(CompilationCacheShape::StringSharedHash(
- *source, *shared, language_mode, position)),
- source_(source),
- shared_(shared),
- language_mode_(language_mode),
- position_(position) {}
-
- bool IsMatch(Object other) override {
- DisallowHeapAllocation no_allocation;
- if (!other.IsFixedArray()) {
- DCHECK(other.IsNumber());
- uint32_t other_hash = static_cast<uint32_t>(other.Number());
- return Hash() == other_hash;
- }
- FixedArray other_array = FixedArray::cast(other);
- SharedFunctionInfo shared = SharedFunctionInfo::cast(other_array.get(0));
- if (shared != *shared_) return false;
- int language_unchecked = Smi::ToInt(other_array.get(2));
- DCHECK(is_valid_language_mode(language_unchecked));
- LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
- if (language_mode != language_mode_) return false;
- int position = Smi::ToInt(other_array.get(3));
- if (position != position_) return false;
- String source = String::cast(other_array.get(1));
- return source.Equals(*source_);
- }
-
- Handle<Object> AsHandle(Isolate* isolate) {
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(4);
- array->set(0, *shared_);
- array->set(1, *source_);
- array->set(2, Smi::FromEnum(language_mode_));
- array->set(3, Smi::FromInt(position_));
- array->set_map(ReadOnlyRoots(isolate).fixed_cow_array_map());
- return array;
- }
-
- private:
- Handle<String> source_;
- Handle<SharedFunctionInfo> shared_;
- LanguageMode language_mode_;
- int position_;
-};
-
v8::Promise::PromiseState JSPromise::status() const {
int value = flags() & StatusBits::kMask;
DCHECK(value == 0 || value == 1 || value == 2);
@@ -5420,6 +5398,11 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
// 10. If then is an abrupt completion, then
Handle<Object> then_action;
if (!then.ToHandle(&then_action)) {
+ // The "then" lookup can cause termination.
+ if (!isolate->is_catchable_by_javascript(isolate->pending_exception())) {
+ return kNullMaybeHandle;
+ }
+
// a. Return RejectPromise(promise, then.[[Value]]).
Handle<Object> reason(isolate->pending_exception(), isolate);
isolate->clear_pending_exception();
@@ -5569,41 +5552,6 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
return isolate->factory()->undefined_value();
}
-// RegExpKey carries the source and flags of a regular expression as key.
-class RegExpKey : public HashTableKey {
- public:
- RegExpKey(Handle<String> string, JSRegExp::Flags flags)
- : HashTableKey(
- CompilationCacheShape::RegExpHash(*string, Smi::FromInt(flags))),
- string_(string),
- flags_(Smi::FromInt(flags)) {}
-
- // Rather than storing the key in the hash table, a pointer to the
- // stored value is stored where the key should be. IsMatch then
- // compares the search key to the found object, rather than comparing
- // a key to a key.
- bool IsMatch(Object obj) override {
- FixedArray val = FixedArray::cast(obj);
- return string_->Equals(String::cast(val.get(JSRegExp::kSourceIndex))) &&
- (flags_ == val.get(JSRegExp::kFlagsIndex));
- }
-
- Handle<String> string_;
- Smi flags_;
-};
-
-// CodeKey carries the SharedFunctionInfo key associated with a Code
-// object value.
-class CodeKey : public HashTableKey {
- public:
- explicit CodeKey(Handle<SharedFunctionInfo> key)
- : HashTableKey(key->Hash()), key_(key) {}
-
- bool IsMatch(Object string) override { return *key_ == string; }
-
- Handle<SharedFunctionInfo> key_;
-};
-
template <typename Derived, typename Shape>
void HashTable<Derived, Shape>::IteratePrefix(ObjectVisitor* v) {
BodyDescriptorBase::IteratePointers(*this, 0, kElementsStartOffset, v);
@@ -5650,8 +5598,7 @@ Handle<Derived> HashTable<Derived, Shape>::NewInternal(
}
template <typename Derived, typename Shape>
-void HashTable<Derived, Shape>::Rehash(const Isolate* isolate,
- Derived new_table) {
+void HashTable<Derived, Shape>::Rehash(IsolateRoot isolate, Derived new_table) {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = new_table.GetWriteBarrierMode(no_gc);
@@ -5715,7 +5662,7 @@ void HashTable<Derived, Shape>::Swap(InternalIndex entry1, InternalIndex entry2,
}
template <typename Derived, typename Shape>
-void HashTable<Derived, Shape>::Rehash(const Isolate* isolate) {
+void HashTable<Derived, Shape>::Rehash(IsolateRoot isolate) {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = GetWriteBarrierMode(no_gc);
ReadOnlyRoots roots = GetReadOnlyRoots(isolate);
@@ -5782,7 +5729,7 @@ Handle<Derived> HashTable<Derived, Shape>::EnsureCapacity(
isolate, new_nof,
should_pretenure ? AllocationType::kOld : AllocationType::kYoung);
- table->Rehash(GetIsolateForPtrCompr(isolate), *new_table);
+ table->Rehash(isolate, *new_table);
return new_table;
}
@@ -5848,8 +5795,9 @@ Handle<Derived> HashTable<Derived, Shape>::Shrink(Isolate* isolate,
}
template <typename Derived, typename Shape>
-InternalIndex HashTable<Derived, Shape>::FindInsertionEntry(
- const Isolate* isolate, ReadOnlyRoots roots, uint32_t hash) {
+InternalIndex HashTable<Derived, Shape>::FindInsertionEntry(IsolateRoot isolate,
+ ReadOnlyRoots roots,
+ uint32_t hash) {
uint32_t capacity = Capacity();
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
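FindInsertionEntry probes from the hashed slot until it hits a free one; since EnsureCapacity guarantees the table is never full, the loop always terminates. A self-contained sketch with a probe step in the same triangular spirit, assuming a power-of-two capacity and at least one free slot:

    #include <cstdint>
    #include <vector>

    // Returns the index of a free slot. Assumes occupied.size() is a
    // power of two and that at least one slot is free.
    uint32_t FindInsertionSlot(const std::vector<bool>& occupied,
                               uint32_t hash) {
      uint32_t mask = static_cast<uint32_t>(occupied.size()) - 1;
      uint32_t entry = hash & mask;
      for (uint32_t count = 1; occupied[entry]; ++count) {
        entry = (entry + count) & mask;  // triangular probe sequence
      }
      return entry;
    }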
@@ -5898,328 +5846,6 @@ Handle<ObjectHashSet> ObjectHashSet::Add(Isolate* isolate,
return set;
}
-namespace {
-
-const int kLiteralEntryLength = 2;
-const int kLiteralInitialLength = 2;
-const int kLiteralContextOffset = 0;
-const int kLiteralLiteralsOffset = 1;
-
-int SearchLiteralsMapEntry(CompilationCacheTable cache, int cache_entry,
- Context native_context) {
- DisallowHeapAllocation no_gc;
- DCHECK(native_context.IsNativeContext());
- Object obj = cache.get(cache_entry);
-
- // Check that there's no confusion between FixedArray and WeakFixedArray (the
- // object used to be a FixedArray here).
- DCHECK(!obj.IsFixedArray());
- if (obj.IsWeakFixedArray()) {
- WeakFixedArray literals_map = WeakFixedArray::cast(obj);
- int length = literals_map.length();
- for (int i = 0; i < length; i += kLiteralEntryLength) {
- DCHECK(literals_map.Get(i + kLiteralContextOffset)->IsWeakOrCleared());
- if (literals_map.Get(i + kLiteralContextOffset) ==
- HeapObjectReference::Weak(native_context)) {
- return i;
- }
- }
- }
- return -1;
-}
-
-void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
- Handle<Context> native_context,
- Handle<FeedbackCell> feedback_cell) {
- Isolate* isolate = native_context->GetIsolate();
- DCHECK(native_context->IsNativeContext());
- STATIC_ASSERT(kLiteralEntryLength == 2);
- Handle<WeakFixedArray> new_literals_map;
- int entry;
-
- Object obj = cache->get(cache_entry);
-
- // Check that there's no confusion between FixedArray and WeakFixedArray (the
- // object used to be a FixedArray here).
- DCHECK(!obj.IsFixedArray());
- if (!obj.IsWeakFixedArray() || WeakFixedArray::cast(obj).length() == 0) {
- new_literals_map = isolate->factory()->NewWeakFixedArray(
- kLiteralInitialLength, AllocationType::kOld);
- entry = 0;
- } else {
- Handle<WeakFixedArray> old_literals_map(WeakFixedArray::cast(obj), isolate);
- entry = SearchLiteralsMapEntry(*cache, cache_entry, *native_context);
- if (entry >= 0) {
-      // Just set the feedback cell of the entry.
- old_literals_map->Set(entry + kLiteralLiteralsOffset,
- HeapObjectReference::Weak(*feedback_cell));
- return;
- }
-
- // Can we reuse an entry?
- DCHECK_LT(entry, 0);
- int length = old_literals_map->length();
- for (int i = 0; i < length; i += kLiteralEntryLength) {
- if (old_literals_map->Get(i + kLiteralContextOffset)->IsCleared()) {
- new_literals_map = old_literals_map;
- entry = i;
- break;
- }
- }
-
- if (entry < 0) {
-      // Copy the old literals map and append one new entry.
- new_literals_map = isolate->factory()->CopyWeakFixedArrayAndGrow(
- old_literals_map, kLiteralEntryLength);
- entry = old_literals_map->length();
- }
- }
-
- new_literals_map->Set(entry + kLiteralContextOffset,
- HeapObjectReference::Weak(*native_context));
- new_literals_map->Set(entry + kLiteralLiteralsOffset,
- HeapObjectReference::Weak(*feedback_cell));
-
-#ifdef DEBUG
- for (int i = 0; i < new_literals_map->length(); i += kLiteralEntryLength) {
- MaybeObject object = new_literals_map->Get(i + kLiteralContextOffset);
- DCHECK(object->IsCleared() ||
- object->GetHeapObjectAssumeWeak().IsNativeContext());
- object = new_literals_map->Get(i + kLiteralLiteralsOffset);
- DCHECK(object->IsCleared() ||
- object->GetHeapObjectAssumeWeak().IsFeedbackCell());
- }
-#endif
-
- Object old_literals_map = cache->get(cache_entry);
- if (old_literals_map != *new_literals_map) {
- cache->set(cache_entry, *new_literals_map);
- }
-}
-
-FeedbackCell SearchLiteralsMap(CompilationCacheTable cache, int cache_entry,
- Context native_context) {
- FeedbackCell result;
- int entry = SearchLiteralsMapEntry(cache, cache_entry, native_context);
- if (entry >= 0) {
- WeakFixedArray literals_map = WeakFixedArray::cast(cache.get(cache_entry));
- DCHECK_LE(entry + kLiteralEntryLength, literals_map.length());
- MaybeObject object = literals_map.Get(entry + kLiteralLiteralsOffset);
-
- if (!object->IsCleared()) {
- result = FeedbackCell::cast(object->GetHeapObjectAssumeWeak());
- }
- }
- DCHECK(result.is_null() || result.IsFeedbackCell());
- return result;
-}
-
-} // namespace
-
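The helpers removed above maintain, per cache entry, a flat weak array of [native context, feedback cell] pairs (kLiteralEntryLength == 2), searched linearly, with cleared slots reused before the array grows. A plain-pointer sketch of that layout, where raw pointers stand in for the weak references:

    #include <vector>

    struct LiteralsMapSketch {
      // Even slots: native context (nullptr once the weak ref clears).
      // Odd slots: the matching feedback cell.
      std::vector<const void*> slots;

      static constexpr int kEntryLength = 2;

      int FindEntry(const void* native_context) const {
        for (size_t i = 0; i + 1 < slots.size(); i += kEntryLength) {
          if (slots[i] == native_context) return static_cast<int>(i);
        }
        return -1;
      }

      // A cleared slot can be overwritten instead of growing the array.
      int FindReusableEntry() const {
        for (size_t i = 0; i + 1 < slots.size(); i += kEntryLength) {
          if (slots[i] == nullptr) return static_cast<int>(i);
        }
        return -1;
      }
    };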
-MaybeHandle<SharedFunctionInfo> CompilationCacheTable::LookupScript(
- Handle<CompilationCacheTable> table, Handle<String> src,
- Handle<Context> native_context, LanguageMode language_mode) {
- // We use the empty function SFI as part of the key. Although the
- // empty_function is native context dependent, the SFI is de-duped on
- // snapshot builds by the StartupObjectCache, and so this does not prevent
- // reuse of scripts in the compilation cache across native contexts.
- Handle<SharedFunctionInfo> shared(native_context->empty_function().shared(),
- native_context->GetIsolate());
- Isolate* isolate = native_context->GetIsolate();
- src = String::Flatten(isolate, src);
- StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
- InternalIndex entry = table->FindEntry(isolate, &key);
- if (entry.is_not_found()) return MaybeHandle<SharedFunctionInfo>();
- int index = EntryToIndex(entry);
- if (!table->get(index).IsFixedArray()) {
- return MaybeHandle<SharedFunctionInfo>();
- }
- Object obj = table->get(index + 1);
- if (obj.IsSharedFunctionInfo()) {
- return handle(SharedFunctionInfo::cast(obj), native_context->GetIsolate());
- }
- return MaybeHandle<SharedFunctionInfo>();
-}
-
-InfoCellPair CompilationCacheTable::LookupEval(
- Handle<CompilationCacheTable> table, Handle<String> src,
- Handle<SharedFunctionInfo> outer_info, Handle<Context> native_context,
- LanguageMode language_mode, int position) {
- InfoCellPair empty_result;
- Isolate* isolate = native_context->GetIsolate();
- src = String::Flatten(isolate, src);
- StringSharedKey key(src, outer_info, language_mode, position);
- InternalIndex entry = table->FindEntry(isolate, &key);
- if (entry.is_not_found()) return empty_result;
- int index = EntryToIndex(entry);
- if (!table->get(index).IsFixedArray()) return empty_result;
- Object obj = table->get(index + 1);
- if (obj.IsSharedFunctionInfo()) {
- FeedbackCell feedback_cell =
- SearchLiteralsMap(*table, index + 2, *native_context);
- return InfoCellPair(isolate, SharedFunctionInfo::cast(obj), feedback_cell);
- }
- return empty_result;
-}
-
-Handle<Object> CompilationCacheTable::LookupRegExp(Handle<String> src,
- JSRegExp::Flags flags) {
- Isolate* isolate = GetIsolate();
- DisallowHeapAllocation no_allocation;
- RegExpKey key(src, flags);
- InternalIndex entry = FindEntry(isolate, &key);
- if (entry.is_not_found()) return isolate->factory()->undefined_value();
- return Handle<Object>(get(EntryToIndex(entry) + 1), isolate);
-}
-
-MaybeHandle<Code> CompilationCacheTable::LookupCode(
- Handle<SharedFunctionInfo> key) {
- Isolate* isolate = GetIsolate();
- DisallowHeapAllocation no_allocation;
- CodeKey k(key);
- InternalIndex entry = FindEntry(isolate, &k);
- if (entry.is_not_found()) return {};
- return Handle<Code>(Code::cast(get(EntryToIndex(entry) + 1)), isolate);
-}
-
-Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
- Handle<CompilationCacheTable> cache, Handle<String> src,
- Handle<Context> native_context, LanguageMode language_mode,
- Handle<SharedFunctionInfo> value) {
- Isolate* isolate = native_context->GetIsolate();
- // We use the empty function SFI as part of the key. Although the
- // empty_function is native context dependent, the SFI is de-duped on
- // snapshot builds by the StartupObjectCache, and so this does not prevent
- // reuse of scripts in the compilation cache across native contexts.
- Handle<SharedFunctionInfo> shared(native_context->empty_function().shared(),
- isolate);
- src = String::Flatten(isolate, src);
- StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
- Handle<Object> k = key.AsHandle(isolate);
- cache = EnsureCapacity(isolate, cache);
- InternalIndex entry = cache->FindInsertionEntry(isolate, key.Hash());
- cache->set(EntryToIndex(entry), *k);
- cache->set(EntryToIndex(entry) + 1, *value);
- cache->ElementAdded();
- return cache;
-}
-
-Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
- Handle<CompilationCacheTable> cache, Handle<String> src,
- Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
- Handle<Context> native_context, Handle<FeedbackCell> feedback_cell,
- int position) {
- Isolate* isolate = native_context->GetIsolate();
- src = String::Flatten(isolate, src);
- StringSharedKey key(src, outer_info, value->language_mode(), position);
- {
- Handle<Object> k = key.AsHandle(isolate);
- InternalIndex entry = cache->FindEntry(isolate, &key);
- if (entry.is_found()) {
- cache->set(EntryToIndex(entry), *k);
- cache->set(EntryToIndex(entry) + 1, *value);
- // AddToFeedbackCellsMap may allocate a new sub-array to live in the
- // entry, but it won't change the cache array. Therefore EntryToIndex
-      // and entry remain correct.
- AddToFeedbackCellsMap(cache, EntryToIndex(entry) + 2, native_context,
- feedback_cell);
- // Add hash again even on cache hit to avoid unnecessary cache delay in
- // case of hash collisions.
- }
- }
-
- cache = EnsureCapacity(isolate, cache);
- InternalIndex entry = cache->FindInsertionEntry(isolate, key.Hash());
- Handle<Object> k =
- isolate->factory()->NewNumber(static_cast<double>(key.Hash()));
- cache->set(EntryToIndex(entry), *k);
- cache->set(EntryToIndex(entry) + 1, Smi::FromInt(kHashGenerations));
- cache->ElementAdded();
- return cache;
-}
-
-Handle<CompilationCacheTable> CompilationCacheTable::PutRegExp(
- Isolate* isolate, Handle<CompilationCacheTable> cache, Handle<String> src,
- JSRegExp::Flags flags, Handle<FixedArray> value) {
- RegExpKey key(src, flags);
- cache = EnsureCapacity(isolate, cache);
- InternalIndex entry = cache->FindInsertionEntry(isolate, key.Hash());
- // We store the value in the key slot, and compare the search key
- // to the stored value with a custom IsMatch function during lookups.
- cache->set(EntryToIndex(entry), *value);
- cache->set(EntryToIndex(entry) + 1, *value);
- cache->ElementAdded();
- return cache;
-}
-
-Handle<CompilationCacheTable> CompilationCacheTable::PutCode(
- Isolate* isolate, Handle<CompilationCacheTable> cache,
- Handle<SharedFunctionInfo> key, Handle<Code> value) {
- CodeKey k(key);
-
- {
- InternalIndex entry = cache->FindEntry(isolate, &k);
- if (entry.is_found()) {
- // Update.
- cache->set(EntryToIndex(entry), *key);
- cache->set(EntryToIndex(entry) + 1, *value);
- return cache;
- }
- }
-
- // Insert.
- cache = EnsureCapacity(isolate, cache);
- InternalIndex entry = cache->FindInsertionEntry(isolate, k.Hash());
- cache->set(EntryToIndex(entry), *key);
- cache->set(EntryToIndex(entry) + 1, *value);
- cache->ElementAdded();
- return cache;
-}
-
-void CompilationCacheTable::Age() {
- DisallowHeapAllocation no_allocation;
- Object the_hole_value = GetReadOnlyRoots().the_hole_value();
- for (InternalIndex entry : IterateEntries()) {
- int entry_index = EntryToIndex(entry);
- int value_index = entry_index + 1;
-
- if (get(entry_index).IsNumber()) {
- Smi count = Smi::cast(get(value_index));
- count = Smi::FromInt(count.value() - 1);
- if (count.value() == 0) {
- NoWriteBarrierSet(*this, entry_index, the_hole_value);
- NoWriteBarrierSet(*this, value_index, the_hole_value);
- ElementRemoved();
- } else {
- NoWriteBarrierSet(*this, value_index, count);
- }
- } else if (get(entry_index).IsFixedArray()) {
- SharedFunctionInfo info = SharedFunctionInfo::cast(get(value_index));
- if (info.IsInterpreted() && info.GetBytecodeArray().IsOld()) {
- for (int i = 0; i < kEntrySize; i++) {
- NoWriteBarrierSet(*this, entry_index + i, the_hole_value);
- }
- ElementRemoved();
- }
- }
- }
-}
-
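Age() above implements two eviction policies in one pass: number-keyed (eval hash) entries carry a countdown Smi and are dropped when it reaches zero, while script entries are dropped once their bytecode has gone old. The countdown half, as a standalone sketch with an illustrative entry type:

    #include <vector>

    struct AgedEntry {
      int generations_left;  // starts at kHashGenerations in the diff
      bool live = true;
    };

    void AgePass(std::vector<AgedEntry>& entries) {
      for (AgedEntry& e : entries) {
        if (!e.live) continue;
        if (--e.generations_left == 0) e.live = false;  // evict
      }
    }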
-void CompilationCacheTable::Remove(Object value) {
- DisallowHeapAllocation no_allocation;
- Object the_hole_value = GetReadOnlyRoots().the_hole_value();
- for (InternalIndex entry : IterateEntries()) {
- int entry_index = EntryToIndex(entry);
- int value_index = entry_index + 1;
- if (get(value_index) == value) {
- for (int i = 0; i < kEntrySize; i++) {
- NoWriteBarrierSet(*this, entry_index + i, the_hole_value);
- }
- ElementRemoved();
- }
- }
-}
-
template <typename Derived, typename Shape>
template <typename LocalIsolate>
Handle<Derived> BaseNameDictionary<Derived, Shape>::New(
@@ -6340,8 +5966,7 @@ Handle<Derived> Dictionary<Derived, Shape>::Add(LocalIsolate* isolate,
// Compute the key object.
Handle<Object> k = Shape::AsHandle(isolate, key);
- InternalIndex entry = dictionary->FindInsertionEntry(
- GetIsolateForPtrCompr(isolate), roots, hash);
+ InternalIndex entry = dictionary->FindInsertionEntry(isolate, roots, hash);
dictionary->SetEntry(entry, *k, *value, details);
DCHECK(dictionary->KeyAt(isolate, entry).IsNumber() ||
Shape::Unwrap(dictionary->KeyAt(isolate, entry)).IsUniqueName());
@@ -6422,71 +6047,6 @@ int Dictionary<Derived, Shape>::NumberOfEnumerableProperties() {
return result;
}
-template <typename Dictionary>
-struct EnumIndexComparator {
- explicit EnumIndexComparator(Dictionary dict) : dict(dict) {}
- bool operator()(Tagged_t a, Tagged_t b) {
- PropertyDetails da(
- dict.DetailsAt(InternalIndex(Smi(static_cast<Address>(a)).value())));
- PropertyDetails db(
- dict.DetailsAt(InternalIndex(Smi(static_cast<Address>(b)).value())));
- return da.dictionary_index() < db.dictionary_index();
- }
- Dictionary dict;
-};
-
-template <typename Derived, typename Shape>
-void BaseNameDictionary<Derived, Shape>::CopyEnumKeysTo(
- Isolate* isolate, Handle<Derived> dictionary, Handle<FixedArray> storage,
- KeyCollectionMode mode, KeyAccumulator* accumulator) {
- DCHECK_IMPLIES(mode != KeyCollectionMode::kOwnOnly, accumulator != nullptr);
- int length = storage->length();
- int properties = 0;
- ReadOnlyRoots roots(isolate);
- {
- AllowHeapAllocation allow_gc;
- for (InternalIndex i : dictionary->IterateEntries()) {
- Object key;
- if (!dictionary->ToKey(roots, i, &key)) continue;
- bool is_shadowing_key = false;
- if (key.IsSymbol()) continue;
- PropertyDetails details = dictionary->DetailsAt(i);
- if (details.IsDontEnum()) {
- if (mode == KeyCollectionMode::kIncludePrototypes) {
- is_shadowing_key = true;
- } else {
- continue;
- }
- }
- if (is_shadowing_key) {
- // This might allocate, but {key} is not used afterwards.
- accumulator->AddShadowingKey(key, &allow_gc);
- continue;
- } else {
- storage->set(properties, Smi::FromInt(i.as_int()));
- }
- properties++;
- if (mode == KeyCollectionMode::kOwnOnly && properties == length) break;
- }
- }
-
- CHECK_EQ(length, properties);
- {
- DisallowHeapAllocation no_gc;
- Derived raw_dictionary = *dictionary;
- FixedArray raw_storage = *storage;
- EnumIndexComparator<Derived> cmp(raw_dictionary);
- // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
- // store operations that are safe for concurrent marking.
- AtomicSlot start(storage->GetFirstElementAddress());
- std::sort(start, start + length, cmp);
- for (int i = 0; i < length; i++) {
- InternalIndex index(Smi::ToInt(raw_storage.get(i)));
- raw_storage.set(i, raw_dictionary.NameAt(index));
- }
- }
-}
-
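The removed CopyEnumKeysTo works in two phases: collect entry indices, sort them by their enumeration index (V8 sorts through an AtomicSlot wrapper so std::sort's loads and stores remain safe under concurrent marking), then rewrite each slot with the looked-up name. The same scheme over a plain vector, dropping the atomicity concern:

    #include <algorithm>
    #include <string>
    #include <vector>

    struct Prop { std::string name; int enum_index; };

    std::vector<std::string> EnumKeysSketch(const std::vector<Prop>& dict) {
      // Phase 1: collect indices and sort by enumeration index.
      std::vector<size_t> order(dict.size());
      for (size_t i = 0; i < order.size(); ++i) order[i] = i;
      std::sort(order.begin(), order.end(), [&](size_t a, size_t b) {
        return dict[a].enum_index < dict[b].enum_index;
      });
      // Phase 2: rewrite indices as names.
      std::vector<std::string> names;
      names.reserve(order.size());
      for (size_t i : order) names.push_back(dict[i].name);
      return names;
    }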
template <typename Derived, typename Shape>
Handle<FixedArray> BaseNameDictionary<Derived, Shape>::IterationIndices(
Isolate* isolate, Handle<Derived> dictionary) {
@@ -6520,71 +6080,6 @@ Handle<FixedArray> BaseNameDictionary<Derived, Shape>::IterationIndices(
return FixedArray::ShrinkOrEmpty(isolate, array, array_size);
}
-template <typename Derived, typename Shape>
-ExceptionStatus BaseNameDictionary<Derived, Shape>::CollectKeysTo(
- Handle<Derived> dictionary, KeyAccumulator* keys) {
- Isolate* isolate = keys->isolate();
- ReadOnlyRoots roots(isolate);
- // TODO(jkummerow): Consider using a std::unique_ptr<InternalIndex[]> instead.
- Handle<FixedArray> array =
- isolate->factory()->NewFixedArray(dictionary->NumberOfElements());
- int array_size = 0;
- PropertyFilter filter = keys->filter();
- // Handle enumerable strings in CopyEnumKeysTo.
- DCHECK_NE(keys->filter(), ENUMERABLE_STRINGS);
- {
- DisallowHeapAllocation no_gc;
- for (InternalIndex i : dictionary->IterateEntries()) {
- Object key;
- Derived raw_dictionary = *dictionary;
- if (!raw_dictionary.ToKey(roots, i, &key)) continue;
- if (key.FilterKey(filter)) continue;
- PropertyDetails details = raw_dictionary.DetailsAt(i);
- if ((details.attributes() & filter) != 0) {
- AllowHeapAllocation gc;
- // This might allocate, but {key} is not used afterwards.
- keys->AddShadowingKey(key, &gc);
- continue;
- }
- if (filter & ONLY_ALL_CAN_READ) {
- if (details.kind() != kAccessor) continue;
- Object accessors = raw_dictionary.ValueAt(i);
- if (!accessors.IsAccessorInfo()) continue;
- if (!AccessorInfo::cast(accessors).all_can_read()) continue;
- }
- array->set(array_size++, Smi::FromInt(i.as_int()));
- }
-
- EnumIndexComparator<Derived> cmp(*dictionary);
- // Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
- // store operations that are safe for concurrent marking.
- AtomicSlot start(array->GetFirstElementAddress());
- std::sort(start, start + array_size, cmp);
- }
-
- bool has_seen_symbol = false;
- for (int i = 0; i < array_size; i++) {
- InternalIndex index(Smi::ToInt(array->get(i)));
- Object key = dictionary->NameAt(index);
- if (key.IsSymbol()) {
- has_seen_symbol = true;
- continue;
- }
- ExceptionStatus status = keys->AddKey(key, DO_NOT_CONVERT);
- if (!status) return status;
- }
- if (has_seen_symbol) {
- for (int i = 0; i < array_size; i++) {
- InternalIndex index(Smi::ToInt(array->get(i)));
- Object key = dictionary->NameAt(index);
- if (!key.IsSymbol()) continue;
- ExceptionStatus status = keys->AddKey(key, DO_NOT_CONVERT);
- if (!status) return status;
- }
- }
- return ExceptionStatus::kSuccess;
-}
-
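CollectKeysTo finishes with two passes over the sorted indices: string-named keys first, then symbol keys, matching the required property-enumeration order. Condensed into one function over an illustrative key type:

    #include <string>
    #include <vector>

    struct Key { std::string name; bool is_symbol; };

    // Emit all non-symbol keys first, then (only if any exist) the symbols.
    std::vector<Key> OrderKeysSketch(const std::vector<Key>& sorted) {
      std::vector<Key> out;
      bool has_seen_symbol = false;
      for (const Key& k : sorted) {
        if (k.is_symbol) { has_seen_symbol = true; continue; }
        out.push_back(k);
      }
      if (has_seen_symbol) {
        for (const Key& k : sorted) {
          if (k.is_symbol) out.push_back(k);
        }
      }
      return out;
    }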
// Backwards lookup (slow).
template <typename Derived, typename Shape>
Object Dictionary<Derived, Shape>::SlowReverseLookup(Object value) {
@@ -6609,7 +6104,7 @@ void ObjectHashTableBase<Derived, Shape>::FillEntriesWithHoles(
}
template <typename Derived, typename Shape>
-Object ObjectHashTableBase<Derived, Shape>::Lookup(const Isolate* isolate,
+Object ObjectHashTableBase<Derived, Shape>::Lookup(IsolateRoot isolate,
Handle<Object> key,
int32_t hash) {
DisallowHeapAllocation no_gc;
@@ -6625,7 +6120,7 @@ template <typename Derived, typename Shape>
Object ObjectHashTableBase<Derived, Shape>::Lookup(Handle<Object> key) {
DisallowHeapAllocation no_gc;
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
ReadOnlyRoots roots = this->GetReadOnlyRoots(isolate);
DCHECK(this->IsKey(roots, *key));
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index 94bcb9a479..81117c24db 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -281,7 +281,7 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
#define IS_TYPE_FUNCTION_DECL(Type) \
V8_INLINE bool Is##Type() const; \
- V8_INLINE bool Is##Type(const Isolate* isolate) const;
+ V8_INLINE bool Is##Type(IsolateRoot isolate) const;
OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
IS_TYPE_FUNCTION_DECL(HashTableBase)
@@ -309,7 +309,7 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
#define DECL_STRUCT_PREDICATE(NAME, Name, name) \
V8_INLINE bool Is##Name() const; \
- V8_INLINE bool Is##Name(const Isolate* isolate) const;
+ V8_INLINE bool Is##Name(IsolateRoot isolate) const;
STRUCT_LIST(DECL_STRUCT_PREDICATE)
#undef DECL_STRUCT_PREDICATE
@@ -324,9 +324,9 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
V8_EXPORT_PRIVATE bool ToInt32(int32_t* value);
inline bool ToUint32(uint32_t* value) const;
- inline Representation OptimalRepresentation(const Isolate* isolate) const;
+ inline Representation OptimalRepresentation(IsolateRoot isolate) const;
- inline ElementsKind OptimalElementsKind(const Isolate* isolate) const;
+ inline ElementsKind OptimalElementsKind(IsolateRoot isolate) const;
inline bool FitsRepresentation(Representation representation);
@@ -586,6 +586,9 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
// and length.
bool IterationHasObservableEffects();
+ // TC39 "Dynamic Code Brand Checks"
+ bool IsCodeLike(Isolate* isolate) const;
+
EXPORT_DECL_VERIFIER(Object)
#ifdef VERIFY_HEAP
@@ -666,6 +669,17 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
}
}
+ //
+ // ExternalPointer_t field accessors.
+ //
+ inline void InitExternalPointerField(size_t offset, Isolate* isolate);
+ inline void InitExternalPointerField(size_t offset, Isolate* isolate,
+ Address value, ExternalPointerTag tag);
+ inline Address ReadExternalPointerField(size_t offset, IsolateRoot isolate,
+ ExternalPointerTag tag) const;
+ inline void WriteExternalPointerField(size_t offset, Isolate* isolate,
+ Address value, ExternalPointerTag tag);
+
protected:
inline Address field_address(size_t offset) const {
return ptr() + offset - kHeapObjectTag;
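The new ExternalPointer_t field accessors route off-heap pointers through tag checks, so a field read with the wrong ExternalPointerTag cannot be silently reinterpreted as a valid address. One illustrative model of that idea, using XOR tagging in a side table; this is an assumption for exposition, not V8's actual encoding:

    #include <cstdint>
    #include <vector>

    using Tag = uint64_t;

    struct ExternalPointerTableSketch {
      std::vector<uint64_t> slots;

      size_t Store(uint64_t address, Tag tag) {
        slots.push_back(address ^ tag);  // fold the tag into the word
        return slots.size() - 1;
      }

      uint64_t Load(size_t index, Tag tag) const {
        return slots[index] ^ tag;  // wrong tag => unusable garbage
      }
    };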
diff --git a/deps/v8/src/objects/oddball-inl.h b/deps/v8/src/objects/oddball-inl.h
index 4b274097b8..4a022831be 100644
--- a/deps/v8/src/objects/oddball-inl.h
+++ b/deps/v8/src/objects/oddball-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/oddball-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(Oddball)
void Oddball::set_to_number_raw_as_bits(uint64_t bits) {
diff --git a/deps/v8/src/objects/oddball.h b/deps/v8/src/objects/oddball.h
index 5f0c7ce001..30f6fa70f8 100644
--- a/deps/v8/src/objects/oddball.h
+++ b/deps/v8/src/objects/oddball.h
@@ -13,6 +13,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/oddball-tq.inc"
+
// The Oddball describes objects null, undefined, true, and false.
class Oddball : public TorqueGeneratedOddball<Oddball, PrimitiveHeapObject> {
public:
@@ -49,10 +51,7 @@ class Oddball : public TorqueGeneratedOddball<Oddball, PrimitiveHeapObject> {
static const byte kSelfReferenceMarker = 10;
static const byte kBasicBlockCountersMarker = 11;
- static_assert(kStartOfWeakFieldsOffset == kEndOfWeakFieldsOffset,
- "Ensure BodyDescriptor does not need to handle weak fields.");
- using BodyDescriptor = FixedBodyDescriptor<kStartOfStrongFieldsOffset,
- kEndOfStrongFieldsOffset, kSize>;
+ class BodyDescriptor;
STATIC_ASSERT(kKindOffset == Internals::kOddballKindOffset);
STATIC_ASSERT(kNull == Internals::kNullOddballKind);
diff --git a/deps/v8/src/objects/oddball.tq b/deps/v8/src/objects/oddball.tq
index 44a3d2aa51..d111779a31 100644
--- a/deps/v8/src/objects/oddball.tq
+++ b/deps/v8/src/objects/oddball.tq
@@ -2,10 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generateCppClass
+@export
+@customCppClass
+@customMap // Oddballs have one of multiple maps, depending on the kind.
@apiExposedInstanceTypeValue(0x43)
@highestInstanceTypeWithinParentClassRange
-extern class Oddball extends PrimitiveHeapObject {
+class Oddball extends PrimitiveHeapObject {
to_number_raw: float64;
to_string: String;
to_number: Number;
diff --git a/deps/v8/src/objects/ordered-hash-table-inl.h b/deps/v8/src/objects/ordered-hash-table-inl.h
index 6edd5c3cda..959d7a7801 100644
--- a/deps/v8/src/objects/ordered-hash-table-inl.h
+++ b/deps/v8/src/objects/ordered-hash-table-inl.h
@@ -31,6 +31,12 @@ template <class Derived, int entrysize>
OrderedHashTable<Derived, entrysize>::OrderedHashTable(Address ptr)
: FixedArray(ptr) {}
+template <class Derived, int entrysize>
+bool OrderedHashTable<Derived, entrysize>::IsKey(ReadOnlyRoots roots,
+ Object k) {
+ return k != roots.the_hole_value();
+}
+
OrderedHashSet::OrderedHashSet(Address ptr)
: OrderedHashTable<OrderedHashSet, 1>(ptr) {
SLOW_DCHECK(IsOrderedHashSet());
@@ -51,9 +57,9 @@ SmallOrderedHashTable<Derived>::SmallOrderedHashTable(Address ptr)
: HeapObject(ptr) {}
template <class Derived>
-Object SmallOrderedHashTable<Derived>::KeyAt(int entry) const {
- DCHECK_LT(entry, Capacity());
- Offset entry_offset = GetDataEntryOffset(entry, Derived::kKeyIndex);
+Object SmallOrderedHashTable<Derived>::KeyAt(InternalIndex entry) const {
+ DCHECK_LT(entry.as_int(), Capacity());
+ Offset entry_offset = GetDataEntryOffset(entry.as_int(), Derived::kKeyIndex);
return TaggedField<Object>::load(*this, entry_offset);
}
@@ -97,63 +103,65 @@ Handle<Map> SmallOrderedHashSet::GetMap(ReadOnlyRoots roots) {
return roots.small_ordered_hash_set_map_handle();
}
-inline Object OrderedHashMap::ValueAt(int entry) {
- DCHECK_NE(entry, kNotFound);
- DCHECK_LT(entry, UsedCapacity());
+inline Object OrderedHashMap::ValueAt(InternalIndex entry) {
+ DCHECK_LT(entry.as_int(), UsedCapacity());
return get(EntryToIndex(entry) + kValueOffset);
}
-inline Object OrderedNameDictionary::ValueAt(int entry) {
- DCHECK_NE(entry, kNotFound);
- DCHECK_LT(entry, UsedCapacity());
+inline Object OrderedNameDictionary::ValueAt(InternalIndex entry) {
+ DCHECK_LT(entry.as_int(), UsedCapacity());
return get(EntryToIndex(entry) + kValueOffset);
}
+Name OrderedNameDictionary::NameAt(InternalIndex entry) {
+ return Name::cast(KeyAt(entry));
+}
+
// Set the value for entry.
-inline void OrderedNameDictionary::ValueAtPut(int entry, Object value) {
- DCHECK_NE(entry, kNotFound);
- DCHECK_LT(entry, UsedCapacity());
+inline void OrderedNameDictionary::ValueAtPut(InternalIndex entry,
+ Object value) {
+ DCHECK_LT(entry.as_int(), UsedCapacity());
this->set(EntryToIndex(entry) + kValueOffset, value);
}
// Returns the property details for the property at entry.
-inline PropertyDetails OrderedNameDictionary::DetailsAt(int entry) {
- DCHECK_NE(entry, kNotFound);
- DCHECK_LT(entry, this->UsedCapacity());
+inline PropertyDetails OrderedNameDictionary::DetailsAt(InternalIndex entry) {
+ DCHECK_LT(entry.as_int(), this->UsedCapacity());
// TODO(gsathya): Optimize the cast away.
return PropertyDetails(
Smi::cast(get(EntryToIndex(entry) + kPropertyDetailsOffset)));
}
-inline void OrderedNameDictionary::DetailsAtPut(int entry,
+inline void OrderedNameDictionary::DetailsAtPut(InternalIndex entry,
PropertyDetails value) {
- DCHECK_NE(entry, kNotFound);
- DCHECK_LT(entry, this->UsedCapacity());
+ DCHECK_LT(entry.as_int(), this->UsedCapacity());
// TODO(gsathya): Optimize the cast away.
this->set(EntryToIndex(entry) + kPropertyDetailsOffset, value.AsSmi());
}
-inline Object SmallOrderedNameDictionary::ValueAt(int entry) {
- return this->GetDataEntry(entry, kValueIndex);
+inline Object SmallOrderedNameDictionary::ValueAt(InternalIndex entry) {
+ return this->GetDataEntry(entry.as_int(), kValueIndex);
}
// Set the value for entry.
-inline void SmallOrderedNameDictionary::ValueAtPut(int entry, Object value) {
- this->SetDataEntry(entry, kValueIndex, value);
+inline void SmallOrderedNameDictionary::ValueAtPut(InternalIndex entry,
+ Object value) {
+ this->SetDataEntry(entry.as_int(), kValueIndex, value);
}
// Returns the property details for the property at entry.
-inline PropertyDetails SmallOrderedNameDictionary::DetailsAt(int entry) {
+inline PropertyDetails SmallOrderedNameDictionary::DetailsAt(
+ InternalIndex entry) {
// TODO(gsathya): Optimize the cast away. And store this in the data table.
return PropertyDetails(
- Smi::cast(this->GetDataEntry(entry, kPropertyDetailsIndex)));
+ Smi::cast(this->GetDataEntry(entry.as_int(), kPropertyDetailsIndex)));
}
// Set the details for entry.
-inline void SmallOrderedNameDictionary::DetailsAtPut(int entry,
+inline void SmallOrderedNameDictionary::DetailsAtPut(InternalIndex entry,
PropertyDetails value) {
// TODO(gsathya): Optimize the cast away. And store this in the data table.
- this->SetDataEntry(entry, kPropertyDetailsIndex, value.AsSmi());
+ this->SetDataEntry(entry.as_int(), kPropertyDetailsIndex, value.AsSmi());
}
inline bool OrderedHashSet::Is(Handle<HeapObject> table) {
@@ -193,7 +201,9 @@ template <class Derived, class TableType>
Object OrderedHashTableIterator<Derived, TableType>::CurrentKey() {
TableType table = TableType::cast(this->table());
int index = Smi::ToInt(this->index());
- Object key = table.KeyAt(index);
+ DCHECK_LE(0, index);
+ InternalIndex entry(index);
+ Object key = table.KeyAt(entry);
DCHECK(!key.IsTheHole());
return key;
}
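Much of this diff replaces raw int entry indices, with kNotFound == -1, by the InternalIndex wrapper seen here, which makes the found/not-found distinction explicit at the type level. A minimal model of the wrapper's contract; the names mirror the diff, while the sentinel choice is an assumption for the sketch:

    #include <cstddef>
    #include <cstdint>

    class InternalIndexSketch {
     public:
      static InternalIndexSketch NotFound() {
        return InternalIndexSketch(kNotFoundSentinel);
      }
      explicit InternalIndexSketch(size_t raw) : raw_(raw) {}

      bool is_found() const { return raw_ != kNotFoundSentinel; }
      bool is_not_found() const { return !is_found(); }
      int as_int() const { return static_cast<int>(raw_); }

     private:
      static constexpr size_t kNotFoundSentinel = SIZE_MAX;
      size_t raw_;
    };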
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index d3250bd92d..15673daa62 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -6,6 +6,7 @@
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
+#include "src/objects/internal-index.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
@@ -22,7 +23,8 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
// from number of buckets. If we decide to change kLoadFactor
// to something other than 2, capacity should be stored as another
// field of this object.
- capacity = base::bits::RoundUpToPowerOfTwo32(Max(kMinCapacity, capacity));
+ capacity =
+ base::bits::RoundUpToPowerOfTwo32(std::max({kInitialCapacity, capacity}));
if (capacity > MaxCapacity()) {
return MaybeHandle<Derived>();
}
@@ -42,6 +44,24 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
}
template <class Derived, int entrysize>
+MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::AllocateEmpty(
+ Isolate* isolate, AllocationType allocation, RootIndex root_index) {
+ // This is only supposed to be used to create the canonical empty versions
+ // of each ordered structure, and should not be used afterwards.
+ // Requires that the map has already been set up in the roots table.
+ DCHECK(ReadOnlyRoots(isolate).at(root_index) == kNullAddress);
+
+ Handle<FixedArray> backing_store = isolate->factory()->NewFixedArrayWithMap(
+ Derived::GetMap(ReadOnlyRoots(isolate)), HashTableStartIndex(),
+ allocation);
+ Handle<Derived> table = Handle<Derived>::cast(backing_store);
+ table->SetNumberOfBuckets(0);
+ table->SetNumberOfElements(0);
+ table->SetNumberOfDeletedElements(0);
+ return table;
+}
+
+template <class Derived, int entrysize>
MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::EnsureGrowable(
Isolate* isolate, Handle<Derived> table) {
DCHECK(!table->IsObsolete());
@@ -50,11 +70,21 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::EnsureGrowable(
int nod = table->NumberOfDeletedElements();
int capacity = table->Capacity();
if ((nof + nod) < capacity) return table;
- // Don't need to grow if we can simply clear out deleted entries instead.
- // Note that we can't compact in place, though, so we always allocate
- // a new table.
- return Derived::Rehash(isolate, table,
- (nod < (capacity >> 1)) ? capacity << 1 : capacity);
+
+ int new_capacity;
+ if (capacity == 0) {
+    // Step from empty to the minimum proper size.
+ new_capacity = kInitialCapacity;
+ } else if (nod >= (capacity >> 1)) {
+ // Don't need to grow if we can simply clear out deleted entries instead.
+ // Note that we can't compact in place, though, so we always allocate
+ // a new table.
+ new_capacity = capacity;
+ } else {
+ new_capacity = capacity << 1;
+ }
+
+ return Derived::Rehash(isolate, table, new_capacity);
}
template <class Derived, int entrysize>
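The rewritten EnsureGrowable spells out three cases: an empty table steps up to kInitialCapacity, a table at least half full of deleted entries is rehashed at the same capacity (compacting into a fresh table, since compaction in place is impossible), and otherwise capacity doubles. The policy as one function, with kInitialCapacity == 4 assumed for illustration:

    int NewCapacitySketch(int capacity, int deleted) {
      constexpr int kInitialCapacity = 4;  // assumed value
      if (capacity == 0) return kInitialCapacity;    // empty -> minimum
      if (deleted >= capacity / 2) return capacity;  // compact, don't grow
      return capacity * 2;                           // grow
    }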
@@ -78,10 +108,13 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Clear(
: AllocationType::kOld;
Handle<Derived> new_table =
- Allocate(isolate, kMinCapacity, allocation_type).ToHandleChecked();
+ Allocate(isolate, kInitialCapacity, allocation_type).ToHandleChecked();
- table->SetNextTable(*new_table);
- table->SetNumberOfDeletedElements(kClearedTableSentinel);
+ if (table->NumberOfBuckets() > 0) {
+ // Don't try to modify the empty canonical table which lives in RO space.
+ table->SetNextTable(*new_table);
+ table->SetNumberOfDeletedElements(kClearedTableSentinel);
+ }
return new_table;
}
@@ -92,48 +125,56 @@ bool OrderedHashTable<Derived, entrysize>::HasKey(Isolate* isolate,
DCHECK_IMPLIES(entrysize == 1, table.IsOrderedHashSet());
DCHECK_IMPLIES(entrysize == 2, table.IsOrderedHashMap());
DisallowHeapAllocation no_gc;
- int entry = table.FindEntry(isolate, key);
- return entry != kNotFound;
+ InternalIndex entry = table.FindEntry(isolate, key);
+ return entry.is_found();
}
template <class Derived, int entrysize>
-int OrderedHashTable<Derived, entrysize>::FindEntry(Isolate* isolate,
- Object key) {
- int entry;
+InternalIndex OrderedHashTable<Derived, entrysize>::FindEntry(Isolate* isolate,
+ Object key) {
+ if (NumberOfElements() == 0) {
+ // This is not just an optimization but also ensures that we do the right
+    // thing if Capacity() == 0.
+ return InternalIndex::NotFound();
+ }
+
+ int raw_entry;
  // This special-cases Smi so that we avoid the HandleScope
// creation below.
if (key.IsSmi()) {
uint32_t hash = ComputeUnseededHash(Smi::ToInt(key));
- entry = HashToEntry(hash & Smi::kMaxValue);
+ raw_entry = HashToEntryRaw(hash & Smi::kMaxValue);
} else {
HandleScope scope(isolate);
Object hash = key.GetHash();
// If the object does not have an identity hash, it was never used as a key
- if (hash.IsUndefined(isolate)) return kNotFound;
- entry = HashToEntry(Smi::ToInt(hash));
+ if (hash.IsUndefined(isolate)) return InternalIndex::NotFound();
+ raw_entry = HashToEntryRaw(Smi::ToInt(hash));
}
// Walk the chain in the bucket to find the key.
- while (entry != kNotFound) {
- Object candidate_key = KeyAt(entry);
- if (candidate_key.SameValueZero(key)) break;
- entry = NextChainEntry(entry);
+ while (raw_entry != kNotFound) {
+ Object candidate_key = KeyAt(InternalIndex(raw_entry));
+ if (candidate_key.SameValueZero(key)) return InternalIndex(raw_entry);
+ raw_entry = NextChainEntryRaw(raw_entry);
}
- return entry;
+ return InternalIndex::NotFound();
}
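FindEntry shows the ordered-hash-table layout at work: the hash selects a bucket, the bucket holds the index of the most recently inserted entry, and each entry's chain field points at the head it displaced. A toy table with the same shape, using parallel vectors instead of one flat FixedArray and assuming a power-of-two bucket count:

    #include <cstdint>
    #include <vector>

    constexpr int kNotFoundEntry = -1;

    struct ToyOrderedTable {
      std::vector<int> buckets;  // head entry per bucket, or kNotFoundEntry
      std::vector<int> keys;     // key per entry, in insertion order
      std::vector<int> chains;   // previous head at insertion time

      int FindEntry(int key, uint32_t hash) const {
        if (keys.empty()) return kNotFoundEntry;  // covers Capacity() == 0
        int entry = buckets[hash & (buckets.size() - 1)];
        while (entry != kNotFoundEntry) {
          if (keys[entry] == key) return entry;
          entry = chains[entry];
        }
        return kNotFoundEntry;
      }
    };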
MaybeHandle<OrderedHashSet> OrderedHashSet::Add(Isolate* isolate,
Handle<OrderedHashSet> table,
Handle<Object> key) {
int hash = key->GetOrCreateHash(isolate).value();
- int entry = table->HashToEntry(hash);
- // Walk the chain of the bucket and try finding the key.
- while (entry != kNotFound) {
- Object candidate_key = table->KeyAt(entry);
- // Do not add if we have the key already
- if (candidate_key.SameValueZero(*key)) return table;
- entry = table->NextChainEntry(entry);
+ if (table->NumberOfElements() > 0) {
+ int raw_entry = table->HashToEntryRaw(hash);
+ // Walk the chain of the bucket and try finding the key.
+ while (raw_entry != kNotFound) {
+ Object candidate_key = table->KeyAt(InternalIndex(raw_entry));
+ // Do not add if we have the key already
+ if (candidate_key.SameValueZero(*key)) return table;
+ raw_entry = table->NextChainEntryRaw(raw_entry);
+ }
}
MaybeHandle<OrderedHashSet> table_candidate =
@@ -143,11 +184,11 @@ MaybeHandle<OrderedHashSet> OrderedHashSet::Add(Isolate* isolate,
}
// Read the existing bucket values.
int bucket = table->HashToBucket(hash);
- int previous_entry = table->HashToEntry(hash);
+ int previous_entry = table->HashToEntryRaw(hash);
int nof = table->NumberOfElements();
// Insert a new entry at the end,
int new_entry = nof + table->NumberOfDeletedElements();
- int new_index = table->EntryToIndex(new_entry);
+ int new_index = table->EntryToIndexRaw(new_entry);
table->set(new_index, *key);
table->set(new_index + kChainOffset, Smi::FromInt(previous_entry));
// and point the bucket to the new entry.
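Insertion, shown above for OrderedHashSet::Add, is the mirror image of the lookup: append the entry at the end of the data area, chain it to the previous bucket head, and make it the new head. A self-contained sketch in the same toy layout as the lookup sketch earlier; the parallel vectors are an illustrative stand-in for V8's flat backing store:

    #include <cstdint>
    #include <vector>

    // Assumes buckets.size() is a power of two.
    void AddEntrySketch(std::vector<int>& buckets, std::vector<int>& keys,
                        std::vector<int>& chains, int key, uint32_t hash) {
      size_t bucket = hash & (buckets.size() - 1);
      int previous_head = buckets[bucket];
      keys.push_back(key);              // insert the new entry at the end
      chains.push_back(previous_head);  // link to the old chain head
      buckets[bucket] = static_cast<int>(keys.size() - 1);  // new head
    }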
@@ -214,17 +255,17 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
if (!new_table_candidate.ToHandle(&new_table)) {
return new_table_candidate;
}
- int nof = table->NumberOfElements();
- int nod = table->NumberOfDeletedElements();
int new_buckets = new_table->NumberOfBuckets();
int new_entry = 0;
int removed_holes_index = 0;
DisallowHeapAllocation no_gc;
- for (int old_entry = 0; old_entry < (nof + nod); ++old_entry) {
+
+ for (InternalIndex old_entry : table->IterateEntries()) {
+ int old_entry_raw = old_entry.as_int();
Object key = table->KeyAt(old_entry);
if (key.IsTheHole(isolate)) {
- table->SetRemovedIndexAt(removed_holes_index++, old_entry);
+ table->SetRemovedIndexAt(removed_holes_index++, old_entry_raw);
continue;
}
@@ -232,8 +273,8 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
int bucket = Smi::ToInt(hash) & (new_buckets - 1);
Object chain_entry = new_table->get(HashTableStartIndex() + bucket);
new_table->set(HashTableStartIndex() + bucket, Smi::FromInt(new_entry));
- int new_index = new_table->EntryToIndex(new_entry);
- int old_index = table->EntryToIndex(old_entry);
+ int new_index = new_table->EntryToIndexRaw(new_entry);
+ int old_index = table->EntryToIndexRaw(old_entry_raw);
for (int i = 0; i < entrysize; ++i) {
Object value = table->get(old_index + i);
new_table->set(new_index + i, value);
@@ -242,10 +283,13 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
++new_entry;
}
- DCHECK_EQ(nod, removed_holes_index);
+ DCHECK_EQ(table->NumberOfDeletedElements(), removed_holes_index);
- new_table->SetNumberOfElements(nof);
- table->SetNextTable(*new_table);
+ new_table->SetNumberOfElements(table->NumberOfElements());
+ if (table->NumberOfBuckets() > 0) {
+ // Don't try to modify the empty canonical table which lives in RO space.
+ table->SetNextTable(*new_table);
+ }
return new_table_candidate;
}
@@ -253,36 +297,29 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
MaybeHandle<OrderedHashSet> OrderedHashSet::Rehash(Isolate* isolate,
Handle<OrderedHashSet> table,
int new_capacity) {
- return OrderedHashTable<OrderedHashSet, 1>::Rehash(isolate, table,
- new_capacity);
+ return Base::Rehash(isolate, table, new_capacity);
}
MaybeHandle<OrderedHashSet> OrderedHashSet::Rehash(
Isolate* isolate, Handle<OrderedHashSet> table) {
- return OrderedHashTable<
- OrderedHashSet, OrderedHashSet::kEntrySizeWithoutChain>::Rehash(isolate,
- table);
+ return Base::Rehash(isolate, table);
}
MaybeHandle<OrderedHashMap> OrderedHashMap::Rehash(
Isolate* isolate, Handle<OrderedHashMap> table) {
- return OrderedHashTable<
- OrderedHashMap, OrderedHashMap::kEntrySizeWithoutChain>::Rehash(isolate,
- table);
+ return Base::Rehash(isolate, table);
}
MaybeHandle<OrderedHashMap> OrderedHashMap::Rehash(Isolate* isolate,
Handle<OrderedHashMap> table,
int new_capacity) {
- return OrderedHashTable<OrderedHashMap, 2>::Rehash(isolate, table,
- new_capacity);
+ return Base::Rehash(isolate, table, new_capacity);
}
MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Rehash(
Isolate* isolate, Handle<OrderedNameDictionary> table, int new_capacity) {
MaybeHandle<OrderedNameDictionary> new_table_candidate =
- OrderedHashTable<OrderedNameDictionary, 3>::Rehash(isolate, table,
- new_capacity);
+ Base::Rehash(isolate, table, new_capacity);
Handle<OrderedNameDictionary> new_table;
if (new_table_candidate.ToHandle(&new_table)) {
new_table->SetHash(table->Hash());
@@ -294,8 +331,8 @@ template <class Derived, int entrysize>
bool OrderedHashTable<Derived, entrysize>::Delete(Isolate* isolate,
Derived table, Object key) {
DisallowHeapAllocation no_gc;
- int entry = table.FindEntry(isolate, key);
- if (entry == kNotFound) return false;
+ InternalIndex entry = table.FindEntry(isolate, key);
+ if (entry.is_not_found()) return false;
int nof = table.NumberOfElements();
int nod = table.NumberOfDeletedElements();
@@ -312,6 +349,17 @@ bool OrderedHashTable<Derived, entrysize>::Delete(Isolate* isolate,
return true;
}
+// Parameter |roots| only here for compatibility with HashTable<...>::ToKey.
+template <class Derived, int entrysize>
+bool OrderedHashTable<Derived, entrysize>::ToKey(ReadOnlyRoots roots,
+ InternalIndex entry,
+ Object* out_key) {
+ Object k = KeyAt(entry);
+ if (!IsKey(roots, k)) return false;
+ *out_key = k;
+ return true;
+}
+
Address OrderedHashMap::GetHash(Isolate* isolate, Address raw_key) {
DisallowHeapAllocation no_gc;
Object key(raw_key);
@@ -328,16 +376,18 @@ MaybeHandle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
Handle<Object> key,
Handle<Object> value) {
int hash = key->GetOrCreateHash(isolate).value();
- int entry = table->HashToEntry(hash);
- // Walk the chain of the bucket and try finding the key.
- {
- DisallowHeapAllocation no_gc;
- Object raw_key = *key;
- while (entry != kNotFound) {
- Object candidate_key = table->KeyAt(entry);
- // Do not add if we have the key already
- if (candidate_key.SameValueZero(raw_key)) return table;
- entry = table->NextChainEntry(entry);
+ if (table->NumberOfElements() > 0) {
+ int raw_entry = table->HashToEntryRaw(hash);
+ // Walk the chain of the bucket and try finding the key.
+ {
+ DisallowHeapAllocation no_gc;
+ Object raw_key = *key;
+ while (raw_entry != kNotFound) {
+ Object candidate_key = table->KeyAt(InternalIndex(raw_entry));
+ // Do not add if we have the key already
+ if (candidate_key.SameValueZero(raw_key)) return table;
+ raw_entry = table->NextChainEntryRaw(raw_entry);
+ }
}
}
@@ -348,11 +398,11 @@ MaybeHandle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
}
// Read the existing bucket values.
int bucket = table->HashToBucket(hash);
- int previous_entry = table->HashToEntry(hash);
+ int previous_entry = table->HashToEntryRaw(hash);
int nof = table->NumberOfElements();
// Insert a new entry at the end,
int new_entry = nof + table->NumberOfDeletedElements();
- int new_index = table->EntryToIndex(new_entry);
+ int new_index = table->EntryToIndexRaw(new_entry);
table->set(new_index, *key);
table->set(new_index + kValueOffset, *value);
table->set(new_index + kChainOffset, Smi::FromInt(previous_entry));
@@ -362,16 +412,21 @@ MaybeHandle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
return table;
}
-template <>
-V8_EXPORT_PRIVATE int OrderedHashTable<OrderedNameDictionary, 3>::FindEntry(
- Isolate* isolate, Object key) {
+InternalIndex OrderedNameDictionary::FindEntry(Isolate* isolate, Object key) {
DisallowHeapAllocation no_gc;
DCHECK(key.IsUniqueName());
Name raw_key = Name::cast(key);
- int entry = HashToEntry(raw_key.Hash());
- while (entry != kNotFound) {
+ if (NumberOfElements() == 0) {
+ // This is not just an optimization but also ensures that we do the right
+    // thing if Capacity() == 0.
+ return InternalIndex::NotFound();
+ }
+
+ int raw_entry = HashToEntryRaw(raw_key.Hash());
+ while (raw_entry != kNotFound) {
+ InternalIndex entry(raw_entry);
Object candidate_key = KeyAt(entry);
DCHECK(candidate_key.IsTheHole() ||
Name::cast(candidate_key).IsUniqueName());
@@ -380,16 +435,48 @@ V8_EXPORT_PRIVATE int OrderedHashTable<OrderedNameDictionary, 3>::FindEntry(
// TODO(gsathya): This is loading the bucket count from the hash
// table for every iteration. This should be peeled out of the
// loop.
- entry = NextChainEntry(entry);
+ raw_entry = NextChainEntryRaw(raw_entry);
}
- return kNotFound;
+ return InternalIndex::NotFound();
+}
+
+// TODO(emrich): This is almost an identical copy of
+// Dictionary<..>::SlowReverseLookup.
+// Consolidate both versions elsewhere (e.g., hash-table-utils)?
+Object OrderedNameDictionary::SlowReverseLookup(Isolate* isolate,
+ Object value) {
+ ReadOnlyRoots roots(isolate);
+ for (InternalIndex i : IterateEntries()) {
+ Object k;
+ if (!ToKey(roots, i, &k)) continue;
+ Object e = this->ValueAt(i);
+ if (e == value) return k;
+ }
+ return roots.undefined_value();
+}
+
+// TODO(emrich): This is almost an identical copy of
+// HashTable<..>::NumberOfEnumerableProperties.
+// Consolidate both versions elsewhere (e.g., hash-table-utils)?
+int OrderedNameDictionary::NumberOfEnumerableProperties() {
+ ReadOnlyRoots roots = this->GetReadOnlyRoots();
+ int result = 0;
+ for (InternalIndex i : this->IterateEntries()) {
+ Object k;
+ if (!this->ToKey(roots, i, &k)) continue;
+ if (k.FilterKey(ENUMERABLE_STRINGS)) continue;
+ PropertyDetails details = this->DetailsAt(i);
+ PropertyAttributes attr = details.attributes();
+ if ((attr & ONLY_ENUMERABLE) == 0) result++;
+ }
+ return result;
}
MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Add(
Isolate* isolate, Handle<OrderedNameDictionary> table, Handle<Name> key,
Handle<Object> value, PropertyDetails details) {
- DCHECK_EQ(kNotFound, table->FindEntry(isolate, *key));
+ DCHECK(table->FindEntry(isolate, *key).is_not_found());
MaybeHandle<OrderedNameDictionary> table_candidate =
OrderedNameDictionary::EnsureGrowable(isolate, table);
@@ -399,11 +486,11 @@ MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Add(
// Read the existing bucket values.
int hash = key->Hash();
int bucket = table->HashToBucket(hash);
- int previous_entry = table->HashToEntry(hash);
+ int previous_entry = table->HashToEntryRaw(hash);
int nof = table->NumberOfElements();
// Insert a new entry at the end,
int new_entry = nof + table->NumberOfDeletedElements();
- int new_index = table->EntryToIndex(new_entry);
+ int new_index = table->EntryToIndexRaw(new_entry);
table->set(new_index, *key);
table->set(new_index + kValueOffset, *value);
@@ -419,8 +506,8 @@ MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Add(
return table;
}
-void OrderedNameDictionary::SetEntry(int entry, Object key, Object value,
- PropertyDetails details) {
+void OrderedNameDictionary::SetEntry(InternalIndex entry, Object key,
+ Object value, PropertyDetails details) {
DisallowHeapAllocation gc;
DCHECK_IMPLIES(!key.IsName(), key.IsTheHole());
DisallowHeapAllocation no_gc;
@@ -435,8 +522,9 @@ void OrderedNameDictionary::SetEntry(int entry, Object key, Object value,
}
Handle<OrderedNameDictionary> OrderedNameDictionary::DeleteEntry(
- Isolate* isolate, Handle<OrderedNameDictionary> table, int entry) {
- DCHECK_NE(entry, kNotFound);
+ Isolate* isolate, Handle<OrderedNameDictionary> table,
+ InternalIndex entry) {
+ DCHECK(entry.is_found());
Object hole = ReadOnlyRoots(isolate).the_hole_value();
PropertyDetails details = PropertyDetails::Empty();
@@ -452,25 +540,47 @@ Handle<OrderedNameDictionary> OrderedNameDictionary::DeleteEntry(
MaybeHandle<OrderedHashSet> OrderedHashSet::Allocate(
Isolate* isolate, int capacity, AllocationType allocation) {
- return OrderedHashTable<OrderedHashSet, 1>::Allocate(isolate, capacity,
- allocation);
+ return Base::Allocate(isolate, capacity, allocation);
}
MaybeHandle<OrderedHashMap> OrderedHashMap::Allocate(
Isolate* isolate, int capacity, AllocationType allocation) {
- return OrderedHashTable<OrderedHashMap, 2>::Allocate(isolate, capacity,
- allocation);
+ return Base::Allocate(isolate, capacity, allocation);
}
MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Allocate(
Isolate* isolate, int capacity, AllocationType allocation) {
MaybeHandle<OrderedNameDictionary> table_candidate =
- OrderedHashTable<OrderedNameDictionary, 3>::Allocate(isolate, capacity,
- allocation);
+ Base::Allocate(isolate, capacity, allocation);
+ Handle<OrderedNameDictionary> table;
+ if (table_candidate.ToHandle(&table)) {
+ table->SetHash(PropertyArray::kNoHashSentinel);
+ }
+ return table_candidate;
+}
+
+MaybeHandle<OrderedHashSet> OrderedHashSet::AllocateEmpty(
+ Isolate* isolate, AllocationType allocation) {
+ RootIndex ri = RootIndex::kEmptyOrderedHashSet;
+ return Base::AllocateEmpty(isolate, allocation, ri);
+}
+
+MaybeHandle<OrderedHashMap> OrderedHashMap::AllocateEmpty(
+ Isolate* isolate, AllocationType allocation) {
+ RootIndex ri = RootIndex::kEmptyOrderedHashMap;
+ return Base::AllocateEmpty(isolate, allocation, ri);
+}
+
+MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::AllocateEmpty(
+ Isolate* isolate, AllocationType allocation) {
+ RootIndex ri = RootIndex::kEmptyOrderedPropertyDictionary;
+ MaybeHandle<OrderedNameDictionary> table_candidate =
+ Base::AllocateEmpty(isolate, allocation, ri);
Handle<OrderedNameDictionary> table;
if (table_candidate.ToHandle(&table)) {
table->SetHash(PropertyArray::kNoHashSentinel);
}
+
return table_candidate;
}
@@ -492,8 +602,8 @@ template V8_EXPORT_PRIVATE bool OrderedHashTable<OrderedHashSet, 1>::HasKey(
template V8_EXPORT_PRIVATE bool OrderedHashTable<OrderedHashSet, 1>::Delete(
Isolate* isolate, OrderedHashSet table, Object key);
-template V8_EXPORT_PRIVATE int OrderedHashTable<OrderedHashSet, 1>::FindEntry(
- Isolate* isolate, Object key);
+template V8_EXPORT_PRIVATE InternalIndex
+OrderedHashTable<OrderedHashSet, 1>::FindEntry(Isolate* isolate, Object key);
template V8_EXPORT_PRIVATE MaybeHandle<OrderedHashMap>
OrderedHashTable<OrderedHashMap, 2>::EnsureGrowable(
@@ -513,10 +623,10 @@ template V8_EXPORT_PRIVATE bool OrderedHashTable<OrderedHashMap, 2>::HasKey(
template V8_EXPORT_PRIVATE bool OrderedHashTable<OrderedHashMap, 2>::Delete(
Isolate* isolate, OrderedHashMap table, Object key);
-template V8_EXPORT_PRIVATE int OrderedHashTable<OrderedHashMap, 2>::FindEntry(
- Isolate* isolate, Object key);
+template V8_EXPORT_PRIVATE InternalIndex
+OrderedHashTable<OrderedHashMap, 2>::FindEntry(Isolate* isolate, Object key);
-template Handle<OrderedNameDictionary>
+template V8_EXPORT_PRIVATE Handle<OrderedNameDictionary>
OrderedHashTable<OrderedNameDictionary, 3>::Shrink(
Isolate* isolate, Handle<OrderedNameDictionary> table);
@@ -679,29 +789,30 @@ bool SmallOrderedHashMap::HasKey(Isolate* isolate, Handle<Object> key) {
}
template <>
-int V8_EXPORT_PRIVATE
+InternalIndex V8_EXPORT_PRIVATE
SmallOrderedHashTable<SmallOrderedNameDictionary>::FindEntry(Isolate* isolate,
Object key) {
DisallowHeapAllocation no_gc;
DCHECK(key.IsUniqueName());
Name raw_key = Name::cast(key);
- int entry = HashToFirstEntry(raw_key.Hash());
+ int raw_entry = HashToFirstEntry(raw_key.Hash());
// Walk the chain in the bucket to find the key.
- while (entry != kNotFound) {
+ while (raw_entry != kNotFound) {
+ InternalIndex entry(raw_entry);
Object candidate_key = KeyAt(entry);
if (candidate_key == key) return entry;
- entry = GetNextEntry(entry);
+ raw_entry = GetNextEntry(raw_entry);
}
- return kNotFound;
+ return InternalIndex::NotFound();
}
MaybeHandle<SmallOrderedNameDictionary> SmallOrderedNameDictionary::Add(
Isolate* isolate, Handle<SmallOrderedNameDictionary> table,
Handle<Name> key, Handle<Object> value, PropertyDetails details) {
- DCHECK_EQ(kNotFound, table->FindEntry(isolate, *key));
+ DCHECK(table->FindEntry(isolate, *key).is_not_found());
if (table->UsedCapacity() >= table->Capacity()) {
MaybeHandle<SmallOrderedNameDictionary> new_table =
@@ -739,15 +850,17 @@ MaybeHandle<SmallOrderedNameDictionary> SmallOrderedNameDictionary::Add(
return table;
}
-void SmallOrderedNameDictionary::SetEntry(int entry, Object key, Object value,
+void SmallOrderedNameDictionary::SetEntry(InternalIndex entry, Object key,
+ Object value,
PropertyDetails details) {
+ int raw_entry = entry.as_int();
DCHECK_IMPLIES(!key.IsName(), key.IsTheHole());
- SetDataEntry(entry, SmallOrderedNameDictionary::kValueIndex, value);
- SetDataEntry(entry, SmallOrderedNameDictionary::kKeyIndex, key);
+ SetDataEntry(raw_entry, SmallOrderedNameDictionary::kValueIndex, value);
+ SetDataEntry(raw_entry, SmallOrderedNameDictionary::kKeyIndex, key);
// TODO(gsathya): PropertyDetails should be stored as part of the
// data table to save more memory.
- SetDataEntry(entry, SmallOrderedNameDictionary::kPropertyDetailsIndex,
+ SetDataEntry(raw_entry, SmallOrderedNameDictionary::kPropertyDetailsIndex,
details.AsSmi());
}
@@ -755,22 +868,22 @@ template <class Derived>
bool SmallOrderedHashTable<Derived>::HasKey(Isolate* isolate,
Handle<Object> key) {
DisallowHeapAllocation no_gc;
- return FindEntry(isolate, *key) != kNotFound;
+ return FindEntry(isolate, *key).is_found();
}
template <class Derived>
bool SmallOrderedHashTable<Derived>::Delete(Isolate* isolate, Derived table,
Object key) {
DisallowHeapAllocation no_gc;
- int entry = table.FindEntry(isolate, key);
- if (entry == kNotFound) return false;
+ InternalIndex entry = table.FindEntry(isolate, key);
+ if (entry.is_not_found()) return false;
int nof = table.NumberOfElements();
int nod = table.NumberOfDeletedElements();
Object hole = ReadOnlyRoots(isolate).the_hole_value();
for (int j = 0; j < Derived::kEntrySize; j++) {
- table.SetDataEntry(entry, j, hole);
+ table.SetDataEntry(entry.as_int(), j, hole);
}
table.SetNumberOfElements(nof - 1);
@@ -780,8 +893,9 @@ bool SmallOrderedHashTable<Derived>::Delete(Isolate* isolate, Derived table,
}
Handle<SmallOrderedNameDictionary> SmallOrderedNameDictionary::DeleteEntry(
- Isolate* isolate, Handle<SmallOrderedNameDictionary> table, int entry) {
- DCHECK_NE(entry, kNotFound);
+ Isolate* isolate, Handle<SmallOrderedNameDictionary> table,
+ InternalIndex entry) {
+ DCHECK(entry.is_found());
{
DisallowHeapAllocation no_gc;
Object hole = ReadOnlyRoots(isolate).the_hole_value();
@@ -806,13 +920,11 @@ Handle<Derived> SmallOrderedHashTable<Derived>::Rehash(Isolate* isolate,
isolate, new_capacity,
Heap::InYoungGeneration(*table) ? AllocationType::kYoung
: AllocationType::kOld);
- int nof = table->NumberOfElements();
- int nod = table->NumberOfDeletedElements();
int new_entry = 0;
{
DisallowHeapAllocation no_gc;
- for (int old_entry = 0; old_entry < (nof + nod); ++old_entry) {
+ for (InternalIndex old_entry : table->IterateEntries()) {
Object key = table->KeyAt(old_entry);
if (key.IsTheHole(isolate)) continue;
@@ -824,14 +936,14 @@ Handle<Derived> SmallOrderedHashTable<Derived>::Rehash(Isolate* isolate,
new_table->SetNextEntry(new_entry, chain);
for (int i = 0; i < Derived::kEntrySize; ++i) {
- Object value = table->GetDataEntry(old_entry, i);
+ Object value = table->GetDataEntry(old_entry.as_int(), i);
new_table->SetDataEntry(new_entry, i, value);
}
++new_entry;
}
- new_table->SetNumberOfElements(nof);
+ new_table->SetNumberOfElements(table->NumberOfElements());
}
return new_table;
}
@@ -895,20 +1007,22 @@ MaybeHandle<Derived> SmallOrderedHashTable<Derived>::Grow(
}
template <class Derived>
-int SmallOrderedHashTable<Derived>::FindEntry(Isolate* isolate, Object key) {
+InternalIndex SmallOrderedHashTable<Derived>::FindEntry(Isolate* isolate,
+ Object key) {
DisallowHeapAllocation no_gc;
Object hash = key.GetHash();
- if (hash.IsUndefined(isolate)) return kNotFound;
- int entry = HashToFirstEntry(Smi::ToInt(hash));
+ if (hash.IsUndefined(isolate)) return InternalIndex::NotFound();
+ int raw_entry = HashToFirstEntry(Smi::ToInt(hash));
// Walk the chain in the bucket to find the key.
- while (entry != kNotFound) {
+ while (raw_entry != kNotFound) {
+ InternalIndex entry(raw_entry);
Object candidate_key = KeyAt(entry);
if (candidate_key.SameValueZero(key)) return entry;
- entry = GetNextEntry(entry);
+ raw_entry = GetNextEntry(raw_entry);
}
- return kNotFound;
+ return InternalIndex::NotFound();
}
template bool EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
@@ -1031,17 +1145,16 @@ MaybeHandle<OrderedHashMap> OrderedHashMapHandler::AdjustRepresentation(
if (!new_table_candidate.ToHandle(&new_table)) {
return new_table_candidate;
}
- int nof = table->NumberOfElements();
- int nod = table->NumberOfDeletedElements();
  // TODO(gsathya): Optimize the lookup to not recalculate offsets. Also,
// unhandlify this code as we preallocate the new backing store with
// the proper capacity.
- for (int entry = 0; entry < (nof + nod); ++entry) {
+ for (InternalIndex entry : table->IterateEntries()) {
Handle<Object> key = handle(table->KeyAt(entry), isolate);
if (key->IsTheHole(isolate)) continue;
Handle<Object> value = handle(
- table->GetDataEntry(entry, SmallOrderedHashMap::kValueIndex), isolate);
+ table->GetDataEntry(entry.as_int(), SmallOrderedHashMap::kValueIndex),
+ isolate);
new_table_candidate = OrderedHashMap::Add(isolate, new_table, key, value);
if (!new_table_candidate.ToHandle(&new_table)) {
return new_table_candidate;
@@ -1059,13 +1172,11 @@ MaybeHandle<OrderedHashSet> OrderedHashSetHandler::AdjustRepresentation(
if (!new_table_candidate.ToHandle(&new_table)) {
return new_table_candidate;
}
- int nof = table->NumberOfElements();
- int nod = table->NumberOfDeletedElements();
  // TODO(gsathya): Optimize the lookup to not recalculate offsets. Also,
// unhandlify this code as we preallocate the new backing store with
// the proper capacity.
- for (int entry = 0; entry < (nof + nod); ++entry) {
+ for (InternalIndex entry : table->IterateEntries()) {
Handle<Object> key = handle(table->KeyAt(entry), isolate);
if (key->IsTheHole(isolate)) continue;
new_table_candidate = OrderedHashSet::Add(isolate, new_table, key);
@@ -1086,13 +1197,11 @@ OrderedNameDictionaryHandler::AdjustRepresentation(
if (!new_table_candidate.ToHandle(&new_table)) {
return new_table_candidate;
}
- int nof = table->NumberOfElements();
- int nod = table->NumberOfDeletedElements();
  // TODO(gsathya): Optimize the lookup to not recalculate offsets. Also,
// unhandlify this code as we preallocate the new backing store with
// the proper capacity.
- for (int entry = 0; entry < (nof + nod); ++entry) {
+ for (InternalIndex entry : table->IterateEntries()) {
Handle<Name> key(Name::cast(table->KeyAt(entry)), isolate);
if (key->IsTheHole(isolate)) continue;
Handle<Object> value(table->ValueAt(entry), isolate);
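All three AdjustRepresentation loops above switch from manual `nof + nod` bounds to range-based iteration over IterateEntries(). A standalone sketch of the iterator shape that makes `for (InternalIndex entry : table->IterateEntries())` compile (an assumption about the shape, not V8's exact InternalIndex::Range):

  // Minimal index range usable in a range-for; yields 0, 1, ..., limit - 1.
  class IndexRange {
   public:
    explicit IndexRange(int limit) : limit_(limit) {}
    class Iterator {
     public:
      explicit Iterator(int i) : i_(i) {}
      int operator*() const { return i_; }
      Iterator& operator++() { ++i_; return *this; }
      bool operator!=(const Iterator& other) const { return i_ != other.i_; }
     private:
      int i_;
    };
    Iterator begin() const { return Iterator(0); }
    Iterator end() const { return Iterator(limit_); }
   private:
    int limit_;
  };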
@@ -1180,8 +1289,9 @@ MaybeHandle<HeapObject> OrderedNameDictionaryHandler::Add(
isolate, Handle<OrderedNameDictionary>::cast(table), key, value, details);
}
-void OrderedNameDictionaryHandler::SetEntry(HeapObject table, int entry,
- Object key, Object value,
+void OrderedNameDictionaryHandler::SetEntry(HeapObject table,
+ InternalIndex entry, Object key,
+ Object value,
PropertyDetails details) {
DisallowHeapAllocation no_gc;
if (table.IsSmallOrderedNameDictionary()) {
@@ -1190,28 +1300,24 @@ void OrderedNameDictionaryHandler::SetEntry(HeapObject table, int entry,
}
DCHECK(table.IsOrderedNameDictionary());
- return OrderedNameDictionary::cast(table).SetEntry(entry, key, value,
- details);
+ return OrderedNameDictionary::cast(table).SetEntry(InternalIndex(entry), key,
+ value, details);
}
-int OrderedNameDictionaryHandler::FindEntry(Isolate* isolate, HeapObject table,
- Name key) {
+InternalIndex OrderedNameDictionaryHandler::FindEntry(Isolate* isolate,
+ HeapObject table,
+ Name key) {
DisallowHeapAllocation no_gc;
if (table.IsSmallOrderedNameDictionary()) {
- int entry = SmallOrderedNameDictionary::cast(table).FindEntry(isolate, key);
- return entry == SmallOrderedNameDictionary::kNotFound
- ? OrderedNameDictionaryHandler::kNotFound
- : entry;
+ return SmallOrderedNameDictionary::cast(table).FindEntry(isolate, key);
}
DCHECK(table.IsOrderedNameDictionary());
- int entry = OrderedNameDictionary::cast(table).FindEntry(isolate, key);
- return entry == OrderedNameDictionary::kNotFound
- ? OrderedNameDictionaryHandler::kNotFound
- : entry;
+ return OrderedNameDictionary::cast(table).FindEntry(isolate, key);
}
-Object OrderedNameDictionaryHandler::ValueAt(HeapObject table, int entry) {
+Object OrderedNameDictionaryHandler::ValueAt(HeapObject table,
+ InternalIndex entry) {
if (table.IsSmallOrderedNameDictionary()) {
return SmallOrderedNameDictionary::cast(table).ValueAt(entry);
}
@@ -1220,7 +1326,8 @@ Object OrderedNameDictionaryHandler::ValueAt(HeapObject table, int entry) {
return OrderedNameDictionary::cast(table).ValueAt(entry);
}
-void OrderedNameDictionaryHandler::ValueAtPut(HeapObject table, int entry,
+void OrderedNameDictionaryHandler::ValueAtPut(HeapObject table,
+ InternalIndex entry,
Object value) {
if (table.IsSmallOrderedNameDictionary()) {
return SmallOrderedNameDictionary::cast(table).ValueAtPut(entry, value);
@@ -1231,7 +1338,7 @@ void OrderedNameDictionaryHandler::ValueAtPut(HeapObject table, int entry,
}
PropertyDetails OrderedNameDictionaryHandler::DetailsAt(HeapObject table,
- int entry) {
+ InternalIndex entry) {
if (table.IsSmallOrderedNameDictionary()) {
return SmallOrderedNameDictionary::cast(table).DetailsAt(entry);
}
@@ -1240,7 +1347,8 @@ PropertyDetails OrderedNameDictionaryHandler::DetailsAt(HeapObject table,
return OrderedNameDictionary::cast(table).DetailsAt(entry);
}
-void OrderedNameDictionaryHandler::DetailsAtPut(HeapObject table, int entry,
+void OrderedNameDictionaryHandler::DetailsAtPut(HeapObject table,
+ InternalIndex entry,
PropertyDetails details) {
if (table.IsSmallOrderedNameDictionary()) {
return SmallOrderedNameDictionary::cast(table).DetailsAtPut(entry, details);
@@ -1268,12 +1376,14 @@ void OrderedNameDictionaryHandler::SetHash(HeapObject table, int hash) {
OrderedNameDictionary::cast(table).SetHash(hash);
}
-Name OrderedNameDictionaryHandler::KeyAt(HeapObject table, int entry) {
+Name OrderedNameDictionaryHandler::KeyAt(HeapObject table,
+ InternalIndex entry) {
if (table.IsSmallOrderedNameDictionary()) {
return Name::cast(SmallOrderedNameDictionary::cast(table).KeyAt(entry));
}
- return Name::cast(OrderedNameDictionary::cast(table).KeyAt(entry));
+ return Name::cast(
+ OrderedNameDictionary::cast(table).KeyAt(InternalIndex(entry)));
}
int OrderedNameDictionaryHandler::NumberOfElements(HeapObject table) {
@@ -1306,7 +1416,7 @@ Handle<HeapObject> OrderedNameDictionaryHandler::Shrink(
}
Handle<HeapObject> OrderedNameDictionaryHandler::DeleteEntry(
- Isolate* isolate, Handle<HeapObject> table, int entry) {
+ Isolate* isolate, Handle<HeapObject> table, InternalIndex entry) {
DisallowHeapAllocation no_gc;
if (table->IsSmallOrderedNameDictionary()) {
Handle<SmallOrderedNameDictionary> small_dict =
@@ -1316,7 +1426,8 @@ Handle<HeapObject> OrderedNameDictionaryHandler::DeleteEntry(
Handle<OrderedNameDictionary> large_dict =
Handle<OrderedNameDictionary>::cast(table);
- return OrderedNameDictionary::DeleteEntry(isolate, large_dict, entry);
+ return OrderedNameDictionary::DeleteEntry(isolate, large_dict,
+ InternalIndex(entry));
}
template <class Derived, class TableType>
@@ -1326,6 +1437,7 @@ void OrderedHashTableIterator<Derived, TableType>::Transition() {
if (!table.IsObsolete()) return;
int index = Smi::ToInt(this->index());
+ DCHECK_LE(0, index);
while (table.IsObsolete()) {
TableType next_table = table.NextTable();
@@ -1362,7 +1474,8 @@ bool OrderedHashTableIterator<Derived, TableType>::HasMore() {
int index = Smi::ToInt(this->index());
int used_capacity = table.UsedCapacity();
- while (index < used_capacity && table.KeyAt(index).IsTheHole(ro_roots)) {
+ while (index < used_capacity &&
+ table.KeyAt(InternalIndex(index)).IsTheHole(ro_roots)) {
index++;
}
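HasMore advances the cursor past deleted holes before reporting that another element remains; a toy model of that skip loop (simplified on the assumption that a sentinel value marks a hole):

  #include <vector>

  constexpr int kHole = -1;  // stand-in for the the-hole sentinel

  // Advances |*index| past holes; returns true if a live entry remains.
  bool HasMore(const std::vector<int>& keys, int* index) {
    int used_capacity = static_cast<int>(keys.size());
    while (*index < used_capacity && keys[*index] == kHole) ++(*index);
    return *index < used_capacity;
  }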
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index 5f3c45a110..0172986d1f 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -8,6 +8,7 @@
#include "src/base/export-template.h"
#include "src/common/globals.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/internal-index.h"
#include "src/objects/js-objects.h"
#include "src/objects/smi.h"
#include "src/roots/roots.h"
@@ -34,7 +35,7 @@ namespace internal {
// [kPrefixSize]: element count
// [kPrefixSize + 1]: deleted element count
// [kPrefixSize + 2]: bucket count
-// [kPrefixSize + 3..(3 + NumberOfBuckets() - 1)]: "hash table",
+// [kPrefixSize + 3..(kPrefixSize + 3 + NumberOfBuckets() - 1)]: "hash table",
// where each item is an offset into the
// data table (see below) where the first
// item in this bucket is stored.
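As a concrete reading of the layout above, assume kPrefixSize = 1 (the value OrderedNameDictionary declares later in this header) and NumberOfBuckets() = 2: slot 1 holds the element count, slot 2 the deleted-element count, slot 3 the bucket count, slots 4 and 5 the two bucket heads, and the data table begins at slot 6, so entry i starts at array index 6 + i * kEntrySize.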
@@ -52,13 +53,15 @@ namespace internal {
//
// Memory layout for obsolete table:
// [0] : Prefix
-// [kPrefixSize + 0]: bucket count
-// [kPrefixSize + 1]: Next newer table
-// [kPrefixSize + 2]: Number of removed holes or -1 when the table was
-// cleared.
-// [kPrefixSize + 3..(3 + NumberOfRemovedHoles() - 1)]: The indexes
-// of the removed holes.
-// [kPrefixSize + 3 + NumberOfRemovedHoles()..length]: Not used
+// [kPrefixSize + 0]: Next newer table
+// [kPrefixSize + 1]: deleted element count or kClearedTableSentinel if
+// the table was cleared
+// [kPrefixSize + 2]: bucket count
+// [kPrefixSize + 3..(kPrefixSize + 3 + NumberOfDeletedElements() - 1)]:
+// The indexes of the removed holes. This part is only
+// usable for non-cleared tables, as clearing removes the
+// deleted elements count.
+// [kPrefixSize + 3 + NumberOfDeletedElements()..length]: Not used
template <class Derived, int entrysize>
class OrderedHashTable : public FixedArray {
public:
@@ -78,11 +81,17 @@ class OrderedHashTable : public FixedArray {
// Returns true if the OrderedHashTable contains the key
static bool HasKey(Isolate* isolate, Derived table, Object key);
+ // Returns whether a potential key |k| returned by KeyAt is a real
+ // key (meaning that it is not a hole).
+ static inline bool IsKey(ReadOnlyRoots roots, Object k);
+
// Returns true if the OrderedHashTable contained the key, which has then
// been deleted. This does not shrink the table.
static bool Delete(Isolate* isolate, Derived table, Object key);
- int FindEntry(Isolate* isolate, Object key);
+ InternalIndex FindEntry(Isolate* isolate, Object key);
+
+ Object SlowReverseLookup(Isolate* isolate, Object value);
int NumberOfElements() const {
return Smi::ToInt(get(NumberOfElementsIndex()));
@@ -102,30 +111,20 @@ class OrderedHashTable : public FixedArray {
return Smi::ToInt(get(NumberOfBucketsIndex()));
}
- // Returns an index into |this| for the given entry.
- int EntryToIndex(int entry) {
- return HashTableStartIndex() + NumberOfBuckets() + (entry * kEntrySize);
- }
-
- int HashToBucket(int hash) { return hash & (NumberOfBuckets() - 1); }
-
- int HashToEntry(int hash) {
- int bucket = HashToBucket(hash);
- Object entry = this->get(HashTableStartIndex() + bucket);
- return Smi::ToInt(entry);
- }
-
- int NextChainEntry(int entry) {
- Object next_entry = get(EntryToIndex(entry) + kChainOffset);
- return Smi::ToInt(next_entry);
+ InternalIndex::Range IterateEntries() {
+ return InternalIndex::Range(UsedCapacity());
}
- // use KeyAt(i)->IsTheHole(isolate) to determine if this is a deleted entry.
- Object KeyAt(int entry) {
- DCHECK_LT(entry, this->UsedCapacity());
+ // Use IsKey to check whether this is a real key or a deleted entry.
+ Object KeyAt(InternalIndex entry) {
+ DCHECK_LT(entry.as_int(), this->UsedCapacity());
return get(EntryToIndex(entry));
}
+ // Similar to KeyAt, but indicates whether the given entry is valid
+ // (not a deleted one).
+ bool ToKey(ReadOnlyRoots roots, InternalIndex entry, Object* out_key);
+
bool IsObsolete() { return !get(NextTableIndex()).IsSmi(); }
// The next newer table. This is only valid if the table is obsolete.
@@ -142,7 +141,9 @@ class OrderedHashTable : public FixedArray {
static const int kChainOffset = entrysize;
static const int kNotFound = -1;
- static const int kMinCapacity = 4;
+ // The minimum capacity. Note that despite this value, 0 is also a permitted
+ // capacity, indicating a table without any storage for elements.
+ static const int kInitialCapacity = 4;
static constexpr int PrefixIndex() { return 0; }
@@ -202,10 +203,41 @@ class OrderedHashTable : public FixedArray {
Isolate* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
+ static MaybeHandle<Derived> AllocateEmpty(Isolate* isolate,
+ AllocationType allocation,
+ RootIndex root_index);
+
static MaybeHandle<Derived> Rehash(Isolate* isolate, Handle<Derived> table);
static MaybeHandle<Derived> Rehash(Isolate* isolate, Handle<Derived> table,
int new_capacity);
+ int HashToEntryRaw(int hash) {
+ int bucket = HashToBucket(hash);
+ Object entry = this->get(HashTableStartIndex() + bucket);
+ int entry_int = Smi::ToInt(entry);
+ DCHECK(entry_int == kNotFound || entry_int >= 0);
+ return entry_int;
+ }
+
+ int NextChainEntryRaw(int entry) {
+ DCHECK_LT(entry, this->UsedCapacity());
+ Object next_entry = get(EntryToIndexRaw(entry) + kChainOffset);
+ int next_entry_int = Smi::ToInt(next_entry);
+ DCHECK(next_entry_int == kNotFound || next_entry_int >= 0);
+ return next_entry_int;
+ }
+
+ // Returns an index into |this| for the given entry.
+ int EntryToIndexRaw(int entry) {
+ return HashTableStartIndex() + NumberOfBuckets() + (entry * kEntrySize);
+ }
+
+ int EntryToIndex(InternalIndex entry) {
+ return EntryToIndexRaw(entry.as_int());
+ }
+
+ int HashToBucket(int hash) { return hash & (NumberOfBuckets() - 1); }
+
void SetNumberOfBuckets(int num) {
set(NumberOfBucketsIndex(), Smi::FromInt(num));
}
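Taken together, HashToBucket, HashToEntryRaw and NextChainEntryRaw implement a bucket-chain lookup, which is exactly what FindEntry in the .cc file walks. A standalone toy model (a simplification with int keys, not V8 code):

  #include <vector>

  struct ToyOrderedTable {
    static constexpr int kNotFound = -1;
    std::vector<int> buckets;  // head entry per bucket; size is a power of two
    std::vector<int> keys;     // key stored at each entry
    std::vector<int> chain;    // next entry in the same bucket, or kNotFound

    int HashToBucket(int hash) const {
      return hash & (static_cast<int>(buckets.size()) - 1);
    }

    int FindEntryRaw(int hash, int key) const {
      // Walk the chain in the bucket to find the key, as in FindEntry above.
      for (int e = buckets[HashToBucket(hash)]; e != kNotFound; e = chain[e]) {
        if (keys[e] == key) return e;
      }
      return kNotFound;
    }
  };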
@@ -235,6 +267,8 @@ class OrderedHashTable : public FixedArray {
class V8_EXPORT_PRIVATE OrderedHashSet
: public OrderedHashTable<OrderedHashSet, 1> {
+ using Base = OrderedHashTable<OrderedHashSet, 1>;
+
public:
DECL_CAST(OrderedHashSet)
@@ -252,6 +286,10 @@ class V8_EXPORT_PRIVATE OrderedHashSet
static MaybeHandle<OrderedHashSet> Allocate(
Isolate* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
+
+ static MaybeHandle<OrderedHashSet> AllocateEmpty(
+ Isolate* isolate, AllocationType allocation = AllocationType::kReadOnly);
+
static HeapObject GetEmpty(ReadOnlyRoots ro_roots);
static inline Handle<Map> GetMap(ReadOnlyRoots roots);
static inline bool Is(Handle<HeapObject> table);
@@ -262,6 +300,8 @@ class V8_EXPORT_PRIVATE OrderedHashSet
class V8_EXPORT_PRIVATE OrderedHashMap
: public OrderedHashTable<OrderedHashMap, 2> {
+ using Base = OrderedHashTable<OrderedHashMap, 2>;
+
public:
DECL_CAST(OrderedHashMap)
@@ -275,12 +315,16 @@ class V8_EXPORT_PRIVATE OrderedHashMap
static MaybeHandle<OrderedHashMap> Allocate(
Isolate* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
+
+ static MaybeHandle<OrderedHashMap> AllocateEmpty(
+ Isolate* isolate, AllocationType allocation = AllocationType::kReadOnly);
+
static MaybeHandle<OrderedHashMap> Rehash(Isolate* isolate,
Handle<OrderedHashMap> table,
int new_capacity);
static MaybeHandle<OrderedHashMap> Rehash(Isolate* isolate,
Handle<OrderedHashMap> table);
- Object ValueAt(int entry);
+ Object ValueAt(InternalIndex entry);
// This takes and returns raw Address values containing tagged Object
// pointers because it is called via ExternalReference.
@@ -371,7 +415,7 @@ class SmallOrderedHashTable : public HeapObject {
// we've already reached MaxCapacity.
static MaybeHandle<Derived> Grow(Isolate* isolate, Handle<Derived> table);
- int FindEntry(Isolate* isolate, Object key);
+ InternalIndex FindEntry(Isolate* isolate, Object key);
static Handle<Derived> Shrink(Isolate* isolate, Handle<Derived> table);
// Iterates only fields in the DataTable.
@@ -418,7 +462,11 @@ class SmallOrderedHashTable : public HeapObject {
int NumberOfBuckets() const { return getByte(NumberOfBucketsOffset(), 0); }
- V8_INLINE Object KeyAt(int entry) const;
+ V8_INLINE Object KeyAt(InternalIndex entry) const;
+
+ InternalIndex::Range IterateEntries() {
+ return InternalIndex::Range(UsedCapacity());
+ }
DECL_VERIFIER(SmallOrderedHashTable)
@@ -460,8 +508,7 @@ class SmallOrderedHashTable : public HeapObject {
}
Address GetHashTableStartAddress(int capacity) const {
- return FIELD_ADDR(*this,
- DataTableStartOffset() + DataTableSizeFor(capacity));
+ return field_address(DataTableStartOffset() + DataTableSizeFor(capacity));
}
void SetFirstEntry(int bucket, byte value) {
@@ -699,39 +746,54 @@ class V8_EXPORT_PRIVATE OrderedHashSetHandler
Isolate* isolate, Handle<SmallOrderedHashSet> table);
};
-class OrderedNameDictionary
+class V8_EXPORT_PRIVATE OrderedNameDictionary
: public OrderedHashTable<OrderedNameDictionary, 3> {
+ using Base = OrderedHashTable<OrderedNameDictionary, 3>;
+
public:
DECL_CAST(OrderedNameDictionary)
- V8_EXPORT_PRIVATE static MaybeHandle<OrderedNameDictionary> Add(
+ static MaybeHandle<OrderedNameDictionary> Add(
Isolate* isolate, Handle<OrderedNameDictionary> table, Handle<Name> key,
Handle<Object> value, PropertyDetails details);
- V8_EXPORT_PRIVATE void SetEntry(int entry, Object key, Object value,
- PropertyDetails details);
+ void SetEntry(InternalIndex entry, Object key, Object value,
+ PropertyDetails details);
+
+ InternalIndex FindEntry(Isolate* isolate, Object key);
+
+ int NumberOfEnumerableProperties();
+
+ Object SlowReverseLookup(Isolate* isolate, Object value);
- V8_EXPORT_PRIVATE static Handle<OrderedNameDictionary> DeleteEntry(
- Isolate* isolate, Handle<OrderedNameDictionary> table, int entry);
+ static Handle<OrderedNameDictionary> DeleteEntry(
+ Isolate* isolate, Handle<OrderedNameDictionary> table,
+ InternalIndex entry);
static MaybeHandle<OrderedNameDictionary> Allocate(
Isolate* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
+ static MaybeHandle<OrderedNameDictionary> AllocateEmpty(
+ Isolate* isolate, AllocationType allocation = AllocationType::kReadOnly);
+
static MaybeHandle<OrderedNameDictionary> Rehash(
Isolate* isolate, Handle<OrderedNameDictionary> table, int new_capacity);
// Returns the value for entry.
- inline Object ValueAt(int entry);
+ inline Object ValueAt(InternalIndex entry);
+
+ // Like KeyAt, but casts to Name
+ inline Name NameAt(InternalIndex entry);
// Set the value for entry.
- inline void ValueAtPut(int entry, Object value);
+ inline void ValueAtPut(InternalIndex entry, Object value);
// Returns the property details for the property at entry.
- inline PropertyDetails DetailsAt(int entry);
+ inline PropertyDetails DetailsAt(InternalIndex entry);
// Set the details for entry.
- inline void DetailsAtPut(int entry, PropertyDetails value);
+ inline void DetailsAtPut(InternalIndex entry, PropertyDetails value);
inline void SetHash(int hash);
inline int Hash();
@@ -744,6 +806,8 @@ class OrderedNameDictionary
static const int kPropertyDetailsOffset = 2;
static const int kPrefixSize = 1;
+ static const bool kIsOrderedDictionaryType = true;
+
OBJECT_CONSTRUCTORS(OrderedNameDictionary,
OrderedHashTable<OrderedNameDictionary, 3>);
};
@@ -761,24 +825,26 @@ class V8_EXPORT_PRIVATE OrderedNameDictionaryHandler
static Handle<HeapObject> Shrink(Isolate* isolate, Handle<HeapObject> table);
static Handle<HeapObject> DeleteEntry(Isolate* isolate,
- Handle<HeapObject> table, int entry);
- static int FindEntry(Isolate* isolate, HeapObject table, Name key);
- static void SetEntry(HeapObject table, int entry, Object key, Object value,
- PropertyDetails details);
+ Handle<HeapObject> table,
+ InternalIndex entry);
+ static InternalIndex FindEntry(Isolate* isolate, HeapObject table, Name key);
+ static void SetEntry(HeapObject table, InternalIndex entry, Object key,
+ Object value, PropertyDetails details);
// Returns the value for entry.
- static Object ValueAt(HeapObject table, int entry);
+ static Object ValueAt(HeapObject table, InternalIndex entry);
// Set the value for entry.
- static void ValueAtPut(HeapObject table, int entry, Object value);
+ static void ValueAtPut(HeapObject table, InternalIndex entry, Object value);
// Returns the property details for the property at entry.
- static PropertyDetails DetailsAt(HeapObject table, int entry);
+ static PropertyDetails DetailsAt(HeapObject table, InternalIndex entry);
// Set the details for entry.
- static void DetailsAtPut(HeapObject table, int entry, PropertyDetails value);
+ static void DetailsAtPut(HeapObject table, InternalIndex entry,
+ PropertyDetails value);
- static Name KeyAt(HeapObject table, int entry);
+ static Name KeyAt(HeapObject table, InternalIndex entry);
static void SetHash(HeapObject table, int hash);
static int Hash(HeapObject table);
@@ -786,8 +852,6 @@ class V8_EXPORT_PRIVATE OrderedNameDictionaryHandler
static int NumberOfElements(HeapObject table);
static int Capacity(HeapObject table);
- static const int kNotFound = -1;
-
protected:
static MaybeHandle<OrderedNameDictionary> AdjustRepresentation(
Isolate* isolate, Handle<SmallOrderedNameDictionary> table);
@@ -802,23 +866,24 @@ class SmallOrderedNameDictionary
DECL_VERIFIER(SmallOrderedNameDictionary)
// Returns the value for entry.
- inline Object ValueAt(int entry);
+ inline Object ValueAt(InternalIndex entry);
static Handle<SmallOrderedNameDictionary> Rehash(
Isolate* isolate, Handle<SmallOrderedNameDictionary> table,
int new_capacity);
V8_EXPORT_PRIVATE static Handle<SmallOrderedNameDictionary> DeleteEntry(
- Isolate* isolate, Handle<SmallOrderedNameDictionary> table, int entry);
+ Isolate* isolate, Handle<SmallOrderedNameDictionary> table,
+ InternalIndex entry);
// Set the value for entry.
- inline void ValueAtPut(int entry, Object value);
+ inline void ValueAtPut(InternalIndex entry, Object value);
// Returns the property details for the property at entry.
- inline PropertyDetails DetailsAt(int entry);
+ inline PropertyDetails DetailsAt(InternalIndex entry);
// Set the details for entry.
- inline void DetailsAtPut(int entry, PropertyDetails value);
+ inline void DetailsAtPut(InternalIndex entry, PropertyDetails value);
inline void SetHash(int hash);
inline int Hash();
@@ -836,7 +901,7 @@ class SmallOrderedNameDictionary
Isolate* isolate, Handle<SmallOrderedNameDictionary> table,
Handle<Name> key, Handle<Object> value, PropertyDetails details);
- V8_EXPORT_PRIVATE void SetEntry(int entry, Object key, Object value,
+ V8_EXPORT_PRIVATE void SetEntry(InternalIndex entry, Object key, Object value,
PropertyDetails details);
static inline Handle<Map> GetMap(ReadOnlyRoots roots);
diff --git a/deps/v8/src/objects/ordered-hash-table.tq b/deps/v8/src/objects/ordered-hash-table.tq
index d1b58d93eb..311f08aee7 100644
--- a/deps/v8/src/objects/ordered-hash-table.tq
+++ b/deps/v8/src/objects/ordered-hash-table.tq
@@ -23,7 +23,6 @@ extern class SmallOrderedHashTable extends HeapObject
extern macro SmallOrderedHashSetMapConstant(): Map;
const kSmallOrderedHashSetMap: Map = SmallOrderedHashSetMapConstant();
-@noVerifier
extern class SmallOrderedHashSet extends SmallOrderedHashTable {
number_of_elements: uint8;
number_of_deleted_elements: uint8;
@@ -62,7 +61,6 @@ struct HashMapEntry {
extern macro SmallOrderedHashMapMapConstant(): Map;
const kSmallOrderedHashMapMap: Map = SmallOrderedHashMapMapConstant();
-@noVerifier
extern class SmallOrderedHashMap extends SmallOrderedHashTable {
number_of_elements: uint8;
number_of_deleted_elements: uint8;
@@ -99,7 +97,6 @@ struct NameDictionaryEntry {
property_details: Smi|TheHole;
}
-@noVerifier
extern class SmallOrderedNameDictionary extends SmallOrderedHashTable {
hash: int32;
number_of_elements: uint8;
diff --git a/deps/v8/src/objects/primitive-heap-object-inl.h b/deps/v8/src/objects/primitive-heap-object-inl.h
index 609bf027da..a194126ceb 100644
--- a/deps/v8/src/objects/primitive-heap-object-inl.h
+++ b/deps/v8/src/objects/primitive-heap-object-inl.h
@@ -8,7 +8,6 @@
#include "src/objects/primitive-heap-object.h"
#include "src/objects/heap-object-inl.h"
-#include "torque-generated/class-definitions-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,6 +15,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/primitive-heap-object-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(PrimitiveHeapObject)
} // namespace internal
diff --git a/deps/v8/src/objects/primitive-heap-object.h b/deps/v8/src/objects/primitive-heap-object.h
index f7a57ffc08..14023c5198 100644
--- a/deps/v8/src/objects/primitive-heap-object.h
+++ b/deps/v8/src/objects/primitive-heap-object.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_PRIMITIVE_HEAP_OBJECT_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -14,6 +13,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/primitive-heap-object-tq.inc"
+
// An abstract superclass for classes representing JavaScript primitive values
// other than Smi. It doesn't carry any functionality but allows primitive
// classes to be identified in the type system.
diff --git a/deps/v8/src/objects/promise-inl.h b/deps/v8/src/objects/promise-inl.h
index da11731e25..8d3be5b68c 100644
--- a/deps/v8/src/objects/promise-inl.h
+++ b/deps/v8/src/objects/promise-inl.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/promise-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(PromiseReactionJobTask)
TQ_OBJECT_CONSTRUCTORS_IMPL(PromiseFulfillReactionJobTask)
TQ_OBJECT_CONSTRUCTORS_IMPL(PromiseRejectReactionJobTask)
diff --git a/deps/v8/src/objects/promise.h b/deps/v8/src/objects/promise.h
index 2582543f77..497498c166 100644
--- a/deps/v8/src/objects/promise.h
+++ b/deps/v8/src/objects/promise.h
@@ -15,6 +15,8 @@ namespace internal {
class JSPromise;
+#include "torque-generated/src/objects/promise-tq.inc"
+
// Struct to hold state required for PromiseReactionJob. See the comment on the
// PromiseReaction below for details on how this is being managed to reduce the
// memory and allocation overhead. This is the base class for the concrete
diff --git a/deps/v8/src/objects/property-array-inl.h b/deps/v8/src/objects/property-array-inl.h
index c942177554..d4a4bc94fa 100644
--- a/deps/v8/src/objects/property-array-inl.h
+++ b/deps/v8/src/objects/property-array-inl.h
@@ -25,11 +25,11 @@ SMI_ACCESSORS(PropertyArray, length_and_hash, kLengthAndHashOffset)
SYNCHRONIZED_SMI_ACCESSORS(PropertyArray, length_and_hash, kLengthAndHashOffset)
Object PropertyArray::get(int index) const {
- const Isolate* isolate = GetIsolateForPtrCompr(*this);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*this);
return get(isolate, index);
}
-Object PropertyArray::get(const Isolate* isolate, int index) const {
+Object PropertyArray::get(IsolateRoot isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index),
static_cast<unsigned>(this->length()));
return TaggedField<Object>::Relaxed_Load(isolate, *this,
diff --git a/deps/v8/src/objects/property-array.h b/deps/v8/src/objects/property-array.h
index e7fbb49c72..da15e8d732 100644
--- a/deps/v8/src/objects/property-array.h
+++ b/deps/v8/src/objects/property-array.h
@@ -30,7 +30,7 @@ class PropertyArray : public HeapObject {
inline int Hash() const;
inline Object get(int index) const;
- inline Object get(const Isolate* isolate, int index) const;
+ inline Object get(IsolateRoot isolate, int index) const;
inline void set(int index, Object value);
// Setter with explicit barrier mode.
diff --git a/deps/v8/src/objects/property-descriptor-object-inl.h b/deps/v8/src/objects/property-descriptor-object-inl.h
index 7033107613..3f16b16e56 100644
--- a/deps/v8/src/objects/property-descriptor-object-inl.h
+++ b/deps/v8/src/objects/property-descriptor-object-inl.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/property-descriptor-object-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(PropertyDescriptorObject)
} // namespace internal
} // namespace v8
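This same two-line change recurs across many headers in this diff: the torque-generated .inc include moves from file scope to inside namespace v8::internal, evidently because the generated declarations are meant to land in that namespace (an inference from the repeated pattern; the path below is illustrative):

  namespace v8 {
  namespace internal {

  #include "torque-generated/src/objects/foo-tq.inc"  // hypothetical path

  }  // namespace internal
  }  // namespace v8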
diff --git a/deps/v8/src/objects/property-descriptor-object.h b/deps/v8/src/objects/property-descriptor-object.h
index 1b019e48f2..c9affb4ff7 100644
--- a/deps/v8/src/objects/property-descriptor-object.h
+++ b/deps/v8/src/objects/property-descriptor-object.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/property-descriptor-object-tq.inc"
+
class PropertyDescriptorObject
: public TorqueGeneratedPropertyDescriptorObject<PropertyDescriptorObject,
Struct> {
diff --git a/deps/v8/src/objects/property-descriptor.cc b/deps/v8/src/objects/property-descriptor.cc
index a14601bc74..e7bfd039de 100644
--- a/deps/v8/src/objects/property-descriptor.cc
+++ b/deps/v8/src/objects/property-descriptor.cc
@@ -57,7 +57,7 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
// TODO(jkummerow): support dictionary properties?
if (map->is_dictionary_map()) return false;
Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray>(map->instance_descriptors(kRelaxedLoad), isolate);
for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
Handle<Object> value;
diff --git a/deps/v8/src/objects/property.cc b/deps/v8/src/objects/property.cc
index 9b94739bf2..a4336e295b 100644
--- a/deps/v8/src/objects/property.cc
+++ b/deps/v8/src/objects/property.cc
@@ -75,7 +75,7 @@ Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
Descriptor Descriptor::DataConstant(Handle<Name> key, Handle<Object> value,
PropertyAttributes attributes) {
- const Isolate* isolate = GetIsolateForPtrCompr(*key);
+ IsolateRoot isolate = GetIsolateForPtrCompr(*key);
return Descriptor(key, MaybeObjectHandle(value), kData, attributes,
kDescriptor, PropertyConstness::kConst,
value->OptimalRepresentation(isolate), 0);
diff --git a/deps/v8/src/objects/prototype-info-inl.h b/deps/v8/src/objects/prototype-info-inl.h
index 8c93b21f24..9e18949db3 100644
--- a/deps/v8/src/objects/prototype-info-inl.h
+++ b/deps/v8/src/objects/prototype-info-inl.h
@@ -20,6 +20,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/prototype-info-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(PrototypeInfo)
Map PrototypeInfo::ObjectCreateMap() {
diff --git a/deps/v8/src/objects/prototype-info.h b/deps/v8/src/objects/prototype-info.h
index ab312b71a7..e4baa78fde 100644
--- a/deps/v8/src/objects/prototype-info.h
+++ b/deps/v8/src/objects/prototype-info.h
@@ -16,6 +16,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/prototype-info-tq.inc"
+
// Container for metadata stored on each prototype map.
class PrototypeInfo
: public TorqueGeneratedPrototypeInfo<PrototypeInfo, Struct> {
diff --git a/deps/v8/src/objects/prototype.h b/deps/v8/src/objects/prototype.h
index cd003837ca..0a8f21819a 100644
--- a/deps/v8/src/objects/prototype.h
+++ b/deps/v8/src/objects/prototype.h
@@ -42,6 +42,8 @@ class PrototypeIterator {
WhereToEnd where_to_end = END_AT_NULL);
~PrototypeIterator() = default;
+ PrototypeIterator(const PrototypeIterator&) = delete;
+ PrototypeIterator& operator=(const PrototypeIterator&) = delete;
inline bool HasAccess() const;
@@ -78,8 +80,6 @@ class PrototypeIterator {
WhereToEnd where_to_end_;
bool is_at_end_;
int seen_proxies_;
-
- DISALLOW_COPY_AND_ASSIGN(PrototypeIterator);
};
} // namespace internal
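PrototypeIterator here, and Script::Iterator and SharedFunctionInfo::ScriptIterator below, swap the DISALLOW_COPY_AND_ASSIGN macro for explicitly deleted special members; the macro traditionally expanded to much the same thing, but deleted members give clearer diagnostics and need no macro header. A minimal sketch of the modernized form:

  class NonCopyable {
   public:
    NonCopyable() = default;
    NonCopyable(const NonCopyable&) = delete;
    NonCopyable& operator=(const NonCopyable&) = delete;
  };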
diff --git a/deps/v8/src/objects/regexp-match-info.h b/deps/v8/src/objects/regexp-match-info.h
index e862f687c6..9a1b03828e 100644
--- a/deps/v8/src/objects/regexp-match-info.h
+++ b/deps/v8/src/objects/regexp-match-info.h
@@ -8,6 +8,7 @@
#include "src/base/compiler-specific.h"
#include "src/objects/fixed-array.h"
#include "src/objects/objects.h"
+#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -18,6 +19,8 @@ namespace internal {
class Object;
class String;
+#include "torque-generated/src/objects/regexp-match-info-tq.inc"
+
// The property RegExpMatchInfo includes the matchIndices
// array of the last successful regexp match (an array of start/end index
// pairs for the match and all the captured substrings), the invariant is
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index 01a4964bdb..6f9f944b68 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -1044,6 +1044,24 @@ std::ostream& operator<<(std::ostream& os, VariableAllocationInfo var_info) {
}
template <typename LocalIsolate>
+Handle<ModuleRequest> ModuleRequest::New(LocalIsolate* isolate,
+ Handle<String> specifier,
+ Handle<FixedArray> import_assertions) {
+ Handle<ModuleRequest> result = Handle<ModuleRequest>::cast(
+ isolate->factory()->NewStruct(MODULE_REQUEST_TYPE, AllocationType::kOld));
+ result->set_specifier(*specifier);
+ result->set_import_assertions(*import_assertions);
+ return result;
+}
+
+template Handle<ModuleRequest> ModuleRequest::New(
+ Isolate* isolate, Handle<String> specifier,
+ Handle<FixedArray> import_assertions);
+template Handle<ModuleRequest> ModuleRequest::New(
+ LocalIsolate* isolate, Handle<String> specifier,
+ Handle<FixedArray> import_assertions);
+
+template <typename LocalIsolate>
Handle<SourceTextModuleInfoEntry> SourceTextModuleInfoEntry::New(
LocalIsolate* isolate, Handle<PrimitiveHeapObject> export_name,
Handle<PrimitiveHeapObject> local_name,
@@ -1082,7 +1100,9 @@ Handle<SourceTextModuleInfo> SourceTextModuleInfo::New(
Handle<FixedArray> module_request_positions =
isolate->factory()->NewFixedArray(size);
for (const auto& elem : descr->module_requests()) {
- module_requests->set(elem.second.index, *elem.first->string());
+ Handle<ModuleRequest> serialized_module_request =
+ elem.first->Serialize(isolate);
+ module_requests->set(elem.second.index, *serialized_module_request);
module_request_positions->set(elem.second.index,
Smi::FromInt(elem.second.position));
}
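ModuleRequest::New above is defined in the .cc file as a template over the isolate type and then explicitly instantiated for both Isolate and LocalIsolate, so other translation units can link against either variant. A standalone sketch of that pattern (types and names are stand-ins, not V8's):

  struct Isolate {};
  struct LocalIsolate {};

  template <typename IsolateT>
  int MakeThing(IsolateT* /*isolate*/) {  // definition lives in one .cc file
    return 42;
  }

  // Explicit instantiations: emit object code for both variants here so
  // callers elsewhere only need the declaration.
  template int MakeThing<Isolate>(Isolate*);
  template int MakeThing<LocalIsolate>(LocalIsolate*);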
diff --git a/deps/v8/src/objects/script-inl.h b/deps/v8/src/objects/script-inl.h
index aaa1910ceb..00c8bb0e2e 100644
--- a/deps/v8/src/objects/script-inl.h
+++ b/deps/v8/src/objects/script-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/script-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(Script)
NEVER_READ_ONLY_SPACE_IMPL(Script)
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index d9caff4bb6..6e3e633f53 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -20,6 +20,8 @@ namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/script-tq.inc"
+
// Script describes a script which has been added to the VM.
class Script : public TorqueGeneratedScript<Script, Struct> {
public:
@@ -172,11 +174,12 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
class V8_EXPORT_PRIVATE Iterator {
public:
explicit Iterator(Isolate* isolate);
+ Iterator(const Iterator&) = delete;
+ Iterator& operator=(const Iterator&) = delete;
Script Next();
private:
WeakArrayList::Iterator iterator_;
- DISALLOW_COPY_AND_ASSIGN(Iterator);
};
// Dispatched behavior.
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 17677106d9..caf14e8bc3 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -22,6 +22,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/shared-function-info-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(PreparseData)
int PreparseData::inner_start_offset() const {
@@ -57,7 +59,7 @@ void PreparseData::set(int index, byte value) {
void PreparseData::copy_in(int index, const byte* buffer, int length) {
DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
index + length <= this->data_length());
- Address dst_addr = FIELD_ADDR(*this, kDataStartOffset + index * kByteSize);
+ Address dst_addr = field_address(kDataStartOffset + index * kByteSize);
memcpy(reinterpret_cast<void*>(dst_addr), buffer, length);
}
@@ -97,12 +99,12 @@ NEVER_READ_ONLY_SPACE_IMPL(SharedFunctionInfo)
CAST_ACCESSOR(SharedFunctionInfo)
DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
-SYNCHRONIZED_ACCESSORS(SharedFunctionInfo, function_data, Object,
- kFunctionDataOffset)
-ACCESSORS(SharedFunctionInfo, name_or_scope_info, Object,
- kNameOrScopeInfoOffset)
-ACCESSORS(SharedFunctionInfo, script_or_debug_info, HeapObject,
- kScriptOrDebugInfoOffset)
+RELEASE_ACQUIRE_ACCESSORS(SharedFunctionInfo, function_data, Object,
+ kFunctionDataOffset)
+RELEASE_ACQUIRE_ACCESSORS(SharedFunctionInfo, name_or_scope_info, Object,
+ kNameOrScopeInfoOffset)
+RELEASE_ACQUIRE_ACCESSORS(SharedFunctionInfo, script_or_debug_info, HeapObject,
+ kScriptOrDebugInfoOffset)
INT32_ACCESSORS(SharedFunctionInfo, function_literal_id,
kFunctionLiteralIdOffset)
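The accessors above now pair every write with kReleaseStore and every read with kAcquireLoad because these fields can be read off-thread; the ShouldFlushBytecode comment further down notes the concurrent marker. A standalone sketch of the memory-ordering contract (simplified with std::atomic; V8's tagged fields differ in mechanics):

  #include <atomic>

  struct HolderSketch {
    std::atomic<void*> function_data{nullptr};

    // Publisher: release guarantees everything written before this store is
    // visible to any thread that acquire-loads the new value.
    void set_function_data(void* value) {
      function_data.store(value, std::memory_order_release);
    }

    // Concurrent reader (e.g. the marker): acquire pairs with the release.
    void* get_function_data() const {
      return function_data.load(std::memory_order_acquire);
    }
  };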
@@ -121,7 +123,7 @@ RELAXED_INT32_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
UINT8_ACCESSORS(SharedFunctionInfo, flags2, kFlags2Offset)
bool SharedFunctionInfo::HasSharedName() const {
- Object value = name_or_scope_info();
+ Object value = name_or_scope_info(kAcquireLoad);
if (value.IsScopeInfo()) {
return ScopeInfo::cast(value).HasSharedFunctionName();
}
@@ -130,7 +132,7 @@ bool SharedFunctionInfo::HasSharedName() const {
String SharedFunctionInfo::Name() const {
if (!HasSharedName()) return GetReadOnlyRoots().empty_string();
- Object value = name_or_scope_info();
+ Object value = name_or_scope_info(kAcquireLoad);
if (value.IsScopeInfo()) {
if (ScopeInfo::cast(value).HasFunctionName()) {
return String::cast(ScopeInfo::cast(value).FunctionName());
@@ -141,13 +143,13 @@ String SharedFunctionInfo::Name() const {
}
void SharedFunctionInfo::SetName(String name) {
- Object maybe_scope_info = name_or_scope_info();
+ Object maybe_scope_info = name_or_scope_info(kAcquireLoad);
if (maybe_scope_info.IsScopeInfo()) {
ScopeInfo::cast(maybe_scope_info).SetFunctionName(name);
} else {
DCHECK(maybe_scope_info.IsString() ||
maybe_scope_info == kNoSharedNameSentinel);
- set_name_or_scope_info(name);
+ set_name_or_scope_info(name, kReleaseStore);
}
UpdateFunctionMapIndex();
}
@@ -219,9 +221,6 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
is_oneshot_iife_or_properties_are_final,
SharedFunctionInfo::IsOneshotIifeOrPropertiesAreFinalBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
- is_safe_to_skip_arguments_adaptor,
- SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
private_name_lookup_skips_outer_class,
SharedFunctionInfo::PrivateNameLookupSkipsOuterClassBit)
@@ -334,17 +333,17 @@ void SharedFunctionInfo::DontAdaptArguments() {
bool SharedFunctionInfo::IsInterpreted() const { return HasBytecodeArray(); }
ScopeInfo SharedFunctionInfo::scope_info() const {
- Object maybe_scope_info = name_or_scope_info();
+ Object maybe_scope_info = name_or_scope_info(kAcquireLoad);
if (maybe_scope_info.IsScopeInfo()) {
return ScopeInfo::cast(maybe_scope_info);
}
return GetReadOnlyRoots().empty_scope_info();
}
-void SharedFunctionInfo::set_scope_info(ScopeInfo scope_info,
- WriteBarrierMode mode) {
+void SharedFunctionInfo::SetScopeInfo(ScopeInfo scope_info,
+ WriteBarrierMode mode) {
// Move the existing name onto the ScopeInfo.
- Object name = name_or_scope_info();
+ Object name = name_or_scope_info(kAcquireLoad);
if (name.IsScopeInfo()) {
name = ScopeInfo::cast(name).FunctionName();
}
@@ -354,7 +353,7 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo scope_info,
if (HasInferredName() && inferred_name().length() != 0) {
scope_info.SetInferredFunctionName(inferred_name());
}
- set_raw_scope_info(scope_info, mode);
+ set_name_or_scope_info(scope_info, kReleaseStore, mode);
}
void SharedFunctionInfo::set_raw_scope_info(ScopeInfo scope_info,
@@ -415,7 +414,7 @@ void SharedFunctionInfo::set_feedback_metadata(FeedbackMetadata value,
}
bool SharedFunctionInfo::is_compiled() const {
- Object data = function_data();
+ Object data = function_data(kAcquireLoad);
return data != Smi::FromEnum(Builtins::kCompileLazy) &&
!data.IsUncompiledData();
}
@@ -450,56 +449,60 @@ bool SharedFunctionInfo::has_simple_parameters() {
}
bool SharedFunctionInfo::IsApiFunction() const {
- return function_data().IsFunctionTemplateInfo();
+ return function_data(kAcquireLoad).IsFunctionTemplateInfo();
}
FunctionTemplateInfo SharedFunctionInfo::get_api_func_data() const {
DCHECK(IsApiFunction());
- return FunctionTemplateInfo::cast(function_data());
+ return FunctionTemplateInfo::cast(function_data(kAcquireLoad));
}
bool SharedFunctionInfo::HasBytecodeArray() const {
- return function_data().IsBytecodeArray() ||
- function_data().IsInterpreterData();
+ Object data = function_data(kAcquireLoad);
+ return data.IsBytecodeArray() || data.IsInterpreterData();
}
BytecodeArray SharedFunctionInfo::GetBytecodeArray() const {
DCHECK(HasBytecodeArray());
if (HasDebugInfo() && GetDebugInfo().HasInstrumentedBytecodeArray()) {
return GetDebugInfo().OriginalBytecodeArray();
- } else if (function_data().IsBytecodeArray()) {
- return BytecodeArray::cast(function_data());
+ }
+
+ Object data = function_data(kAcquireLoad);
+ if (data.IsBytecodeArray()) {
+ return BytecodeArray::cast(data);
} else {
- DCHECK(function_data().IsInterpreterData());
- return InterpreterData::cast(function_data()).bytecode_array();
+ DCHECK(data.IsInterpreterData());
+ return InterpreterData::cast(data).bytecode_array();
}
}
BytecodeArray SharedFunctionInfo::GetDebugBytecodeArray() const {
- DCHECK(HasBytecodeArray());
DCHECK(HasDebugInfo() && GetDebugInfo().HasInstrumentedBytecodeArray());
- if (function_data().IsBytecodeArray()) {
- return BytecodeArray::cast(function_data());
+
+ Object data = function_data(kAcquireLoad);
+ if (data.IsBytecodeArray()) {
+ return BytecodeArray::cast(data);
} else {
- DCHECK(function_data().IsInterpreterData());
- return InterpreterData::cast(function_data()).bytecode_array();
+ DCHECK(data.IsInterpreterData());
+ return InterpreterData::cast(data).bytecode_array();
}
}
void SharedFunctionInfo::SetDebugBytecodeArray(BytecodeArray bytecode) {
- DCHECK(HasBytecodeArray());
- if (function_data().IsBytecodeArray()) {
- set_function_data(bytecode);
+ Object data = function_data(kAcquireLoad);
+ if (data.IsBytecodeArray()) {
+ set_function_data(bytecode, kReleaseStore);
} else {
- DCHECK(function_data().IsInterpreterData());
+ DCHECK(data.IsInterpreterData());
interpreter_data().set_bytecode_array(bytecode);
}
}
void SharedFunctionInfo::set_bytecode_array(BytecodeArray bytecode) {
- DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy) ||
+ DCHECK(function_data(kAcquireLoad) == Smi::FromEnum(Builtins::kCompileLazy) ||
HasUncompiledData());
- set_function_data(bytecode);
+ set_function_data(bytecode, kReleaseStore);
}
bool SharedFunctionInfo::ShouldFlushBytecode(BytecodeFlushMode mode) {
@@ -513,7 +516,7 @@ bool SharedFunctionInfo::ShouldFlushBytecode(BytecodeFlushMode mode) {
// Get a snapshot of the function data field, and if it is a bytecode array,
// check if it is old. Note: this is done this way because this function can be
// called by the concurrent marker.
- Object data = function_data();
+ Object data = function_data(kAcquireLoad);
if (!data.IsBytecodeArray()) return false;
if (mode == BytecodeFlushMode::kStressFlushBytecode) return true;
@@ -529,86 +532,87 @@ Code SharedFunctionInfo::InterpreterTrampoline() const {
}
bool SharedFunctionInfo::HasInterpreterData() const {
- return function_data().IsInterpreterData();
+ return function_data(kAcquireLoad).IsInterpreterData();
}
InterpreterData SharedFunctionInfo::interpreter_data() const {
DCHECK(HasInterpreterData());
- return InterpreterData::cast(function_data());
+ return InterpreterData::cast(function_data(kAcquireLoad));
}
void SharedFunctionInfo::set_interpreter_data(
InterpreterData interpreter_data) {
DCHECK(FLAG_interpreted_frames_native_stack);
- set_function_data(interpreter_data);
+ set_function_data(interpreter_data, kReleaseStore);
}
bool SharedFunctionInfo::HasAsmWasmData() const {
- return function_data().IsAsmWasmData();
+ return function_data(kAcquireLoad).IsAsmWasmData();
}
AsmWasmData SharedFunctionInfo::asm_wasm_data() const {
DCHECK(HasAsmWasmData());
- return AsmWasmData::cast(function_data());
+ return AsmWasmData::cast(function_data(kAcquireLoad));
}
void SharedFunctionInfo::set_asm_wasm_data(AsmWasmData data) {
- DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy) ||
+ DCHECK(function_data(kAcquireLoad) == Smi::FromEnum(Builtins::kCompileLazy) ||
HasUncompiledData() || HasAsmWasmData());
- set_function_data(data);
+ set_function_data(data, kReleaseStore);
}
bool SharedFunctionInfo::HasBuiltinId() const {
- return function_data().IsSmi();
+ return function_data(kAcquireLoad).IsSmi();
}
int SharedFunctionInfo::builtin_id() const {
DCHECK(HasBuiltinId());
- int id = Smi::ToInt(function_data());
+ int id = Smi::ToInt(function_data(kAcquireLoad));
DCHECK(Builtins::IsBuiltinId(id));
return id;
}
void SharedFunctionInfo::set_builtin_id(int builtin_id) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
- set_function_data(Smi::FromInt(builtin_id), SKIP_WRITE_BARRIER);
+ set_function_data(Smi::FromInt(builtin_id), kReleaseStore,
+ SKIP_WRITE_BARRIER);
}
bool SharedFunctionInfo::HasUncompiledData() const {
- return function_data().IsUncompiledData();
+ return function_data(kAcquireLoad).IsUncompiledData();
}
UncompiledData SharedFunctionInfo::uncompiled_data() const {
DCHECK(HasUncompiledData());
- return UncompiledData::cast(function_data());
+ return UncompiledData::cast(function_data(kAcquireLoad));
}
void SharedFunctionInfo::set_uncompiled_data(UncompiledData uncompiled_data) {
- DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy) ||
+ DCHECK(function_data(kAcquireLoad) == Smi::FromEnum(Builtins::kCompileLazy) ||
HasUncompiledData());
DCHECK(uncompiled_data.IsUncompiledData());
- set_function_data(uncompiled_data);
+ set_function_data(uncompiled_data, kReleaseStore);
}
bool SharedFunctionInfo::HasUncompiledDataWithPreparseData() const {
- return function_data().IsUncompiledDataWithPreparseData();
+ return function_data(kAcquireLoad).IsUncompiledDataWithPreparseData();
}
UncompiledDataWithPreparseData
SharedFunctionInfo::uncompiled_data_with_preparse_data() const {
DCHECK(HasUncompiledDataWithPreparseData());
- return UncompiledDataWithPreparseData::cast(function_data());
+ return UncompiledDataWithPreparseData::cast(function_data(kAcquireLoad));
}
void SharedFunctionInfo::set_uncompiled_data_with_preparse_data(
UncompiledDataWithPreparseData uncompiled_data_with_preparse_data) {
- DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy));
+ DCHECK(function_data(kAcquireLoad) == Smi::FromEnum(Builtins::kCompileLazy));
DCHECK(uncompiled_data_with_preparse_data.IsUncompiledDataWithPreparseData());
- set_function_data(uncompiled_data_with_preparse_data);
+ set_function_data(uncompiled_data_with_preparse_data, kReleaseStore);
}
bool SharedFunctionInfo::HasUncompiledDataWithoutPreparseData() const {
- return function_data().IsUncompiledDataWithoutPreparseData();
+ return function_data(kAcquireLoad).IsUncompiledDataWithoutPreparseData();
}
void SharedFunctionInfo::ClearPreparseData() {
@@ -670,19 +674,19 @@ void UncompiledDataWithPreparseData::Init(LocalIsolate* isolate,
}
bool SharedFunctionInfo::HasWasmExportedFunctionData() const {
- return function_data().IsWasmExportedFunctionData();
+ return function_data(kAcquireLoad).IsWasmExportedFunctionData();
}
bool SharedFunctionInfo::HasWasmJSFunctionData() const {
- return function_data().IsWasmJSFunctionData();
+ return function_data(kAcquireLoad).IsWasmJSFunctionData();
}
bool SharedFunctionInfo::HasWasmCapiFunctionData() const {
- return function_data().IsWasmCapiFunctionData();
+ return function_data(kAcquireLoad).IsWasmCapiFunctionData();
}
HeapObject SharedFunctionInfo::script() const {
- HeapObject maybe_script = script_or_debug_info();
+ HeapObject maybe_script = script_or_debug_info(kAcquireLoad);
if (maybe_script.IsDebugInfo()) {
return DebugInfo::cast(maybe_script).script();
}
@@ -690,11 +694,11 @@ HeapObject SharedFunctionInfo::script() const {
}
void SharedFunctionInfo::set_script(HeapObject script) {
- HeapObject maybe_debug_info = script_or_debug_info();
+ HeapObject maybe_debug_info = script_or_debug_info(kAcquireLoad);
if (maybe_debug_info.IsDebugInfo()) {
DebugInfo::cast(maybe_debug_info).set_script(script);
} else {
- set_script_or_debug_info(script);
+ set_script_or_debug_info(script, kReleaseStore);
}
}
@@ -703,22 +707,23 @@ bool SharedFunctionInfo::is_repl_mode() const {
}
bool SharedFunctionInfo::HasDebugInfo() const {
- return script_or_debug_info().IsDebugInfo();
+ return script_or_debug_info(kAcquireLoad).IsDebugInfo();
}
DebugInfo SharedFunctionInfo::GetDebugInfo() const {
- DCHECK(HasDebugInfo());
- return DebugInfo::cast(script_or_debug_info());
+ auto debug_info = script_or_debug_info(kAcquireLoad);
+ DCHECK(debug_info.IsDebugInfo());
+ return DebugInfo::cast(debug_info);
}
void SharedFunctionInfo::SetDebugInfo(DebugInfo debug_info) {
DCHECK(!HasDebugInfo());
- DCHECK_EQ(debug_info.script(), script_or_debug_info());
- set_script_or_debug_info(debug_info);
+ DCHECK_EQ(debug_info.script(), script_or_debug_info(kAcquireLoad));
+ set_script_or_debug_info(debug_info, kReleaseStore);
}
bool SharedFunctionInfo::HasInferredName() {
- Object scope_info = name_or_scope_info();
+ Object scope_info = name_or_scope_info(kAcquireLoad);
if (scope_info.IsScopeInfo()) {
return ScopeInfo::cast(scope_info).HasInferredFunctionName();
}
@@ -726,7 +731,7 @@ bool SharedFunctionInfo::HasInferredName() {
}
String SharedFunctionInfo::inferred_name() {
- Object maybe_scope_info = name_or_scope_info();
+ Object maybe_scope_info = name_or_scope_info(kAcquireLoad);
if (maybe_scope_info.IsScopeInfo()) {
ScopeInfo scope_info = ScopeInfo::cast(maybe_scope_info);
if (scope_info.HasInferredFunctionName()) {
diff --git a/deps/v8/src/objects/shared-function-info.cc b/deps/v8/src/objects/shared-function-info.cc
index e920425d24..885d88e689 100644
--- a/deps/v8/src/objects/shared-function-info.cc
+++ b/deps/v8/src/objects/shared-function-info.cc
@@ -36,14 +36,15 @@ void SharedFunctionInfo::Init(ReadOnlyRoots ro_roots, int unique_id) {
// Set the name to the no-name sentinel, this can be updated later.
set_name_or_scope_info(SharedFunctionInfo::kNoSharedNameSentinel,
- SKIP_WRITE_BARRIER);
+ kReleaseStore, SKIP_WRITE_BARRIER);
// Generally functions won't have feedback, unless they have been created
// from a FunctionLiteral. Those can just reset this field to keep the
// SharedFunctionInfo in a consistent state.
set_raw_outer_scope_info_or_feedback_metadata(ro_roots.the_hole_value(),
SKIP_WRITE_BARRIER);
- set_script_or_debug_info(ro_roots.undefined_value(), SKIP_WRITE_BARRIER);
+ set_script_or_debug_info(ro_roots.undefined_value(), kReleaseStore,
+ SKIP_WRITE_BARRIER);
set_function_literal_id(kFunctionLiteralIdInvalid);
#if V8_SFI_HAS_UNIQUE_ID
set_unique_id(unique_id);
@@ -72,7 +73,7 @@ Code SharedFunctionInfo::GetCode() const {
// ======
Isolate* isolate = GetIsolate();
- Object data = function_data();
+ Object data = function_data(kAcquireLoad);
if (data.IsSmi()) {
// Holding a Smi means we are a builtin.
DCHECK(HasBuiltinId());
@@ -113,17 +114,17 @@ Code SharedFunctionInfo::GetCode() const {
WasmExportedFunctionData SharedFunctionInfo::wasm_exported_function_data()
const {
DCHECK(HasWasmExportedFunctionData());
- return WasmExportedFunctionData::cast(function_data());
+ return WasmExportedFunctionData::cast(function_data(kAcquireLoad));
}
WasmJSFunctionData SharedFunctionInfo::wasm_js_function_data() const {
DCHECK(HasWasmJSFunctionData());
- return WasmJSFunctionData::cast(function_data());
+ return WasmJSFunctionData::cast(function_data(kAcquireLoad));
}
WasmCapiFunctionData SharedFunctionInfo::wasm_capi_function_data() const {
DCHECK(HasWasmCapiFunctionData());
- return WasmCapiFunctionData::cast(function_data());
+ return WasmCapiFunctionData::cast(function_data(kAcquireLoad));
}
SharedFunctionInfo::ScriptIterator::ScriptIterator(Isolate* isolate,
@@ -310,7 +311,7 @@ void SharedFunctionInfo::DiscardCompiled(
Handle<UncompiledData> data =
isolate->factory()->NewUncompiledDataWithoutPreparseData(
inferred_name_val, start_position, end_position);
- shared_info->set_function_data(*data);
+ shared_info->set_function_data(*data, kReleaseStore);
}
}
@@ -450,7 +451,7 @@ template <typename LocalIsolate>
void SharedFunctionInfo::InitFromFunctionLiteral(
LocalIsolate* isolate, Handle<SharedFunctionInfo> shared_info,
FunctionLiteral* lit, bool is_toplevel) {
- DCHECK(!shared_info->name_or_scope_info().IsScopeInfo());
+ DCHECK(!shared_info->name_or_scope_info(kAcquireLoad).IsScopeInfo());
// When adding fields here, make sure DeclarationScope::AnalyzePartially is
// updated accordingly.
@@ -497,8 +498,6 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
if (lit->ShouldEagerCompile()) {
shared_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
shared_info->UpdateAndFinalizeExpectedNofPropertiesFromEstimate(lit);
- shared_info->set_is_safe_to_skip_arguments_adaptor(
- lit->SafeToSkipArgumentsAdaptor());
DCHECK_NULL(lit->produced_preparse_data());
// If we're about to eager compile, we'll have the function literal
@@ -506,7 +505,6 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
return;
}
- shared_info->set_is_safe_to_skip_arguments_adaptor(false);
shared_info->UpdateExpectedNofPropertiesFromEstimate(lit);
Handle<UncompiledData> data;
@@ -593,7 +591,7 @@ void SharedFunctionInfo::SetFunctionTokenPosition(int function_token_position,
}
int SharedFunctionInfo::StartPosition() const {
- Object maybe_scope_info = name_or_scope_info();
+ Object maybe_scope_info = name_or_scope_info(kAcquireLoad);
if (maybe_scope_info.IsScopeInfo()) {
ScopeInfo info = ScopeInfo::cast(maybe_scope_info);
if (info.HasPositionInfo()) {
@@ -618,7 +616,7 @@ int SharedFunctionInfo::StartPosition() const {
}
int SharedFunctionInfo::EndPosition() const {
- Object maybe_scope_info = name_or_scope_info();
+ Object maybe_scope_info = name_or_scope_info(kAcquireLoad);
if (maybe_scope_info.IsScopeInfo()) {
ScopeInfo info = ScopeInfo::cast(maybe_scope_info);
if (info.HasPositionInfo()) {
@@ -643,7 +641,7 @@ int SharedFunctionInfo::EndPosition() const {
}
void SharedFunctionInfo::SetPosition(int start_position, int end_position) {
- Object maybe_scope_info = name_or_scope_info();
+ Object maybe_scope_info = name_or_scope_info(kAcquireLoad);
if (maybe_scope_info.IsScopeInfo()) {
ScopeInfo info = ScopeInfo::cast(maybe_scope_info);
if (info.HasPositionInfo()) {
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index e195f99771..be6705e327 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -38,6 +38,8 @@ class WasmCapiFunctionData;
class WasmExportedFunctionData;
class WasmJSFunctionData;
+#include "torque-generated/src/objects/shared-function-info-tq.inc"
+
// Data collected by the pre-parser storing information about scopes and inner
// functions.
//
@@ -215,13 +217,15 @@ class SharedFunctionInfo : public HeapObject {
static const int kNotFound = -1;
- // [scope_info]: Scope info.
- DECL_ACCESSORS(scope_info, ScopeInfo)
+ DECL_GETTER(scope_info, ScopeInfo)
// Set scope_info without moving the existing name onto the ScopeInfo.
inline void set_raw_scope_info(ScopeInfo scope_info,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void SetScopeInfo(ScopeInfo scope_info,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
inline bool is_script() const;
inline bool needs_script_context() const;
@@ -304,7 +308,7 @@ class SharedFunctionInfo : public HeapObject {
// - a UncompiledDataWithPreparseData for lazy compilation
// [HasUncompiledDataWithPreparseData()]
// - a WasmExportedFunctionData for Wasm [HasWasmExportedFunctionData()]
- DECL_ACCESSORS(function_data, Object)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(function_data, Object)
inline bool IsApiFunction() const;
inline bool is_class_constructor() const;
@@ -337,7 +341,8 @@ class SharedFunctionInfo : public HeapObject {
UncompiledDataWithPreparseData data);
inline bool HasUncompiledDataWithoutPreparseData() const;
inline bool HasWasmExportedFunctionData() const;
- WasmExportedFunctionData wasm_exported_function_data() const;
+ V8_EXPORT_PRIVATE WasmExportedFunctionData
+ wasm_exported_function_data() const;
inline bool HasWasmJSFunctionData() const;
WasmJSFunctionData wasm_js_function_data() const;
inline bool HasWasmCapiFunctionData() const;
@@ -373,7 +378,7 @@ class SharedFunctionInfo : public HeapObject {
// [script_or_debug_info]: One of:
// - Script from which the function originates.
// - a DebugInfo which holds the actual script [HasDebugInfo()].
- DECL_ACCESSORS(script_or_debug_info, HeapObject)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(script_or_debug_info, HeapObject)
inline HeapObject script() const;
inline void set_script(HeapObject script);
@@ -463,17 +468,6 @@ class SharedFunctionInfo : public HeapObject {
// Whether or not the number of expected properties may change.
DECL_BOOLEAN_ACCESSORS(are_properties_final)
- // Indicates that the function represented by the shared function info
- // cannot observe the actual parameters passed at a call site, which
- // means the function doesn't use the arguments object, doesn't use
- // rest parameters, and is also in strict mode (meaning that there's
- // no way to get to the actual arguments via the non-standard "arguments"
- // accessor on sloppy mode functions). This can be used to speed up calls
- // to this function even in the presence of arguments mismatch.
- // See http://bit.ly/v8-faster-calls-with-arguments-mismatch for more
- // information on this.
- DECL_BOOLEAN_ACCESSORS(is_safe_to_skip_arguments_adaptor)
-
// Indicates that the function has been reported for binary code coverage.
DECL_BOOLEAN_ACCESSORS(has_reported_binary_coverage)
@@ -619,6 +613,8 @@ class SharedFunctionInfo : public HeapObject {
public:
V8_EXPORT_PRIVATE ScriptIterator(Isolate* isolate, Script script);
explicit ScriptIterator(Handle<WeakFixedArray> shared_function_infos);
+ ScriptIterator(const ScriptIterator&) = delete;
+ ScriptIterator& operator=(const ScriptIterator&) = delete;
V8_EXPORT_PRIVATE SharedFunctionInfo Next();
int CurrentIndex() const { return index_ - 1; }
@@ -628,7 +624,6 @@ class SharedFunctionInfo : public HeapObject {
private:
Handle<WeakFixedArray> shared_function_infos_;
int index_;
- DISALLOW_COPY_AND_ASSIGN(ScriptIterator);
};
DECL_CAST(SharedFunctionInfo)
@@ -665,7 +660,7 @@ class SharedFunctionInfo : public HeapObject {
// [name_or_scope_info]: Function name string, kNoSharedNameSentinel or
// ScopeInfo.
- DECL_ACCESSORS(name_or_scope_info, Object)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(name_or_scope_info, Object)
// [outer scope info] The outer scope info, needed to lazily parse this
// function.
diff --git a/deps/v8/src/objects/shared-function-info.tq b/deps/v8/src/objects/shared-function-info.tq
index 17ec1f2fea..838703454c 100644
--- a/deps/v8/src/objects/shared-function-info.tq
+++ b/deps/v8/src/objects/shared-function-info.tq
@@ -37,7 +37,6 @@ bitfield struct SharedFunctionInfoFlags extends uint32 {
has_reported_binary_coverage: bool: 1 bit;
is_top_level: bool: 1 bit;
is_oneshot_iife_or_properties_are_final: bool: 1 bit;
- is_safe_to_skip_arguments_adaptor: bool: 1 bit;
private_name_lookup_skips_outer_class: bool: 1 bit;
}
diff --git a/deps/v8/src/objects/slots-inl.h b/deps/v8/src/objects/slots-inl.h
index bd243cd8ba..2943c117c7 100644
--- a/deps/v8/src/objects/slots-inl.h
+++ b/deps/v8/src/objects/slots-inl.h
@@ -31,7 +31,7 @@ bool FullObjectSlot::contains_value(Address raw_value) const {
Object FullObjectSlot::operator*() const { return Object(*location()); }
-Object FullObjectSlot::load(const Isolate* isolate) const { return **this; }
+Object FullObjectSlot::load(IsolateRoot isolate) const { return **this; }
void FullObjectSlot::store(Object value) const { *location() = value.ptr(); }
@@ -39,7 +39,7 @@ Object FullObjectSlot::Acquire_Load() const {
return Object(base::AsAtomicPointer::Acquire_Load(location()));
}
-Object FullObjectSlot::Acquire_Load(const Isolate* isolate) const {
+Object FullObjectSlot::Acquire_Load(IsolateRoot isolate) const {
return Acquire_Load();
}
@@ -47,7 +47,7 @@ Object FullObjectSlot::Relaxed_Load() const {
return Object(base::AsAtomicPointer::Relaxed_Load(location()));
}
-Object FullObjectSlot::Relaxed_Load(const Isolate* isolate) const {
+Object FullObjectSlot::Relaxed_Load(IsolateRoot isolate) const {
return Relaxed_Load();
}
@@ -79,7 +79,7 @@ MaybeObject FullMaybeObjectSlot::operator*() const {
return MaybeObject(*location());
}
-MaybeObject FullMaybeObjectSlot::load(const Isolate* isolate) const {
+MaybeObject FullMaybeObjectSlot::load(IsolateRoot isolate) const {
return **this;
}
@@ -91,7 +91,7 @@ MaybeObject FullMaybeObjectSlot::Relaxed_Load() const {
return MaybeObject(base::AsAtomicPointer::Relaxed_Load(location()));
}
-MaybeObject FullMaybeObjectSlot::Relaxed_Load(const Isolate* isolate) const {
+MaybeObject FullMaybeObjectSlot::Relaxed_Load(IsolateRoot isolate) const {
return Relaxed_Load();
}
@@ -113,7 +113,7 @@ HeapObjectReference FullHeapObjectSlot::operator*() const {
return HeapObjectReference(*location());
}
-HeapObjectReference FullHeapObjectSlot::load(const Isolate* isolate) const {
+HeapObjectReference FullHeapObjectSlot::load(IsolateRoot isolate) const {
return **this;
}
diff --git a/deps/v8/src/objects/slots.h b/deps/v8/src/objects/slots.h
index cb726eba46..2221fb41c8 100644
--- a/deps/v8/src/objects/slots.h
+++ b/deps/v8/src/objects/slots.h
@@ -110,13 +110,13 @@ class FullObjectSlot : public SlotBase<FullObjectSlot, Address> {
inline bool contains_value(Address raw_value) const;
inline Object operator*() const;
- inline Object load(const Isolate* isolate) const;
+ inline Object load(IsolateRoot isolate) const;
inline void store(Object value) const;
inline Object Acquire_Load() const;
- inline Object Acquire_Load(const Isolate* isolate) const;
+ inline Object Acquire_Load(IsolateRoot isolate) const;
inline Object Relaxed_Load() const;
- inline Object Relaxed_Load(const Isolate* isolate) const;
+ inline Object Relaxed_Load(IsolateRoot isolate) const;
inline void Relaxed_Store(Object value) const;
inline void Release_Store(Object value) const;
inline Object Relaxed_CompareAndSwap(Object old, Object target) const;
@@ -147,11 +147,11 @@ class FullMaybeObjectSlot
: SlotBase(slot.address()) {}
inline MaybeObject operator*() const;
- inline MaybeObject load(const Isolate* isolate) const;
+ inline MaybeObject load(IsolateRoot isolate) const;
inline void store(MaybeObject value) const;
inline MaybeObject Relaxed_Load() const;
- inline MaybeObject Relaxed_Load(const Isolate* isolate) const;
+ inline MaybeObject Relaxed_Load(IsolateRoot isolate) const;
inline void Relaxed_Store(MaybeObject value) const;
inline void Release_CompareAndSwap(MaybeObject old, MaybeObject target) const;
};
@@ -174,7 +174,7 @@ class FullHeapObjectSlot : public SlotBase<FullHeapObjectSlot, Address> {
: SlotBase(slot.address()) {}
inline HeapObjectReference operator*() const;
- inline HeapObjectReference load(const Isolate* isolate) const;
+ inline HeapObjectReference load(IsolateRoot isolate) const;
inline void store(HeapObjectReference value) const;
inline HeapObject ToHeapObject() const;
diff --git a/deps/v8/src/objects/source-text-module-inl.h b/deps/v8/src/objects/source-text-module-inl.h
new file mode 100644
index 0000000000..20c80a1799
--- /dev/null
+++ b/deps/v8/src/objects/source-text-module-inl.h
@@ -0,0 +1,29 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SOURCE_TEXT_MODULE_INL_H_
+#define V8_OBJECTS_SOURCE_TEXT_MODULE_INL_H_
+
+#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/source-text-module.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/source-text-module-tq-inl.inc"
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(ModuleRequest)
+TQ_OBJECT_CONSTRUCTORS_IMPL(SourceTextModule)
+TQ_OBJECT_CONSTRUCTORS_IMPL(SourceTextModuleInfoEntry)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_SOURCE_TEXT_MODULE_INL_H_
diff --git a/deps/v8/src/objects/source-text-module.cc b/deps/v8/src/objects/source-text-module.cc
index fb29c18e2f..e3c7a2d6cd 100644
--- a/deps/v8/src/objects/source-text-module.cc
+++ b/deps/v8/src/objects/source-text-module.cc
@@ -189,7 +189,7 @@ MaybeHandle<Cell> SourceTextModule::ResolveExport(
} else if (name_set->count(export_name)) {
// Cycle detected.
if (must_resolve) {
- return isolate->Throw<Cell>(
+ return isolate->ThrowAt<Cell>(
isolate->factory()->NewSyntaxError(
MessageTemplate::kCyclicModuleDependency, export_name,
module_specifier),
@@ -234,16 +234,20 @@ MaybeHandle<Cell> SourceTextModule::ResolveExport(
MaybeHandle<Cell> SourceTextModule::ResolveImport(
Isolate* isolate, Handle<SourceTextModule> module, Handle<String> name,
- int module_request, MessageLocation loc, bool must_resolve,
+ int module_request_index, MessageLocation loc, bool must_resolve,
Module::ResolveSet* resolve_set) {
Handle<Module> requested_module(
- Module::cast(module->requested_modules().get(module_request)), isolate);
- Handle<String> specifier(
- String::cast(module->info().module_requests().get(module_request)),
+ Module::cast(module->requested_modules().get(module_request_index)),
+ isolate);
+ Handle<ModuleRequest> module_request(
+ ModuleRequest::cast(
+ module->info().module_requests().get(module_request_index)),
isolate);
+ Handle<String> module_specifier(String::cast(module_request->specifier()),
+ isolate);
MaybeHandle<Cell> result =
- Module::ResolveExport(isolate, requested_module, specifier, name, loc,
- must_resolve, resolve_set);
+ Module::ResolveExport(isolate, requested_module, module_specifier, name,
+ loc, must_resolve, resolve_set);
DCHECK_IMPLIES(isolate->has_pending_exception(), result.is_null());
return result;
}
@@ -274,10 +278,10 @@ MaybeHandle<Cell> SourceTextModule::ResolveExportUsingStarExports(
.ToHandle(&cell)) {
if (unique_cell.is_null()) unique_cell = cell;
if (*unique_cell != *cell) {
- return isolate->Throw<Cell>(isolate->factory()->NewSyntaxError(
- MessageTemplate::kAmbiguousExport,
- module_specifier, export_name),
- &loc);
+ return isolate->ThrowAt<Cell>(isolate->factory()->NewSyntaxError(
+ MessageTemplate::kAmbiguousExport,
+ module_specifier, export_name),
+ &loc);
}
} else if (isolate->has_pending_exception()) {
return MaybeHandle<Cell>();
@@ -296,7 +300,7 @@ MaybeHandle<Cell> SourceTextModule::ResolveExportUsingStarExports(
// Unresolvable.
if (must_resolve) {
- return isolate->Throw<Cell>(
+ return isolate->ThrowAt<Cell>(
isolate->factory()->NewSyntaxError(MessageTemplate::kUnresolvableExport,
module_specifier, export_name),
&loc);
@@ -312,7 +316,10 @@ bool SourceTextModule::PrepareInstantiate(
Handle<FixedArray> module_requests(module_info->module_requests(), isolate);
Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
for (int i = 0, length = module_requests->length(); i < length; ++i) {
- Handle<String> specifier(String::cast(module_requests->get(i)), isolate);
+ Handle<ModuleRequest> module_request(
+ ModuleRequest::cast(module_requests->get(i)), isolate);
+ Handle<String> specifier(module_request->specifier(), isolate);
+ // TODO(v8:10958) Pass import assertions to the callback
v8::Local<v8::Module> api_requested_module;
if (!callback(context, v8::Utils::ToLocal(specifier),
v8::Utils::ToLocal(Handle<Module>::cast(module)))
diff --git a/deps/v8/src/objects/source-text-module.h b/deps/v8/src/objects/source-text-module.h
index 7d79213f13..c3ef4e36b3 100644
--- a/deps/v8/src/objects/source-text-module.h
+++ b/deps/v8/src/objects/source-text-module.h
@@ -17,6 +17,8 @@ namespace internal {
class UnorderedModuleSet;
+#include "torque-generated/src/objects/source-text-module-tq.inc"
+
// The runtime representation of an ECMAScript Source Text Module Record.
// https://tc39.github.io/ecma262/#sec-source-text-module-records
class SourceTextModule
@@ -124,7 +126,7 @@ class SourceTextModule
MessageLocation loc, bool must_resolve, ResolveSet* resolve_set);
static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveImport(
Isolate* isolate, Handle<SourceTextModule> module, Handle<String> name,
- int module_request, MessageLocation loc, bool must_resolve,
+ int module_request_index, MessageLocation loc, bool must_resolve,
ResolveSet* resolve_set);
static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveExportUsingStarExports(
@@ -236,6 +238,20 @@ class SourceTextModuleInfo : public FixedArray {
OBJECT_CONSTRUCTORS(SourceTextModuleInfo, FixedArray);
};
+class ModuleRequest
+ : public TorqueGeneratedModuleRequest<ModuleRequest, Struct> {
+ public:
+ NEVER_READ_ONLY_SPACE
+ DECL_VERIFIER(ModuleRequest)
+
+ template <typename LocalIsolate>
+ static Handle<ModuleRequest> New(LocalIsolate* isolate,
+ Handle<String> specifier,
+ Handle<FixedArray> import_assertions);
+
+ TQ_OBJECT_CONSTRUCTORS(ModuleRequest)
+};
+
class SourceTextModuleInfoEntry
: public TorqueGeneratedSourceTextModuleInfoEntry<SourceTextModuleInfoEntry,
Struct> {
diff --git a/deps/v8/src/objects/source-text-module.tq b/deps/v8/src/objects/source-text-module.tq
index 185443414d..1fee28a31f 100644
--- a/deps/v8/src/objects/source-text-module.tq
+++ b/deps/v8/src/objects/source-text-module.tq
@@ -48,6 +48,16 @@ extern class SourceTextModule extends Module {
}
@generateCppClass
+@generatePrint
+extern class ModuleRequest extends Struct {
+ specifier: String;
+
+ // Import assertions are stored in this array in the form:
+ // [key1, value1, location1, key2, value2, location2, ...]
+ import_assertions: FixedArray;
+}
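+// A hypothetical worked example of the flattened layout above: for
+// `import x from "mod" assert { type: "json" }`, import_assertions would
+// hold a single (key, value, location) triple, i.e.
+// ["type", "json", <source position of the assertion>].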
+
+@generateCppClass
extern class SourceTextModuleInfoEntry extends Struct {
export_name: String|Undefined;
local_name: String|Undefined;
diff --git a/deps/v8/src/objects/stack-frame-info-inl.h b/deps/v8/src/objects/stack-frame-info-inl.h
index 820d4324a2..376eda3a65 100644
--- a/deps/v8/src/objects/stack-frame-info-inl.h
+++ b/deps/v8/src/objects/stack-frame-info-inl.h
@@ -18,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/stack-frame-info-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(StackFrameInfo)
NEVER_READ_ONLY_SPACE_IMPL(StackFrameInfo)
diff --git a/deps/v8/src/objects/stack-frame-info.cc b/deps/v8/src/objects/stack-frame-info.cc
index 2f07c75ecf..6fe5316631 100644
--- a/deps/v8/src/objects/stack-frame-info.cc
+++ b/deps/v8/src/objects/stack-frame-info.cc
@@ -196,6 +196,9 @@ Handle<StackFrameInfo> StackTraceFrame::GetFrameInfo(
// static
void StackTraceFrame::InitializeFrameInfo(Handle<StackTraceFrame> frame) {
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
+ "SymbolizeStackFrame", "frameIndex", frame->frame_index());
+
Isolate* isolate = frame->GetIsolate();
Handle<StackFrameInfo> frame_info = isolate->factory()->NewStackFrameInfo(
handle(FrameArray::cast(frame->frame_array()), isolate),
diff --git a/deps/v8/src/objects/stack-frame-info.h b/deps/v8/src/objects/stack-frame-info.h
index 837c7e4b30..a2802792fd 100644
--- a/deps/v8/src/objects/stack-frame-info.h
+++ b/deps/v8/src/objects/stack-frame-info.h
@@ -17,6 +17,8 @@ namespace internal {
class FrameArray;
class WasmInstanceObject;
+#include "torque-generated/src/objects/stack-frame-info-tq.inc"
+
class StackFrameInfo
: public TorqueGeneratedStackFrameInfo<StackFrameInfo, Struct> {
public:
diff --git a/deps/v8/src/objects/string-comparator.cc b/deps/v8/src/objects/string-comparator.cc
index 6f517edb20..79ec348c71 100644
--- a/deps/v8/src/objects/string-comparator.cc
+++ b/deps/v8/src/objects/string-comparator.cc
@@ -44,7 +44,7 @@ bool StringComparator::Equals(String string_1, String string_2) {
state_1_.Init(string_1);
state_2_.Init(string_2);
while (true) {
- int to_check = Min(state_1_.length_, state_2_.length_);
+ int to_check = std::min(state_1_.length_, state_2_.length_);
DCHECK(to_check > 0 && to_check <= length);
bool is_equal;
if (state_1_.is_one_byte_) {
diff --git a/deps/v8/src/objects/string-comparator.h b/deps/v8/src/objects/string-comparator.h
index 8cee98a642..dc58d9aeb2 100644
--- a/deps/v8/src/objects/string-comparator.h
+++ b/deps/v8/src/objects/string-comparator.h
@@ -55,6 +55,8 @@ class StringComparator {
class State {
public:
State() : is_one_byte_(true), length_(0), buffer8_(nullptr) {}
+ State(const State&) = delete;
+ State& operator=(const State&) = delete;
void Init(String string);
@@ -79,13 +81,12 @@ class StringComparator {
const uint8_t* buffer8_;
const uint16_t* buffer16_;
};
-
- private:
- DISALLOW_COPY_AND_ASSIGN(State);
};
public:
inline StringComparator() = default;
+ StringComparator(const StringComparator&) = delete;
+ StringComparator& operator=(const StringComparator&) = delete;
template <typename Chars1, typename Chars2>
static inline bool Equals(State* state_1, State* state_2, int to_check) {
@@ -99,8 +100,6 @@ class StringComparator {
private:
State state_1_;
State state_2_;
-
- DISALLOW_COPY_AND_ASSIGN(StringComparator);
};
} // namespace internal
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index e9dfc594d1..ee1afd23a4 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -24,14 +24,53 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/string-tq-inl.inc"
+
+// Creates a SharedMutexGuard<kShared> for the string access if:
+// A) {str} is not a read only string, and
+// B) We are on a background thread.
+class SharedStringAccessGuardIfNeeded {
+ public:
+ explicit SharedStringAccessGuardIfNeeded(String str) {
+ Isolate* isolate;
+ if (IsNeeded(str, &isolate)) mutex_guard.emplace(isolate->string_access());
+ }
+
+ static SharedStringAccessGuardIfNeeded NotNeeded() {
+ return SharedStringAccessGuardIfNeeded();
+ }
+
+ static bool IsNeeded(String str, Isolate** out_isolate = nullptr) {
+ Isolate* isolate;
+ if (!GetIsolateFromHeapObject(str, &isolate)) {
+ // If we can't get the isolate from the String, it must be read-only.
+ DCHECK(ReadOnlyHeap::Contains(str));
+ return false;
+ }
+ if (out_isolate) *out_isolate = isolate;
+ return ThreadId::Current() != isolate->thread_id();
+ }
+
+ private:
+ // Default constructor and move constructor required for the NotNeeded()
+ // static constructor.
+ constexpr SharedStringAccessGuardIfNeeded() = default;
+ constexpr SharedStringAccessGuardIfNeeded(SharedStringAccessGuardIfNeeded&&)
+ V8_NOEXCEPT {
+ DCHECK(!mutex_guard.has_value());
+ }
+
+ base::Optional<base::SharedMutexGuard<base::kShared>> mutex_guard;
+};
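// A minimal standalone sketch of the conditional-guard pattern above,
// assuming std::shared_mutex and std::optional stand in for V8's
// base::SharedMutexGuard and base::Optional, and an owner thread id stands
// in for the isolate's thread check. The lock is only taken when the
// current thread is not the owning (main) thread.
#include <optional>
#include <shared_mutex>
#include <thread>

class OptionalSharedGuard {
 public:
  OptionalSharedGuard(std::shared_mutex& mutex, std::thread::id owner) {
    // Background threads take a shared (reader) lock; the owner thread
    // skips locking entirely, mirroring the fast path above.
    if (std::this_thread::get_id() != owner) guard_.emplace(mutex);
  }

 private:
  std::optional<std::shared_lock<std::shared_mutex>> guard_;
};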
+
int String::synchronized_length() const {
return base::AsAtomic32::Acquire_Load(
- reinterpret_cast<const int32_t*>(FIELD_ADDR(*this, kLengthOffset)));
+ reinterpret_cast<const int32_t*>(field_address(kLengthOffset)));
}
void String::synchronized_set_length(int value) {
base::AsAtomic32::Release_Store(
- reinterpret_cast<int32_t*>(FIELD_ADDR(*this, kLengthOffset)), value);
+ reinterpret_cast<int32_t*>(field_address(kLengthOffset)), value);
}
TQ_OBJECT_CONSTRUCTORS_IMPL(String)
@@ -50,7 +89,8 @@ CAST_ACCESSOR(ExternalOneByteString)
CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalTwoByteString)
-StringShape::StringShape(const String str) : type_(str.map().instance_type()) {
+StringShape::StringShape(const String str)
+ : type_(str.synchronized_map().instance_type()) {
set_valid();
DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
}
@@ -237,7 +277,7 @@ uc32 FlatStringReader::Get(int index) {
template <typename Char>
Char FlatStringReader::Get(int index) {
DCHECK_EQ(is_one_byte_, sizeof(Char) == 1);
- DCHECK(0 <= index && index <= length_);
+ DCHECK(0 <= index && index < length_);
if (sizeof(Char) == 1) {
return static_cast<Char>(static_cast<const uint8_t*>(start_)[index]);
} else {
@@ -261,12 +301,13 @@ class SequentialStringKey final : public StringTableKey {
convert_(convert) {}
bool IsMatch(String s) override {
+ SharedStringAccessGuardIfNeeded access_guard(s);
DisallowHeapAllocation no_gc;
if (s.IsOneByteRepresentation()) {
- const uint8_t* chars = s.GetChars<uint8_t>(no_gc);
+ const uint8_t* chars = s.GetChars<uint8_t>(no_gc, access_guard);
return CompareChars(chars, chars_.begin(), chars_.length()) == 0;
}
- const uint16_t* chars = s.GetChars<uint16_t>(no_gc);
+ const uint16_t* chars = s.GetChars<uint16_t>(no_gc, access_guard);
return CompareChars(chars, chars_.begin(), chars_.length()) == 0;
}
@@ -392,6 +433,16 @@ const Char* String::GetChars(const DisallowHeapAllocation& no_gc) {
: CharTraits<Char>::String::cast(*this).GetChars(no_gc);
}
+template <typename Char>
+const Char* String::GetChars(
+ const DisallowHeapAllocation& no_gc,
+ const SharedStringAccessGuardIfNeeded& access_guard) {
+ return StringShape(*this).IsExternal()
+ ? CharTraits<Char>::ExternalString::cast(*this).GetChars()
+ : CharTraits<Char>::String::cast(*this).GetChars(no_gc,
+ access_guard);
+}
+
Handle<String> String::Flatten(Isolate* isolate, Handle<String> string,
AllocationType allocation) {
if (string->IsConsString()) {
@@ -419,6 +470,8 @@ Handle<String> String::Flatten(LocalIsolate* isolate, Handle<String> string,
uint16_t String::Get(int index) {
DCHECK(index >= 0 && index < length());
+ SharedStringAccessGuardIfNeeded scope(*this);
+
class StringGetDispatcher : public AllStatic {
public:
#define DEFINE_METHOD(Type) \
@@ -554,21 +607,39 @@ void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
}
Address SeqOneByteString::GetCharsAddress() {
- return FIELD_ADDR(*this, kHeaderSize);
+ return field_address(kHeaderSize);
}
uint8_t* SeqOneByteString::GetChars(const DisallowHeapAllocation& no_gc) {
USE(no_gc);
+ DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
+ return reinterpret_cast<uint8_t*>(GetCharsAddress());
+}
+
+uint8_t* SeqOneByteString::GetChars(
+ const DisallowHeapAllocation& no_gc,
+ const SharedStringAccessGuardIfNeeded& access_guard) {
+ USE(no_gc);
+ USE(access_guard);
return reinterpret_cast<uint8_t*>(GetCharsAddress());
}
Address SeqTwoByteString::GetCharsAddress() {
- return FIELD_ADDR(*this, kHeaderSize);
+ return field_address(kHeaderSize);
}
uc16* SeqTwoByteString::GetChars(const DisallowHeapAllocation& no_gc) {
USE(no_gc);
- return reinterpret_cast<uc16*>(FIELD_ADDR(*this, kHeaderSize));
+ DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
+ return reinterpret_cast<uc16*>(GetCharsAddress());
+}
+
+uc16* SeqTwoByteString::GetChars(
+ const DisallowHeapAllocation& no_gc,
+ const SharedStringAccessGuardIfNeeded& access_guard) {
+ USE(no_gc);
+ USE(access_guard);
+ return reinterpret_cast<uc16*>(GetCharsAddress());
}
uint16_t SeqTwoByteString::Get(int index) {
@@ -612,17 +683,20 @@ bool ExternalString::is_uncached() const {
return (type & kUncachedExternalStringMask) == kUncachedExternalStringTag;
}
+void ExternalString::AllocateExternalPointerEntries(Isolate* isolate) {
+ InitExternalPointerField(kResourceOffset, isolate);
+ if (is_uncached()) return;
+ InitExternalPointerField(kResourceDataOffset, isolate);
+}
+
DEF_GETTER(ExternalString, resource_as_address, Address) {
- ExternalPointer_t encoded_address =
- ReadField<ExternalPointer_t>(kResourceOffset);
- return DecodeExternalPointer(isolate, encoded_address);
+ return ReadExternalPointerField(kResourceOffset, isolate,
+ kExternalStringResourceTag);
}
-void ExternalString::set_address_as_resource(Isolate* isolate,
- Address address) {
- const ExternalPointer_t encoded_address =
- EncodeExternalPointer(isolate, address);
- WriteField<ExternalPointer_t>(kResourceOffset, encoded_address);
+void ExternalString::set_address_as_resource(Isolate* isolate, Address value) {
+ WriteExternalPointerField(kResourceOffset, isolate, value,
+ kExternalStringResourceTag);
if (IsExternalOneByteString()) {
ExternalOneByteString::cast(*this).update_data_cache(isolate);
} else {
@@ -630,48 +704,43 @@ void ExternalString::set_address_as_resource(Isolate* isolate,
}
}
-uint32_t ExternalString::resource_as_uint32() {
+uint32_t ExternalString::GetResourceRefForDeserialization() {
ExternalPointer_t encoded_address =
ReadField<ExternalPointer_t>(kResourceOffset);
return static_cast<uint32_t>(encoded_address);
}
-void ExternalString::set_uint32_as_resource(Isolate* isolate, uint32_t value) {
- WriteField<ExternalPointer_t>(kResourceOffset, value);
+void ExternalString::SetResourceRefForSerialization(uint32_t ref) {
+ WriteField<ExternalPointer_t>(kResourceOffset,
+ static_cast<ExternalPointer_t>(ref));
if (is_uncached()) return;
- WriteField<ExternalPointer_t>(kResourceDataOffset,
- EncodeExternalPointer(isolate, kNullAddress));
+ WriteField<ExternalPointer_t>(kResourceDataOffset, kNullExternalPointer);
}
void ExternalString::DisposeResource(Isolate* isolate) {
- const ExternalPointer_t encoded_address =
- ReadField<ExternalPointer_t>(kResourceOffset);
+ Address value = ReadExternalPointerField(kResourceOffset, isolate,
+ kExternalStringResourceTag);
v8::String::ExternalStringResourceBase* resource =
- reinterpret_cast<v8::String::ExternalStringResourceBase*>(
- DecodeExternalPointer(isolate, encoded_address));
+ reinterpret_cast<v8::String::ExternalStringResourceBase*>(value);
// Dispose of the C++ object if it has not already been disposed.
if (resource != nullptr) {
resource->Dispose();
- const ExternalPointer_t encoded_address =
- EncodeExternalPointer(isolate, kNullAddress);
- WriteField<ExternalPointer_t>(kResourceOffset, encoded_address);
+ WriteExternalPointerField(kResourceOffset, isolate, kNullAddress,
+ kExternalStringResourceTag);
}
}
DEF_GETTER(ExternalOneByteString, resource,
const ExternalOneByteString::Resource*) {
- const ExternalPointer_t encoded_address =
- ReadField<ExternalPointer_t>(kResourceOffset);
- return reinterpret_cast<Resource*>(
- DecodeExternalPointer(isolate, encoded_address));
+ return reinterpret_cast<Resource*>(resource_as_address(isolate));
}
void ExternalOneByteString::update_data_cache(Isolate* isolate) {
if (is_uncached()) return;
- const ExternalPointer_t encoded_resource_data = EncodeExternalPointer(
- isolate, reinterpret_cast<Address>(resource()->data()));
- WriteField<ExternalPointer_t>(kResourceDataOffset, encoded_resource_data);
+ WriteExternalPointerField(kResourceDataOffset, isolate,
+ reinterpret_cast<Address>(resource()->data()),
+ kExternalStringResourceDataTag);
}
void ExternalOneByteString::SetResource(
@@ -685,9 +754,9 @@ void ExternalOneByteString::SetResource(
void ExternalOneByteString::set_resource(
Isolate* isolate, const ExternalOneByteString::Resource* resource) {
- const ExternalPointer_t encoded_address =
- EncodeExternalPointer(isolate, reinterpret_cast<Address>(resource));
- WriteField<ExternalPointer_t>(kResourceOffset, encoded_address);
+ WriteExternalPointerField(kResourceOffset, isolate,
+ reinterpret_cast<Address>(resource),
+ kExternalStringResourceTag);
if (resource != nullptr) update_data_cache(isolate);
}
@@ -702,17 +771,14 @@ uint8_t ExternalOneByteString::Get(int index) {
DEF_GETTER(ExternalTwoByteString, resource,
const ExternalTwoByteString::Resource*) {
- const ExternalPointer_t encoded_address =
- ReadField<ExternalPointer_t>(kResourceOffset);
- return reinterpret_cast<Resource*>(
- DecodeExternalPointer(isolate, encoded_address));
+ return reinterpret_cast<Resource*>(resource_as_address(isolate));
}
void ExternalTwoByteString::update_data_cache(Isolate* isolate) {
if (is_uncached()) return;
- const ExternalPointer_t encoded_resource_data = EncodeExternalPointer(
- isolate, reinterpret_cast<Address>(resource()->data()));
- WriteField<ExternalPointer_t>(kResourceDataOffset, encoded_resource_data);
+ WriteExternalPointerField(kResourceDataOffset, isolate,
+ reinterpret_cast<Address>(resource()->data()),
+ kExternalStringResourceDataTag);
}
void ExternalTwoByteString::SetResource(
@@ -726,9 +792,9 @@ void ExternalTwoByteString::SetResource(
void ExternalTwoByteString::set_resource(
Isolate* isolate, const ExternalTwoByteString::Resource* resource) {
- const ExternalPointer_t encoded_address =
- EncodeExternalPointer(isolate, reinterpret_cast<Address>(resource));
- WriteField<ExternalPointer_t>(kResourceOffset, encoded_address);
+ WriteExternalPointerField(kResourceOffset, isolate,
+ reinterpret_cast<Address>(resource),
+ kExternalStringResourceTag);
if (resource != nullptr) update_data_cache(isolate);
}
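// A minimal standalone analogue of the tagged external-pointer accessors
// the hunks above migrate to. Assumption: a simple XOR tag stands in for
// whatever encoding Read/WriteExternalPointerField apply internally; the
// point is that decoding with the wrong tag yields a useless pointer, so
// each kind of external reference gets its own tag.
#include <cstdint>

constexpr uintptr_t kResourceTag = uintptr_t{0x5a5a} << 48;

uintptr_t EncodeTagged(const void* ptr, uintptr_t tag) {
  return reinterpret_cast<uintptr_t>(ptr) ^ tag;
}

const void* DecodeTagged(uintptr_t field, uintptr_t tag) {
  // Only the matching tag round-trips the original pointer.
  return reinterpret_cast<const void*>(field ^ tag);
}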
diff --git a/deps/v8/src/objects/string-table.cc b/deps/v8/src/objects/string-table.cc
index ae8da8412c..85f31a2e56 100644
--- a/deps/v8/src/objects/string-table.cc
+++ b/deps/v8/src/objects/string-table.cc
@@ -51,7 +51,7 @@ int ComputeStringTableCapacity(int at_least_space_for) {
// See matching computation in StringTableHasSufficientCapacityToAdd().
int raw_capacity = at_least_space_for + (at_least_space_for >> 1);
int capacity = base::bits::RoundUpToPowerOfTwo32(raw_capacity);
- return Max(capacity, kStringTableMinCapacity);
+ return std::max(capacity, kStringTableMinCapacity);
}
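// A standalone sketch of the same sizing rule, with C++20 std::bit_ceil
// standing in for base::bits::RoundUpToPowerOfTwo32 and an assumed minimum
// capacity (the real kStringTableMinCapacity may differ):
#include <algorithm>
#include <bit>

constexpr int kMinCapacity = 128;  // assumed value, for illustration only

constexpr int ComputeCapacity(int at_least_space_for) {
  // 1.5x headroom, rounded up to a power of two: 100 -> 150 -> 256.
  unsigned raw =
      static_cast<unsigned>(at_least_space_for + (at_least_space_for >> 1));
  return std::max(static_cast<int>(std::bit_ceil(raw)), kMinCapacity);
}
static_assert(ComputeCapacity(100) == 256);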
int ComputeStringTableCapacityWithShrink(int current_capacity,
@@ -91,14 +91,14 @@ bool KeyIsMatch(StringTableKey* key, String string) {
class StringTable::Data {
public:
static std::unique_ptr<Data> New(int capacity);
- static std::unique_ptr<Data> Resize(const Isolate* isolate,
+ static std::unique_ptr<Data> Resize(IsolateRoot isolate,
std::unique_ptr<Data> data, int capacity);
OffHeapObjectSlot slot(InternalIndex index) const {
return OffHeapObjectSlot(&elements_[index.as_uint32()]);
}
- Object Get(const Isolate* isolate, InternalIndex index) const {
+ Object Get(IsolateRoot isolate, InternalIndex index) const {
return slot(index).Acquire_Load(isolate);
}
@@ -136,13 +136,13 @@ class StringTable::Data {
int number_of_deleted_elements() const { return number_of_deleted_elements_; }
template <typename StringTableKey>
- InternalIndex FindEntry(const Isolate* isolate, StringTableKey* key,
+ InternalIndex FindEntry(IsolateRoot isolate, StringTableKey* key,
uint32_t hash) const;
- InternalIndex FindInsertionEntry(const Isolate* isolate, uint32_t hash) const;
+ InternalIndex FindInsertionEntry(IsolateRoot isolate, uint32_t hash) const;
template <typename StringTableKey>
- InternalIndex FindEntryOrInsertionEntry(const Isolate* isolate,
+ InternalIndex FindEntryOrInsertionEntry(IsolateRoot isolate,
StringTableKey* key,
uint32_t hash) const;
@@ -157,7 +157,7 @@ class StringTable::Data {
Data* PreviousData() { return previous_data_.get(); }
void DropPreviousData() { previous_data_.reset(); }
- void Print(const Isolate* isolate) const;
+ void Print(IsolateRoot isolate) const;
size_t GetCurrentMemoryUsage() const;
private:
@@ -224,7 +224,7 @@ std::unique_ptr<StringTable::Data> StringTable::Data::New(int capacity) {
}
std::unique_ptr<StringTable::Data> StringTable::Data::Resize(
- const Isolate* isolate, std::unique_ptr<Data> data, int capacity) {
+ IsolateRoot isolate, std::unique_ptr<Data> data, int capacity) {
std::unique_ptr<Data> new_data(new (capacity) Data(capacity));
DCHECK_LT(data->number_of_elements(), new_data->capacity());
@@ -248,7 +248,7 @@ std::unique_ptr<StringTable::Data> StringTable::Data::Resize(
}
template <typename StringTableKey>
-InternalIndex StringTable::Data::FindEntry(const Isolate* isolate,
+InternalIndex StringTable::Data::FindEntry(IsolateRoot isolate,
StringTableKey* key,
uint32_t hash) const {
uint32_t count = 1;
@@ -266,7 +266,7 @@ InternalIndex StringTable::Data::FindEntry(const Isolate* isolate,
}
}
-InternalIndex StringTable::Data::FindInsertionEntry(const Isolate* isolate,
+InternalIndex StringTable::Data::FindInsertionEntry(IsolateRoot isolate,
uint32_t hash) const {
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
@@ -283,7 +283,7 @@ InternalIndex StringTable::Data::FindInsertionEntry(const Isolate* isolate,
template <typename StringTableKey>
InternalIndex StringTable::Data::FindEntryOrInsertionEntry(
- const Isolate* isolate, StringTableKey* key, uint32_t hash) const {
+ IsolateRoot isolate, StringTableKey* key, uint32_t hash) const {
InternalIndex insertion_entry = InternalIndex::NotFound();
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
@@ -317,7 +317,7 @@ void StringTable::Data::IterateElements(RootVisitor* visitor) {
visitor->VisitRootPointers(Root::kStringTable, nullptr, first_slot, end_slot);
}
-void StringTable::Data::Print(const Isolate* isolate) const {
+void StringTable::Data::Print(IsolateRoot isolate) const {
OFStream os(stdout);
os << "StringTable {" << std::endl;
for (InternalIndex i : InternalIndex::Range(capacity_)) {
@@ -358,7 +358,10 @@ class InternalizedStringKey final : public StringTableKey {
set_hash_field(string->hash_field());
}
- bool IsMatch(String string) override { return string_->SlowEquals(string); }
+ bool IsMatch(String string) override {
+ DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(string));
+ return string_->SlowEquals(string);
+ }
Handle<String> AsHandle(Isolate* isolate) {
// Internalize the string if possible.
@@ -461,8 +464,6 @@ Handle<String> StringTable::LookupKey(LocalIsolate* isolate,
// allocation if another write also did an allocation. This assumes that
// writes are rarer than reads.
- const Isolate* ptr_cmp_isolate = GetIsolateForPtrCompr(isolate);
-
Handle<String> new_string;
while (true) {
// Load the current string table data, in case another thread updates the
@@ -474,9 +475,9 @@ Handle<String> StringTable::LookupKey(LocalIsolate* isolate,
// because the new table won't delete its corresponding entry until the
// string is dead, in which case it will die in this table too and worst
// case we'll have a false miss.
- InternalIndex entry = data->FindEntry(ptr_cmp_isolate, key, key->hash());
+ InternalIndex entry = data->FindEntry(isolate, key, key->hash());
if (entry.is_found()) {
- return handle(String::cast(data->Get(ptr_cmp_isolate, entry)), isolate);
+ return handle(String::cast(data->Get(isolate, entry)), isolate);
}
// No entry found, so adding new string.
@@ -490,14 +491,14 @@ Handle<String> StringTable::LookupKey(LocalIsolate* isolate,
{
base::MutexGuard table_write_guard(&write_mutex_);
- Data* data = EnsureCapacity(ptr_cmp_isolate, 1);
+ Data* data = EnsureCapacity(isolate, 1);
// Check one last time if the key is present in the table, in case it was
// added after the check.
InternalIndex entry =
- data->FindEntryOrInsertionEntry(ptr_cmp_isolate, key, key->hash());
+ data->FindEntryOrInsertionEntry(isolate, key, key->hash());
- Object element = data->Get(ptr_cmp_isolate, entry);
+ Object element = data->Get(isolate, entry);
if (element == empty_element()) {
// This entry is empty, so write it and register that we added an
// element.
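// A minimal standalone analogue of the lookup protocol above. Assumptions:
// a std::unordered_set guarded by a std::shared_mutex stands in for the V8
// table, and reads take a shared lock instead of the lock-free
// acquire-loads used above; what carries over is the re-check under the
// exclusive lock, since another thread may have inserted the same key
// between the optimistic lookup and the write.
#include <mutex>
#include <shared_mutex>
#include <string>
#include <unordered_set>

class InterningTable {
 public:
  const std::string* Intern(const std::string& s) {
    {
      std::shared_lock<std::shared_mutex> read_lock(mutex_);
      auto it = set_.find(s);
      if (it != set_.end()) return &*it;  // fast path: already present
    }
    std::unique_lock<std::shared_mutex> write_lock(mutex_);
    // Check one last time, in case the key was added after the read.
    return &*set_.insert(s).first;
  }

 private:
  std::shared_mutex mutex_;
  std::unordered_set<std::string> set_;
};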
@@ -539,7 +540,7 @@ template Handle<String> StringTable::LookupKey(LocalIsolate* isolate,
template Handle<String> StringTable::LookupKey(Isolate* isolate,
StringTableInsertionKey* key);
-StringTable::Data* StringTable::EnsureCapacity(const Isolate* isolate,
+StringTable::Data* StringTable::EnsureCapacity(IsolateRoot isolate,
int additional_elements) {
// This call is only allowed while the write mutex is held.
write_mutex_.AssertHeld();
@@ -677,7 +678,7 @@ Address StringTable::TryStringToIndexOrLookupExisting(Isolate* isolate,
isolate, string, source, start);
}
-void StringTable::Print(const Isolate* isolate) const {
+void StringTable::Print(IsolateRoot isolate) const {
data_.load(std::memory_order_acquire)->Print(isolate);
}
diff --git a/deps/v8/src/objects/string-table.h b/deps/v8/src/objects/string-table.h
index 9efcc6e016..cdbb22db80 100644
--- a/deps/v8/src/objects/string-table.h
+++ b/deps/v8/src/objects/string-table.h
@@ -77,7 +77,7 @@ class V8_EXPORT_PRIVATE StringTable {
static Address TryStringToIndexOrLookupExisting(Isolate* isolate,
Address raw_string);
- void Print(const Isolate* isolate) const;
+ void Print(IsolateRoot isolate) const;
size_t GetCurrentMemoryUsage() const;
// The following methods must be called either while holding the write lock,
@@ -89,7 +89,7 @@ class V8_EXPORT_PRIVATE StringTable {
private:
class Data;
- Data* EnsureCapacity(const Isolate* isolate, int additional_elements);
+ Data* EnsureCapacity(IsolateRoot isolate, int additional_elements);
std::atomic<Data*> data_;
// Write mutex is mutable so that readers of concurrently mutated values (e.g.
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
index c450485a1b..4c023f9801 100644
--- a/deps/v8/src/objects/string.cc
+++ b/deps/v8/src/objects/string.cc
@@ -6,6 +6,7 @@
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
+#include "src/execution/thread-id.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-chunk.h"
@@ -117,17 +118,16 @@ void String::MakeThin(Isolate* isolate, String internalized) {
bool has_pointers = StringShape(*this).IsIndirect();
int old_size = this->Size();
- // Slot invalidation is not necessary here: ThinString only stores tagged
- // value, so it can't store an untagged value in a recorded slot.
- isolate->heap()->NotifyObjectLayoutChange(*this, no_gc,
- InvalidateRecordedSlots::kNo);
bool one_byte = internalized.IsOneByteRepresentation();
Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
: isolate->factory()->thin_string_map();
+ // Update actual first, then do a release store on the map word. This ensures
+ // that the concurrent marker will read the pointer when visiting a
+ // ThinString.
+ ThinString thin = ThinString::unchecked_cast(*this);
+ thin.set_actual(internalized);
DCHECK_GE(old_size, ThinString::kSize);
this->synchronized_set_map(*map);
- ThinString thin = ThinString::cast(*this);
- thin.set_actual(internalized);
Address thin_end = thin.address() + ThinString::kSize;
int size_delta = old_size - ThinString::kSize;
if (size_delta != 0) {
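// A standalone sketch of the publication ordering used in MakeThin above,
// with std::atomic release/acquire standing in for the synchronized map
// word: the payload field is written first, then the pointer is published
// with a release store, so an acquire load that sees it also sees the field.
#include <atomic>

struct Thin { int actual = 0; };

std::atomic<Thin*> map_word{nullptr};

void Publish(Thin* t) {
  t->actual = 42;                                // write the field first...
  map_word.store(t, std::memory_order_release);  // ...then publish
}

int ReadIfPublished() {
  // Pairs with the release store: a non-null result guarantees the
  // earlier field write is visible.
  Thin* t = map_word.load(std::memory_order_acquire);
  return t ? t->actual : -1;
}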
@@ -168,6 +168,11 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
isolate->heap()->NotifyObjectLayoutChange(*this, no_allocation,
InvalidateRecordedSlots::kYes);
}
+
+ // Disallow garbage collection to avoid possible GC vs string access deadlock.
+ DisallowGarbageCollection no_gc;
+ base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
+ isolate->string_access());
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
// string occupies is too small for a regular external string. Instead, we
@@ -198,6 +203,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
this->synchronized_set_map(new_map);
ExternalTwoByteString self = ExternalTwoByteString::cast(*this);
+ self.AllocateExternalPointerEntries(isolate);
self.SetResource(isolate, resource);
isolate->heap()->RegisterExternalString(*this);
if (is_internalized) self.Hash(); // Force regeneration of the hash value.
@@ -239,6 +245,11 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
isolate->heap()->NotifyObjectLayoutChange(*this, no_allocation,
InvalidateRecordedSlots::kYes);
}
+
+ // Disallow garbage collection to avoid possible GC vs string access deadlock.
+ DisallowGarbageCollection no_gc;
+ base::SharedMutexGuard<base::kExclusive> shared_mutex_guard(
+ isolate->string_access());
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
// string occupies is too small for a regular external string. Instead, we
@@ -268,6 +279,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
this->synchronized_set_map(new_map);
ExternalOneByteString self = ExternalOneByteString::cast(*this);
+ self.AllocateExternalPointerEntries(isolate);
self.SetResource(isolate, resource);
isolate->heap()->RegisterExternalString(*this);
if (is_internalized) self.Hash(); // Force regeneration of the hash value.
@@ -519,6 +531,15 @@ Handle<Object> String::ToNumber(Isolate* isolate, Handle<String> subject) {
String::FlatContent String::GetFlatContent(
const DisallowHeapAllocation& no_gc) {
+#if DEBUG
+ // Check that this method is called only from the main thread.
+ {
+ Isolate* isolate;
+ // We don't have to check read only strings as those won't move.
+ DCHECK_IMPLIES(GetIsolateFromHeapObject(*this, &isolate),
+ ThreadId::Current() == isolate->thread_id());
+ }
+#endif
USE(no_gc);
int length = this->length();
StringShape shape(*this);
@@ -527,7 +548,7 @@ String::FlatContent String::GetFlatContent(
if (shape.representation_tag() == kConsStringTag) {
ConsString cons = ConsString::cast(string);
if (cons.second().length() != 0) {
- return FlatContent();
+ return FlatContent(no_gc);
}
string = cons.first();
shape = StringShape(string);
@@ -553,7 +574,7 @@ String::FlatContent String::GetFlatContent(
} else {
start = ExternalOneByteString::cast(string).GetChars();
}
- return FlatContent(start + offset, length);
+ return FlatContent(start + offset, length, no_gc);
} else {
DCHECK_EQ(shape.encoding_tag(), kTwoByteStringTag);
const uc16* start;
@@ -562,7 +583,7 @@ String::FlatContent String::GetFlatContent(
} else {
start = ExternalTwoByteString::cast(string).GetChars();
}
- return FlatContent(start + offset, length);
+ return FlatContent(start + offset, length, no_gc);
}
}
@@ -618,11 +639,9 @@ std::unique_ptr<char[]> String::ToCString(AllowNullsFlag allow_nulls,
}
template <typename sinkchar>
-void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
+void String::WriteToFlat(String source, sinkchar* sink, int from, int to) {
DisallowHeapAllocation no_gc;
- String source = src;
- int from = f;
- int to = t;
+ SharedStringAccessGuardIfNeeded access_guard(source);
while (from < to) {
DCHECK_LE(0, from);
DCHECK_LE(to, source.length());
@@ -638,13 +657,17 @@ void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
return;
}
case kOneByteStringTag | kSeqStringTag: {
- CopyChars(sink, SeqOneByteString::cast(source).GetChars(no_gc) + from,
- to - from);
+ CopyChars(
+ sink,
+ SeqOneByteString::cast(source).GetChars(no_gc, access_guard) + from,
+ to - from);
return;
}
case kTwoByteStringTag | kSeqStringTag: {
- CopyChars(sink, SeqTwoByteString::cast(source).GetChars(no_gc) + from,
- to - from);
+ CopyChars(
+ sink,
+ SeqTwoByteString::cast(source).GetChars(no_gc, access_guard) + from,
+ to - from);
return;
}
case kOneByteStringTag | kConsStringTag:
@@ -677,9 +700,10 @@ void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
if (to - boundary == 1) {
sink[boundary - from] = static_cast<sinkchar>(second.Get(0));
} else if (second.IsSeqOneByteString()) {
- CopyChars(sink + boundary - from,
- SeqOneByteString::cast(second).GetChars(no_gc),
- to - boundary);
+ CopyChars(
+ sink + boundary - from,
+ SeqOneByteString::cast(second).GetChars(no_gc, access_guard),
+ to - boundary);
} else {
WriteToFlat(second, sink + boundary - from, 0, to - boundary);
}
@@ -1510,24 +1534,19 @@ int ExternalString::ExternalPayloadSize() const {
}
FlatStringReader::FlatStringReader(Isolate* isolate, Handle<String> str)
- : Relocatable(isolate), str_(str.location()), length_(str->length()) {
+ : Relocatable(isolate), str_(str), length_(str->length()) {
+#if DEBUG
+ // Check that this constructor is called only from the main thread.
+ DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
+#endif
PostGarbageCollection();
}
-FlatStringReader::FlatStringReader(Isolate* isolate, Vector<const char> input)
- : Relocatable(isolate),
- str_(nullptr),
- is_one_byte_(true),
- length_(input.length()),
- start_(input.begin()) {}
-
void FlatStringReader::PostGarbageCollection() {
- if (str_ == nullptr) return;
- Handle<String> str(str_);
- DCHECK(str->IsFlat());
+ DCHECK(str_->IsFlat());
DisallowHeapAllocation no_gc;
// This does not actually prevent the vector from being relocated later.
- String::FlatContent content = str->GetFlatContent(no_gc);
+ String::FlatContent content = str_->GetFlatContent(no_gc);
DCHECK(content.IsFlat());
is_one_byte_ = content.IsOneByte();
if (is_one_byte_) {
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 0b7bd55aee..dc4381b39d 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -13,6 +13,7 @@
#include "src/objects/name.h"
#include "src/objects/smi.h"
#include "src/strings/unicode-decoder.h"
+#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -20,6 +21,8 @@
namespace v8 {
namespace internal {
+class SharedStringAccessGuardIfNeeded;
+
enum InstanceType : uint16_t;
enum AllowNullsFlag { ALLOW_NULLS, DISALLOW_NULLS };
@@ -80,6 +83,8 @@ class StringShape {
#endif
};
+#include "torque-generated/src/objects/string-tq.inc"
+
// The String abstract class captures JavaScript string values:
//
// Ecma-262:
@@ -97,6 +102,10 @@ class String : public TorqueGeneratedString<String, Name> {
// A flat string has content that's encoded as a sequence of either
// one-byte chars or two-byte UC16.
// Returned by String::GetFlatContent().
+ // Not safe to use from concurrent background threads.
+ // TODO(solanes): Move FlatContent into FlatStringReader, and make it private.
+ // This would de-duplicate code, as well as take advantage of the fact that
+ // FlatStringReader is relocatable.
class FlatContent {
public:
// Returns true if the string is flat and this structure contains content.
@@ -134,11 +143,20 @@ class String : public TorqueGeneratedString<String, Name> {
enum State { NON_FLAT, ONE_BYTE, TWO_BYTE };
// Constructors only used by String::GetFlatContent().
- explicit FlatContent(const uint8_t* start, int length)
- : onebyte_start(start), length_(length), state_(ONE_BYTE) {}
- explicit FlatContent(const uc16* start, int length)
- : twobyte_start(start), length_(length), state_(TWO_BYTE) {}
- FlatContent() : onebyte_start(nullptr), length_(0), state_(NON_FLAT) {}
+ FlatContent(const uint8_t* start, int length,
+ const DisallowHeapAllocation& no_gc)
+ : onebyte_start(start),
+ length_(length),
+ state_(ONE_BYTE),
+ no_gc_(no_gc) {}
+ FlatContent(const uc16* start, int length,
+ const DisallowHeapAllocation& no_gc)
+ : twobyte_start(start),
+ length_(length),
+ state_(TWO_BYTE),
+ no_gc_(no_gc) {}
+ explicit FlatContent(const DisallowHeapAllocation& no_gc)
+ : onebyte_start(nullptr), length_(0), state_(NON_FLAT), no_gc_(no_gc) {}
union {
const uint8_t* onebyte_start;
@@ -146,6 +164,7 @@ class String : public TorqueGeneratedString<String, Name> {
};
int length_;
State state_;
+ const DisallowHeapAllocation& no_gc_;
friend class String;
friend class IterableSubString;
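// A minimal standalone sketch of the scope-witness idiom the constructors
// above adopt (an empty NoAlloc type stands in for DisallowHeapAllocation):
// holding a reference to the scope object makes it impossible to construct
// the content without one, tying its validity to the no-allocation region.
struct NoAlloc {};

class Content {
 public:
  explicit Content(const NoAlloc& witness) : witness_(witness) {}

 private:
  const NoAlloc& witness_;  // must outlive this Content
};

void Example() {
  NoAlloc no_alloc;            // open the no-allocation scope
  Content content(no_alloc);   // fine: the scope outlives the content
  (void)content;
}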
@@ -157,10 +176,18 @@ class String : public TorqueGeneratedString<String, Name> {
V8_INLINE Vector<const Char> GetCharVector(
const DisallowHeapAllocation& no_gc);
- // Get chars from sequential or external strings.
+ // Get chars from sequential or external strings. May only be called when a
+ // SharedStringAccessGuard is not needed (i.e. on the main thread or on
+ // read-only strings).
template <typename Char>
inline const Char* GetChars(const DisallowHeapAllocation& no_gc);
+ // Get chars from sequential or external strings.
+ template <typename Char>
+ inline const Char* GetChars(
+ const DisallowHeapAllocation& no_gc,
+ const SharedStringAccessGuardIfNeeded& access_guard);
+
// Returns the address of the character at an offset into this string.
// Requires: this->IsFlat()
const byte* AddressOfCharacterAt(int start_index,
@@ -558,8 +585,15 @@ class SeqOneByteString
// Get the address of the characters in this string.
inline Address GetCharsAddress();
+ // Get a pointer to the characters of the string. May only be called when a
+ // SharedStringAccessGuard is not needed (i.e. on the main thread or on
+ // read-only strings).
inline uint8_t* GetChars(const DisallowHeapAllocation& no_gc);
+ // Get a pointer to the characters of the string.
+ inline uint8_t* GetChars(const DisallowHeapAllocation& no_gc,
+ const SharedStringAccessGuardIfNeeded& access_guard);
+
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic.
void clear_padding();
@@ -596,8 +630,15 @@ class SeqTwoByteString
// Get the address of the characters in this string.
inline Address GetCharsAddress();
+ // Get a pointer to the characters of the string. May only be called when a
+ // SharedStringAccessGuard is not needed (i.e. on the main thread or on
+ // read-only strings).
inline uc16* GetChars(const DisallowHeapAllocation& no_gc);
+ // Get a pointer to the characters of the string.
+ inline uc16* GetChars(const DisallowHeapAllocation& no_gc,
+ const SharedStringAccessGuardIfNeeded& access_guard);
+
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic.
void clear_padding();
@@ -721,6 +762,8 @@ class ExternalString : public String {
static const int kUncachedSize =
kResourceOffset + FIELD_SIZE(kResourceOffset);
+ inline void AllocateExternalPointerEntries(Isolate* isolate);
+
// Return whether the external string data pointer is not cached.
inline bool is_uncached() const;
// Size in bytes of the external payload.
@@ -729,8 +772,8 @@ class ExternalString : public String {
// Used in the serializer/deserializer.
DECL_GETTER(resource_as_address, Address)
inline void set_address_as_resource(Isolate* isolate, Address address);
- inline uint32_t resource_as_uint32();
- inline void set_uint32_as_resource(Isolate* isolate, uint32_t value);
+ inline uint32_t GetResourceRefForDeserialization();
+ inline void SetResourceRefForSerialization(uint32_t ref);
// Disposes string's resource object if it has not already been disposed.
inline void DisposeResource(Isolate* isolate);
@@ -755,6 +798,7 @@ class ExternalOneByteString : public ExternalString {
// It is assumed that the previous resource is null. If it is not null, then
// it is the responsibility of the caller to handle the previous resource.
inline void SetResource(Isolate* isolate, const Resource* buffer);
+
// Used only during serialization.
inline void set_resource(Isolate* isolate, const Resource* buffer);
@@ -796,6 +840,7 @@ class ExternalTwoByteString : public ExternalString {
// It is assumed that the previous resource is null. If it is not null, then
// it is the responsibility of the caller to handle the previous resource.
inline void SetResource(Isolate* isolate, const Resource* buffer);
+
// Used only during serialization.
inline void set_resource(Isolate* isolate, const Resource* buffer);
@@ -827,12 +872,12 @@ class ExternalTwoByteString : public ExternalString {
};
// A flat string reader provides random access to the contents of a
-// string independent of the character width of the string. The handle
+// string independent of the character width of the string. The handle
// must be valid as long as the reader is being used.
+// Not safe to use from concurrent background threads.
class V8_EXPORT_PRIVATE FlatStringReader : public Relocatable {
public:
FlatStringReader(Isolate* isolate, Handle<String> str);
- FlatStringReader(Isolate* isolate, Vector<const char> input);
void PostGarbageCollection() override;
inline uc32 Get(int index);
template <typename Char>
@@ -840,7 +885,7 @@ class V8_EXPORT_PRIVATE FlatStringReader : public Relocatable {
int length() { return length_; }
private:
- Address* str_;
+ Handle<String> str_;
bool is_one_byte_;
int length_;
const void* start_;
@@ -855,6 +900,8 @@ class ConsStringIterator {
inline explicit ConsStringIterator(ConsString cons_string, int offset = 0) {
Reset(cons_string, offset);
}
+ ConsStringIterator(const ConsStringIterator&) = delete;
+ ConsStringIterator& operator=(const ConsStringIterator&) = delete;
inline void Reset(ConsString cons_string, int offset = 0) {
depth_ = 0;
// Next will always return nullptr.
@@ -893,12 +940,13 @@ class ConsStringIterator {
int depth_;
int maximum_depth_;
int consumed_;
- DISALLOW_COPY_AND_ASSIGN(ConsStringIterator);
};
class StringCharacterStream {
public:
inline explicit StringCharacterStream(String string, int offset = 0);
+ StringCharacterStream(const StringCharacterStream&) = delete;
+ StringCharacterStream& operator=(const StringCharacterStream&) = delete;
inline uint16_t GetNext();
inline bool HasMore();
inline void Reset(String string, int offset = 0);
@@ -913,7 +961,6 @@ class StringCharacterStream {
const uint16_t* buffer16_;
};
const uint8_t* end_;
- DISALLOW_COPY_AND_ASSIGN(StringCharacterStream);
};
template <typename Char>
diff --git a/deps/v8/src/objects/string.tq b/deps/v8/src/objects/string.tq
index 1bc51ce5da..df9b0f4ff0 100644
--- a/deps/v8/src/objects/string.tq
+++ b/deps/v8/src/objects/string.tq
@@ -25,6 +25,11 @@ extern class ExternalString extends String {
resource_data: ExternalPointer;
}
+extern operator '.resource_ptr' macro LoadExternalStringResourcePtr(
+ ExternalString): RawPtr;
+extern operator '.resource_data_ptr' macro LoadExternalStringResourceDataPtr(
+ ExternalString): RawPtr;
+
@doNotGenerateCast
extern class ExternalOneByteString extends ExternalString {
}
diff --git a/deps/v8/src/objects/struct-inl.h b/deps/v8/src/objects/struct-inl.h
index afc4c6ce49..b313bc43f8 100644
--- a/deps/v8/src/objects/struct-inl.h
+++ b/deps/v8/src/objects/struct-inl.h
@@ -11,7 +11,6 @@
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
#include "src/roots/roots-inl.h"
-#include "torque-generated/class-definitions-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -19,6 +18,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/struct-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(Struct)
TQ_OBJECT_CONSTRUCTORS_IMPL(Tuple2)
TQ_OBJECT_CONSTRUCTORS_IMPL(AccessorPair)
diff --git a/deps/v8/src/objects/struct.h b/deps/v8/src/objects/struct.h
index fcae2e593d..fa4fe42b62 100644
--- a/deps/v8/src/objects/struct.h
+++ b/deps/v8/src/objects/struct.h
@@ -7,7 +7,6 @@
#include "src/objects/heap-object.h"
#include "src/objects/objects.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -15,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/struct-tq.inc"
+
// An abstract superclass, a marker class really, for simple structure classes.
// It doesn't carry any functionality but allows struct classes to be
// identified in the type system.
diff --git a/deps/v8/src/objects/synthetic-module-inl.h b/deps/v8/src/objects/synthetic-module-inl.h
new file mode 100644
index 0000000000..a958e50373
--- /dev/null
+++ b/deps/v8/src/objects/synthetic-module-inl.h
@@ -0,0 +1,27 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SYNTHETIC_MODULE_INL_H_
+#define V8_OBJECTS_SYNTHETIC_MODULE_INL_H_
+
+#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/synthetic-module.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/synthetic-module-tq-inl.inc"
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(SyntheticModule)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_SYNTHETIC_MODULE_INL_H_
diff --git a/deps/v8/src/objects/synthetic-module.cc b/deps/v8/src/objects/synthetic-module.cc
index abe9ad2ed2..6c288f97cf 100644
--- a/deps/v8/src/objects/synthetic-module.cc
+++ b/deps/v8/src/objects/synthetic-module.cc
@@ -10,6 +10,7 @@
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/synthetic-module-inl.h"
#include "src/utils/ostreams.h"
namespace v8 {
@@ -59,7 +60,7 @@ MaybeHandle<Cell> SyntheticModule::ResolveExport(
if (!must_resolve) return MaybeHandle<Cell>();
- return isolate->Throw<Cell>(
+ return isolate->ThrowAt<Cell>(
isolate->factory()->NewSyntaxError(MessageTemplate::kUnresolvableExport,
module_specifier, export_name),
&loc);
diff --git a/deps/v8/src/objects/synthetic-module.h b/deps/v8/src/objects/synthetic-module.h
index 8ac6668170..7c0060e6f0 100644
--- a/deps/v8/src/objects/synthetic-module.h
+++ b/deps/v8/src/objects/synthetic-module.h
@@ -13,6 +13,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/synthetic-module-tq.inc"
+
// The runtime representation of a Synthetic Module Record, a module that can be
// instantiated by an embedder with embedder-defined exports and evaluation
// steps.
diff --git a/deps/v8/src/objects/tagged-field-inl.h b/deps/v8/src/objects/tagged-field-inl.h
index fed3192dd9..eaaa557431 100644
--- a/deps/v8/src/objects/tagged-field-inl.h
+++ b/deps/v8/src/objects/tagged-field-inl.h
@@ -61,7 +61,7 @@ T TaggedField<T, kFieldOffset>::load(HeapObject host, int offset) {
// static
template <typename T, int kFieldOffset>
-T TaggedField<T, kFieldOffset>::load(const Isolate* isolate, HeapObject host,
+T TaggedField<T, kFieldOffset>::load(IsolateRoot isolate, HeapObject host,
int offset) {
Tagged_t value = *location(host, offset);
return T(tagged_to_full(isolate, value));
@@ -70,7 +70,7 @@ T TaggedField<T, kFieldOffset>::load(const Isolate* isolate, HeapObject host,
// static
template <typename T, int kFieldOffset>
void TaggedField<T, kFieldOffset>::store(HeapObject host, T value) {
-#ifdef V8_CONCURRENT_MARKING
+#ifdef V8_ATOMIC_OBJECT_FIELD_WRITES
Relaxed_Store(host, value);
#else
*location(host) = full_to_tagged(value.ptr());
@@ -80,7 +80,7 @@ void TaggedField<T, kFieldOffset>::store(HeapObject host, T value) {
// static
template <typename T, int kFieldOffset>
void TaggedField<T, kFieldOffset>::store(HeapObject host, int offset, T value) {
-#ifdef V8_CONCURRENT_MARKING
+#ifdef V8_ATOMIC_OBJECT_FIELD_WRITES
Relaxed_Store(host, offset, value);
#else
*location(host, offset) = full_to_tagged(value.ptr());
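// A standalone sketch of the conditional-atomicity idiom above. Assumptions:
// a plain int field stands in for a tagged field, C++20 std::atomic_ref
// stands in for V8's AsAtomicTagged helper, and the macro mirrors
// V8_ATOMIC_OBJECT_FIELD_WRITES.
#include <atomic>

#define ATOMIC_FIELD_WRITES 1

void StoreField(int* location, int value) {
#if ATOMIC_FIELD_WRITES
  // Relaxed atomic store: no ordering guarantees, but no torn writes,
  // which is what a concurrent marker needs to read fields safely.
  std::atomic_ref<int>(*location).store(value, std::memory_order_relaxed);
#else
  *location = value;
#endif
}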
@@ -96,8 +96,7 @@ T TaggedField<T, kFieldOffset>::Relaxed_Load(HeapObject host, int offset) {
// static
template <typename T, int kFieldOffset>
-template <typename LocalIsolate>
-T TaggedField<T, kFieldOffset>::Relaxed_Load(const LocalIsolate* isolate,
+T TaggedField<T, kFieldOffset>::Relaxed_Load(IsolateRoot isolate,
HeapObject host, int offset) {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location(host, offset));
return T(tagged_to_full(isolate, value));
@@ -126,8 +125,7 @@ T TaggedField<T, kFieldOffset>::Acquire_Load(HeapObject host, int offset) {
// static
template <typename T, int kFieldOffset>
-template <typename LocalIsolate>
-T TaggedField<T, kFieldOffset>::Acquire_Load(const LocalIsolate* isolate,
+T TaggedField<T, kFieldOffset>::Acquire_Load(IsolateRoot isolate,
HeapObject host, int offset) {
AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location(host, offset));
return T(tagged_to_full(isolate, value));
diff --git a/deps/v8/src/objects/tagged-field.h b/deps/v8/src/objects/tagged-field.h
index 82b6268ecd..8560c54cc4 100644
--- a/deps/v8/src/objects/tagged-field.h
+++ b/deps/v8/src/objects/tagged-field.h
@@ -38,22 +38,20 @@ class TaggedField : public AllStatic {
static inline Address address(HeapObject host, int offset = 0);
static inline T load(HeapObject host, int offset = 0);
- static inline T load(const Isolate* isolate, HeapObject host, int offset = 0);
+ static inline T load(IsolateRoot isolate, HeapObject host, int offset = 0);
static inline void store(HeapObject host, T value);
static inline void store(HeapObject host, int offset, T value);
static inline T Relaxed_Load(HeapObject host, int offset = 0);
- template <typename LocalIsolate>
- static T Relaxed_Load(const LocalIsolate* isolate, HeapObject host,
- int offset = 0);
+ static inline T Relaxed_Load(IsolateRoot isolate, HeapObject host,
+ int offset = 0);
static inline void Relaxed_Store(HeapObject host, T value);
- static void Relaxed_Store(HeapObject host, int offset, T value);
+ static inline void Relaxed_Store(HeapObject host, int offset, T value);
static inline T Acquire_Load(HeapObject host, int offset = 0);
- template <typename LocalIsolate>
- static inline T Acquire_Load(const LocalIsolate* isolate, HeapObject host,
+ static inline T Acquire_Load(IsolateRoot isolate, HeapObject host,
int offset = 0);
static inline void Release_Store(HeapObject host, T value);
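
The two tagged-field hunks above swap the store guard from V8_CONCURRENT_MARKING to V8_ATOMIC_OBJECT_FIELD_WRITES and thread IsolateRoot through the loads. A minimal stand-alone sketch of the store side, using std::atomic as a stand-in for V8's AsAtomicTagged helpers; only the macro name is taken from the patch, everything else is illustrative:

    #include <atomic>
    #include <cstdint>

    using Tagged_t = std::uintptr_t;  // stand-in for V8's tagged word type

    inline void StoreTaggedField(Tagged_t* location, Tagged_t value) {
    #ifdef V8_ATOMIC_OBJECT_FIELD_WRITES
      // Relaxed ordering suffices: a concurrent marker only needs to observe
      // some fully written value, not any ordering with neighbouring stores.
      reinterpret_cast<std::atomic<Tagged_t>*>(location)
          ->store(value, std::memory_order_relaxed);
    #else
      *location = value;
    #endif
    }
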
diff --git a/deps/v8/src/objects/template-objects-inl.h b/deps/v8/src/objects/template-objects-inl.h
index 3718955fb7..caae8ed8ed 100644
--- a/deps/v8/src/objects/template-objects-inl.h
+++ b/deps/v8/src/objects/template-objects-inl.h
@@ -15,6 +15,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/template-objects-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(TemplateObjectDescription)
TQ_OBJECT_CONSTRUCTORS_IMPL(CachedTemplateObject)
diff --git a/deps/v8/src/objects/template-objects.h b/deps/v8/src/objects/template-objects.h
index 094485de50..8e888f6ca0 100644
--- a/deps/v8/src/objects/template-objects.h
+++ b/deps/v8/src/objects/template-objects.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/template-objects-tq.inc"
+
// CachedTemplateObject is a tuple used to cache a TemplateObject that has been
// created. All the CachedTemplateObjects for a given SharedFunctionInfo form a
// linked list via the next fields.
diff --git a/deps/v8/src/objects/templates-inl.h b/deps/v8/src/objects/templates-inl.h
index 8dd5aa6e2d..613a4279a4 100644
--- a/deps/v8/src/objects/templates-inl.h
+++ b/deps/v8/src/objects/templates-inl.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/templates-tq-inl.inc"
+
TQ_OBJECT_CONSTRUCTORS_IMPL(TemplateInfo)
TQ_OBJECT_CONSTRUCTORS_IMPL(FunctionTemplateInfo)
TQ_OBJECT_CONSTRUCTORS_IMPL(ObjectTemplateInfo)
@@ -36,6 +38,9 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, do_not_cache, DoNotCacheBit::kShift)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, accept_any_receiver,
AcceptAnyReceiverBit::kShift)
+RELEASE_ACQUIRE_ACCESSORS(FunctionTemplateInfo, call_code, HeapObject,
+ kCallCodeOffset)
+
// static
FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
@@ -129,6 +134,14 @@ void ObjectTemplateInfo::set_immutable_proto(bool immutable) {
return set_data(IsImmutablePrototypeBit::update(data(), immutable));
}
+bool ObjectTemplateInfo::code_like() const {
+ return IsCodeKindBit::decode(data());
+}
+
+void ObjectTemplateInfo::set_code_like(bool is_code_like) {
+ return set_data(IsCodeKindBit::update(data(), is_code_like));
+}
+
bool FunctionTemplateInfo::IsTemplateFor(JSObject object) {
return IsTemplateFor(object.map());
}
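
The new code_like accessors above decode and update a single flag inside the ObjectTemplateInfoFlags word; the templates.tq hunk below takes one bit from embedder_field_count to make room for is_code_kind. A self-contained sketch of that layout, assuming a simplified BitField template rather than V8's actual base::BitField:

    #include <cstdint>

    // Simplified stand-in for V8's base::BitField.
    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
      static constexpr T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> kShift);
      }
      static constexpr uint32_t update(uint32_t previous, T value) {
        return (previous & ~kMask) |
               ((static_cast<uint32_t>(value) << kShift) & kMask);
      }
    };

    using IsImmutablePrototypeBit = BitField<bool, 0, 1>;
    using IsCodeKindBit = BitField<bool, 1, 1>;               // the new bit
    using EmbedderFieldCountBits = BitField<int32_t, 2, 28>;  // shrunk 29 -> 28
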
diff --git a/deps/v8/src/objects/templates.h b/deps/v8/src/objects/templates.h
index 5aa0dc16a3..13d68ef391 100644
--- a/deps/v8/src/objects/templates.h
+++ b/deps/v8/src/objects/templates.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/templates-tq.inc"
+
class TemplateInfo : public TorqueGeneratedTemplateInfo<TemplateInfo, Struct> {
public:
NEVER_READ_ONLY_SPACE
@@ -85,6 +87,8 @@ class FunctionTemplateInfo
DECL_RARE_ACCESSORS(c_signature, CSignature, Object)
#undef DECL_RARE_ACCESSORS
+ DECL_RELEASE_ACQUIRE_ACCESSORS(call_code, HeapObject)
+
// Begin flag bits ---------------------
DECL_BOOLEAN_ACCESSORS(undetectable)
@@ -156,6 +160,7 @@ class ObjectTemplateInfo
public:
DECL_INT_ACCESSORS(embedder_field_count)
DECL_BOOLEAN_ACCESSORS(immutable_proto)
+ DECL_BOOLEAN_ACCESSORS(code_like)
// Dispatched behavior.
DECL_PRINTER(ObjectTemplateInfo)
diff --git a/deps/v8/src/objects/template.tq b/deps/v8/src/objects/templates.tq
index 1336fb19ba..564d3569dc 100644
--- a/deps/v8/src/objects/template.tq
+++ b/deps/v8/src/objects/templates.tq
@@ -65,7 +65,8 @@ extern class FunctionTemplateInfo extends TemplateInfo {
bitfield struct ObjectTemplateInfoFlags extends uint31 {
is_immutable_prototype: bool: 1 bit;
- embedder_field_count: int32: 29 bit;
+ is_code_kind: bool: 1 bit;
+ embedder_field_count: int32: 28 bit;
}
@generateCppClass
diff --git a/deps/v8/src/objects/torque-defined-classes-inl.h b/deps/v8/src/objects/torque-defined-classes-inl.h
new file mode 100644
index 0000000000..2579e9f430
--- /dev/null
+++ b/deps/v8/src/objects/torque-defined-classes-inl.h
@@ -0,0 +1,23 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef V8_OBJECTS_TORQUE_DEFINED_CLASSES_INL_H_
+#define V8_OBJECTS_TORQUE_DEFINED_CLASSES_INL_H_
+
+#include "src/objects/objects-inl.h"
+#include "src/objects/torque-defined-classes.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/torque-defined-classes-tq-inl.inc"
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_TORQUE_DEFINED_CLASSES_INL_H_
diff --git a/deps/v8/src/objects/torque-defined-classes.h b/deps/v8/src/objects/torque-defined-classes.h
new file mode 100644
index 0000000000..aeea4e1c53
--- /dev/null
+++ b/deps/v8/src/objects/torque-defined-classes.h
@@ -0,0 +1,25 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef V8_OBJECTS_TORQUE_DEFINED_CLASSES_H_
+#define V8_OBJECTS_TORQUE_DEFINED_CLASSES_H_
+
+#include "src/objects/descriptor-array.h"
+#include "src/objects/fixed-array.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/torque-defined-classes-tq.inc"
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_TORQUE_DEFINED_CLASSES_H_
diff --git a/deps/v8/src/objects/torque-defined-classes.tq b/deps/v8/src/objects/torque-defined-classes.tq
new file mode 100644
index 0000000000..883576777b
--- /dev/null
+++ b/deps/v8/src/objects/torque-defined-classes.tq
@@ -0,0 +1,17 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/torque-defined-classes.h"
+
+// Classes defined in Torque that are not exported are attributed to this file,
+// independently of where they are actually defined. This gives them
+// corresponding C++ headers and removes the need to add another C++ header for
+// each file defining such a class.
+// In addition, classes defined in the test directory are also attributed to
+// this file, because there is no directory corresponding to src/objects in
+// test/ and it would be confusing to add one there.
+
+// The corresponding C++ headers are:
+// - src/objects/torque-defined-classes.h
+// - src/objects/torque-defined-classes-inl.h
diff --git a/deps/v8/src/objects/transitions-inl.h b/deps/v8/src/objects/transitions-inl.h
index b2ee5366b2..d9d2f83a7f 100644
--- a/deps/v8/src/objects/transitions-inl.h
+++ b/deps/v8/src/objects/transitions-inl.h
@@ -5,13 +5,13 @@
#ifndef V8_OBJECTS_TRANSITIONS_INL_H_
#define V8_OBJECTS_TRANSITIONS_INL_H_
-#include "src/objects/transitions.h"
-
#include "src/ic/handler-configuration-inl.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/maybe-object-inl.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
+#include "src/objects/transitions.h"
+#include "src/snapshot/deserializer.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -100,7 +100,7 @@ HeapObjectSlot TransitionArray::GetTargetSlot(int transition_number) {
PropertyDetails TransitionsAccessor::GetTargetDetails(Name name, Map target) {
DCHECK(!IsSpecialTransition(name.GetReadOnlyRoots(), name));
InternalIndex descriptor = target.LastAdded();
- DescriptorArray descriptors = target.instance_descriptors();
+ DescriptorArray descriptors = target.instance_descriptors(kRelaxedLoad);
// Transitions are allowed only for the last added property.
DCHECK(descriptors.GetKey(descriptor).Equals(name));
return descriptors.GetDetails(descriptor);
@@ -113,7 +113,7 @@ PropertyDetails TransitionsAccessor::GetSimpleTargetDetails(Map transition) {
// static
Name TransitionsAccessor::GetSimpleTransitionKey(Map transition) {
InternalIndex descriptor = transition.LastAdded();
- return transition.instance_descriptors().GetKey(descriptor);
+ return transition.instance_descriptors(kRelaxedLoad).GetKey(descriptor);
}
// static
@@ -157,6 +157,14 @@ bool TransitionArray::GetTargetIfExists(int transition_number, Isolate* isolate,
Map* target) {
MaybeObject raw = GetRawTarget(transition_number);
HeapObject heap_object;
+ // If the raw target is a Smi, then this TransitionArray is in the process of
+ // being deserialized, and doesn't yet have an initialized entry for this
+ // transition.
+ if (raw.IsSmi()) {
+ DCHECK(isolate->has_active_deserializer());
+ DCHECK_EQ(raw.ToSmi(), Deserializer::uninitialized_field_value());
+ return false;
+ }
if (raw->GetHeapObjectIfStrong(&heap_object) &&
heap_object.IsUndefined(isolate)) {
return false;
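
The guard added above treats a Smi in the raw slot as "this TransitionArray is mid-deserialization, report no transition". Distilled into a stand-alone sketch; the sentinel constant is illustrative, where V8 compares against Deserializer::uninitialized_field_value():

    #include <cstdint>
    #include <optional>

    constexpr std::intptr_t kUninitializedField = -1;  // illustrative sentinel

    std::optional<std::intptr_t> GetTargetIfExists(std::intptr_t raw_slot) {
      if (raw_slot == kUninitializedField) {
        // Slot not yet filled in by the deserializer: report "no transition".
        return std::nullopt;
      }
      return raw_slot;
    }
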
diff --git a/deps/v8/src/objects/transitions.cc b/deps/v8/src/objects/transitions.cc
index d2c5f56fd5..f6623258d6 100644
--- a/deps/v8/src/objects/transitions.cc
+++ b/deps/v8/src/objects/transitions.cc
@@ -285,7 +285,7 @@ bool TransitionsAccessor::IsMatchingMap(Map target, Name name,
PropertyKind kind,
PropertyAttributes attributes) {
InternalIndex descriptor = target.LastAdded();
- DescriptorArray descriptors = target.instance_descriptors();
+ DescriptorArray descriptors = target.instance_descriptors(kRelaxedLoad);
Name key = descriptors.GetKey(descriptor);
if (key != name) return false;
return descriptors.GetDetails(descriptor)
@@ -330,7 +330,7 @@ Handle<WeakFixedArray> TransitionArray::GrowPrototypeTransitionArray(
Handle<WeakFixedArray> array, int new_capacity, Isolate* isolate) {
// Grow array by factor 2 up to MaxCachedPrototypeTransitions.
int capacity = array->length() - kProtoTransitionHeaderSize;
- new_capacity = Min(kMaxCachedPrototypeTransitions, new_capacity);
+ new_capacity = std::min({kMaxCachedPrototypeTransitions, new_capacity});
DCHECK_GT(new_capacity, capacity);
int grow_by = new_capacity - capacity;
array = isolate->factory()->CopyWeakFixedArrayAndGrow(array, grow_by);
@@ -530,7 +530,8 @@ void TransitionsAccessor::CheckNewTransitionsAreConsistent(
TransitionArray new_transitions = TransitionArray::cast(transitions);
for (int i = 0; i < old_transitions.number_of_transitions(); i++) {
Map target = old_transitions.GetTarget(i);
- if (target.instance_descriptors() == map_.instance_descriptors()) {
+ if (target.instance_descriptors(kRelaxedLoad) ==
+ map_.instance_descriptors(kRelaxedLoad)) {
Name key = old_transitions.GetKey(i);
int new_target_index;
if (IsSpecialTransition(ReadOnlyRoots(isolate_), key)) {
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index d9abe45124..d5f5f05c29 100644
--- a/deps/v8/src/objects/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -7,6 +7,7 @@
#include <type_traits>
#include "include/v8-value-serializer-version.h"
+#include "include/v8.h"
#include "src/api/api-inl.h"
#include "src/base/logging.h"
#include "src/execution/isolate.h"
@@ -20,9 +21,11 @@
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "src/objects/oddball-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/property-descriptor.h"
+#include "src/objects/property-details.h"
#include "src/objects/smi.h"
#include "src/objects/transitions-inl.h"
#include "src/snapshot/code-serializer.h"
@@ -384,7 +387,7 @@ void ValueSerializer::TransferArrayBuffer(uint32_t transfer_id,
Handle<JSArrayBuffer> array_buffer) {
DCHECK(!array_buffer_transfer_map_.Find(array_buffer));
DCHECK(!array_buffer->is_shared());
- array_buffer_transfer_map_.Set(array_buffer, transfer_id);
+ array_buffer_transfer_map_.Insert(array_buffer, transfer_id);
}
Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
@@ -500,16 +503,16 @@ void ValueSerializer::WriteString(Handle<String> string) {
Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
// If the object has already been serialized, just write its ID.
- uint32_t* id_map_entry = id_map_.Get(receiver);
- if (uint32_t id = *id_map_entry) {
+ auto find_result = id_map_.FindOrInsert(receiver);
+ if (find_result.already_exists) {
WriteTag(SerializationTag::kObjectReference);
- WriteVarint(id - 1);
+ WriteVarint(*find_result.entry - 1);
return ThrowIfOutOfMemory();
}
// Otherwise, allocate an ID for it.
uint32_t id = next_id_++;
- *id_map_entry = id + 1;
+ *find_result.entry = id + 1;
// Eliminate callable and exotic objects, which should not be serialized.
InstanceType instance_type = receiver->map().instance_type();
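
The IdentityMap change above folds the old Get-then-assign pair into one FindOrInsert lookup that also reports whether the key was already present. The same shape expressed with std::unordered_map::try_emplace (a sketch, not V8's IdentityMap):

    #include <cstdint>
    #include <unordered_map>

    std::unordered_map<const void*, uint32_t> id_map;
    uint32_t next_id = 0;

    // Returns the object's id; sets *already_exists the way
    // find_result.already_exists is used above. Ids are stored off by one so
    // that the map's zero default means "absent".
    uint32_t GetOrAssignId(const void* object, bool* already_exists) {
      auto [it, inserted] = id_map.try_emplace(object, 0);
      *already_exists = !inserted;
      if (inserted) it->second = ++next_id;
      return it->second - 1;
    }
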
@@ -588,13 +591,15 @@ Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
uint32_t properties_written = 0;
bool map_changed = false;
for (InternalIndex i : map->IterateOwnDescriptors()) {
- Handle<Name> key(map->instance_descriptors().GetKey(i), isolate_);
+ Handle<Name> key(map->instance_descriptors(kRelaxedLoad).GetKey(i),
+ isolate_);
if (!key->IsString()) continue;
- PropertyDetails details = map->instance_descriptors().GetDetails(i);
+ PropertyDetails details =
+ map->instance_descriptors(kRelaxedLoad).GetDetails(i);
if (details.IsDontEnum()) continue;
Handle<Object> value;
- if (V8_LIKELY(!map_changed)) map_changed = *map == object->map();
+ if (V8_LIKELY(!map_changed)) map_changed = *map != object->map();
if (V8_LIKELY(!map_changed && details.location() == kField)) {
DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
@@ -715,13 +720,14 @@ Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
}
}
- KeyAccumulator accumulator(isolate_, KeyCollectionMode::kOwnOnly,
- ENUMERABLE_STRINGS);
- if (!accumulator.CollectOwnPropertyNames(array, array).FromMaybe(false)) {
+ Handle<FixedArray> keys;
+ if (!KeyAccumulator::GetKeys(array, KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS,
+ GetKeysConversion::kKeepNumbers, false, true)
+ .ToHandle(&keys)) {
return Nothing<bool>();
}
- Handle<FixedArray> keys =
- accumulator.GetKeys(GetKeysConversion::kConvertToString);
+
uint32_t properties_written;
if (!WriteJSObjectPropertiesSlow(array, keys).To(&properties_written)) {
return Nothing<bool>();
@@ -790,13 +796,12 @@ Maybe<bool> ValueSerializer::WriteJSMap(Handle<JSMap> map) {
{
DisallowHeapAllocation no_gc;
Oddball the_hole = ReadOnlyRoots(isolate_).the_hole_value();
- int capacity = table->UsedCapacity();
int result_index = 0;
- for (int i = 0; i < capacity; i++) {
- Object key = table->KeyAt(i);
+ for (InternalIndex entry : table->IterateEntries()) {
+ Object key = table->KeyAt(entry);
if (key == the_hole) continue;
entries->set(result_index++, key);
- entries->set(result_index++, table->ValueAt(i));
+ entries->set(result_index++, table->ValueAt(entry));
}
DCHECK_EQ(result_index, length);
}
@@ -821,10 +826,9 @@ Maybe<bool> ValueSerializer::WriteJSSet(Handle<JSSet> set) {
{
DisallowHeapAllocation no_gc;
Oddball the_hole = ReadOnlyRoots(isolate_).the_hole_value();
- int capacity = table->UsedCapacity();
int result_index = 0;
- for (int i = 0; i < capacity; i++) {
- Object key = table->KeyAt(i);
+ for (InternalIndex entry : table->IterateEntries()) {
+ Object key = table->KeyAt(entry);
if (key == the_hole) continue;
entries->set(result_index++, key);
}
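
Both rewritten loops above (WriteJSMap and WriteJSSet) walk the table's entries and skip holes instead of indexing raw capacity slots. The pattern, reduced to standard containers, with an optional-valued slot standing in for the_hole:

    #include <optional>
    #include <vector>

    // Collect live keys, skipping holes, the way the loops above skip
    // the_hole entries while iterating the hash table.
    template <typename K>
    std::vector<K> CollectKeys(const std::vector<std::optional<K>>& table) {
      std::vector<K> keys;
      for (const auto& slot : table) {
        if (!slot) continue;  // hole: deleted or never-used entry
        keys.push_back(*slot);
      }
      return keys;
    }
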
@@ -1647,8 +1651,12 @@ MaybeHandle<JSRegExp> ValueDeserializer::ReadJSRegExp() {
}
// Ensure the deserialized flags are valid.
- uint32_t flags_mask = static_cast<uint32_t>(-1) << JSRegExp::kFlagCount;
- if ((raw_flags & flags_mask) ||
+ uint32_t bad_flags_mask = static_cast<uint32_t>(-1) << JSRegExp::kFlagCount;
+ // kLinear is accepted only with the appropriate flag.
+ if (!FLAG_enable_experimental_regexp_engine) {
+ bad_flags_mask |= JSRegExp::kLinear;
+ }
+ if ((raw_flags & bad_flags_mask) ||
!JSRegExp::New(isolate_, pattern, static_cast<JSRegExp::Flags>(raw_flags))
.ToHandle(&regexp)) {
return MaybeHandle<JSRegExp>();
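
The validation above builds one "bad flags" mask: every bit at or above kFlagCount, plus kLinear whenever the experimental engine is disabled. A stand-alone equivalent; the constants are illustrative, not V8's actual values:

    #include <cstdint>

    constexpr int kFlagCount = 8;          // illustrative
    constexpr uint32_t kLinear = 1u << 6;  // illustrative bit position

    bool AreRegExpFlagsValid(uint32_t raw_flags, bool experimental_engine) {
      uint32_t bad_flags_mask = ~uint32_t{0} << kFlagCount;
      if (!experimental_engine) bad_flags_mask |= kLinear;
      return (raw_flags & bad_flags_mask) == 0;
    }
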
@@ -1980,7 +1988,8 @@ static void CommitProperties(Handle<JSObject> object, Handle<Map> map,
DCHECK(!object->map().is_dictionary_map());
DisallowHeapAllocation no_gc;
- DescriptorArray descriptors = object->map().instance_descriptors();
+ DescriptorArray descriptors =
+ object->map().instance_descriptors(kRelaxedLoad);
for (InternalIndex i : InternalIndex::Range(properties.size())) {
// Initializing store.
object->WriteToField(i, descriptors.GetDetails(i),
@@ -2002,7 +2011,8 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
bool transitioning = true;
Handle<Map> map(object->map(), isolate_);
DCHECK(!map->is_dictionary_map());
- DCHECK_EQ(0, map->instance_descriptors().number_of_descriptors());
+ DCHECK_EQ(0,
+ map->instance_descriptors(kRelaxedLoad).number_of_descriptors());
std::vector<Handle<Object>> properties;
properties.reserve(8);
@@ -2053,11 +2063,11 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
if (transitioning) {
InternalIndex descriptor(properties.size());
PropertyDetails details =
- target->instance_descriptors().GetDetails(descriptor);
+ target->instance_descriptors(kRelaxedLoad).GetDetails(descriptor);
Representation expected_representation = details.representation();
if (value->FitsRepresentation(expected_representation)) {
if (expected_representation.IsHeapObject() &&
- !target->instance_descriptors()
+ !target->instance_descriptors(kRelaxedLoad)
.GetFieldType(descriptor)
.NowContains(value)) {
Handle<FieldType> value_type =
@@ -2066,7 +2076,7 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
details.constness(), expected_representation,
value_type);
}
- DCHECK(target->instance_descriptors()
+ DCHECK(target->instance_descriptors(kRelaxedLoad)
.GetFieldType(descriptor)
.NowContains(value));
properties.push_back(value);
diff --git a/deps/v8/src/objects/value-serializer.h b/deps/v8/src/objects/value-serializer.h
index acb3f3d25e..e06badece3 100644
--- a/deps/v8/src/objects/value-serializer.h
+++ b/deps/v8/src/objects/value-serializer.h
@@ -48,6 +48,8 @@ class ValueSerializer {
public:
ValueSerializer(Isolate* isolate, v8::ValueSerializer::Delegate* delegate);
~ValueSerializer();
+ ValueSerializer(const ValueSerializer&) = delete;
+ ValueSerializer& operator=(const ValueSerializer&) = delete;
/*
* Writes out a header, which includes the format version.
@@ -168,8 +170,6 @@ class ValueSerializer {
// A similar map, for transferred array buffers.
IdentityMap<uint32_t, ZoneAllocationPolicy> array_buffer_transfer_map_;
-
- DISALLOW_COPY_AND_ASSIGN(ValueSerializer);
};
/*
@@ -181,6 +181,8 @@ class ValueDeserializer {
ValueDeserializer(Isolate* isolate, Vector<const uint8_t> data,
v8::ValueDeserializer::Delegate* delegate);
~ValueDeserializer();
+ ValueDeserializer(const ValueDeserializer&) = delete;
+ ValueDeserializer& operator=(const ValueDeserializer&) = delete;
/*
* Runs version detection logic, which may fail if the format is invalid.
@@ -299,8 +301,6 @@ class ValueDeserializer {
// Always global handles.
Handle<FixedArray> id_map_;
MaybeHandle<SimpleNumberDictionary> array_buffer_transfer_map_;
-
- DISALLOW_COPY_AND_ASSIGN(ValueDeserializer);
};
} // namespace internal
diff --git a/deps/v8/src/parsing/DIR_METADATA b/deps/v8/src/parsing/DIR_METADATA
new file mode 100644
index 0000000000..165380ae4f
--- /dev/null
+++ b/deps/v8/src/parsing/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Parser"
+} \ No newline at end of file
diff --git a/deps/v8/src/parsing/OWNERS b/deps/v8/src/parsing/OWNERS
index 40e6e8b427..9d54af5f2d 100644
--- a/deps/v8/src/parsing/OWNERS
+++ b/deps/v8/src/parsing/OWNERS
@@ -5,5 +5,3 @@ littledan@chromium.org
marja@chromium.org
neis@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>Parser
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 5fd685505c..1c2b1b91ed 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -32,8 +32,6 @@ UnoptimizedCompileFlags::UnoptimizedCompileFlags(Isolate* isolate,
set_might_always_opt(FLAG_always_opt || FLAG_prepare_always_opt);
set_allow_natives_syntax(FLAG_allow_natives_syntax);
set_allow_lazy_compile(FLAG_lazy);
- set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
- set_allow_harmony_import_meta(FLAG_harmony_import_meta);
set_allow_harmony_private_methods(FLAG_harmony_private_methods);
set_collect_source_positions(!FLAG_enable_lazy_source_positions ||
isolate->NeedsDetailedOptimizedCodeLineInfo());
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index c774f0ae94..d99ddcda89 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -60,8 +60,6 @@ class Zone;
V(might_always_opt, bool, 1, _) \
V(allow_natives_syntax, bool, 1, _) \
V(allow_lazy_compile, bool, 1, _) \
- V(allow_harmony_dynamic_import, bool, 1, _) \
- V(allow_harmony_import_meta, bool, 1, _) \
V(allow_harmony_private_methods, bool, 1, _) \
V(is_oneshot_iife, bool, 1, _) \
V(collect_source_positions, bool, 1, _) \
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 47981c768b..ee54709345 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -1853,7 +1853,6 @@ ParserBase<Impl>::ParsePrimaryExpression() {
return ParseSuperExpression(is_new);
}
case Token::IMPORT:
- if (!flags().allow_harmony_dynamic_import()) break;
return ParseImportExpressions();
case Token::LBRACK:
@@ -2989,15 +2988,12 @@ ParserBase<Impl>::ParseCoalesceExpression(ExpressionT expression) {
bool first_nullish = true;
while (peek() == Token::NULLISH) {
SourceRange right_range;
- int pos;
- ExpressionT y;
- {
- SourceRangeScope right_range_scope(scanner(), &right_range);
- Consume(Token::NULLISH);
- pos = peek_position();
- // Parse BitwiseOR or higher.
- y = ParseBinaryExpression(6);
- }
+ SourceRangeScope right_range_scope(scanner(), &right_range);
+ Consume(Token::NULLISH);
+ int pos = peek_position();
+
+ // Parse BitwiseOR or higher.
+ ExpressionT y = ParseBinaryExpression(6);
if (first_nullish) {
expression =
factory()->NewBinaryOperation(Token::NULLISH, expression, y, pos);
@@ -3297,7 +3293,6 @@ ParserBase<Impl>::ParseLeftHandSideContinuation(ExpressionT result) {
bool optional_chaining = false;
bool is_optional = false;
- int optional_link_begin;
do {
switch (peek()) {
case Token::QUESTION_PERIOD: {
@@ -3305,16 +3300,10 @@ ParserBase<Impl>::ParseLeftHandSideContinuation(ExpressionT result) {
ReportUnexpectedToken(peek());
return impl()->FailureExpression();
}
- // Include the ?. in the source range position.
- optional_link_begin = scanner()->peek_location().beg_pos;
Consume(Token::QUESTION_PERIOD);
is_optional = true;
optional_chaining = true;
- if (Token::IsPropertyOrCall(peek())) continue;
- int pos = position();
- ExpressionT key = ParsePropertyOrPrivatePropertyName();
- result = factory()->NewProperty(result, key, pos, is_optional);
- break;
+ continue;
}
/* Property */
@@ -3394,7 +3383,14 @@ ParserBase<Impl>::ParseLeftHandSideContinuation(ExpressionT result) {
}
default:
- // Template literals in/after an Optional Chain not supported:
+ /* Optional Property */
+ if (is_optional) {
+ DCHECK_EQ(scanner()->current_token(), Token::QUESTION_PERIOD);
+ int pos = position();
+ ExpressionT key = ParsePropertyOrPrivatePropertyName();
+ result = factory()->NewProperty(result, key, pos, is_optional);
+ break;
+ }
if (optional_chaining) {
impl()->ReportMessageAt(scanner()->peek_location(),
MessageTemplate::kOptionalChainingNoTemplate);
@@ -3405,12 +3401,8 @@ ParserBase<Impl>::ParseLeftHandSideContinuation(ExpressionT result) {
result = ParseTemplateLiteral(result, position(), true);
break;
}
- if (is_optional) {
- SourceRange chain_link_range(optional_link_begin, end_position());
- impl()->RecordExpressionSourceRange(result, chain_link_range);
- is_optional = false;
- }
- } while (Token::IsPropertyOrCall(peek()));
+ is_optional = false;
+ } while (is_optional || Token::IsPropertyOrCall(peek()));
if (optional_chaining) return factory()->NewOptionalChain(result);
return result;
}
@@ -3446,10 +3438,7 @@ ParserBase<Impl>::ParseMemberWithPresentNewPrefixesExpression() {
if (peek() == Token::SUPER) {
const bool is_new = true;
result = ParseSuperExpression(is_new);
- } else if (flags().allow_harmony_dynamic_import() &&
- peek() == Token::IMPORT &&
- (!flags().allow_harmony_import_meta() ||
- PeekAhead() == Token::LPAREN)) {
+ } else if (peek() == Token::IMPORT && PeekAhead() == Token::LPAREN) {
impl()->ReportMessageAt(scanner()->peek_location(),
MessageTemplate::kImportCallNotNewExpression);
return impl()->FailureExpression();
@@ -3547,11 +3536,9 @@ ParserBase<Impl>::ParseMemberExpression() {
template <typename Impl>
typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::ParseImportExpressions() {
- DCHECK(flags().allow_harmony_dynamic_import());
-
Consume(Token::IMPORT);
int pos = position();
- if (flags().allow_harmony_import_meta() && Check(Token::PERIOD)) {
+ if (Check(Token::PERIOD)) {
ExpectContextualKeyword(ast_value_factory()->meta_string(), "import.meta",
pos);
if (!flags().is_module()) {
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index b05ae32bba..c65c1dc6b6 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -27,6 +27,7 @@
#include "src/runtime/runtime.h"
#include "src/strings/char-predicates-inl.h"
#include "src/strings/string-stream.h"
+#include "src/strings/unicode-inl.h"
#include "src/tracing/trace-event.h"
#include "src/zone/zone-list-inl.h"
@@ -1033,9 +1034,7 @@ Statement* Parser::ParseModuleItem() {
// We must be careful not to parse a dynamic import expression as an import
// declaration. Same for import.meta expressions.
Token::Value peek_ahead = PeekAhead();
- if ((!flags().allow_harmony_dynamic_import() ||
- peek_ahead != Token::LPAREN) &&
- (!flags().allow_harmony_import_meta() || peek_ahead != Token::PERIOD)) {
+ if (peek_ahead != Token::LPAREN && peek_ahead != Token::PERIOD) {
ParseImportDeclaration();
return factory()->EmptyStatement();
}
@@ -1071,7 +1070,8 @@ const AstRawString* Parser::ParseModuleSpecifier() {
}
ZoneChunkList<Parser::ExportClauseData>* Parser::ParseExportClause(
- Scanner::Location* reserved_loc) {
+ Scanner::Location* reserved_loc,
+ Scanner::Location* string_literal_local_name_loc) {
// ExportClause :
// '{' '}'
// '{' ExportsList '}'
@@ -1084,6 +1084,12 @@ ZoneChunkList<Parser::ExportClauseData>* Parser::ParseExportClause(
// ExportSpecifier :
// IdentifierName
// IdentifierName 'as' IdentifierName
+ // IdentifierName 'as' ModuleExportName
+ // ModuleExportName
+ // ModuleExportName 'as' ModuleExportName
+ //
+ // ModuleExportName :
+ // StringLiteral
ZoneChunkList<ExportClauseData>* export_data =
zone()->New<ZoneChunkList<ExportClauseData>>(zone());
@@ -1091,23 +1097,27 @@ ZoneChunkList<Parser::ExportClauseData>* Parser::ParseExportClause(
Token::Value name_tok;
while ((name_tok = peek()) != Token::RBRACE) {
- // Keep track of the first reserved word encountered in case our
- // caller needs to report an error.
- if (!reserved_loc->IsValid() &&
- !Token::IsValidIdentifier(name_tok, LanguageMode::kStrict, false,
- flags().is_module())) {
+ const AstRawString* local_name = ParseExportSpecifierName();
+ if (!string_literal_local_name_loc->IsValid() &&
+ name_tok == Token::STRING) {
+ // Keep track of the first string literal local name exported for error
+ // reporting. These must be followed by a 'from' clause.
+ *string_literal_local_name_loc = scanner()->location();
+ } else if (!reserved_loc->IsValid() &&
+ !Token::IsValidIdentifier(name_tok, LanguageMode::kStrict, false,
+ flags().is_module())) {
+ // Keep track of the first reserved word encountered in case our
+ // caller needs to report an error.
*reserved_loc = scanner()->location();
}
- const AstRawString* local_name = ParsePropertyName();
- const AstRawString* export_name = nullptr;
+ const AstRawString* export_name;
Scanner::Location location = scanner()->location();
if (CheckContextualKeyword(ast_value_factory()->as_string())) {
- export_name = ParsePropertyName();
+ export_name = ParseExportSpecifierName();
// Set the location to the whole "a as b" string, so that it makes sense
// both for errors due to "a" and for errors due to "b".
location.end_pos = scanner()->location().end_pos;
- }
- if (export_name == nullptr) {
+ } else {
export_name = local_name;
}
export_data->push_back({export_name, local_name, location});
@@ -1122,6 +1132,31 @@ ZoneChunkList<Parser::ExportClauseData>* Parser::ParseExportClause(
return export_data;
}
+const AstRawString* Parser::ParseExportSpecifierName() {
+ Token::Value next = Next();
+
+ // IdentifierName
+ if (V8_LIKELY(Token::IsPropertyName(next))) {
+ return GetSymbol();
+ }
+
+ // ModuleExportName
+ if (next == Token::STRING) {
+ const AstRawString* export_name = GetSymbol();
+ if (V8_LIKELY(export_name->is_one_byte())) return export_name;
+ if (!unibrow::Utf16::HasUnpairedSurrogate(
+ reinterpret_cast<const uint16_t*>(export_name->raw_data()),
+ export_name->length())) {
+ return export_name;
+ }
+ ReportMessage(MessageTemplate::kInvalidModuleExportName);
+ return EmptyIdentifierString();
+ }
+
+ ReportUnexpectedToken(next);
+ return EmptyIdentifierString();
+}
+
ZonePtrList<const Parser::NamedImport>* Parser::ParseNamedImports(int pos) {
// NamedImports :
// '{' '}'
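
ParseExportSpecifierName above accepts a string-literal ModuleExportName only if it is well-formed UTF-16. A self-contained scan with the same effect as the unibrow::Utf16::HasUnpairedSurrogate call (a sketch):

    #include <cstddef>
    #include <cstdint>

    bool HasUnpairedSurrogate(const uint16_t* data, size_t length) {
      for (size_t i = 0; i < length; ++i) {
        uint16_t unit = data[i];
        if (unit >= 0xD800 && unit <= 0xDBFF) {  // lead surrogate
          if (i + 1 == length) return true;      // lead at end of string
          uint16_t next = data[i + 1];
          if (next < 0xDC00 || next > 0xDFFF) return true;  // no trail follows
          ++i;  // well-formed pair, skip the trail unit
        } else if (unit >= 0xDC00 && unit <= 0xDFFF) {
          return true;  // lone trail surrogate
        }
      }
      return false;
    }
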
@@ -1135,12 +1170,13 @@ ZonePtrList<const Parser::NamedImport>* Parser::ParseNamedImports(int pos) {
// ImportSpecifier :
// BindingIdentifier
// IdentifierName 'as' BindingIdentifier
+ // ModuleExportName 'as' BindingIdentifier
Expect(Token::LBRACE);
auto result = zone()->New<ZonePtrList<const NamedImport>>(1, zone());
while (peek() != Token::RBRACE) {
- const AstRawString* import_name = ParsePropertyName();
+ const AstRawString* import_name = ParseExportSpecifierName();
const AstRawString* local_name = import_name;
Scanner::Location location = scanner()->location();
// In the presence of 'as', the left-side of the 'as' can
@@ -1174,10 +1210,80 @@ ZonePtrList<const Parser::NamedImport>* Parser::ParseNamedImports(int pos) {
return result;
}
+Parser::ImportAssertions* Parser::ParseImportAssertClause() {
+ // AssertClause :
+ // assert '{' '}'
+ // assert '{' AssertEntries '}'
+
+ // AssertEntries :
+ // IdentifierName: AssertionKey
+ // IdentifierName: AssertionKey , AssertEntries
+
+ // AssertionKey :
+ // IdentifierName
+ // StringLiteral
+
+ auto import_assertions = zone()->New<ImportAssertions>(zone());
+
+ if (!FLAG_harmony_import_assertions) {
+ return import_assertions;
+ }
+
+ // Assert clause is optional, and cannot be preceded by a LineTerminator.
+ if (scanner()->HasLineTerminatorBeforeNext() ||
+ !CheckContextualKeyword(ast_value_factory()->assert_string())) {
+ return import_assertions;
+ }
+
+ Expect(Token::LBRACE);
+
+ while (peek() != Token::RBRACE) {
+ const AstRawString* attribute_key = nullptr;
+ if (Check(Token::STRING)) {
+ attribute_key = GetSymbol();
+ } else {
+ attribute_key = ParsePropertyName();
+ }
+
+ Scanner::Location location = scanner()->location();
+
+ Expect(Token::COLON);
+ Expect(Token::STRING);
+
+ const AstRawString* attribute_value = GetSymbol();
+
+ // Set the location to the whole "key: 'value'" string, so that it makes
+ // sense both for errors due to the key and errors due to the value.
+ location.end_pos = scanner()->location().end_pos;
+
+ auto result = import_assertions->insert(std::make_pair(
+ attribute_key, std::make_pair(attribute_value, location)));
+ if (!result.second) {
+ // It is a syntax error if two AssertEntries have the same key.
+ ReportMessageAt(location, MessageTemplate::kImportAssertionDuplicateKey,
+ attribute_key);
+ break;
+ }
+
+ if (peek() == Token::RBRACE) break;
+ if (V8_UNLIKELY(!Check(Token::COMMA))) {
+ ReportUnexpectedToken(Next());
+ break;
+ }
+ }
+
+ Expect(Token::RBRACE);
+
+ return import_assertions;
+}
+
void Parser::ParseImportDeclaration() {
// ImportDeclaration :
// 'import' ImportClause 'from' ModuleSpecifier ';'
// 'import' ModuleSpecifier ';'
+ // 'import' ImportClause 'from' ModuleSpecifier [no LineTerminator here]
+ // AssertClause ';'
+ // 'import' ModuleSpecifier [no LineTerminator here] AssertClause ';'
//
// ImportClause :
// ImportedDefaultBinding
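
In ParseImportAssertClause above, duplicate-key detection falls out of the map insert: ZoneMap is a zone-allocated std::map, so result.second is false exactly when two AssertEntries share a key. A minimal equivalent with std::map:

    #include <map>
    #include <string>
    #include <utility>

    // Returns false when two AssertEntries share a key, which the parser
    // reports as kImportAssertionDuplicateKey.
    bool InsertAssertion(std::map<std::string, std::string>& assertions,
                         const std::string& key, const std::string& value) {
      auto result = assertions.insert(std::make_pair(key, value));
      return result.second;
    }
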
@@ -1198,8 +1304,10 @@ void Parser::ParseImportDeclaration() {
if (tok == Token::STRING) {
Scanner::Location specifier_loc = scanner()->peek_location();
const AstRawString* module_specifier = ParseModuleSpecifier();
+ const ImportAssertions* import_assertions = ParseImportAssertClause();
ExpectSemicolon();
- module()->AddEmptyImport(module_specifier, specifier_loc);
+ module()->AddEmptyImport(module_specifier, import_assertions, specifier_loc,
+ zone());
return;
}
@@ -1242,6 +1350,7 @@ void Parser::ParseImportDeclaration() {
ExpectContextualKeyword(ast_value_factory()->from_string());
Scanner::Location specifier_loc = scanner()->peek_location();
const AstRawString* module_specifier = ParseModuleSpecifier();
+ const ImportAssertions* import_assertions = ParseImportAssertClause();
ExpectSemicolon();
// Now that we have all the information, we can make the appropriate
@@ -1254,24 +1363,26 @@ void Parser::ParseImportDeclaration() {
if (module_namespace_binding != nullptr) {
module()->AddStarImport(module_namespace_binding, module_specifier,
- module_namespace_binding_loc, specifier_loc,
- zone());
+ import_assertions, module_namespace_binding_loc,
+ specifier_loc, zone());
}
if (import_default_binding != nullptr) {
module()->AddImport(ast_value_factory()->default_string(),
import_default_binding, module_specifier,
- import_default_binding_loc, specifier_loc, zone());
+ import_assertions, import_default_binding_loc,
+ specifier_loc, zone());
}
if (named_imports != nullptr) {
if (named_imports->length() == 0) {
- module()->AddEmptyImport(module_specifier, specifier_loc);
+ module()->AddEmptyImport(module_specifier, import_assertions,
+ specifier_loc, zone());
} else {
for (const NamedImport* import : *named_imports) {
module()->AddImport(import->import_name, import->local_name,
- module_specifier, import->location, specifier_loc,
- zone());
+ module_specifier, import_assertions,
+ import->location, specifier_loc, zone());
}
}
}
@@ -1354,18 +1465,18 @@ void Parser::ParseExportStar() {
int pos = position();
Consume(Token::MUL);
- if (!FLAG_harmony_namespace_exports ||
- !PeekContextualKeyword(ast_value_factory()->as_string())) {
+ if (!PeekContextualKeyword(ast_value_factory()->as_string())) {
// 'export' '*' 'from' ModuleSpecifier ';'
Scanner::Location loc = scanner()->location();
ExpectContextualKeyword(ast_value_factory()->from_string());
Scanner::Location specifier_loc = scanner()->peek_location();
const AstRawString* module_specifier = ParseModuleSpecifier();
+ const ImportAssertions* import_assertions = ParseImportAssertClause();
ExpectSemicolon();
- module()->AddStarExport(module_specifier, loc, specifier_loc, zone());
+ module()->AddStarExport(module_specifier, import_assertions, loc,
+ specifier_loc, zone());
return;
}
- if (!FLAG_harmony_namespace_exports) return;
// 'export' '*' 'as' IdentifierName 'from' ModuleSpecifier ';'
//
@@ -1373,9 +1484,14 @@ void Parser::ParseExportStar() {
// export * as x from "...";
// ~>
// import * as .x from "..."; export {.x as x};
+ //
+ // Note that the desugared internal namespace export name (.x above) will
+ // never conflict with a string literal export name, as literal string export
+ // names in local name positions (i.e. left of 'as' or in a clause without
+ // 'as') are disallowed without a following 'from' clause.
ExpectContextualKeyword(ast_value_factory()->as_string());
- const AstRawString* export_name = ParsePropertyName();
+ const AstRawString* export_name = ParseExportSpecifierName();
Scanner::Location export_name_loc = scanner()->location();
const AstRawString* local_name = NextInternalNamespaceExportName();
Scanner::Location local_name_loc = Scanner::Location::invalid();
@@ -1385,21 +1501,34 @@ void Parser::ParseExportStar() {
ExpectContextualKeyword(ast_value_factory()->from_string());
Scanner::Location specifier_loc = scanner()->peek_location();
const AstRawString* module_specifier = ParseModuleSpecifier();
+ const ImportAssertions* import_assertions = ParseImportAssertClause();
ExpectSemicolon();
- module()->AddStarImport(local_name, module_specifier, local_name_loc,
- specifier_loc, zone());
+ module()->AddStarImport(local_name, module_specifier, import_assertions,
+ local_name_loc, specifier_loc, zone());
module()->AddExport(local_name, export_name, export_name_loc, zone());
}
Statement* Parser::ParseExportDeclaration() {
// ExportDeclaration:
// 'export' '*' 'from' ModuleSpecifier ';'
+ // 'export' '*' 'from' ModuleSpecifier [no LineTerminator here]
+ // AssertClause ';'
// 'export' '*' 'as' IdentifierName 'from' ModuleSpecifier ';'
+ // 'export' '*' 'as' IdentifierName 'from' ModuleSpecifier
+ // [no LineTerminator here] AssertClause ';'
+ // 'export' '*' 'as' ModuleExportName 'from' ModuleSpecifier ';'
+ // 'export' '*' 'as' ModuleExportName 'from' ModuleSpecifier
+ // [no LineTerminator here] AssertClause ';'
// 'export' ExportClause ('from' ModuleSpecifier)? ';'
+ // 'export' ExportClause ('from' ModuleSpecifier [no LineTerminator here]
+ // AssertClause)? ';'
// 'export' VariableStatement
// 'export' Declaration
// 'export' 'default' ... (handled in ParseExportDefault)
+ //
+ // ModuleExportName :
+ // StringLiteral
Expect(Token::EXPORT);
Statement* result = nullptr;
@@ -1426,30 +1555,41 @@ Statement* Parser::ParseExportDeclaration() {
// encountered, and then throw a SyntaxError if we are in the
// non-FromClause case.
Scanner::Location reserved_loc = Scanner::Location::invalid();
+ Scanner::Location string_literal_local_name_loc =
+ Scanner::Location::invalid();
ZoneChunkList<ExportClauseData>* export_data =
- ParseExportClause(&reserved_loc);
- const AstRawString* module_specifier = nullptr;
- Scanner::Location specifier_loc;
+ ParseExportClause(&reserved_loc, &string_literal_local_name_loc);
if (CheckContextualKeyword(ast_value_factory()->from_string())) {
- specifier_loc = scanner()->peek_location();
- module_specifier = ParseModuleSpecifier();
- } else if (reserved_loc.IsValid()) {
- // No FromClause, so reserved words are invalid in ExportClause.
- ReportMessageAt(reserved_loc, MessageTemplate::kUnexpectedReserved);
- return nullptr;
- }
- ExpectSemicolon();
- if (module_specifier == nullptr) {
- for (const ExportClauseData& data : *export_data) {
- module()->AddExport(data.local_name, data.export_name, data.location,
- zone());
+ Scanner::Location specifier_loc = scanner()->peek_location();
+ const AstRawString* module_specifier = ParseModuleSpecifier();
+ const ImportAssertions* import_assertions = ParseImportAssertClause();
+ ExpectSemicolon();
+
+ if (export_data->is_empty()) {
+ module()->AddEmptyImport(module_specifier, import_assertions,
+ specifier_loc, zone());
+ } else {
+ for (const ExportClauseData& data : *export_data) {
+ module()->AddExport(data.local_name, data.export_name,
+ module_specifier, import_assertions,
+ data.location, specifier_loc, zone());
+ }
}
- } else if (export_data->is_empty()) {
- module()->AddEmptyImport(module_specifier, specifier_loc);
} else {
+ if (reserved_loc.IsValid()) {
+ // No FromClause, so reserved words are invalid in ExportClause.
+ ReportMessageAt(reserved_loc, MessageTemplate::kUnexpectedReserved);
+ return nullptr;
+ } else if (string_literal_local_name_loc.IsValid()) {
+ ReportMessageAt(string_literal_local_name_loc,
+ MessageTemplate::kModuleExportNameWithoutFromClause);
+ return nullptr;
+ }
+
+ ExpectSemicolon();
+
for (const ExportClauseData& data : *export_data) {
- module()->AddExport(data.local_name, data.export_name,
- module_specifier, data.location, specifier_loc,
+ module()->AddExport(data.local_name, data.export_name, data.location,
zone());
}
}
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 8897030a0c..073f517b56 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -269,7 +269,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Scanner::Location location;
};
ZoneChunkList<ExportClauseData>* ParseExportClause(
- Scanner::Location* reserved_loc);
+ Scanner::Location* reserved_loc,
+ Scanner::Location* string_literal_local_name_loc);
struct NamedImport : public ZoneObject {
const AstRawString* import_name;
const AstRawString* local_name;
@@ -280,7 +281,12 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
local_name(local_name),
location(location) {}
};
+ const AstRawString* ParseExportSpecifierName();
ZonePtrList<const NamedImport>* ParseNamedImports(int pos);
+ using ImportAssertions =
+ ZoneMap<const AstRawString*,
+ std::pair<const AstRawString*, Scanner::Location>>;
+ ImportAssertions* ParseImportAssertClause();
Statement* BuildInitializationBlock(DeclarationParsingResult* parsing_result);
Expression* RewriteReturn(Expression* return_value, int pos);
Statement* RewriteSwitchStatement(SwitchStatement* switch_statement,
@@ -997,14 +1003,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
node, zone()->New<IterationStatementSourceRanges>(body_range));
}
- // Used to record source ranges of expressions associated with optional chain:
- V8_INLINE void RecordExpressionSourceRange(Expression* node,
- const SourceRange& right_range) {
- if (source_range_map_ == nullptr) return;
- source_range_map_->Insert(node,
- zone()->New<ExpressionSourceRanges>(right_range));
- }
-
V8_INLINE void RecordSuspendSourceRange(Expression* node,
int32_t continuation_position) {
if (source_range_map_ == nullptr) return;
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index 942acf13f8..36461ee762 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -246,23 +246,40 @@ void Processor::VisitTryFinallyStatement(TryFinallyStatement* node) {
is_set_ = true;
Visit(node->finally_block());
node->set_finally_block(replacement_->AsBlock());
- // Save .result value at the beginning of the finally block and restore it
- // at the end again: ".backup = .result; ...; .result = .backup"
- // This is necessary because the finally block does not normally contribute
- // to the completion value.
CHECK_NOT_NULL(closure_scope());
- Variable* backup = closure_scope()->NewTemporary(
- factory()->ast_value_factory()->dot_result_string());
- Expression* backup_proxy = factory()->NewVariableProxy(backup);
- Expression* result_proxy = factory()->NewVariableProxy(result_);
- Expression* save = factory()->NewAssignment(
- Token::ASSIGN, backup_proxy, result_proxy, kNoSourcePosition);
- Expression* restore = factory()->NewAssignment(
- Token::ASSIGN, result_proxy, backup_proxy, kNoSourcePosition);
- node->finally_block()->statements()->InsertAt(
- 0, factory()->NewExpressionStatement(save, kNoSourcePosition), zone());
- node->finally_block()->statements()->Add(
- factory()->NewExpressionStatement(restore, kNoSourcePosition), zone());
+ if (is_set_) {
+ // Save the .result value at the beginning of the finally block and restore
+ // it at the end again: ".backup = .result; ...; .result = .backup". This is
+ // necessary because the finally block does not normally contribute to the
+ // completion value.
+ Variable* backup = closure_scope()->NewTemporary(
+ factory()->ast_value_factory()->dot_result_string());
+ Expression* backup_proxy = factory()->NewVariableProxy(backup);
+ Expression* result_proxy = factory()->NewVariableProxy(result_);
+ Expression* save = factory()->NewAssignment(
+ Token::ASSIGN, backup_proxy, result_proxy, kNoSourcePosition);
+ Expression* restore = factory()->NewAssignment(
+ Token::ASSIGN, result_proxy, backup_proxy, kNoSourcePosition);
+ node->finally_block()->statements()->InsertAt(
+ 0, factory()->NewExpressionStatement(save, kNoSourcePosition),
+ zone());
+ node->finally_block()->statements()->Add(
+ factory()->NewExpressionStatement(restore, kNoSourcePosition),
+ zone());
+ } else {
+ // If is_set_ is false, it means the finally block has a 'break' or a
+ // 'continue' and was not preceded by a statement that assigned to
+ // .result. Try-finally statements return the abrupt completions from the
+ // finally block, meaning this case should get undefined.
+ //
+ // Since the finally block will definitely result in an abrupt completion,
+ // there's no need to save and restore the .result.
+ Expression* undef = factory()->NewUndefinedLiteral(kNoSourcePosition);
+ Expression* assignment = SetResult(undef);
+ node->finally_block()->statements()->InsertAt(
+ 0, factory()->NewExpressionStatement(assignment, kNoSourcePosition),
+ zone());
+ }
// We can't tell whether the finally-block is guaranteed to set .result, so
// reset is_set_ before visiting the try-block.
is_set_ = false;
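
The rewriter change splits the finally-block treatment: when the block may complete normally (is_set_), .result is saved and restored around it; when it is known to end abruptly, .result is simply seeded with undefined. For example, eval("try { 1 } finally { 2 }") produces 1, while eval("l: try { 1 } finally { break l; }") must produce undefined. A toy version of the two rewrites over a plain statement list (illustrative types only, not V8's AST):

    #include <string>
    #include <vector>

    struct Statement { std::string text; };  // stand-in for AST nodes

    void RewriteFinallyBlock(std::vector<Statement>& finally_stmts, bool is_set) {
      if (is_set) {
        // May complete normally: protect .result across the finally body.
        finally_stmts.insert(finally_stmts.begin(), {".backup = .result"});
        finally_stmts.push_back({".result = .backup"});
      } else {
        // Known-abrupt finally: the completion value must come out undefined.
        finally_stmts.insert(finally_stmts.begin(), {".result = undefined"});
      }
    }
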
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 1414b3490b..dde90d910f 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -331,7 +331,7 @@ class UnbufferedCharacterStream : public Utf16CharacterStream {
// Provides an unbuffered utf-16 view on the bytes from the underlying
// ByteStream.
-class RelocatingCharacterStream
+class RelocatingCharacterStream final
: public UnbufferedCharacterStream<OnHeapStream> {
public:
template <class... TArgs>
@@ -422,7 +422,7 @@ bool BufferedUtf16CharacterStream::ReadBlock() {
// TODO(verwaest): Decode utf8 chunks into utf16 chunks on the blink side
// instead so we don't need to buffer.
-class Utf8ExternalStreamingStream : public BufferedUtf16CharacterStream {
+class Utf8ExternalStreamingStream final : public BufferedUtf16CharacterStream {
public:
Utf8ExternalStreamingStream(
ScriptCompiler::ExternalSourceStream* source_stream)
diff --git a/deps/v8/src/profiler/DIR_METADATA b/deps/v8/src/profiler/DIR_METADATA
new file mode 100644
index 0000000000..3ba1106a5f
--- /dev/null
+++ b/deps/v8/src/profiler/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Platform>DevTools>JavaScript"
+} \ No newline at end of file
diff --git a/deps/v8/src/profiler/OWNERS b/deps/v8/src/profiler/OWNERS
index 001abef49d..28a7353ef4 100644
--- a/deps/v8/src/profiler/OWNERS
+++ b/deps/v8/src/profiler/OWNERS
@@ -2,5 +2,3 @@ alph@chromium.org
petermarshall@chromium.org
per-file *heap*=ulan@chromium.org
-
-# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 3b72ef818a..6ee7539dda 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -18,6 +18,7 @@
#include "src/logging/log.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/profiler/profiler-stats.h"
+#include "src/profiler/symbolizer.h"
#include "src/utils/locked-queue-inl.h"
#include "src/wasm/wasm-engine.h"
@@ -96,10 +97,10 @@ ProfilingScope::~ProfilingScope() {
}
ProfilerEventsProcessor::ProfilerEventsProcessor(
- Isolate* isolate, ProfileGenerator* generator,
+ Isolate* isolate, Symbolizer* symbolizer,
ProfilerCodeObserver* code_observer)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
- generator_(generator),
+ symbolizer_(symbolizer),
code_observer_(code_observer),
last_code_event_id_(0),
last_processed_code_event_id_(0),
@@ -109,11 +110,12 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(
}
SamplingEventsProcessor::SamplingEventsProcessor(
- Isolate* isolate, ProfileGenerator* generator,
- ProfilerCodeObserver* code_observer, base::TimeDelta period,
- bool use_precise_sampling)
- : ProfilerEventsProcessor(isolate, generator, code_observer),
+ Isolate* isolate, Symbolizer* symbolizer,
+ ProfilerCodeObserver* code_observer, CpuProfilesCollection* profiles,
+ base::TimeDelta period, bool use_precise_sampling)
+ : ProfilerEventsProcessor(isolate, symbolizer, code_observer),
sampler_(new CpuSampler(isolate, this)),
+ profiles_(profiles),
period_(period),
use_precise_sampling_(use_precise_sampling) {
sampler_->Start();
@@ -209,6 +211,15 @@ void ProfilerEventsProcessor::CodeEventHandler(
}
}
+void SamplingEventsProcessor::SymbolizeAndAddToProfiles(
+ const TickSampleEventRecord* record) {
+ Symbolizer::SymbolizedSample symbolized =
+ symbolizer_->SymbolizeTickSample(record->sample);
+ profiles_->AddPathToCurrentProfiles(
+ record->sample.timestamp, symbolized.stack_trace, symbolized.src_line,
+ record->sample.update_stats, record->sample.sampling_interval);
+}
+
ProfilerEventsProcessor::SampleProcessingResult
SamplingEventsProcessor::ProcessOneSample() {
TickSampleEventRecord record1;
@@ -216,7 +227,7 @@ SamplingEventsProcessor::ProcessOneSample() {
(record1.order == last_processed_code_event_id_)) {
TickSampleEventRecord record;
ticks_from_vm_buffer_.Dequeue(&record);
- generator_->SymbolizeTickSample(record.sample);
+ SymbolizeAndAddToProfiles(&record);
return OneSampleProcessed;
}
@@ -228,7 +239,7 @@ SamplingEventsProcessor::ProcessOneSample() {
if (record->order != last_processed_code_event_id_) {
return FoundSampleForNextCodeEvent;
}
- generator_->SymbolizeTickSample(record->sample);
+ SymbolizeAndAddToProfiles(record);
ticks_buffer_.Remove();
return OneSampleProcessed;
}
@@ -315,6 +326,8 @@ ProfilerCodeObserver::ProfilerCodeObserver(Isolate* isolate)
LogBuiltins();
}
+void ProfilerCodeObserver::ClearCodeMap() { code_map_.Clear(); }
+
void ProfilerCodeObserver::CodeEventHandler(
const CodeEventsContainer& evt_rec) {
if (processor_) {
@@ -437,7 +450,7 @@ CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
CpuProfilingLoggingMode logging_mode,
CpuProfilesCollection* test_profiles,
- ProfileGenerator* test_generator,
+ Symbolizer* test_symbolizer,
ProfilerEventsProcessor* test_processor)
: isolate_(isolate),
naming_mode_(naming_mode),
@@ -445,7 +458,7 @@ CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
base_sampling_interval_(base::TimeDelta::FromMicroseconds(
FLAG_cpu_profiler_sampling_interval)),
profiles_(test_profiles),
- generator_(test_generator),
+ symbolizer_(test_symbolizer),
processor_(test_processor),
code_observer_(isolate),
is_profiling_(false) {
@@ -475,8 +488,11 @@ void CpuProfiler::set_use_precise_sampling(bool value) {
void CpuProfiler::ResetProfiles() {
profiles_.reset(new CpuProfilesCollection(isolate_));
profiles_->set_cpu_profiler(this);
- generator_.reset();
- if (!profiling_scope_) profiler_listener_.reset();
+ symbolizer_.reset();
+ if (!profiling_scope_) {
+ profiler_listener_.reset();
+ code_observer_.ClearCodeMap();
+ }
}
void CpuProfiler::EnableLogging() {
@@ -519,17 +535,25 @@ void CpuProfiler::CollectSample() {
}
}
-void CpuProfiler::StartProfiling(const char* title,
- CpuProfilingOptions options) {
- if (profiles_->StartProfiling(title, options)) {
+CpuProfilingStatus CpuProfiler::StartProfiling(const char* title,
+ CpuProfilingOptions options) {
+ StartProfilingStatus status = profiles_->StartProfiling(title, options);
+
+ // TODO(nicodubus): Revisit whether we want to do anything different for
+ // kAlreadyStarted.
+ if (status == CpuProfilingStatus::kStarted ||
+ status == CpuProfilingStatus::kAlreadyStarted) {
TRACE_EVENT0("v8", "CpuProfiler::StartProfiling");
AdjustSamplingInterval();
StartProcessorIfNotStarted();
}
+
+ return status;
}
-void CpuProfiler::StartProfiling(String title, CpuProfilingOptions options) {
- StartProfiling(profiles_->GetName(title), options);
+CpuProfilingStatus CpuProfiler::StartProfiling(String title,
+ CpuProfilingOptions options) {
+ return StartProfiling(profiles_->GetName(title), options);
}
void CpuProfiler::StartProcessorIfNotStarted() {
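
StartProfiling now surfaces a CpuProfilingStatus to callers instead of returning void. A hypothetical embedder-side caller, assuming the public v8-profiler.h API mirrors this internal signature; the enum values are the ones referenced in the hunk above:

    #include "v8-profiler.h"  // assumed public header exposing CpuProfilingStatus

    // Hypothetical helper: succeed whether the profile is new or already
    // running.
    bool EnsureProfiling(v8::CpuProfiler* profiler, v8::Local<v8::String> title) {
      v8::CpuProfilingStatus status = profiler->StartProfiling(title);
      return status == v8::CpuProfilingStatus::kStarted ||
             status == v8::CpuProfilingStatus::kAlreadyStarted;
    }
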
@@ -543,15 +567,14 @@ void CpuProfiler::StartProcessorIfNotStarted() {
EnableLogging();
}
- if (!generator_) {
- generator_.reset(
- new ProfileGenerator(profiles_.get(), code_observer_.code_map()));
+ if (!symbolizer_) {
+ symbolizer_ = std::make_unique<Symbolizer>(code_observer_.code_map());
}
base::TimeDelta sampling_interval = ComputeSamplingInterval();
- processor_.reset(
- new SamplingEventsProcessor(isolate_, generator_.get(), &code_observer_,
- sampling_interval, use_precise_sampling_));
+ processor_.reset(new SamplingEventsProcessor(
+ isolate_, symbolizer_.get(), &code_observer_, profiles_.get(),
+ sampling_interval, use_precise_sampling_));
is_profiling_ = true;
// Enable stack sampling.
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index e8d977424b..e7ca3fbd7b 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -27,7 +27,7 @@ class CodeEntry;
class CodeMap;
class CpuProfilesCollection;
class Isolate;
-class ProfileGenerator;
+class Symbolizer;
#define CODE_EVENTS_TYPE_LIST(V) \
V(CODE_CREATION, CodeCreateEventRecord) \
@@ -165,7 +165,7 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
virtual void SetSamplingInterval(base::TimeDelta) {}
protected:
- ProfilerEventsProcessor(Isolate* isolate, ProfileGenerator* generator,
+ ProfilerEventsProcessor(Isolate* isolate, Symbolizer* symbolizer,
ProfilerCodeObserver* code_observer);
// Called from events processing thread (Run() method.)
@@ -178,7 +178,7 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
};
virtual SampleProcessingResult ProcessOneSample() = 0;
- ProfileGenerator* generator_;
+ Symbolizer* symbolizer_;
ProfilerCodeObserver* code_observer_;
std::atomic_bool running_{true};
base::ConditionVariable running_cond_;
@@ -193,8 +193,9 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
class V8_EXPORT_PRIVATE SamplingEventsProcessor
: public ProfilerEventsProcessor {
public:
- SamplingEventsProcessor(Isolate* isolate, ProfileGenerator* generator,
+ SamplingEventsProcessor(Isolate* isolate, Symbolizer* symbolizer,
ProfilerCodeObserver* code_observer,
+ CpuProfilesCollection* profiles,
base::TimeDelta period, bool use_precise_sampling);
~SamplingEventsProcessor() override;
@@ -221,6 +222,7 @@ class V8_EXPORT_PRIVATE SamplingEventsProcessor
private:
SampleProcessingResult ProcessOneSample() override;
+ void SymbolizeAndAddToProfiles(const TickSampleEventRecord* record);
static const size_t kTickSampleBufferSize = 512 * KB;
static const size_t kTickSampleQueueLength =
@@ -228,6 +230,7 @@ class V8_EXPORT_PRIVATE SamplingEventsProcessor
SamplingCircularQueue<TickSampleEventRecord,
kTickSampleQueueLength> ticks_buffer_;
std::unique_ptr<sampler::Sampler> sampler_;
+ CpuProfilesCollection* profiles_;
base::TimeDelta period_; // Samples & code events processing period.
const bool use_precise_sampling_; // Whether or not busy-waiting is used for
// low sampling intervals on Windows.
@@ -243,6 +246,7 @@ class V8_EXPORT_PRIVATE ProfilerCodeObserver : public CodeEventObserver {
void CodeEventHandler(const CodeEventsContainer& evt_rec) override;
CodeMap* code_map() { return &code_map_; }
+ void ClearCodeMap();
private:
friend class ProfilerEventsProcessor;
@@ -294,7 +298,7 @@ class V8_EXPORT_PRIVATE CpuProfiler {
CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
CpuProfilingLoggingMode logging_mode,
- CpuProfilesCollection* profiles, ProfileGenerator* test_generator,
+ CpuProfilesCollection* profiles, Symbolizer* test_symbolizer,
ProfilerEventsProcessor* test_processor);
~CpuProfiler();
@@ -304,13 +308,16 @@ class V8_EXPORT_PRIVATE CpuProfiler {
using ProfilingMode = v8::CpuProfilingMode;
using NamingMode = v8::CpuProfilingNamingMode;
using LoggingMode = v8::CpuProfilingLoggingMode;
+ using StartProfilingStatus = CpuProfilingStatus;
base::TimeDelta sampling_interval() const { return base_sampling_interval_; }
void set_sampling_interval(base::TimeDelta value);
void set_use_precise_sampling(bool);
void CollectSample();
- void StartProfiling(const char* title, CpuProfilingOptions options = {});
- void StartProfiling(String title, CpuProfilingOptions options = {});
+ StartProfilingStatus StartProfiling(const char* title,
+ CpuProfilingOptions options = {});
+ StartProfilingStatus StartProfiling(String title,
+ CpuProfilingOptions options = {});
CpuProfile* StopProfiling(const char* title);
CpuProfile* StopProfiling(String title);
@@ -321,13 +328,14 @@ class V8_EXPORT_PRIVATE CpuProfiler {
bool is_profiling() const { return is_profiling_; }
- ProfileGenerator* generator() const { return generator_.get(); }
+ Symbolizer* symbolizer() const { return symbolizer_.get(); }
ProfilerEventsProcessor* processor() const { return processor_.get(); }
Isolate* isolate() const { return isolate_; }
ProfilerListener* profiler_listener_for_test() const {
return profiler_listener_.get();
}
+ CodeMap* code_map_for_test() { return code_observer_.code_map(); }
private:
void StartProcessorIfNotStarted();
@@ -352,7 +360,7 @@ class V8_EXPORT_PRIVATE CpuProfiler {
// to a multiple of, or used as the default if unspecified.
base::TimeDelta base_sampling_interval_;
std::unique_ptr<CpuProfilesCollection> profiles_;
- std::unique_ptr<ProfileGenerator> generator_;
+ std::unique_ptr<Symbolizer> symbolizer_;
std::unique_ptr<ProfilerEventsProcessor> processor_;
std::unique_ptr<ProfilerListener> profiler_listener_;
std::unique_ptr<ProfilingScope> profiling_scope_;
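
For orientation, a minimal self-contained sketch (invented names, not V8's actual classes) of the ownership layout the hunks above establish: the profiler lazily creates the Symbolizer, keeps owning the profiles collection, and hands raw pointers to both into the sampling processor.

#include <memory>

struct Symbolizer {};          // stands in for v8::internal::Symbolizer
struct ProfilesCollection {};  // stands in for CpuProfilesCollection

struct Processor {
  // The processor borrows the symbolizer and the profiles collection; it
  // owns neither, mirroring the new SamplingEventsProcessor constructor.
  Processor(Symbolizer* symbolizer, ProfilesCollection* profiles)
      : symbolizer_(symbolizer), profiles_(profiles) {}
  Symbolizer* symbolizer_;
  ProfilesCollection* profiles_;
};

class Profiler {
 public:
  void StartProcessorIfNotStarted() {
    if (!symbolizer_) symbolizer_ = std::make_unique<Symbolizer>();
    processor_ =
        std::make_unique<Processor>(symbolizer_.get(), profiles_.get());
  }

 private:
  std::unique_ptr<ProfilesCollection> profiles_ =
      std::make_unique<ProfilesCollection>();
  std::unique_ptr<Symbolizer> symbolizer_;
  std::unique_ptr<Processor> processor_;
};

int main() {
  Profiler profiler;
  profiler.StartProcessorIfNotStarted();
}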
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index fc9bd00f47..f742b7e1cc 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -64,6 +64,19 @@ void HeapProfiler::BuildEmbedderGraph(Isolate* isolate,
}
}
+void HeapProfiler::SetGetDetachednessCallback(
+ v8::HeapProfiler::GetDetachednessCallback callback, void* data) {
+ get_detachedness_callback_ = {callback, data};
+}
+
+v8::EmbedderGraph::Node::Detachedness HeapProfiler::GetDetachedness(
+ const v8::Local<v8::Value> v8_value, uint16_t class_id) {
+ DCHECK(HasGetDetachednessCallback());
+ return get_detachedness_callback_.first(
+ reinterpret_cast<v8::Isolate*>(heap()->isolate()), v8_value, class_id,
+ get_detachedness_callback_.second);
+}
+
HeapSnapshot* HeapProfiler::TakeSnapshot(
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver,
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index 21d9bb8fcf..67fd1e5bd8 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -72,6 +72,14 @@ class HeapProfiler : public HeapObjectAllocationTracker {
return !build_embedder_graph_callbacks_.empty();
}
+ void SetGetDetachednessCallback(
+ v8::HeapProfiler::GetDetachednessCallback callback, void* data);
+ bool HasGetDetachednessCallback() const {
+ return get_detachedness_callback_.first != nullptr;
+ }
+ v8::EmbedderGraph::Node::Detachedness GetDetachedness(
+ const v8::Local<v8::Value> v8_value, uint16_t class_id);
+
bool is_tracking_object_moves() const { return is_tracking_object_moves_; }
Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
@@ -99,6 +107,8 @@ class HeapProfiler : public HeapObjectAllocationTracker {
std::unique_ptr<SamplingHeapProfiler> sampling_heap_profiler_;
std::vector<std::pair<v8::HeapProfiler::BuildEmbedderGraphCallback, void*>>
build_embedder_graph_callbacks_;
+ std::pair<v8::HeapProfiler::GetDetachednessCallback, void*>
+ get_detachedness_callback_;
DISALLOW_COPY_AND_ASSIGN(HeapProfiler);
};
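
For illustration, a self-contained sketch (simplified, invented types) of the callback-plus-user-data pattern that SetGetDetachednessCallback stores as a std::pair in the hunks above:

#include <cstdint>
#include <iostream>
#include <utility>

enum class Detachedness : uint8_t { kUnknown, kAttached, kDetached };

// The embedder registers a plain function pointer together with an opaque
// data pointer; the profiler later invokes it with that same pointer.
using GetDetachednessCallback = Detachedness (*)(uint64_t value_id, void* data);

class ProfilerSketch {
 public:
  void SetGetDetachednessCallback(GetDetachednessCallback callback,
                                  void* data) {
    get_detachedness_callback_ = {callback, data};
  }
  bool HasGetDetachednessCallback() const {
    return get_detachedness_callback_.first != nullptr;
  }
  Detachedness GetDetachedness(uint64_t value_id) {
    // Mirrors HeapProfiler::GetDetachedness: call .first with .second.
    return get_detachedness_callback_.first(value_id,
                                            get_detachedness_callback_.second);
  }

 private:
  std::pair<GetDetachednessCallback, void*> get_detachedness_callback_{
      nullptr, nullptr};
};

int main() {
  ProfilerSketch profiler;
  int embedder_state = 42;  // stands in for embedder-owned data
  profiler.SetGetDetachednessCallback(
      [](uint64_t, void* data) -> Detachedness {
        std::cout << "embedder data: " << *static_cast<int*>(data) << "\n";
        return Detachedness::kDetached;
      },
      &embedder_state);
  if (profiler.HasGetDetachednessCallback()) profiler.GetDetachedness(1);
}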
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 0c5af20b01..2907a215c6 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -7,6 +7,7 @@
#include <utility>
#include "src/api/api-inl.h"
+#include "src/base/optional.h"
#include "src/codegen/assembler-inl.h"
#include "src/common/globals.h"
#include "src/debug/debug.h"
@@ -103,8 +104,8 @@ void HeapEntry::SetNamedAutoIndexReference(HeapGraphEdge::Type type,
SetNamedReference(type, name, child);
}
-void HeapEntry::Print(
- const char* prefix, const char* edge_name, int max_depth, int indent) {
+void HeapEntry::Print(const char* prefix, const char* edge_name, int max_depth,
+ int indent) const {
STATIC_ASSERT(sizeof(unsigned) == sizeof(id()));
base::OS::Print("%6zu @%6u %*c %s%s: ", self_size(), id(), indent, ' ',
prefix, edge_name);
@@ -162,7 +163,7 @@ void HeapEntry::Print(
}
}
-const char* HeapEntry::TypeAsString() {
+const char* HeapEntry::TypeAsString() const {
switch (type()) {
case kHidden: return "/hidden/";
case kObject: return "/object/";
@@ -578,9 +579,9 @@ void V8HeapExplorer::ExtractLocationForJSFunction(HeapEntry* entry,
Script script = Script::cast(func.shared().script());
int scriptId = script.id();
int start = func.shared().StartPosition();
- int line = script.GetLineNumber(start);
- int col = script.GetColumnNumber(start);
- snapshot_->AddLocation(entry, scriptId, line, col);
+ Script::PositionInfo info;
+ script.GetPositionInfo(start, &info, Script::WITH_OFFSET);
+ snapshot_->AddLocation(entry, scriptId, info.line, info.column);
}
HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
@@ -598,8 +599,8 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
const char* name = names_->GetName(
GetConstructorName(JSObject::cast(object)));
if (object.IsJSGlobalObject()) {
- auto it = objects_tags_.find(JSGlobalObject::cast(object));
- if (it != objects_tags_.end()) {
+ auto it = global_object_tag_map_.find(JSGlobalObject::cast(object));
+ if (it != global_object_tag_map_.end()) {
name = names_->GetFormatted("%s / %s", name, it->second);
}
}
@@ -1066,14 +1067,15 @@ void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map map) {
Map::kTransitionsOrPrototypeInfoOffset);
}
}
- DescriptorArray descriptors = map.instance_descriptors();
+ DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
TagObject(descriptors, "(map descriptors)");
SetInternalReference(entry, "descriptors", descriptors,
Map::kInstanceDescriptorsOffset);
SetInternalReference(entry, "prototype", map.prototype(),
Map::kPrototypeOffset);
if (FLAG_unbox_double_fields) {
- SetInternalReference(entry, "layout_descriptor", map.layout_descriptor(),
+ SetInternalReference(entry, "layout_descriptor",
+ map.layout_descriptor(kAcquireLoad),
Map::kLayoutDescriptorOffset);
}
if (map.IsContextMap()) {
@@ -1115,15 +1117,17 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
CodeKindToString(shared.GetCode().kind())));
}
- if (shared.name_or_scope_info().IsScopeInfo()) {
- TagObject(shared.name_or_scope_info(), "(function scope info)");
+ Object name_or_scope_info = shared.name_or_scope_info(kAcquireLoad);
+ if (name_or_scope_info.IsScopeInfo()) {
+ TagObject(name_or_scope_info, "(function scope info)");
}
- SetInternalReference(entry, "name_or_scope_info", shared.name_or_scope_info(),
+ SetInternalReference(entry, "name_or_scope_info", name_or_scope_info,
SharedFunctionInfo::kNameOrScopeInfoOffset);
SetInternalReference(entry, "script_or_debug_info",
- shared.script_or_debug_info(),
+ shared.script_or_debug_info(kAcquireLoad),
SharedFunctionInfo::kScriptOrDebugInfoOffset);
- SetInternalReference(entry, "function_data", shared.function_data(),
+ SetInternalReference(entry, "function_data",
+ shared.function_data(kAcquireLoad),
SharedFunctionInfo::kFunctionDataOffset);
SetInternalReference(
entry, "raw_outer_scope_info_or_feedback_metadata",
@@ -1277,11 +1281,11 @@ void V8HeapExplorer::ExtractFixedArrayReferences(HeapEntry* entry,
void V8HeapExplorer::ExtractFeedbackVectorReferences(
HeapEntry* entry, FeedbackVector feedback_vector) {
- MaybeObject code = feedback_vector.optimized_code_weak_or_smi();
+ MaybeObject code = feedback_vector.maybe_optimized_code();
HeapObject code_heap_object;
if (code->GetHeapObjectIfWeak(&code_heap_object)) {
SetWeakReference(entry, "optimized code", code_heap_object,
- FeedbackVector::kOptimizedCodeWeakOrSmiOffset);
+ FeedbackVector::kMaybeOptimizedCodeOffset);
}
}
@@ -1324,7 +1328,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject js_obj,
HeapEntry* entry) {
Isolate* isolate = js_obj.GetIsolate();
if (js_obj.HasFastProperties()) {
- DescriptorArray descs = js_obj.map().instance_descriptors();
+ DescriptorArray descs = js_obj.map().instance_descriptors(kRelaxedLoad);
for (InternalIndex i : js_obj.map().IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
switch (details.location()) {
@@ -1477,7 +1481,7 @@ class RootsReferencesExtractor : public RootVisitor {
OffHeapObjectSlot start,
OffHeapObjectSlot end) override {
DCHECK_EQ(root, Root::kStringTable);
- const Isolate* isolate = Isolate::FromHeap(explorer_->heap_);
+ IsolateRoot isolate = Isolate::FromHeap(explorer_->heap_);
for (OffHeapObjectSlot p = start; p < end; ++p) {
explorer_->SetGcSubrootReference(root, description, visiting_weak_roots_,
p.load(isolate));
@@ -1819,22 +1823,26 @@ class GlobalObjectsEnumerator : public RootVisitor {
// Modifies heap. Must not be run during heap traversal.
-void V8HeapExplorer::TagGlobalObjects() {
+void V8HeapExplorer::CollectGlobalObjectsTags() {
+ if (!global_object_name_resolver_) return;
+
Isolate* isolate = Isolate::FromHeap(heap_);
- HandleScope scope(isolate);
GlobalObjectsEnumerator enumerator(isolate);
isolate->global_handles()->IterateAllRoots(&enumerator);
- std::vector<const char*> urls(enumerator.count());
for (int i = 0, l = enumerator.count(); i < l; ++i) {
- urls[i] = global_object_name_resolver_
- ? global_object_name_resolver_->GetName(Utils::ToLocal(
- Handle<JSObject>::cast(enumerator.at(i))))
- : nullptr;
+ Handle<JSGlobalObject> obj = enumerator.at(i);
+ const char* tag = global_object_name_resolver_->GetName(
+ Utils::ToLocal(Handle<JSObject>::cast(obj)));
+ if (tag) {
+ global_object_tag_pairs_.emplace_back(obj, tag);
+ }
}
+}
- DisallowHeapAllocation no_allocation;
- for (int i = 0, l = enumerator.count(); i < l; ++i) {
- if (urls[i]) objects_tags_.emplace(*enumerator.at(i), urls[i]);
+void V8HeapExplorer::MakeGlobalObjectTagMap(
+ const SafepointScope& safepoint_scope) {
+ for (const auto& pair : global_object_tag_pairs_) {
+ global_object_tag_map_.emplace(*pair.first, pair.second);
}
}
@@ -2077,19 +2085,16 @@ class NullContextForSnapshotScope {
} // namespace
bool HeapSnapshotGenerator::GenerateSnapshot() {
- v8_heap_explorer_.TagGlobalObjects();
+ Isolate* isolate = Isolate::FromHeap(heap_);
+ base::Optional<HandleScope> handle_scope(base::in_place, isolate);
+ v8_heap_explorer_.CollectGlobalObjectsTags();
- // TODO(1562) Profiler assumes that any object that is in the heap after
- // full GC is reachable from the root when computing dominators.
- // This is not true for weakly reachable objects.
- // As a temporary solution we call GC twice.
- heap_->PreciseCollectAllGarbage(Heap::kNoGCFlags,
- GarbageCollectionReason::kHeapProfiler);
- heap_->PreciseCollectAllGarbage(Heap::kNoGCFlags,
- GarbageCollectionReason::kHeapProfiler);
+ heap_->CollectAllAvailableGarbage(GarbageCollectionReason::kHeapProfiler);
- NullContextForSnapshotScope null_context_scope(Isolate::FromHeap(heap_));
+ NullContextForSnapshotScope null_context_scope(isolate);
SafepointScope scope(heap_);
+ v8_heap_explorer_.MakeGlobalObjectTagMap(scope);
+ handle_scope.reset();
#ifdef VERIFY_HEAP
Heap* debug_heap = heap_;
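
A small aside on the base::Optional<HandleScope> trick used in GenerateSnapshot above: it lets the scope be opened eagerly but closed before the function ends, so tag collection runs inside the scope while the later safepoint phase runs outside it. A self-contained analogue using std::optional (assumed here as a stand-in for base::Optional):

#include <iostream>
#include <optional>

struct ScopeSketch {
  ScopeSketch() { std::cout << "scope opened\n"; }
  ~ScopeSketch() { std::cout << "scope closed\n"; }
};

int main() {
  // base::in_place / std::in_place constructs the scope immediately.
  std::optional<ScopeSketch> handle_scope(std::in_place);
  // ... phase that needs the scope, e.g. collecting global object tags ...
  handle_scope.reset();  // close the scope early, before the next phase
  // ... phase that must run without the scope, e.g. under a safepoint ...
}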
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index fcf253ea35..df95787f96 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -38,6 +38,7 @@ class JSGlobalObject;
class JSGlobalProxy;
class JSPromise;
class JSWeakCollection;
+class SafepointScope;
struct SourceLocation {
SourceLocation(int entry_index, int scriptId, int line, int col)
@@ -151,12 +152,12 @@ class HeapEntry {
StringsStorage* strings);
V8_EXPORT_PRIVATE void Print(const char* prefix, const char* edge_name,
- int max_depth, int indent);
+ int max_depth, int indent) const;
private:
V8_INLINE std::vector<HeapGraphEdge*>::iterator children_begin() const;
V8_INLINE std::vector<HeapGraphEdge*>::iterator children_end() const;
- const char* TypeAsString();
+ const char* TypeAsString() const;
unsigned type_: 4;
unsigned index_ : 28; // Supports up to ~250M objects.
@@ -196,7 +197,9 @@ class HeapSnapshot {
return gc_subroot_entries_[static_cast<int>(root)];
}
std::deque<HeapEntry>& entries() { return entries_; }
+ const std::deque<HeapEntry>& entries() const { return entries_; }
std::deque<HeapGraphEdge>& edges() { return edges_; }
+ const std::deque<HeapGraphEdge>& edges() const { return edges_; }
std::vector<HeapGraphEdge*>& children() { return children_; }
const std::vector<SourceLocation>& locations() const { return locations_; }
void RememberLastJSObjectId();
@@ -338,7 +341,8 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
HeapEntry* AllocateEntry(HeapThing ptr) override;
int EstimateObjectsCount();
bool IterateAndExtractReferences(HeapSnapshotGenerator* generator);
- void TagGlobalObjects();
+ void CollectGlobalObjectsTags();
+ void MakeGlobalObjectTagMap(const SafepointScope& safepoint_scope);
void TagBuiltinCodeObject(Code code, const char* name);
HeapEntry* AddEntry(Address address,
HeapEntry::Type type,
@@ -445,7 +449,10 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
HeapObjectsMap* heap_object_map_;
SnapshottingProgressReportingInterface* progress_;
HeapSnapshotGenerator* generator_ = nullptr;
- std::unordered_map<JSGlobalObject, const char*, Object::Hasher> objects_tags_;
+ std::vector<std::pair<Handle<JSGlobalObject>, const char*>>
+ global_object_tag_pairs_;
+ std::unordered_map<JSGlobalObject, const char*, Object::Hasher>
+ global_object_tag_map_;
std::unordered_map<Object, const char*, Object::Hasher>
strong_gc_subroot_names_;
std::unordered_set<JSGlobalObject, Object::Hasher> user_roots_;
diff --git a/deps/v8/src/profiler/profile-generator-inl.h b/deps/v8/src/profiler/profile-generator-inl.h
index 3abacb7b61..8239bdb000 100644
--- a/deps/v8/src/profiler/profile-generator-inl.h
+++ b/deps/v8/src/profiler/profile-generator-inl.h
@@ -28,13 +28,6 @@ CodeEntry::CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
position_(0),
line_info_(std::move(line_info)) {}
-inline CodeEntry* ProfileGenerator::FindEntry(Address address,
- Address* out_instruction_start) {
- CodeEntry* entry = code_map_->FindEntry(address, out_instruction_start);
- if (entry) entry->mark_used();
- return entry;
-}
-
ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry,
ProfileNode* parent, int line_number)
: tree_(tree),
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index cf448fcd20..f3344c57a0 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -403,11 +403,11 @@ ProfileNode* ProfileTree::AddPathFromEnd(const ProfileStackTrace& path,
CodeEntry* last_entry = nullptr;
int parent_line_number = v8::CpuProfileNode::kNoLineNumberInfo;
for (auto it = path.rbegin(); it != path.rend(); ++it) {
- if (it->entry.code_entry == nullptr) continue;
- last_entry = (*it).entry.code_entry;
- node = node->FindOrAddChild((*it).entry.code_entry, parent_line_number);
+ if (it->code_entry == nullptr) continue;
+ last_entry = it->code_entry;
+ node = node->FindOrAddChild(it->code_entry, parent_line_number);
parent_line_number = mode == ProfilingMode::kCallerLineNumbers
- ? (*it).entry.line_number
+ ? it->line_number
: v8::CpuProfileNode::kNoLineNumberInfo;
}
if (last_entry && last_entry->has_deopt_info()) {
@@ -644,7 +644,9 @@ void CpuProfile::Print() const {
CodeMap::CodeMap() = default;
-CodeMap::~CodeMap() {
+CodeMap::~CodeMap() { Clear(); }
+
+void CodeMap::Clear() {
// First clean the free list as it's otherwise impossible to tell
// the slot type.
unsigned free_slot = free_list_head_;
@@ -654,6 +656,10 @@ CodeMap::~CodeMap() {
free_slot = next_slot;
}
for (auto slot : code_entries_) delete slot.entry;
+
+ code_entries_.clear();
+ code_map_.clear();
+ free_list_head_ = kNoFreeSlot;
}
void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
@@ -727,24 +733,26 @@ void CodeMap::Print() {
CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
: profiler_(nullptr), current_profiles_semaphore_(1) {}
-bool CpuProfilesCollection::StartProfiling(const char* title,
- CpuProfilingOptions options) {
+CpuProfilingStatus CpuProfilesCollection::StartProfiling(
+ const char* title, CpuProfilingOptions options) {
current_profiles_semaphore_.Wait();
+
if (static_cast<int>(current_profiles_.size()) >= kMaxSimultaneousProfiles) {
current_profiles_semaphore_.Signal();
- return false;
+
+ return CpuProfilingStatus::kErrorTooManyProfilers;
}
for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
if (strcmp(profile->title(), title) == 0) {
// Ignore attempts to start profile with the same title...
current_profiles_semaphore_.Signal();
- // ... though return true to force it collect a sample.
- return true;
+      // ... though return kAlreadyStarted to force it to collect a sample.
+ return CpuProfilingStatus::kAlreadyStarted;
}
}
current_profiles_.emplace_back(new CpuProfile(profiler_, title, options));
current_profiles_semaphore_.Signal();
- return true;
+ return CpuProfilingStatus::kStarted;
}
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
@@ -769,7 +777,6 @@ CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
return profile;
}
-
bool CpuProfilesCollection::IsLastProfile(const char* title) {
// Called from VM thread, and only it can mutate the list,
// so no locking is needed here.
@@ -833,172 +840,5 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
current_profiles_semaphore_.Signal();
}
-ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles,
- CodeMap* code_map)
- : profiles_(profiles), code_map_(code_map) {}
-
-void ProfileGenerator::SymbolizeTickSample(const TickSample& sample) {
- ProfileStackTrace stack_trace;
- // Conservatively reserve space for stack frames + pc + function + vm-state.
- // There could in fact be more of them because of inlined entries.
- stack_trace.reserve(sample.frames_count + 3);
-
- // The ProfileNode knows nothing about all versions of generated code for
- // the same JS function. The line number information associated with
- // the latest version of generated code is used to find a source line number
- // for a JS function. Then, the detected source line is passed to
- // ProfileNode to increase the tick count for this source line.
- const int no_line_info = v8::CpuProfileNode::kNoLineNumberInfo;
- int src_line = no_line_info;
- bool src_line_not_found = true;
-
- if (sample.pc != nullptr) {
- if (sample.has_external_callback && sample.state == EXTERNAL) {
- // Don't use PC when in external callback code, as it can point
- // inside a callback's code, and we will erroneously report
- // that a callback calls itself.
- stack_trace.push_back({{FindEntry(reinterpret_cast<Address>(
- sample.external_callback_entry)),
- no_line_info}});
- } else {
- Address attributed_pc = reinterpret_cast<Address>(sample.pc);
- Address pc_entry_instruction_start = kNullAddress;
- CodeEntry* pc_entry =
- FindEntry(attributed_pc, &pc_entry_instruction_start);
- // If there is no pc_entry, we're likely in native code. Find out if the
- // top of the stack (the return address) was pointing inside a JS
- // function, meaning that we have encountered a frameless invocation.
- if (!pc_entry && !sample.has_external_callback) {
- attributed_pc = reinterpret_cast<Address>(sample.tos);
- pc_entry = FindEntry(attributed_pc, &pc_entry_instruction_start);
- }
- // If pc is in the function code before it set up stack frame or after the
- // frame was destroyed, SafeStackFrameIterator incorrectly thinks that
- // ebp contains the return address of the current function and skips the
- // caller's frame. Check for this case and just skip such samples.
- if (pc_entry) {
- int pc_offset =
- static_cast<int>(attributed_pc - pc_entry_instruction_start);
- // TODO(petermarshall): pc_offset can still be negative in some cases.
- src_line = pc_entry->GetSourceLine(pc_offset);
- if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
- src_line = pc_entry->line_number();
- }
- src_line_not_found = false;
- stack_trace.push_back({{pc_entry, src_line}});
-
- if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
- pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
- // When current function is either the Function.prototype.apply or the
- // Function.prototype.call builtin the top frame is either frame of
- // the calling JS function or internal frame.
- // In the latter case we know the caller for sure but in the
- // former case we don't so we simply replace the frame with
- // 'unresolved' entry.
- if (!sample.has_external_callback) {
- ProfilerStats::Instance()->AddReason(
- ProfilerStats::Reason::kInCallOrApply);
- stack_trace.push_back(
- {{CodeEntry::unresolved_entry(), no_line_info}});
- }
- }
- }
- }
-
- for (unsigned i = 0; i < sample.frames_count; ++i) {
- Address stack_pos = reinterpret_cast<Address>(sample.stack[i]);
- Address instruction_start = kNullAddress;
- CodeEntry* entry = FindEntry(stack_pos, &instruction_start);
- int line_number = no_line_info;
- if (entry) {
- // Find out if the entry has an inlining stack associated.
- int pc_offset = static_cast<int>(stack_pos - instruction_start);
- // TODO(petermarshall): pc_offset can still be negative in some cases.
- const std::vector<CodeEntryAndLineNumber>* inline_stack =
- entry->GetInlineStack(pc_offset);
- if (inline_stack) {
- int most_inlined_frame_line_number = entry->GetSourceLine(pc_offset);
- for (auto entry : *inline_stack) {
- stack_trace.push_back({entry});
- }
-
- // This is a bit of a messy hack. The line number for the most-inlined
- // frame (the function at the end of the chain of function calls) has
- // the wrong line number in inline_stack. The actual line number in
- // this function is stored in the SourcePositionTable in entry. We fix
- // up the line number for the most-inlined frame here.
- // TODO(petermarshall): Remove this and use a tree with a node per
- // inlining_id.
- DCHECK(!inline_stack->empty());
- size_t index = stack_trace.size() - inline_stack->size();
- stack_trace[index].entry.line_number = most_inlined_frame_line_number;
- }
- // Skip unresolved frames (e.g. internal frame) and get source line of
- // the first JS caller.
- if (src_line_not_found) {
- src_line = entry->GetSourceLine(pc_offset);
- if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
- src_line = entry->line_number();
- }
- src_line_not_found = false;
- }
- line_number = entry->GetSourceLine(pc_offset);
-
- // The inline stack contains the top-level function i.e. the same
- // function as entry. We don't want to add it twice. The one from the
- // inline stack has the correct line number for this particular inlining
- // so we use it instead of pushing entry to stack_trace.
- if (inline_stack) continue;
- }
- stack_trace.push_back({{entry, line_number}});
- }
- }
-
- if (FLAG_prof_browser_mode) {
- bool no_symbolized_entries = true;
- for (auto e : stack_trace) {
- if (e.entry.code_entry != nullptr) {
- no_symbolized_entries = false;
- break;
- }
- }
- // If no frames were symbolized, put the VM state entry in.
- if (no_symbolized_entries) {
- if (sample.pc == nullptr) {
- ProfilerStats::Instance()->AddReason(ProfilerStats::Reason::kNullPC);
- } else {
- ProfilerStats::Instance()->AddReason(
- ProfilerStats::Reason::kNoSymbolizedFrames);
- }
- stack_trace.push_back({{EntryForVMState(sample.state), no_line_info}});
- }
- }
-
- profiles_->AddPathToCurrentProfiles(sample.timestamp, stack_trace, src_line,
- sample.update_stats,
- sample.sampling_interval);
-}
-
-CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
- switch (tag) {
- case GC:
- return CodeEntry::gc_entry();
- case JS:
- case PARSER:
- case COMPILER:
- case BYTECODE_COMPILER:
- case ATOMICS_WAIT:
- // DOM events handlers are reported as OTHER / EXTERNAL entries.
- // To avoid confusing people, let's put all these entries into
- // one bucket.
- case OTHER:
- case EXTERNAL:
- return CodeEntry::program_entry();
- case IDLE:
- return CodeEntry::idle_entry();
- }
- UNREACHABLE();
-}
-
} // namespace internal
} // namespace v8
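
To make the new return contract concrete, here is a minimal self-contained sketch (invented surrounding class; the status values are the ones in the diff) of the bool-to-enum change in StartProfiling:

#include <cstddef>
#include <string>
#include <vector>

enum class CpuProfilingStatus {
  kStarted,
  kAlreadyStarted,
  kErrorTooManyProfilers,
};

class ProfilesSketch {
 public:
  CpuProfilingStatus StartProfiling(const std::string& title) {
    if (titles_.size() >= kMaxSimultaneousProfiles)
      return CpuProfilingStatus::kErrorTooManyProfilers;
    for (const std::string& t : titles_) {
      // A profile with the same title still triggers sample collection,
      // hence the distinct kAlreadyStarted status rather than an error.
      if (t == title) return CpuProfilingStatus::kAlreadyStarted;
    }
    titles_.push_back(title);
    return CpuProfilingStatus::kStarted;
  }

 private:
  static constexpr std::size_t kMaxSimultaneousProfiles = 100;  // illustrative
  std::vector<std::string> titles_;
};

int main() {
  ProfilesSketch profiles;
  return profiles.StartProfiling("p1") == CpuProfilingStatus::kStarted ? 0 : 1;
}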
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 0852ee7802..9183d56d42 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -228,11 +228,7 @@ struct CodeEntryAndLineNumber {
int line_number;
};
-struct ProfileStackFrame {
- CodeEntryAndLineNumber entry;
-};
-
-typedef std::vector<ProfileStackFrame> ProfileStackTrace;
+using ProfileStackTrace = std::vector<CodeEntryAndLineNumber>;
class ProfileTree;
@@ -416,6 +412,8 @@ class V8_EXPORT_PRIVATE CodeMap {
CodeEntry* FindEntry(Address addr, Address* out_instruction_start = nullptr);
void Print();
+ void Clear();
+
private:
struct CodeEntryMapInfo {
unsigned index;
@@ -435,6 +433,7 @@ class V8_EXPORT_PRIVATE CodeMap {
CodeEntry* entry(unsigned index) { return code_entries_[index].entry; }
+  // Any state added here needs to be dealt with in Clear() as well.
std::deque<CodeEntrySlotInfo> code_entries_;
std::map<Address, CodeEntryMapInfo> code_map_;
unsigned free_list_head_ = kNoFreeSlot;
@@ -447,7 +446,8 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
explicit CpuProfilesCollection(Isolate* isolate);
void set_cpu_profiler(CpuProfiler* profiler) { profiler_ = profiler; }
- bool StartProfiling(const char* title, CpuProfilingOptions options = {});
+ CpuProfilingStatus StartProfiling(const char* title,
+ CpuProfilingOptions options = {});
CpuProfile* StopProfiling(const char* title);
std::vector<std::unique_ptr<CpuProfile>>* profiles() {
@@ -483,28 +483,6 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
};
-class V8_EXPORT_PRIVATE ProfileGenerator {
- public:
- explicit ProfileGenerator(CpuProfilesCollection* profiles, CodeMap* code_map);
-
- // Use the CodeMap to turn the raw addresses recorded in the sample into
- // code/function names. The symbolized stack is added to the relevant
- // profiles in the CpuProfilesCollection.
- void SymbolizeTickSample(const TickSample& sample);
-
- CodeMap* code_map() { return code_map_; }
-
- private:
- CodeEntry* FindEntry(Address address,
- Address* out_instruction_start = nullptr);
- CodeEntry* EntryForVMState(StateTag tag);
-
- CpuProfilesCollection* profiles_;
- CodeMap* const code_map_;
-
- DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
-};
-
} // namespace internal
} // namespace v8
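
A quick sketch of the destructor-delegates-to-Clear() pattern CodeMap adopts above, which allows resetting the map for reuse without destroying it (simplified, invented members):

#include <cstdint>
#include <map>
#include <string>

class CodeMapSketch {
 public:
  ~CodeMapSketch() { Clear(); }

  void Clear() {
    // As the comment in the diff warns: any state added to the class must
    // be reset here too, since Clear() doubles as the destructor body.
    entries_.clear();
    free_list_head_ = -1;
  }

 private:
  std::map<uint64_t, std::string> entries_;
  int free_list_head_ = -1;
};

int main() {
  CodeMapSketch map;
  map.Clear();  // safe to call repeatedly; the destructor calls it once more
}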
diff --git a/deps/v8/src/profiler/symbolizer.cc b/deps/v8/src/profiler/symbolizer.cc
new file mode 100644
index 0000000000..ca6eb269fa
--- /dev/null
+++ b/deps/v8/src/profiler/symbolizer.cc
@@ -0,0 +1,190 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/profiler/symbolizer.h"
+
+#include "src/execution/vm-state.h"
+#include "src/profiler/profile-generator.h"
+#include "src/profiler/profiler-stats.h"
+#include "src/profiler/tick-sample.h"
+
+namespace v8 {
+namespace internal {
+
+Symbolizer::Symbolizer(CodeMap* code_map) : code_map_(code_map) {}
+
+CodeEntry* Symbolizer::FindEntry(Address address,
+ Address* out_instruction_start) {
+ CodeEntry* entry = code_map_->FindEntry(address, out_instruction_start);
+ if (entry) entry->mark_used();
+ return entry;
+}
+
+namespace {
+
+CodeEntry* EntryForVMState(StateTag tag) {
+ switch (tag) {
+ case GC:
+ return CodeEntry::gc_entry();
+ case JS:
+ case PARSER:
+ case COMPILER:
+ case BYTECODE_COMPILER:
+ case ATOMICS_WAIT:
+    // DOM event handlers are reported as OTHER / EXTERNAL entries.
+ // To avoid confusing people, let's put all these entries into
+ // one bucket.
+ case OTHER:
+ case EXTERNAL:
+ return CodeEntry::program_entry();
+ case IDLE:
+ return CodeEntry::idle_entry();
+ }
+}
+
+} // namespace
+
+Symbolizer::SymbolizedSample Symbolizer::SymbolizeTickSample(
+ const TickSample& sample) {
+ ProfileStackTrace stack_trace;
+ // Conservatively reserve space for stack frames + pc + function + vm-state.
+ // There could in fact be more of them because of inlined entries.
+ stack_trace.reserve(sample.frames_count + 3);
+
+ // The ProfileNode knows nothing about all versions of generated code for
+ // the same JS function. The line number information associated with
+ // the latest version of generated code is used to find a source line number
+ // for a JS function. Then, the detected source line is passed to
+ // ProfileNode to increase the tick count for this source line.
+ const int no_line_info = v8::CpuProfileNode::kNoLineNumberInfo;
+ int src_line = no_line_info;
+ bool src_line_not_found = true;
+
+ if (sample.pc != nullptr) {
+ if (sample.has_external_callback && sample.state == EXTERNAL) {
+ // Don't use PC when in external callback code, as it can point
+ // inside a callback's code, and we will erroneously report
+ // that a callback calls itself.
+ stack_trace.push_back(
+ {FindEntry(reinterpret_cast<Address>(sample.external_callback_entry)),
+ no_line_info});
+ } else {
+ Address attributed_pc = reinterpret_cast<Address>(sample.pc);
+ Address pc_entry_instruction_start = kNullAddress;
+ CodeEntry* pc_entry =
+ FindEntry(attributed_pc, &pc_entry_instruction_start);
+ // If there is no pc_entry, we're likely in native code. Find out if the
+ // top of the stack (the return address) was pointing inside a JS
+ // function, meaning that we have encountered a frameless invocation.
+ if (!pc_entry && !sample.has_external_callback) {
+ attributed_pc = reinterpret_cast<Address>(sample.tos);
+ pc_entry = FindEntry(attributed_pc, &pc_entry_instruction_start);
+ }
+      // If pc is in the function code before it has set up the stack frame
+      // or after the frame was destroyed, SafeStackFrameIterator incorrectly
+      // thinks that ebp contains the return address of the current function
+      // and skips the caller's frame. Check for this case and just skip such
+      // samples.
+ if (pc_entry) {
+ int pc_offset =
+ static_cast<int>(attributed_pc - pc_entry_instruction_start);
+ // TODO(petermarshall): pc_offset can still be negative in some cases.
+ src_line = pc_entry->GetSourceLine(pc_offset);
+ if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
+ src_line = pc_entry->line_number();
+ }
+ src_line_not_found = false;
+ stack_trace.push_back({pc_entry, src_line});
+
+ if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
+ pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
+          // When the current function is either the Function.prototype.apply
+          // or the Function.prototype.call builtin, the top frame is either
+          // the frame of the calling JS function or an internal frame.
+          // In the latter case we know the caller for sure, but in the former
+          // case we don't, so we simply replace the frame with an
+          // 'unresolved' entry.
+ if (!sample.has_external_callback) {
+ ProfilerStats::Instance()->AddReason(
+ ProfilerStats::Reason::kInCallOrApply);
+ stack_trace.push_back(
+ {CodeEntry::unresolved_entry(), no_line_info});
+ }
+ }
+ }
+ }
+
+ for (unsigned i = 0; i < sample.frames_count; ++i) {
+ Address stack_pos = reinterpret_cast<Address>(sample.stack[i]);
+ Address instruction_start = kNullAddress;
+ CodeEntry* entry = FindEntry(stack_pos, &instruction_start);
+ int line_number = no_line_info;
+ if (entry) {
+ // Find out if the entry has an inlining stack associated.
+ int pc_offset = static_cast<int>(stack_pos - instruction_start);
+ // TODO(petermarshall): pc_offset can still be negative in some cases.
+ const std::vector<CodeEntryAndLineNumber>* inline_stack =
+ entry->GetInlineStack(pc_offset);
+ if (inline_stack) {
+ int most_inlined_frame_line_number = entry->GetSourceLine(pc_offset);
+ for (auto entry : *inline_stack) {
+ stack_trace.push_back(entry);
+ }
+
+          // This is a bit of a messy hack. The entry for the most-inlined
+          // frame (the function at the end of the chain of function calls)
+          // carries the wrong line number in inline_stack. The actual line
+          // number for this function is stored in the SourcePositionTable in
+          // entry. We fix up the line number for the most-inlined frame here.
+ // TODO(petermarshall): Remove this and use a tree with a node per
+ // inlining_id.
+ DCHECK(!inline_stack->empty());
+ size_t index = stack_trace.size() - inline_stack->size();
+ stack_trace[index].line_number = most_inlined_frame_line_number;
+ }
+        // Skip unresolved frames (e.g. internal frames) and get the source
+        // line of the first JS caller.
+ if (src_line_not_found) {
+ src_line = entry->GetSourceLine(pc_offset);
+ if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
+ src_line = entry->line_number();
+ }
+ src_line_not_found = false;
+ }
+ line_number = entry->GetSourceLine(pc_offset);
+
+        // The inline stack contains the top-level function, i.e. the same
+        // function as entry. We don't want to add it twice. The one from the
+        // inline stack has the correct line number for this particular
+        // inlining, so we use it instead of pushing entry to stack_trace.
+ if (inline_stack) continue;
+ }
+ stack_trace.push_back({entry, line_number});
+ }
+ }
+
+ if (FLAG_prof_browser_mode) {
+ bool no_symbolized_entries = true;
+ for (auto e : stack_trace) {
+ if (e.code_entry != nullptr) {
+ no_symbolized_entries = false;
+ break;
+ }
+ }
+ // If no frames were symbolized, put the VM state entry in.
+ if (no_symbolized_entries) {
+ if (sample.pc == nullptr) {
+ ProfilerStats::Instance()->AddReason(ProfilerStats::Reason::kNullPC);
+ } else {
+ ProfilerStats::Instance()->AddReason(
+ ProfilerStats::Reason::kNoSymbolizedFrames);
+ }
+ stack_trace.push_back({EntryForVMState(sample.state), no_line_info});
+ }
+ }
+
+ return SymbolizedSample{stack_trace, src_line};
+}
+
+} // namespace internal
+} // namespace v8
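
For readers unfamiliar with the code map, a self-contained analogue (not V8's types) of what SymbolizeTickSample does at its core: look raw frame addresses up in a map sorted by code-region start address, and return the symbolized stack plus a source line, leaving storage to the caller.

#include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>

struct SymbolizedSampleSketch {
  std::vector<std::string> stack_trace;
  int src_line;
};

class SymbolizerSketch {
 public:
  void AddCode(uint64_t start, uint64_t size, std::string name) {
    code_map_[start] = {size, std::move(name)};
  }

  SymbolizedSampleSketch Symbolize(const std::vector<uint64_t>& frames) {
    SymbolizedSampleSketch result{{}, -1};
    for (uint64_t pc : frames) {
      // Find the code region with the greatest start address <= pc.
      auto it = code_map_.upper_bound(pc);
      if (it == code_map_.begin()) continue;  // pc below all known code
      --it;
      if (pc - it->first < it->second.first) {
        result.stack_trace.push_back(it->second.second);
      }
    }
    return result;  // the caller decides which profiles receive it
  }

 private:
  // start address -> (size, entry name)
  std::map<uint64_t, std::pair<uint64_t, std::string>> code_map_;
};

int main() {
  SymbolizerSketch symbolizer;
  symbolizer.AddCode(0x1000, 0x100, "foo");
  SymbolizedSampleSketch s = symbolizer.Symbolize({0x1010, 0x2000});
  return s.stack_trace.size() == 1 ? 0 : 1;
}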
diff --git a/deps/v8/src/profiler/symbolizer.h b/deps/v8/src/profiler/symbolizer.h
new file mode 100644
index 0000000000..f18339e7f1
--- /dev/null
+++ b/deps/v8/src/profiler/symbolizer.h
@@ -0,0 +1,44 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROFILER_SYMBOLIZER_H_
+#define V8_PROFILER_SYMBOLIZER_H_
+
+#include "src/base/macros.h"
+#include "src/profiler/profile-generator.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeEntry;
+class CodeMap;
+
+class V8_EXPORT_PRIVATE Symbolizer {
+ public:
+ explicit Symbolizer(CodeMap* code_map);
+
+ struct SymbolizedSample {
+ ProfileStackTrace stack_trace;
+ int src_line;
+ };
+
+ // Use the CodeMap to turn the raw addresses recorded in the sample into
+ // code/function names.
+ SymbolizedSample SymbolizeTickSample(const TickSample& sample);
+
+ CodeMap* code_map() { return code_map_; }
+
+ private:
+ CodeEntry* FindEntry(Address address,
+ Address* out_instruction_start = nullptr);
+
+ CodeMap* const code_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(Symbolizer);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PROFILER_SYMBOLIZER_H_
diff --git a/deps/v8/src/regexp/DIR_METADATA b/deps/v8/src/regexp/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/regexp/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
diff --git a/deps/v8/src/regexp/OWNERS b/deps/v8/src/regexp/OWNERS
index 250c8c6b88..3322bb9505 100644
--- a/deps/v8/src/regexp/OWNERS
+++ b/deps/v8/src/regexp/OWNERS
@@ -1,4 +1,2 @@
jgruber@chromium.org
yangguo@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 78b586e265..48e8fae663 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -127,6 +127,7 @@ RegExpMacroAssemblerARM::~RegExpMacroAssemblerARM() {
exit_label_.Unuse();
check_preempt_label_.Unuse();
stack_overflow_label_.Unuse();
+ fallback_label_.Unuse();
}
@@ -164,8 +165,13 @@ void RegExpMacroAssemblerARM::Backtrack() {
__ cmp(r0, Operand(backtrack_limit()));
__ b(ne, &next);
- // Exceeded limits are treated as a failed match.
- Fail();
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ jmp(&fallback_label_);
+ } else {
+    // Can't fall back, so we treat it as a failed match.
+ Fail();
+ }
__ bind(&next);
}
@@ -901,6 +907,12 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ jmp(&return_r0);
}
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ mov(r0, Operand(FALLBACK_TO_EXPERIMENTAL));
+ __ jmp(&return_r0);
+ }
+
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
Handle<Code> code =
@@ -1072,7 +1084,6 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState() {
__ mov(ip, Operand(stack_guard_check));
EmbeddedData d = EmbeddedData::FromBlob();
- CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
__ mov(lr, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
__ Call(lr);
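
The control flow the assembler changes above emit can be summarized by this self-contained sketch (the FALLBACK_TO_EXPERIMENTAL value appears in the diff; the enum and function around it are illustrative):

enum MatchResult { FAILURE, SUCCESS, FALLBACK_TO_EXPERIMENTAL };

MatchResult OnBacktrack(int backtrack_count, int backtrack_limit,
                        bool can_fallback) {
  if (backtrack_count == backtrack_limit) {
    // Backtrack limit exceeded: jump to fallback_label_ if the experimental
    // engine can take over, otherwise treat it as a failed match.
    return can_fallback ? FALLBACK_TO_EXPERIMENTAL : FAILURE;
  }
  return SUCCESS;  // below the limit: keep backtracking normally
}

int main() {
  return OnBacktrack(100, 100, true) == FALLBACK_TO_EXPERIMENTAL ? 0 : 1;
}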
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index 910e5c4607..92cac644e5 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -203,6 +203,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
Label exit_label_;
Label check_preempt_label_;
Label stack_overflow_label_;
+ Label fallback_label_;
};
} // namespace internal
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index ac33f8631f..32fed3703b 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -142,6 +142,7 @@ RegExpMacroAssemblerARM64::~RegExpMacroAssemblerARM64() {
exit_label_.Unuse();
check_preempt_label_.Unuse();
stack_overflow_label_.Unuse();
+ fallback_label_.Unuse();
}
int RegExpMacroAssemblerARM64::stack_limit_slack() {
@@ -201,8 +202,13 @@ void RegExpMacroAssemblerARM64::Backtrack() {
__ Cmp(scratch, Operand(backtrack_limit()));
__ B(ne, &next);
- // Exceeded limits are treated as a failed match.
- Fail();
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ B(&fallback_label_);
+ } else {
+    // Can't fall back, so we treat it as a failed match.
+ Fail();
+ }
__ bind(&next);
}
@@ -1094,6 +1100,12 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ B(&return_w0);
}
+ if (fallback_label_.is_linked()) {
+ __ Bind(&fallback_label_);
+ __ Mov(w0, FALLBACK_TO_EXPERIMENTAL);
+ __ B(&return_w0);
+ }
+
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
Handle<Code> code =
@@ -1399,7 +1411,6 @@ void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
Register scratch = temps.AcquireX();
EmbeddedData d = EmbeddedData::FromBlob();
- CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
__ Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index aeb49aa9ff..6d60271a43 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -279,6 +279,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64
Label exit_label_;
Label check_preempt_label_;
Label stack_overflow_label_;
+ Label fallback_label_;
};
} // namespace internal
diff --git a/deps/v8/src/regexp/experimental/experimental-bytecode.h b/deps/v8/src/regexp/experimental/experimental-bytecode.h
index 3cb65828c5..4e9bc9396b 100644
--- a/deps/v8/src/regexp/experimental/experimental-bytecode.h
+++ b/deps/v8/src/regexp/experimental/experimental-bytecode.h
@@ -106,21 +106,21 @@ struct RegExpInstruction {
uc16 max; // Inclusive.
};
- static RegExpInstruction ConsumeRange(Uc16Range consume_range) {
+ static RegExpInstruction ConsumeRange(uc16 min, uc16 max) {
RegExpInstruction result;
result.opcode = CONSUME_RANGE;
- result.payload.consume_range = consume_range;
+ result.payload.consume_range = Uc16Range{min, max};
return result;
}
static RegExpInstruction ConsumeAnyChar() {
- return ConsumeRange(Uc16Range{0x0000, 0xFFFF});
+ return ConsumeRange(0x0000, 0xFFFF);
}
static RegExpInstruction Fail() {
// This is encoded as the empty CONSUME_RANGE of characters 0xFFFF <= c <=
// 0x0000.
- return ConsumeRange(Uc16Range{0xFFFF, 0x0000});
+ return ConsumeRange(0xFFFF, 0x0000);
}
static RegExpInstruction Fork(int32_t alt_index) {
diff --git a/deps/v8/src/regexp/experimental/experimental-compiler.cc b/deps/v8/src/regexp/experimental/experimental-compiler.cc
index 615f7566f4..4d53c2c0c5 100644
--- a/deps/v8/src/regexp/experimental/experimental-compiler.cc
+++ b/deps/v8/src/regexp/experimental/experimental-compiler.cc
@@ -35,7 +35,7 @@ class CanBeHandledVisitor final : private RegExpVisitor {
// future.
static constexpr JSRegExp::Flags kAllowedFlags =
JSRegExp::kGlobal | JSRegExp::kSticky | JSRegExp::kMultiline |
- JSRegExp::kDotAll;
+ JSRegExp::kDotAll | JSRegExp::kLinear;
// We support Unicode iff kUnicode is among the supported flags.
STATIC_ASSERT(ExperimentalRegExp::kSupportsUnicode ==
((kAllowedFlags & JSRegExp::kUnicode) != 0));
@@ -177,94 +177,120 @@ class CanBeHandledVisitor final : private RegExpVisitor {
bool ExperimentalRegExpCompiler::CanBeHandled(RegExpTree* tree,
JSRegExp::Flags flags,
int capture_count) {
- DCHECK(FLAG_enable_experimental_regexp_engine);
return CanBeHandledVisitor::Check(tree, flags, capture_count);
}
namespace {
-// A label in bytecode with known address.
-class Label {
+// A label in bytecode which starts with no known address. The address *must*
+// be bound with `Bind` before the label goes out of scope.
+// Implemented as a linked list through the `payload.pc` of FORK and JMP
+// instructions.
+struct Label {
public:
- explicit Label(int index) : index_(index) { DCHECK_GE(index_, 0); }
-
- int index() { return index_; }
-
- // Friend functions because `label.AddForkTo(code, zone)` reads like we're
- // adding code to where `label` is defined, but we're adding a fork with
- // target `label` at the end of `code`.
- friend void AddForkTo(Label target, ZoneList<RegExpInstruction>& code,
- Zone* zone) {
- code.Add(RegExpInstruction::Fork(target.index_), zone);
+ Label() = default;
+ ~Label() {
+ DCHECK_EQ(state_, BOUND);
+ DCHECK_GE(bound_index_, 0);
}
- friend void AddJmpTo(Label target, ZoneList<RegExpInstruction>& code,
- Zone* zone) {
- code.Add(RegExpInstruction::Jmp(target.index_), zone);
- }
+ // Don't copy, don't move. Moving could be implemented, but it's not
+ // needed anywhere.
+ Label(const Label&) = delete;
+ Label& operator=(const Label&) = delete;
private:
- int index_;
+ friend class BytecodeAssembler;
+
+ // UNBOUND implies unbound_patch_list_begin_.
+ // BOUND implies bound_index_.
+ enum { UNBOUND, BOUND } state_ = UNBOUND;
+ union {
+ int unbound_patch_list_begin_ = -1;
+ int bound_index_;
+ };
};
-// A label in bytecode whose address is not known yet. The address *must* be
-// `Bind` before the deferred label object goes out of scope, and the deferred
-// label object *must not* be used after it was defined. (Use the `Label`
-// object returned by `Bind` instead.)
-struct DeferredLabel {
- // Implemented as a linked list through the `payload.pc` of FORK and JMP
- // instructions.
+class BytecodeAssembler {
public:
- DeferredLabel() = default;
- ~DeferredLabel() { DCHECK_EQ(patch_list_begin_, kLabelWasDefined); }
+ // TODO(mbid,v8:10765): Use some upper bound for code_ capacity computed from
+ // the `tree` size we're going to compile?
+ explicit BytecodeAssembler(Zone* zone) : zone_(zone), code_(0, zone) {}
+
+ ZoneList<RegExpInstruction> IntoCode() && { return std::move(code_); }
+
+ void Accept() { code_.Add(RegExpInstruction::Accept(), zone_); }
+
+ void Assertion(RegExpAssertion::AssertionType t) {
+ code_.Add(RegExpInstruction::Assertion(t), zone_);
+ }
+
+ void ClearRegister(int32_t register_index) {
+ code_.Add(RegExpInstruction::ClearRegister(register_index), zone_);
+ }
+
+ void ConsumeRange(uc16 from, uc16 to) {
+ code_.Add(RegExpInstruction::ConsumeRange(from, to), zone_);
+ }
+
+ void ConsumeAnyChar() {
+ code_.Add(RegExpInstruction::ConsumeAnyChar(), zone_);
+ }
+
+ void Fork(Label& target) {
+ LabelledInstrImpl(RegExpInstruction::Opcode::FORK, target);
+ }
- friend void AddForkTo(DeferredLabel& target,
- ZoneList<RegExpInstruction>& code, Zone* zone) {
- DCHECK_NE(target.patch_list_begin_, DeferredLabel::kLabelWasDefined);
- int new_list_begin = code.length();
- DCHECK_GE(new_list_begin, 0);
- code.Add(RegExpInstruction::Fork(target.patch_list_begin_), zone);
- target.patch_list_begin_ = new_list_begin;
+ void Jmp(Label& target) {
+ LabelledInstrImpl(RegExpInstruction::Opcode::JMP, target);
}
- friend void AddJmpTo(DeferredLabel& target, ZoneList<RegExpInstruction>& code,
- Zone* zone) {
- DCHECK_NE(target.patch_list_begin_, DeferredLabel::kLabelWasDefined);
- int new_list_begin = code.length();
- DCHECK_GE(new_list_begin, 0);
- code.Add(RegExpInstruction::Jmp(target.patch_list_begin_), zone);
- target.patch_list_begin_ = new_list_begin;
+ void SetRegisterToCp(int32_t register_index) {
+ code_.Add(RegExpInstruction::SetRegisterToCp(register_index), zone_);
}
- // Define the deferred label as referring to the next instruction that will
- // be pushed to `code`. Consumes the DeferredLabel object and returns a
- // Label object.
- Label Bind(ZoneList<RegExpInstruction>& code) && {
- DCHECK_NE(patch_list_begin_, kLabelWasDefined);
+ void Bind(Label& target) {
+ DCHECK_EQ(target.state_, Label::UNBOUND);
- int index = code.length();
+ int index = code_.length();
- while (patch_list_begin_ != kEmptyList) {
- RegExpInstruction& inst = code[patch_list_begin_];
+ while (target.unbound_patch_list_begin_ != -1) {
+ RegExpInstruction& inst = code_[target.unbound_patch_list_begin_];
DCHECK(inst.opcode == RegExpInstruction::FORK ||
inst.opcode == RegExpInstruction::JMP);
- patch_list_begin_ = inst.payload.pc;
+ target.unbound_patch_list_begin_ = inst.payload.pc;
inst.payload.pc = index;
}
- patch_list_begin_ = kLabelWasDefined;
- return Label(index);
+ target.state_ = Label::BOUND;
+ target.bound_index_ = index;
}
+ void Fail() { code_.Add(RegExpInstruction::Fail(), zone_); }
+
private:
- static constexpr int kEmptyList = -1;
- static constexpr int kLabelWasDefined = -2;
- int patch_list_begin_ = kEmptyList;
+ void LabelledInstrImpl(RegExpInstruction::Opcode op, Label& target) {
+ RegExpInstruction result;
+ result.opcode = op;
- // Don't copy, don't move. Moving could be implemented, but it's not
- // needed anywhere.
- DISALLOW_COPY_AND_ASSIGN(DeferredLabel);
+ if (target.state_ == Label::BOUND) {
+ result.payload.pc = target.bound_index_;
+ } else {
+ DCHECK_EQ(target.state_, Label::UNBOUND);
+ int new_list_begin = code_.length();
+ DCHECK_GE(new_list_begin, 0);
+
+ result.payload.pc = target.unbound_patch_list_begin_;
+
+ target.unbound_patch_list_begin_ = new_list_begin;
+ }
+
+ code_.Add(result, zone_);
+ }
+
+ Zone* zone_;
+ ZoneList<RegExpInstruction> code_;
};
class CompileVisitor : private RegExpVisitor {
@@ -278,27 +304,24 @@ class CompileVisitor : private RegExpVisitor {
// The match is not anchored, i.e. may start at any input position, so we
// emit a preamble corresponding to /.*?/. This skips an arbitrary
// prefix in the input non-greedily.
- compiler.CompileNonGreedyStar([&]() {
- compiler.code_.Add(RegExpInstruction::ConsumeAnyChar(), zone);
- });
+ compiler.CompileNonGreedyStar(
+ [&]() { compiler.assembler_.ConsumeAnyChar(); });
}
- compiler.code_.Add(RegExpInstruction::SetRegisterToCp(0), zone);
+ compiler.assembler_.SetRegisterToCp(0);
tree->Accept(&compiler, nullptr);
- compiler.code_.Add(RegExpInstruction::SetRegisterToCp(1), zone);
- compiler.code_.Add(RegExpInstruction::Accept(), zone);
+ compiler.assembler_.SetRegisterToCp(1);
+ compiler.assembler_.Accept();
- return std::move(compiler.code_);
+ return std::move(compiler.assembler_).IntoCode();
}
private:
- // TODO(mbid,v8:10765): Use some upper bound for code_ capacity computed from
- // the `tree` size we're going to compile?
- explicit CompileVisitor(Zone* zone) : zone_(zone), code_(0, zone) {}
+ explicit CompileVisitor(Zone* zone) : zone_(zone), assembler_(zone) {}
// Generate a disjunction of code fragments compiled by a function `alt_gen`.
// `alt_gen` is called repeatedly with argument `int i = 0, 1, ..., alt_num -
- // 1` and should push code corresponding to the ith alternative onto `code_`.
+ // 1` and should build code corresponding to the ith alternative.
template <class F>
void CompileDisjunction(int alt_num, F&& gen_alt) {
// An alternative a1 | ... | an is compiled into
@@ -325,23 +348,23 @@ class CompileVisitor : private RegExpVisitor {
if (alt_num == 0) {
// The empty disjunction. This can never match.
- code_.Add(RegExpInstruction::Fail(), zone_);
+ assembler_.Fail();
return;
}
- DeferredLabel end;
+ Label end;
for (int i = 0; i != alt_num - 1; ++i) {
- DeferredLabel tail;
- AddForkTo(tail, code_, zone_);
+ Label tail;
+ assembler_.Fork(tail);
gen_alt(i);
- AddJmpTo(end, code_, zone_);
- std::move(tail).Bind(code_);
+ assembler_.Jmp(end);
+ assembler_.Bind(tail);
}
gen_alt(alt_num - 1);
- std::move(end).Bind(code_);
+ assembler_.Bind(end);
}
void* VisitDisjunction(RegExpDisjunction* node, void*) override {
@@ -359,7 +382,7 @@ class CompileVisitor : private RegExpVisitor {
}
void* VisitAssertion(RegExpAssertion* node, void*) override {
- code_.Add(RegExpInstruction::Assertion(node->assertion_type()), zone_);
+ assembler_.Assertion(node->assertion_type());
return nullptr;
}
@@ -390,17 +413,14 @@ class CompileVisitor : private RegExpVisitor {
DCHECK_IMPLIES(to > kMaxSupportedCodepoint, to == String::kMaxCodePoint);
uc16 to_uc16 = static_cast<uc16>(std::min(to, kMaxSupportedCodepoint));
- RegExpInstruction::Uc16Range range{from_uc16, to_uc16};
- code_.Add(RegExpInstruction::ConsumeRange(range), zone_);
+ assembler_.ConsumeRange(from_uc16, to_uc16);
});
return nullptr;
}
void* VisitAtom(RegExpAtom* node, void*) override {
for (uc16 c : node->data()) {
- code_.Add(
- RegExpInstruction::ConsumeRange(RegExpInstruction::Uc16Range{c, c}),
- zone_);
+ assembler_.ConsumeRange(c, c);
}
return nullptr;
}
@@ -413,7 +433,7 @@ class CompileVisitor : private RegExpVisitor {
// It suffices to clear the register containing the `begin` of a capture
// because this indicates that the capture is undefined, regardless of
// the value in the `end` register.
- code_.Add(RegExpInstruction::ClearRegister(i), zone_);
+ assembler_.ClearRegister(i);
}
}
@@ -431,14 +451,15 @@ class CompileVisitor : private RegExpVisitor {
//
// This is greedy because a forked thread has lower priority than the
// thread that spawned it.
- Label begin(code_.length());
- DeferredLabel end;
+ Label begin;
+ Label end;
- AddForkTo(end, code_, zone_);
+ assembler_.Bind(begin);
+ assembler_.Fork(end);
emit_body();
- AddJmpTo(begin, code_, zone_);
+ assembler_.Jmp(begin);
- std::move(end).Bind(code_);
+ assembler_.Bind(end);
}
// Emit bytecode corresponding to /<emit_body>*?/.
@@ -454,18 +475,17 @@ class CompileVisitor : private RegExpVisitor {
// end:
// ...
- Label body(code_.length() + 2);
- DeferredLabel end;
-
- AddForkTo(body, code_, zone_);
- AddJmpTo(end, code_, zone_);
+ Label body;
+ Label end;
- DCHECK_EQ(body.index(), code_.length());
+ assembler_.Fork(body);
+ assembler_.Jmp(end);
+ assembler_.Bind(body);
emit_body();
- AddForkTo(body, code_, zone_);
+ assembler_.Fork(body);
- std::move(end).Bind(code_);
+ assembler_.Bind(end);
}
// Emit bytecode corresponding to /<emit_body>{0, max_repetition_num}/.
@@ -484,12 +504,12 @@ class CompileVisitor : private RegExpVisitor {
// end:
// ...
- DeferredLabel end;
+ Label end;
for (int i = 0; i != max_repetition_num; ++i) {
- AddForkTo(end, code_, zone_);
+ assembler_.Fork(end);
emit_body();
}
- std::move(end).Bind(code_);
+ assembler_.Bind(end);
}
// Emit bytecode corresponding to /<emit_body>{0, max_repetition_num}?/.
@@ -512,17 +532,16 @@ class CompileVisitor : private RegExpVisitor {
// end:
// ...
- DeferredLabel end;
+ Label end;
for (int i = 0; i != max_repetition_num; ++i) {
- Label body(code_.length() + 2);
- AddForkTo(body, code_, zone_);
- AddJmpTo(end, code_, zone_);
-
- DCHECK_EQ(body.index(), code_.length());
+ Label body;
+ assembler_.Fork(body);
+ assembler_.Jmp(end);
+ assembler_.Bind(body);
emit_body();
}
- std::move(end).Bind(code_);
+ assembler_.Bind(end);
}
void* VisitQuantifier(RegExpQuantifier* node, void*) override {
@@ -571,9 +590,9 @@ class CompileVisitor : private RegExpVisitor {
int index = node->index();
int start_register = RegExpCapture::StartRegister(index);
int end_register = RegExpCapture::EndRegister(index);
- code_.Add(RegExpInstruction::SetRegisterToCp(start_register), zone_);
+ assembler_.SetRegisterToCp(start_register);
node->body()->Accept(this, nullptr);
- code_.Add(RegExpInstruction::SetRegisterToCp(end_register), zone_);
+ assembler_.SetRegisterToCp(end_register);
return nullptr;
}
@@ -602,7 +621,7 @@ class CompileVisitor : private RegExpVisitor {
private:
Zone* zone_;
- ZoneList<RegExpInstruction> code_;
+ BytecodeAssembler assembler_;
};
} // namespace
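
To make the forward-label scheme above concrete, here is a self-contained sketch of the backpatching the new BytecodeAssembler performs: unresolved jumps are threaded into a linked list through their own operand slots, then patched when the label is bound (names simplified from the diff).

#include <cassert>
#include <vector>

struct Instr {
  int target = -1;  // stands in for payload.pc of FORK/JMP
};

class AssemblerSketch {
 public:
  struct Label {
    enum State { UNBOUND, BOUND } state = UNBOUND;
    int index = -1;  // UNBOUND: head of the patch list; BOUND: target pc
  };

  void Jmp(Label& label) {
    Instr inst;
    if (label.state == Label::BOUND) {
      inst.target = label.index;  // target already known
    } else {
      // Thread this use into the label's patch list via its own operand.
      inst.target = label.index;
      label.index = static_cast<int>(code_.size());
    }
    code_.push_back(inst);
  }

  void Bind(Label& label) {
    assert(label.state == Label::UNBOUND);
    int target = static_cast<int>(code_.size());
    while (label.index != -1) {
      // Walk the linked list of unresolved uses, patching each one.
      int next = code_[label.index].target;
      code_[label.index].target = target;
      label.index = next;
    }
    label.state = Label::BOUND;
    label.index = target;
  }

 private:
  std::vector<Instr> code_;
};

int main() {
  AssemblerSketch a;
  AssemblerSketch::Label end;
  a.Jmp(end);   // forward reference, patched later
  a.Jmp(end);   // second forward reference, same label
  a.Bind(end);  // both jumps now point at pc 2
}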
diff --git a/deps/v8/src/regexp/experimental/experimental-interpreter.cc b/deps/v8/src/regexp/experimental/experimental-interpreter.cc
index 8db93ca746..fffca782fe 100644
--- a/deps/v8/src/regexp/experimental/experimental-interpreter.cc
+++ b/deps/v8/src/regexp/experimental/experimental-interpreter.cc
@@ -5,6 +5,8 @@
#include "src/regexp/experimental/experimental-interpreter.h"
#include "src/base/optional.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/string-inl.h"
#include "src/regexp/experimental/experimental.h"
#include "src/strings/char-predicates-inl.h"
#include "src/zone/zone-allocator.h"
@@ -50,6 +52,37 @@ bool SatisfiesAssertion(RegExpAssertion::AssertionType type,
}
}
+Vector<RegExpInstruction> ToInstructionVector(
+ ByteArray raw_bytes, const DisallowHeapAllocation& no_gc) {
+ RegExpInstruction* inst_begin =
+ reinterpret_cast<RegExpInstruction*>(raw_bytes.GetDataStartAddress());
+ int inst_num = raw_bytes.length() / sizeof(RegExpInstruction);
+ DCHECK_EQ(sizeof(RegExpInstruction) * inst_num, raw_bytes.length());
+ return Vector<RegExpInstruction>(inst_begin, inst_num);
+}
+
+template <class Character>
+Vector<const Character> ToCharacterVector(String str,
+ const DisallowHeapAllocation& no_gc);
+
+template <>
+Vector<const uint8_t> ToCharacterVector<uint8_t>(
+ String str, const DisallowHeapAllocation& no_gc) {
+ DCHECK(str.IsFlat());
+ String::FlatContent content = str.GetFlatContent(no_gc);
+ DCHECK(content.IsOneByte());
+ return content.ToOneByteVector();
+}
+
+template <>
+Vector<const uc16> ToCharacterVector<uc16>(
+ String str, const DisallowHeapAllocation& no_gc) {
+ DCHECK(str.IsFlat());
+ String::FlatContent content = str.GetFlatContent(no_gc);
+ DCHECK(content.IsTwoByte());
+ return content.ToUC16Vector();
+}
+
template <class Character>
class NfaInterpreter {
// Executes a bytecode program in breadth-first mode, without backtracking.
@@ -100,12 +133,16 @@ class NfaInterpreter {
// with high priority are left, we return the match that was produced by the
// ACCEPTing thread with highest priority.
public:
- NfaInterpreter(Vector<const RegExpInstruction> bytecode,
- int register_count_per_match, Vector<const Character> input,
+ NfaInterpreter(Isolate* isolate, RegExp::CallOrigin call_origin,
+ ByteArray bytecode, int register_count_per_match, String input,
int32_t input_index, Zone* zone)
- : bytecode_(bytecode),
+ : isolate_(isolate),
+ call_origin_(call_origin),
+ bytecode_object_(bytecode),
+ bytecode_(ToInstructionVector(bytecode, no_gc_)),
register_count_per_match_(register_count_per_match),
- input_(input),
+ input_object_(input),
+ input_(ToCharacterVector<Character>(input, no_gc_)),
input_index_(input_index),
pc_last_input_index_(zone->NewArray<int>(bytecode.length()),
bytecode.length()),
@@ -131,12 +168,15 @@ class NfaInterpreter {
int match_num = 0;
while (match_num != max_match_num) {
- FindNextMatch();
+ int err_code = FindNextMatch();
+ if (err_code != RegExp::kInternalRegExpSuccess) return err_code;
+
if (!FoundMatch()) break;
- Vector<int> registers = *best_match_registers_;
+ Vector<int> registers = *best_match_registers_;
output_registers =
std::copy(registers.begin(), registers.end(), output_registers);
+
++match_num;
const int match_begin = registers[0];
@@ -177,6 +217,69 @@ class NfaInterpreter {
int* register_array_begin;
};
+ // Handles pending interrupts if there are any. Returns
+ // RegExp::kInternalRegExpSuccess if execution can continue, and an error
+ // code otherwise.
+ int HandleInterrupts() {
+ StackLimitCheck check(isolate_);
+ if (call_origin_ == RegExp::CallOrigin::kFromJs) {
+ // Direct calls from JavaScript can be interrupted in two ways:
+ // 1. A real stack overflow, in which case we let the caller throw the
+ // exception.
+ // 2. The stack guard was used to interrupt execution for another purpose,
+ // forcing the call through the runtime system.
+ if (check.JsHasOverflowed()) {
+ return RegExp::kInternalRegExpException;
+ } else if (check.InterruptRequested()) {
+ return RegExp::kInternalRegExpRetry;
+ }
+ } else {
+ DCHECK(call_origin_ == RegExp::CallOrigin::kFromRuntime);
+ HandleScope handles(isolate_);
+ Handle<ByteArray> bytecode_handle(bytecode_object_, isolate_);
+ Handle<String> input_handle(input_object_, isolate_);
+
+ if (check.JsHasOverflowed()) {
+ // We abort the interpreter now anyway, so gc can't invalidate any
+ // pointers.
+ AllowHeapAllocation yes_gc;
+ isolate_->StackOverflow();
+ return RegExp::kInternalRegExpException;
+ } else if (check.InterruptRequested()) {
+ // TODO(mbid): Is this really equivalent to whether the string is
+ // one-byte or two-byte? A comment at the declaration of
+ // IsOneByteRepresentationUnderneath says that this might fail for
+ // external strings.
+ const bool was_one_byte =
+ String::IsOneByteRepresentationUnderneath(input_object_);
+
+ Object result;
+ {
+ AllowHeapAllocation yes_gc;
+ result = isolate_->stack_guard()->HandleInterrupts();
+ }
+ if (result.IsException(isolate_)) {
+ return RegExp::kInternalRegExpException;
+ }
+
+ // If we changed between a LATIN1 and a UC16 string, we need to restart
+ // regexp matching with the appropriate template instantiation of
+ // RawMatch.
+ if (String::IsOneByteRepresentationUnderneath(*input_handle) !=
+ was_one_byte) {
+ return RegExp::kInternalRegExpRetry;
+ }
+
+ // Update objects and pointers in case they have changed during gc.
+ bytecode_object_ = *bytecode_handle;
+ bytecode_ = ToInstructionVector(bytecode_object_, no_gc_);
+ input_object_ = *input_handle;
+ input_ = ToCharacterVector<Character>(input_object_, no_gc_);
+ }
+ }
+ return RegExp::kInternalRegExpSuccess;
+ }
+
// Change the current input index for future calls to `FindNextMatch`.
void SetInputIndex(int new_input_index) {
DCHECK_GE(input_index_, 0);
@@ -187,8 +290,10 @@ class NfaInterpreter {
// Find the next match and return the corresponding capture registers and
// write its capture registers to `best_match_registers_`. The search starts
- // at the current `input_index_`.
- void FindNextMatch() {
+ // at the current `input_index_`. Returns RegExp::kInternalRegExpSuccess if
+ // execution could finish regularly (with or without a match) and an error
+ // code due to interrupt otherwise.
+ int FindNextMatch() {
DCHECK(active_threads_.is_empty());
// TODO(mbid,v8:10765): Can we get around resetting `pc_last_input_index_`
// here? As long as
@@ -240,12 +345,20 @@ class NfaInterpreter {
uc16 input_char = input_[input_index_];
++input_index_;
+ static constexpr int kTicksBetweenInterruptHandling = 64;
+ if (input_index_ % kTicksBetweenInterruptHandling == 0) {
+ int err_code = HandleInterrupts();
+ if (err_code != RegExp::kInternalRegExpSuccess) return err_code;
+ }
+
// We unblock all blocked_threads_ by feeding them the input char.
FlushBlockedThreads(input_char);
// Run all threads until they block or accept.
RunActiveThreads();
}
+
+ return RegExp::kInternalRegExpSuccess;
}
// Run an active thread `t` until it executes a CONSUME_RANGE or ACCEPT
@@ -394,12 +507,20 @@ class NfaInterpreter {
pc_last_input_index_[pc] = input_index_;
}
- const Vector<const RegExpInstruction> bytecode_;
+ Isolate* const isolate_;
+
+ const RegExp::CallOrigin call_origin_;
+
+ const DisallowHeapAllocation no_gc_;
+
+ ByteArray bytecode_object_;
+ Vector<const RegExpInstruction> bytecode_;
// Number of registers used per thread.
const int register_count_per_match_;
- const Vector<const Character> input_;
+ String input_object_;
+ Vector<const Character> input_;
int input_index_;
// pc_last_input_index_[k] records the value of input_index_ the last
@@ -432,22 +553,25 @@ class NfaInterpreter {
} // namespace
-int ExperimentalRegExpInterpreter::FindMatchesNfaOneByte(
- Vector<const RegExpInstruction> bytecode, int register_count_per_match,
- Vector<const uint8_t> input, int start_index, int32_t* output_registers,
- int output_register_count, Zone* zone) {
- NfaInterpreter<uint8_t> interpreter(bytecode, register_count_per_match, input,
- start_index, zone);
- return interpreter.FindMatches(output_registers, output_register_count);
-}
-
-int ExperimentalRegExpInterpreter::FindMatchesNfaTwoByte(
- Vector<const RegExpInstruction> bytecode, int register_count_per_match,
- Vector<const uc16> input, int start_index, int32_t* output_registers,
- int output_register_count, Zone* zone) {
- NfaInterpreter<uc16> interpreter(bytecode, register_count_per_match, input,
- start_index, zone);
- return interpreter.FindMatches(output_registers, output_register_count);
+int ExperimentalRegExpInterpreter::FindMatches(
+ Isolate* isolate, RegExp::CallOrigin call_origin, ByteArray bytecode,
+ int register_count_per_match, String input, int start_index,
+ int32_t* output_registers, int output_register_count, Zone* zone) {
+ DCHECK(input.IsFlat());
+ DisallowHeapAllocation no_gc;
+
+ if (input.GetFlatContent(no_gc).IsOneByte()) {
+ NfaInterpreter<uint8_t> interpreter(isolate, call_origin, bytecode,
+ register_count_per_match, input,
+ start_index, zone);
+ return interpreter.FindMatches(output_registers, output_register_count);
+ } else {
+ DCHECK(input.GetFlatContent(no_gc).IsTwoByte());
+ NfaInterpreter<uc16> interpreter(isolate, call_origin, bytecode,
+ register_count_per_match, input,
+ start_index, zone);
+ return interpreter.FindMatches(output_registers, output_register_count);
+ }
}
} // namespace internal
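
The interpreter now polls for pending interrupts only once every 64 consumed characters (kTicksBetweenInterruptHandling), keeping the hot loop cheap while long-running matches stay interruptible. A reduced sketch of that cadence, with HandleInterrupts() and the result constant as stand-ins for the members added above:

    constexpr int kInternalRegExpSuccess = 1;
    constexpr int kTicksBetweenInterruptHandling = 64;

    int HandleInterrupts() { return kInternalRegExpSuccess; }  // Stand-in.

    int FindNextMatchLoop(int input_length) {
      for (int input_index = 1; input_index <= input_length; ++input_index) {
        if (input_index % kTicksBetweenInterruptHandling == 0) {
          int err_code = HandleInterrupts();
          if (err_code != kInternalRegExpSuccess) return err_code;
        }
        // ... feed input[input_index - 1] to blocked threads, then run the
        // active threads until each blocks or accepts ...
      }
      return kInternalRegExpSuccess;
    }
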
diff --git a/deps/v8/src/regexp/experimental/experimental-interpreter.h b/deps/v8/src/regexp/experimental/experimental-interpreter.h
index 32bff001b1..3da50e3902 100644
--- a/deps/v8/src/regexp/experimental/experimental-interpreter.h
+++ b/deps/v8/src/regexp/experimental/experimental-interpreter.h
@@ -5,7 +5,10 @@
#ifndef V8_REGEXP_EXPERIMENTAL_EXPERIMENTAL_INTERPRETER_H_
#define V8_REGEXP_EXPERIMENTAL_EXPERIMENTAL_INTERPRETER_H_
+#include "src/objects/fixed-array.h"
+#include "src/objects/string.h"
#include "src/regexp/experimental/experimental-bytecode.h"
+#include "src/regexp/regexp.h"
#include "src/utils/vector.h"
namespace v8 {
@@ -18,18 +21,13 @@ class ExperimentalRegExpInterpreter final : public AllStatic {
// Executes a bytecode program in breadth-first NFA mode, without
// backtracking, to find matching substrings. Tries to find up to
// `max_match_num` matches in `input`, starting at `start_index`. Returns
- // the actual number of matches found. The boundaires of matching subranges
+ // the actual number of matches found. The boundaries of matching subranges
// are written to `matches_out`. Provided in variants for one-byte and
// two-byte strings.
- static int FindMatchesNfaOneByte(Vector<const RegExpInstruction> bytecode,
- int capture_count,
- Vector<const uint8_t> input, int start_index,
- int32_t* output_registers,
- int output_register_count, Zone* zone);
- static int FindMatchesNfaTwoByte(Vector<const RegExpInstruction> bytecode,
- int capture_count, Vector<const uc16> input,
- int start_index, int32_t* output_registers,
- int output_register_count, Zone* zone);
+ static int FindMatches(Isolate* isolate, RegExp::CallOrigin call_origin,
+ ByteArray bytecode, int capture_count, String input,
+ int start_index, int32_t* output_registers,
+ int output_register_count, Zone* zone);
};
} // namespace internal
diff --git a/deps/v8/src/regexp/experimental/experimental.cc b/deps/v8/src/regexp/experimental/experimental.cc
index dc919f56c2..56c0596bb4 100644
--- a/deps/v8/src/regexp/experimental/experimental.cc
+++ b/deps/v8/src/regexp/experimental/experimental.cc
@@ -15,6 +15,8 @@ namespace internal {
bool ExperimentalRegExp::CanBeHandled(RegExpTree* tree, JSRegExp::Flags flags,
int capture_count) {
+ DCHECK(FLAG_enable_experimental_regexp_engine ||
+ FLAG_enable_experimental_regexp_engine_on_excessive_backtracks);
return ExperimentalRegExpCompiler::CanBeHandled(tree, flags, capture_count);
}
@@ -33,7 +35,6 @@ void ExperimentalRegExp::Initialize(Isolate* isolate, Handle<JSRegExp> re,
bool ExperimentalRegExp::IsCompiled(Handle<JSRegExp> re, Isolate* isolate) {
DCHECK(FLAG_enable_experimental_regexp_engine);
-
DCHECK_EQ(re->TypeTag(), JSRegExp::EXPERIMENTAL);
#ifdef VERIFY_HEAP
re->JSRegExpVerify(isolate);
@@ -43,22 +44,34 @@ bool ExperimentalRegExp::IsCompiled(Handle<JSRegExp> re, Isolate* isolate) {
Smi::FromInt(JSRegExp::kUninitializedValue);
}
-bool ExperimentalRegExp::Compile(Isolate* isolate, Handle<JSRegExp> re) {
- DCHECK_EQ(re->TypeTag(), JSRegExp::EXPERIMENTAL);
-#ifdef VERIFY_HEAP
- re->JSRegExpVerify(isolate);
-#endif
+template <class T>
+Handle<ByteArray> VectorToByteArray(Isolate* isolate, Vector<T> data) {
+ STATIC_ASSERT(std::is_trivial<T>::value);
- Handle<String> source(re->Pattern(), isolate);
- if (FLAG_trace_experimental_regexp_engine) {
- StdoutStream{} << "Compiling experimental regexp " << *source << std::endl;
- }
+ int byte_length = sizeof(T) * data.length();
+ Handle<ByteArray> byte_array = isolate->factory()->NewByteArray(byte_length);
+ DisallowHeapAllocation no_gc;
+ MemCopy(byte_array->GetDataStartAddress(), data.begin(), byte_length);
+ return byte_array;
+}
+namespace {
+
+struct CompilationResult {
+ Handle<ByteArray> bytecode;
+ Handle<FixedArray> capture_name_map;
+};
+
+// Compiles source pattern, but doesn't change the regexp object.
+base::Optional<CompilationResult> CompileImpl(Isolate* isolate,
+ Handle<JSRegExp> regexp) {
Zone zone(isolate->allocator(), ZONE_NAME);
+ Handle<String> source(regexp->Pattern(), isolate);
+ JSRegExp::Flags flags = regexp->GetFlags();
+
// Parse and compile the regexp source.
RegExpCompileData parse_result;
- JSRegExp::Flags flags = re->GetFlags();
FlatStringReader reader(isolate, source);
DCHECK(!isolate->has_pending_exception());
@@ -67,28 +80,52 @@ bool ExperimentalRegExp::Compile(Isolate* isolate, Handle<JSRegExp> re) {
if (!parse_success) {
// The pattern was already parsed successfully during initialization, so
// the only way parsing can fail now is because of stack overflow.
- CHECK_EQ(parse_result.error, RegExpError::kStackOverflow);
- USE(RegExp::ThrowRegExpException(isolate, re, source, parse_result.error));
- return false;
+ DCHECK_EQ(parse_result.error, RegExpError::kStackOverflow);
+ USE(RegExp::ThrowRegExpException(isolate, regexp, source,
+ parse_result.error));
+ return base::nullopt;
}
ZoneList<RegExpInstruction> bytecode =
ExperimentalRegExpCompiler::Compile(parse_result.tree, flags, &zone);
- int byte_length = sizeof(RegExpInstruction) * bytecode.length();
- Handle<ByteArray> bytecode_byte_array =
- isolate->factory()->NewByteArray(byte_length);
- MemCopy(bytecode_byte_array->GetDataStartAddress(), bytecode.begin(),
- byte_length);
+ CompilationResult result;
+ result.bytecode = VectorToByteArray(isolate, bytecode.ToVector());
+ result.capture_name_map = parse_result.capture_name_map;
+ return result;
+}
+
+} // namespace
- re->SetDataAt(JSRegExp::kIrregexpLatin1BytecodeIndex, *bytecode_byte_array);
- re->SetDataAt(JSRegExp::kIrregexpUC16BytecodeIndex, *bytecode_byte_array);
+bool ExperimentalRegExp::Compile(Isolate* isolate, Handle<JSRegExp> re) {
+ DCHECK(FLAG_enable_experimental_regexp_engine);
+ DCHECK_EQ(re->TypeTag(), JSRegExp::EXPERIMENTAL);
+#ifdef VERIFY_HEAP
+ re->JSRegExpVerify(isolate);
+#endif
+
+ Handle<String> source(re->Pattern(), isolate);
+ if (FLAG_trace_experimental_regexp_engine) {
+ StdoutStream{} << "Compiling experimental regexp " << *source << std::endl;
+ }
+
+ base::Optional<CompilationResult> compilation_result =
+ CompileImpl(isolate, re);
+ if (!compilation_result.has_value()) {
+ DCHECK(isolate->has_pending_exception());
+ return false;
+ }
+
+ re->SetDataAt(JSRegExp::kIrregexpLatin1BytecodeIndex,
+ *compilation_result->bytecode);
+ re->SetDataAt(JSRegExp::kIrregexpUC16BytecodeIndex,
+ *compilation_result->bytecode);
Handle<Code> trampoline = BUILTIN_CODE(isolate, RegExpExperimentalTrampoline);
re->SetDataAt(JSRegExp::kIrregexpLatin1CodeIndex, *trampoline);
re->SetDataAt(JSRegExp::kIrregexpUC16CodeIndex, *trampoline);
- re->SetCaptureNameMap(parse_result.capture_name_map);
+ re->SetCaptureNameMap(compilation_result->capture_name_map);
return true;
}
@@ -101,45 +138,52 @@ Vector<RegExpInstruction> AsInstructionSequence(ByteArray raw_bytes) {
return Vector<RegExpInstruction>(inst_begin, inst_num);
}
+namespace {
+
+int32_t ExecRawImpl(Isolate* isolate, RegExp::CallOrigin call_origin,
+ ByteArray bytecode, String subject, int capture_count,
+ int32_t* output_registers, int32_t output_register_count,
+ int32_t subject_index) {
+ DisallowHeapAllocation no_gc;
+
+ int register_count_per_match =
+ JSRegExp::RegistersForCaptureCount(capture_count);
+
+ int32_t result;
+ do {
+ DCHECK(subject.IsFlat());
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ result = ExperimentalRegExpInterpreter::FindMatches(
+ isolate, call_origin, bytecode, register_count_per_match, subject,
+ subject_index, output_registers, output_register_count, &zone);
+ } while (result == RegExp::kInternalRegExpRetry &&
+ call_origin == RegExp::kFromRuntime);
+ return result;
+}
+
+} // namespace
+
// Returns the number of matches.
-int32_t ExperimentalRegExp::ExecRaw(Isolate* isolate, JSRegExp regexp,
- String subject, int32_t* output_registers,
+int32_t ExperimentalRegExp::ExecRaw(Isolate* isolate,
+ RegExp::CallOrigin call_origin,
+ JSRegExp regexp, String subject,
+ int32_t* output_registers,
int32_t output_register_count,
int32_t subject_index) {
- DisallowHeapAllocation no_gc;
-
DCHECK(FLAG_enable_experimental_regexp_engine);
+ DisallowHeapAllocation no_gc;
if (FLAG_trace_experimental_regexp_engine) {
String source = String::cast(regexp.DataAt(JSRegExp::kSourceIndex));
StdoutStream{} << "Executing experimental regexp " << source << std::endl;
}
- Vector<RegExpInstruction> bytecode = AsInstructionSequence(
- ByteArray::cast(regexp.DataAt(JSRegExp::kIrregexpLatin1BytecodeIndex)));
-
- if (FLAG_print_regexp_bytecode) {
- StdoutStream{} << "Bytecode:" << std::endl;
- StdoutStream{} << bytecode << std::endl;
- }
-
- int register_count_per_match =
- JSRegExp::RegistersForCaptureCount(regexp.CaptureCount());
-
- DCHECK(subject.IsFlat());
- String::FlatContent subject_content = subject.GetFlatContent(no_gc);
+ ByteArray bytecode =
+ ByteArray::cast(regexp.DataAt(JSRegExp::kIrregexpLatin1BytecodeIndex));
- Zone zone(isolate->allocator(), ZONE_NAME);
-
- if (subject_content.IsOneByte()) {
- return ExperimentalRegExpInterpreter::FindMatchesNfaOneByte(
- bytecode, register_count_per_match, subject_content.ToOneByteVector(),
- subject_index, output_registers, output_register_count, &zone);
- } else {
- return ExperimentalRegExpInterpreter::FindMatchesNfaTwoByte(
- bytecode, register_count_per_match, subject_content.ToUC16Vector(),
- subject_index, output_registers, output_register_count, &zone);
- }
+ return ExecRawImpl(isolate, call_origin, bytecode, subject,
+ regexp.CaptureCount(), output_registers,
+ output_register_count, subject_index);
}
int32_t ExperimentalRegExp::MatchForCallFromJs(
@@ -148,7 +192,6 @@ int32_t ExperimentalRegExp::MatchForCallFromJs(
Address backtrack_stack, RegExp::CallOrigin call_origin, Isolate* isolate,
Address regexp) {
DCHECK(FLAG_enable_experimental_regexp_engine);
-
DCHECK_NOT_NULL(isolate);
DCHECK_NOT_NULL(output_registers);
DCHECK(call_origin == RegExp::CallOrigin::kFromJs);
@@ -162,15 +205,14 @@ int32_t ExperimentalRegExp::MatchForCallFromJs(
JSRegExp regexp_obj = JSRegExp::cast(Object(regexp));
- return ExecRaw(isolate, regexp_obj, subject_string, output_registers,
- output_register_count, start_position);
+ return ExecRaw(isolate, RegExp::kFromJs, regexp_obj, subject_string,
+ output_registers, output_register_count, start_position);
}
MaybeHandle<Object> ExperimentalRegExp::Exec(
Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
int subject_index, Handle<RegExpMatchInfo> last_match_info) {
DCHECK(FLAG_enable_experimental_regexp_engine);
-
DCHECK_EQ(regexp->TypeTag(), JSRegExp::EXPERIMENTAL);
#ifdef VERIFY_HEAP
regexp->JSRegExpVerify(isolate);
@@ -197,16 +239,78 @@ MaybeHandle<Object> ExperimentalRegExp::Exec(
output_registers_release.reset(output_registers);
}
- int num_matches = ExecRaw(isolate, *regexp, *subject, output_registers,
- output_register_count, subject_index);
+ int num_matches =
+ ExecRaw(isolate, RegExp::kFromRuntime, *regexp, *subject,
+ output_registers, output_register_count, subject_index);
- if (num_matches == 0) {
+ if (num_matches > 0) {
+ DCHECK_EQ(num_matches, 1);
+ return RegExp::SetLastMatchInfo(isolate, last_match_info, subject,
+ capture_count, output_registers);
+ } else if (num_matches == 0) {
return isolate->factory()->null_value();
} else {
+ DCHECK_LT(num_matches, 0);
+ DCHECK(isolate->has_pending_exception());
+ return MaybeHandle<Object>();
+ }
+}
+
+int32_t ExperimentalRegExp::OneshotExecRaw(Isolate* isolate,
+ Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int32_t* output_registers,
+ int32_t output_register_count,
+ int32_t subject_index) {
+ DCHECK(FLAG_enable_experimental_regexp_engine_on_excessive_backtracks);
+
+ if (FLAG_trace_experimental_regexp_engine) {
+ StdoutStream{} << "Experimental execution (oneshot) of regexp "
+ << regexp->Pattern() << std::endl;
+ }
+
+ base::Optional<CompilationResult> compilation_result =
+ CompileImpl(isolate, regexp);
+ if (!compilation_result.has_value()) return RegExp::kInternalRegExpException;
+
+ DisallowHeapAllocation no_gc;
+ return ExecRawImpl(isolate, RegExp::kFromRuntime,
+ *compilation_result->bytecode, *subject,
+ regexp->CaptureCount(), output_registers,
+ output_register_count, subject_index);
+}
+
+MaybeHandle<Object> ExperimentalRegExp::OneshotExec(
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ int subject_index, Handle<RegExpMatchInfo> last_match_info) {
+ DCHECK(FLAG_enable_experimental_regexp_engine_on_excessive_backtracks);
+ DCHECK_NE(regexp->TypeTag(), JSRegExp::NOT_COMPILED);
+
+ int capture_count = regexp->CaptureCount();
+ int output_register_count = JSRegExp::RegistersForCaptureCount(capture_count);
+
+ int32_t* output_registers;
+ std::unique_ptr<int32_t[]> output_registers_release;
+ if (output_register_count <= Isolate::kJSRegexpStaticOffsetsVectorSize) {
+ output_registers = isolate->jsregexp_static_offsets_vector();
+ } else {
+ output_registers = NewArray<int32_t>(output_register_count);
+ output_registers_release.reset(output_registers);
+ }
+
+ int num_matches = OneshotExecRaw(isolate, regexp, subject, output_registers,
+ output_register_count, subject_index);
+
+ if (num_matches > 0) {
DCHECK_EQ(num_matches, 1);
return RegExp::SetLastMatchInfo(isolate, last_match_info, subject,
capture_count, output_registers);
- return last_match_info;
+ } else if (num_matches == 0) {
+ return isolate->factory()->null_value();
+ } else {
+ DCHECK_LT(num_matches, 0);
+ DCHECK(isolate->has_pending_exception());
+ return MaybeHandle<Object>();
}
}
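
ExecRawImpl above spins on kInternalRegExpRetry, but only when called from the runtime; a JS caller gets the retry code back and re-enters through the runtime, where a HandleScope is available for safely updating moved objects. The control flow, reduced to a sketch (the constant and the Find callback are stand-ins):

    #include <functional>

    constexpr int kInternalRegExpRetry = -2;
    enum class CallOrigin { kFromJs, kFromRuntime };

    int ExecWithRetry(CallOrigin origin, const std::function<int()>& Find) {
      int result;
      do {
        result = Find();  // One interpreter run over the (possibly moved) subject.
      } while (result == kInternalRegExpRetry &&
               origin == CallOrigin::kFromRuntime);
      return result;
    }
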
diff --git a/deps/v8/src/regexp/experimental/experimental.h b/deps/v8/src/regexp/experimental/experimental.h
index 02f535f621..a0ee8d1081 100644
--- a/deps/v8/src/regexp/experimental/experimental.h
+++ b/deps/v8/src/regexp/experimental/experimental.h
@@ -39,10 +39,22 @@ class ExperimentalRegExp final : public AllStatic {
static MaybeHandle<Object> Exec(Isolate* isolate, Handle<JSRegExp> regexp,
Handle<String> subject, int index,
Handle<RegExpMatchInfo> last_match_info);
- static int32_t ExecRaw(Isolate* isolate, JSRegExp regexp, String subject,
+ static int32_t ExecRaw(Isolate* isolate, RegExp::CallOrigin call_origin,
+ JSRegExp regexp, String subject,
int32_t* output_registers,
int32_t output_register_count, int32_t subject_index);
+ // Compile and execute a regexp with the experimental engine, regardless of
+ // its type tag. The regexp itself is not changed (apart from lastIndex).
+ static MaybeHandle<Object> OneshotExec(
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ int index, Handle<RegExpMatchInfo> last_match_info);
+ static int32_t OneshotExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int32_t* output_registers,
+ int32_t output_register_count,
+ int32_t subject_index);
+
static constexpr bool kSupportsUnicode = false;
};
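
The new oneshot entry points compile into a throwaway artifact and execute it, caching nothing on the JSRegExp, which is what lets irregexp-tagged regexps be re-run experimentally after excessive backtracking. A conceptual sketch under placeholder types (Bytecode, CompileImpl and Execute are all illustrative):

    #include <optional>

    struct Bytecode {};  // Placeholder for the compiled program.

    std::optional<Bytecode> CompileImpl() { return Bytecode{}; }  // Stand-in.
    int Execute(const Bytecode&) { return 1; }  // Stand-in; 1 == success.

    int OneshotExecRaw() {
      std::optional<Bytecode> compiled = CompileImpl();
      if (!compiled.has_value()) return -1;  // kInternalRegExpException.
      // Nothing is stored back on the regexp object afterwards.
      return Execute(*compiled);
    }
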
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 2135e977a7..27c1300ced 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -116,6 +116,7 @@ RegExpMacroAssemblerIA32::~RegExpMacroAssemblerIA32() {
exit_label_.Unuse();
check_preempt_label_.Unuse();
stack_overflow_label_.Unuse();
+ fallback_label_.Unuse();
}
@@ -148,8 +149,13 @@ void RegExpMacroAssemblerIA32::Backtrack() {
__ cmp(Operand(ebp, kBacktrackCount), Immediate(backtrack_limit()));
__ j(not_equal, &next);
- // Exceeded limits are treated as a failed match.
- Fail();
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ jmp(&fallback_label_);
+ } else {
+ // Can't fall back, so we treat it as a failed match.
+ Fail();
+ }
__ bind(&next);
}
@@ -940,6 +946,12 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ jmp(&return_eax);
}
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ mov(eax, FALLBACK_TO_EXPERIMENTAL);
+ __ jmp(&return_eax);
+ }
+
CodeDesc code_desc;
masm_->GetCode(masm_->isolate(), &code_desc);
Handle<Code> code =
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index a30bff29a1..0cb29979d7 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -192,6 +192,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32
Label exit_label_;
Label check_preempt_label_;
Label stack_overflow_label_;
+ Label fallback_label_;
};
} // namespace internal
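
The same Backtrack()/fallback_label_ change recurs for every backend below (ia32 above, then MIPS, MIPS64, PPC, S390 and x64). Restated portably, the control flow on hitting the backtrack limit is the following; this C++ is illustrative only, since the real code emits machine code through each macro assembler:

    constexpr int kFallbackToExperimental = -3;  // FALLBACK_TO_EXPERIMENTAL
    constexpr int kFailure = 0;                  // FAILURE

    int OnBacktrackLimitExceeded(bool can_fallback) {
      // With fallback armed, control jumps to fallback_label_, whose stub
      // returns FALLBACK_TO_EXPERIMENTAL; otherwise the match just fails.
      return can_fallback ? kFallbackToExperimental : kFailure;
    }
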
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index db79011284..e1b1119c17 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -129,6 +129,7 @@ RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
check_preempt_label_.Unuse();
stack_overflow_label_.Unuse();
internal_failure_label_.Unuse();
+ fallback_label_.Unuse();
}
@@ -165,8 +166,13 @@ void RegExpMacroAssemblerMIPS::Backtrack() {
__ Sw(a0, MemOperand(frame_pointer(), kBacktrackCount));
__ Branch(&next, ne, a0, Operand(backtrack_limit()));
- // Exceeded limits are treated as a failed match.
- Fail();
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ jmp(&fallback_label_);
+ } else {
+ // Can't fall back, so we treat it as a failed match.
+ Fail();
+ }
__ bind(&next);
}
@@ -910,6 +916,12 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ li(v0, Operand(EXCEPTION));
__ jmp(&return_v0);
}
+
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ li(v0, Operand(FALLBACK_TO_EXPERIMENTAL));
+ __ jmp(&return_v0);
+ }
}
CodeDesc code_desc;
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index e2aea1b091..dd1c27a7db 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -211,6 +211,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
Label check_preempt_label_;
Label stack_overflow_label_;
Label internal_failure_label_;
+ Label fallback_label_;
};
} // namespace internal
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 309cebfcb9..48252a206e 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -165,6 +165,7 @@ RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
check_preempt_label_.Unuse();
stack_overflow_label_.Unuse();
internal_failure_label_.Unuse();
+ fallback_label_.Unuse();
}
@@ -201,8 +202,13 @@ void RegExpMacroAssemblerMIPS::Backtrack() {
__ Sd(a0, MemOperand(frame_pointer(), kBacktrackCount));
__ Branch(&next, ne, a0, Operand(backtrack_limit()));
- // Exceeded limits are treated as a failed match.
- Fail();
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ jmp(&fallback_label_);
+ } else {
+ // Can't fall back, so we treat it as a failed match.
+ Fail();
+ }
__ bind(&next);
}
@@ -946,6 +952,12 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ li(v0, Operand(EXCEPTION));
__ jmp(&return_v0);
}
+
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ li(v0, Operand(FALLBACK_TO_EXPERIMENTAL));
+ __ jmp(&return_v0);
+ }
}
CodeDesc code_desc;
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index aebfec1060..b9a29ca010 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -216,6 +216,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
Label check_preempt_label_;
Label stack_overflow_label_;
Label internal_failure_label_;
+ Label fallback_label_;
};
} // namespace internal
diff --git a/deps/v8/src/regexp/ppc/OWNERS b/deps/v8/src/regexp/ppc/OWNERS
index 6edd45a6ef..02c2cd757c 100644
--- a/deps/v8/src/regexp/ppc/OWNERS
+++ b/deps/v8/src/regexp/ppc/OWNERS
@@ -2,3 +2,4 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
+vasili.skurydzin@ibm.com
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index 0b1c9a99b7..c0d69297f9 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -136,6 +136,7 @@ RegExpMacroAssemblerPPC::~RegExpMacroAssemblerPPC() {
check_preempt_label_.Unuse();
stack_overflow_label_.Unuse();
internal_failure_label_.Unuse();
+ fallback_label_.Unuse();
}
@@ -176,11 +177,17 @@ void RegExpMacroAssemblerPPC::Backtrack() {
__ LoadP(r3, MemOperand(frame_pointer(), kBacktrackCount), r0);
__ addi(r3, r3, Operand(1));
__ StoreP(r3, MemOperand(frame_pointer(), kBacktrackCount), r0);
- __ cmpi(r3, Operand(backtrack_limit()));
+ __ mov(r0, Operand(backtrack_limit()));
+ __ cmp(r3, r0);
__ bne(&next);
- // Exceeded limits are treated as a failed match.
- Fail();
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ b(&fallback_label_);
+ } else {
+ // Can't fall back, so we treat it as a failed match.
+ Fail();
+ }
__ bind(&next);
}
@@ -952,6 +959,12 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ li(r3, Operand(EXCEPTION));
__ b(&return_r3);
}
+
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ li(r3, Operand(FALLBACK_TO_EXPERIMENTAL));
+ __ b(&return_r3);
+ }
}
CodeDesc code_desc;
@@ -1140,7 +1153,6 @@ void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
__ mov(ip, Operand(stack_guard_check));
EmbeddedData d = EmbeddedData::FromBlob();
- CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry));
Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry);
__ mov(r0, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
__ Call(r0);
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index f6b959837f..18b7c5b110 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -197,6 +197,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
Label check_preempt_label_;
Label stack_overflow_label_;
Label internal_failure_label_;
+ Label fallback_label_;
};
// Set of non-volatile registers saved/restored by generated regexp code.
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.cc b/deps/v8/src/regexp/regexp-bytecode-generator.cc
index 8abd15384e..262d788068 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.cc
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.cc
@@ -132,7 +132,11 @@ void RegExpBytecodeGenerator::PopCurrentPosition() { Emit(BC_POP_CP, 0); }
void RegExpBytecodeGenerator::PushCurrentPosition() { Emit(BC_PUSH_CP, 0); }
-void RegExpBytecodeGenerator::Backtrack() { Emit(BC_POP_BT, 0); }
+void RegExpBytecodeGenerator::Backtrack() {
+ int error_code =
+ can_fallback() ? RegExp::RE_FALLBACK_TO_EXPERIMENTAL : RegExp::RE_FAILURE;
+ Emit(BC_POP_BT, error_code);
+}
void RegExpBytecodeGenerator::GoTo(Label* l) {
if (advance_current_end_ == pc_) {
@@ -368,7 +372,7 @@ void RegExpBytecodeGenerator::IfRegisterEqPos(int register_index,
Handle<HeapObject> RegExpBytecodeGenerator::GetCode(Handle<String> source) {
Bind(&backtrack_);
- Emit(BC_POP_BT, 0);
+ Backtrack();
Handle<ByteArray> array;
if (FLAG_regexp_peephole_optimization) {
diff --git a/deps/v8/src/regexp/regexp-compiler.cc b/deps/v8/src/regexp/regexp-compiler.cc
index ce1197a55b..fe032bcfdd 100644
--- a/deps/v8/src/regexp/regexp-compiler.cc
+++ b/deps/v8/src/regexp/regexp-compiler.cc
@@ -1777,10 +1777,11 @@ class LoopInitializationMarker {
DCHECK(node_->traversed_loop_initialization_node_);
node_->traversed_loop_initialization_node_ = false;
}
+ LoopInitializationMarker(const LoopInitializationMarker&) = delete;
+ LoopInitializationMarker& operator=(const LoopInitializationMarker&) = delete;
private:
LoopChoiceNode* node_;
- DISALLOW_COPY_AND_ASSIGN(LoopInitializationMarker);
};
// Temporarily decrements min_loop_iterations_.
@@ -1791,10 +1792,11 @@ class IterationDecrementer {
--node_->min_loop_iterations_;
}
~IterationDecrementer() { ++node_->min_loop_iterations_; }
+ IterationDecrementer(const IterationDecrementer&) = delete;
+ IterationDecrementer& operator=(const IterationDecrementer&) = delete;
private:
LoopChoiceNode* node_;
- DISALLOW_COPY_AND_ASSIGN(IterationDecrementer);
};
RegExpNode* SeqRegExpNode::FilterOneByte(int depth) {
diff --git a/deps/v8/src/regexp/regexp-error.h b/deps/v8/src/regexp/regexp-error.h
index 6145b404ab..628f93638e 100644
--- a/deps/v8/src/regexp/regexp-error.h
+++ b/deps/v8/src/regexp/regexp-error.h
@@ -30,6 +30,7 @@ namespace internal {
T(InvalidQuantifier, "Invalid quantifier") \
T(InvalidGroup, "Invalid group") \
T(MultipleFlagDashes, "Multiple dashes in flag group") \
+ T(NotLinear, "Cannot be executed in linear time") \
T(RepeatedFlag, "Repeated flag in flag group") \
T(InvalidFlagGroup, "Invalid flag group") \
T(TooManyCaptures, "Too many captures") \
diff --git a/deps/v8/src/regexp/regexp-interpreter.cc b/deps/v8/src/regexp/regexp-interpreter.cc
index 80442a8db6..a73a9d3fcc 100644
--- a/deps/v8/src/regexp/regexp-interpreter.cc
+++ b/deps/v8/src/regexp/regexp-interpreter.cc
@@ -125,6 +125,8 @@ uint32_t LoadPacked24Unsigned(int32_t bytecode_and_packed_arg) {
class BacktrackStack {
public:
BacktrackStack() = default;
+ BacktrackStack(const BacktrackStack&) = delete;
+ BacktrackStack& operator=(const BacktrackStack&) = delete;
V8_WARN_UNUSED_RESULT bool push(int v) {
data_.emplace_back(v);
@@ -157,8 +159,6 @@ class BacktrackStack {
static constexpr int kMaxSize =
RegExpStack::kMaximumStackSize / sizeof(ValueT);
-
- DISALLOW_COPY_AND_ASSIGN(BacktrackStack);
};
// Registers used during interpreter execution. These consist of output
@@ -521,8 +521,8 @@ IrregexpInterpreter::Result RawMatch(
BYTECODE(POP_BT) {
STATIC_ASSERT(JSRegExp::kNoBacktrackLimit == 0);
if (++backtrack_count == backtrack_limit) {
- // Exceeded limits are treated as a failed match.
- return IrregexpInterpreter::FAILURE;
+ int return_code = LoadPacked24Signed(insn);
+ return static_cast<IrregexpInterpreter::Result>(return_code);
}
IrregexpInterpreter::Result return_code =
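
The generator now encodes the desired result code in BC_POP_BT's packed 24-bit argument, and the interpreter reads it back with LoadPacked24Signed when the backtrack limit trips, so one bytecode serves both the fail and fallback cases. A hedged sketch of that packing convention (the 8-bit opcode layout is assumed for illustration):

    #include <cstdint>

    constexpr int kBytecodeShift = 8;  // Low 8 bits hold the opcode.

    int32_t Pack(uint8_t bytecode, int32_t arg) {
      // Shift in unsigned space to avoid UB on negative arguments such as -3.
      return static_cast<int32_t>(static_cast<uint32_t>(arg) << kBytecodeShift) |
             bytecode;
    }

    int32_t LoadPacked24Signed(int32_t insn) {
      return insn >> kBytecodeShift;  // Arithmetic shift preserves the sign.
    }
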
diff --git a/deps/v8/src/regexp/regexp-interpreter.h b/deps/v8/src/regexp/regexp-interpreter.h
index be96476443..9b4a8c6c30 100644
--- a/deps/v8/src/regexp/regexp-interpreter.h
+++ b/deps/v8/src/regexp/regexp-interpreter.h
@@ -19,6 +19,7 @@ class V8_EXPORT_PRIVATE IrregexpInterpreter : public AllStatic {
SUCCESS = RegExp::kInternalRegExpSuccess,
EXCEPTION = RegExp::kInternalRegExpException,
RETRY = RegExp::kInternalRegExpRetry,
+ FALLBACK_TO_EXPERIMENTAL = RegExp::kInternalRegExpFallbackToExperimental,
};
// In case a StackOverflow occurs, a StackOverflowException is created and
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index cf4346309e..62a72b1661 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -315,7 +315,7 @@ int NativeRegExpMacroAssembler::Execute(
int result =
fn.Call(input.ptr(), start_offset, input_start, input_end, output,
output_size, stack_base, call_origin, isolate, regexp.ptr());
- DCHECK(result >= RETRY);
+ DCHECK_GE(result, SMALLEST_REGEXP_RESULT);
if (result == EXCEPTION && !isolate->has_pending_exception()) {
// We detected a stack overflow (on the backtrack stack) in RegExp code,
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index 52465610cb..f1dc57db64 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -183,10 +183,19 @@ class RegExpMacroAssembler {
void set_slow_safe(bool ssc) { slow_safe_compiler_ = ssc; }
bool slow_safe() { return slow_safe_compiler_; }
+ // Controls after how many backtracks irregexp should abort execution. If it
+ // can fall back to the experimental engine (see `set_can_fallback`), it will
+ // return the appropriate error code, otherwise it will return the number of
+ // matches found so far (perhaps none).
void set_backtrack_limit(uint32_t backtrack_limit) {
backtrack_limit_ = backtrack_limit;
}
+ // Set whether or not irregexp can fall back to the experimental engine on
+ // excessive backtracking. The number of backtracks considered excessive can
+ // be controlled with set_backtrack_limit.
+ void set_can_fallback(bool val) { can_fallback_ = val; }
+
enum GlobalMode {
NOT_GLOBAL,
GLOBAL_NO_ZERO_LENGTH_CHECK,
@@ -211,9 +220,12 @@ class RegExpMacroAssembler {
}
uint32_t backtrack_limit() const { return backtrack_limit_; }
+ bool can_fallback() const { return can_fallback_; }
+
private:
bool slow_safe_compiler_;
uint32_t backtrack_limit_ = JSRegExp::kNoBacktrackLimit;
+ bool can_fallback_ = false;
GlobalMode global_mode_;
Isolate* isolate_;
Zone* zone_;
@@ -228,16 +240,20 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
// RETRY: Something significant changed during execution, and the matching
// should be retried from scratch.
// EXCEPTION: Something failed during execution. If no exception has been
- // thrown, it's an internal out-of-memory, and the caller should
- // throw the exception.
+ // thrown, it's an internal out-of-memory, and the caller should
+ // throw the exception.
// FAILURE: Matching failed.
// SUCCESS: Matching succeeded, and the output array has been filled with
- // capture positions.
+ // capture positions.
+ // FALLBACK_TO_EXPERIMENTAL: Execute the regexp on this subject using the
+ // experimental engine instead.
enum Result {
FAILURE = RegExp::kInternalRegExpFailure,
SUCCESS = RegExp::kInternalRegExpSuccess,
EXCEPTION = RegExp::kInternalRegExpException,
RETRY = RegExp::kInternalRegExpRetry,
+ FALLBACK_TO_EXPERIMENTAL = RegExp::kInternalRegExpFallbackToExperimental,
+ SMALLEST_REGEXP_RESULT = RegExp::kInternalRegExpSmallestResult,
};
NativeRegExpMacroAssembler(Isolate* isolate, Zone* zone);
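
With FALLBACK_TO_EXPERIMENTAL added, the smallest legal internal result is now -3, which is why Execute()'s check above became DCHECK_GE(result, SMALLEST_REGEXP_RESULT) rather than result >= RETRY. The full set, as plain constants for illustration:

    constexpr int kSuccess = 1;
    constexpr int kFailure = 0;
    constexpr int kException = -1;
    constexpr int kRetry = -2;
    constexpr int kFallbackToExperimental = -3;
    constexpr int kSmallestResult = kFallbackToExperimental;

    bool IsValidResult(int r) { return r >= kSmallestResult; }
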
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index fa58764aaa..622baadc07 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -1829,15 +1829,6 @@ bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
return success;
}
-bool RegExpParser::VerifyRegExpSyntax(Isolate* isolate, Zone* zone,
- FlatStringReader* input,
- JSRegExp::Flags flags,
- RegExpCompileData* result,
- const DisallowHeapAllocation& no_gc) {
- RegExpParser parser(input, flags, isolate, zone);
- return parser.Parse(result, no_gc);
-}
-
RegExpBuilder::RegExpBuilder(Zone* zone, JSRegExp::Flags flags)
: zone_(zone),
pending_empty_(false),
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
index 74b653b47e..23afe9f939 100644
--- a/deps/v8/src/regexp/regexp-parser.h
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -159,10 +159,6 @@ class V8_EXPORT_PRIVATE RegExpParser {
static bool ParseRegExp(Isolate* isolate, Zone* zone, FlatStringReader* input,
JSRegExp::Flags flags, RegExpCompileData* result);
- static bool VerifyRegExpSyntax(Isolate* isolate, Zone* zone,
- FlatStringReader* input, JSRegExp::Flags flags,
- RegExpCompileData* result,
- const DisallowHeapAllocation& no_gc);
private:
bool Parse(RegExpCompileData* result, const DisallowHeapAllocation&);
diff --git a/deps/v8/src/regexp/regexp-stack.cc b/deps/v8/src/regexp/regexp-stack.cc
index 7f47aec5ae..9a80f6f211 100644
--- a/deps/v8/src/regexp/regexp-stack.cc
+++ b/deps/v8/src/regexp/regexp-stack.cc
@@ -14,12 +14,18 @@ RegExpStackScope::RegExpStackScope(Isolate* isolate)
: regexp_stack_(isolate->regexp_stack()) {
// Initialize, if not already initialized.
regexp_stack_->EnsureCapacity(0);
+ // Irregexp is not reentrant in several ways; in particular, the
+ // RegExpStackScope is not reentrant since the destructor frees allocated
+ // memory. Protect against reentrancy here.
+ CHECK(!regexp_stack_->is_in_use());
+ regexp_stack_->set_is_in_use(true);
}
RegExpStackScope::~RegExpStackScope() {
// Reset the buffer if it has grown.
regexp_stack_->Reset();
+ DCHECK(!regexp_stack_->is_in_use());
}
RegExpStack::RegExpStack() : thread_local_(this), isolate_(nullptr) {}
@@ -36,17 +42,15 @@ char* RegExpStack::ArchiveStack(char* to) {
DCHECK(thread_local_.owns_memory_);
}
- size_t size = sizeof(thread_local_);
- MemCopy(reinterpret_cast<void*>(to), &thread_local_, size);
+ MemCopy(reinterpret_cast<void*>(to), &thread_local_, kThreadLocalSize);
thread_local_ = ThreadLocal(this);
- return to + size;
+ return to + kThreadLocalSize;
}
char* RegExpStack::RestoreStack(char* from) {
- size_t size = sizeof(thread_local_);
- MemCopy(&thread_local_, reinterpret_cast<void*>(from), size);
- return from + size;
+ MemCopy(&thread_local_, reinterpret_cast<void*>(from), kThreadLocalSize);
+ return from + kThreadLocalSize;
}
void RegExpStack::Reset() { thread_local_.ResetToStaticStack(this); }
@@ -60,6 +64,7 @@ void RegExpStack::ThreadLocal::ResetToStaticStack(RegExpStack* regexp_stack) {
limit_ = reinterpret_cast<Address>(regexp_stack->static_stack_) +
kStackLimitSlack * kSystemPointerSize;
owns_memory_ = false;
+ is_in_use_ = false;
}
void RegExpStack::ThreadLocal::FreeAndInvalidate() {
diff --git a/deps/v8/src/regexp/regexp-stack.h b/deps/v8/src/regexp/regexp-stack.h
index 9394398fcc..25a213e471 100644
--- a/deps/v8/src/regexp/regexp-stack.h
+++ b/deps/v8/src/regexp/regexp-stack.h
@@ -26,13 +26,13 @@ class RegExpStackScope {
// Initializes the stack memory area if necessary.
explicit RegExpStackScope(Isolate* isolate);
~RegExpStackScope(); // Releases the stack if it has grown.
+ RegExpStackScope(const RegExpStackScope&) = delete;
+ RegExpStackScope& operator=(const RegExpStackScope&) = delete;
RegExpStack* stack() const { return regexp_stack_; }
private:
RegExpStack* regexp_stack_;
-
- DISALLOW_COPY_AND_ASSIGN(RegExpStackScope);
};
@@ -40,6 +40,8 @@ class RegExpStack {
public:
RegExpStack();
~RegExpStack();
+ RegExpStack(const RegExpStack&) = delete;
+ RegExpStack& operator=(const RegExpStack&) = delete;
// Number of allocated locations on the stack below the limit.
// No sequence of pushes must be longer than this without doing a stack-limit
@@ -68,9 +70,12 @@ class RegExpStack {
// If passing zero, the default/minimum size buffer is allocated.
Address EnsureCapacity(size_t size);
+ bool is_in_use() const { return thread_local_.is_in_use_; }
+ void set_is_in_use(bool v) { thread_local_.is_in_use_ = v; }
+
// Thread local archiving.
static constexpr int ArchiveSpacePerThread() {
- return static_cast<int>(sizeof(ThreadLocal));
+ return static_cast<int>(kThreadLocalSize);
}
char* ArchiveStack(char* to);
char* RestoreStack(char* from);
@@ -112,10 +117,12 @@ class RegExpStack {
size_t memory_size_ = 0;
Address limit_ = kNullAddress;
bool owns_memory_ = false; // Whether memory_ is owned and must be freed.
+ bool is_in_use_ = false; // To guard against reentrancy.
void ResetToStaticStack(RegExpStack* regexp_stack);
void FreeAndInvalidate();
};
+ static constexpr size_t kThreadLocalSize = sizeof(ThreadLocal);
// Address of top of memory used as stack.
Address memory_top_address_address() {
@@ -133,8 +140,6 @@ class RegExpStack {
friend class ExternalReference;
friend class Isolate;
friend class RegExpStackScope;
-
- DISALLOW_COPY_AND_ASSIGN(RegExpStack);
};
} // namespace internal
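
RegExpStackScope now CHECKs an is_in_use_ bit on entry because irregexp is not reentrant and the scope's teardown frees grown stack memory. A minimal model of the guard; it is simplified in that V8 clears the bit through Reset()/ResetToStaticStack rather than directly in the destructor:

    #include <cassert>

    class StackGuardScope {
     public:
      explicit StackGuardScope(bool* in_use) : in_use_(in_use) {
        assert(!*in_use_ && "irregexp stack scope is not reentrant");
        *in_use_ = true;
      }
      ~StackGuardScope() { *in_use_ = false; }
      StackGuardScope(const StackGuardScope&) = delete;
      StackGuardScope& operator=(const StackGuardScope&) = delete;

     private:
      bool* in_use_;
    };
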
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index 556edbdac8..07d1b5d8f3 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -173,9 +173,10 @@ bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
// with the init order in the bootstrapper).
InternalIndex kExecIndex(JSRegExp::kExecFunctionDescriptorIndex);
DCHECK_EQ(*(isolate->factory()->exec_string()),
- proto_map.instance_descriptors().GetKey(kExecIndex));
- if (proto_map.instance_descriptors().GetDetails(kExecIndex).constness() !=
- PropertyConstness::kConst) {
+ proto_map.instance_descriptors(kRelaxedLoad).GetKey(kExecIndex));
+ if (proto_map.instance_descriptors(kRelaxedLoad)
+ .GetDetails(kExecIndex)
+ .constness() != PropertyConstness::kConst) {
return false;
}
diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc
index 569acdab48..b62ad1fff8 100644
--- a/deps/v8/src/regexp/regexp.cc
+++ b/deps/v8/src/regexp/regexp.cc
@@ -17,6 +17,7 @@
#include "src/regexp/regexp-macro-assembler-arch.h"
#include "src/regexp/regexp-macro-assembler-tracer.h"
#include "src/regexp/regexp-parser.h"
+#include "src/regexp/regexp-utils.h"
#include "src/strings/string-search.h"
#include "src/utils/ostreams.h"
@@ -88,7 +89,7 @@ class RegExpImpl final : public AllStatic {
static bool Compile(Isolate* isolate, Zone* zone, RegExpCompileData* input,
JSRegExp::Flags flags, Handle<String> pattern,
Handle<String> sample_subject, bool is_one_byte,
- uint32_t backtrack_limit);
+ uint32_t& backtrack_limit);
// For acting on the JSRegExp data FixedArray.
static int IrregexpMaxRegisterCount(FixedArray re);
@@ -119,6 +120,10 @@ void RegExp::ThrowRegExpException(Isolate* isolate, Handle<JSRegExp> re,
error_text));
}
+bool RegExp::IsUnmodifiedRegExp(Isolate* isolate, Handle<JSRegExp> regexp) {
+ return RegExpUtils::IsUnmodifiedRegExp(isolate, regexp);
+}
+
// Identifies the sort of regexps where the regexp engine is faster
// than the code used for atom matches.
static bool HasFewDifferentCharacters(Handle<String> pattern) {
@@ -182,9 +187,22 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
bool has_been_compiled = false;
- if (FLAG_enable_experimental_regexp_engine &&
+ if (FLAG_default_to_experimental_regexp_engine &&
ExperimentalRegExp::CanBeHandled(parse_result.tree, flags,
parse_result.capture_count)) {
+ DCHECK(FLAG_enable_experimental_regexp_engine);
+ ExperimentalRegExp::Initialize(isolate, re, pattern, flags,
+ parse_result.capture_count);
+ has_been_compiled = true;
+ } else if (flags & JSRegExp::kLinear) {
+ DCHECK(FLAG_enable_experimental_regexp_engine);
+ if (!ExperimentalRegExp::CanBeHandled(parse_result.tree, flags,
+ parse_result.capture_count)) {
+ // TODO(mbid): The error could provide a reason for why the regexp can't
+ // be executed in linear time (e.g. due to back references).
+ return RegExp::ThrowRegExpException(isolate, re, pattern,
+ RegExpError::kNotLinear);
+ }
ExperimentalRegExp::Initialize(isolate, re, pattern, flags,
parse_result.capture_count);
has_been_compiled = true;
@@ -248,6 +266,14 @@ bool RegExp::EnsureFullyCompiled(Isolate* isolate, Handle<JSRegExp> re,
}
// static
+MaybeHandle<Object> RegExp::ExperimentalOneshotExec(
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ int index, Handle<RegExpMatchInfo> last_match_info) {
+ return ExperimentalRegExp::OneshotExec(isolate, regexp, subject, index,
+ last_match_info);
+}
+
+// static
MaybeHandle<Object> RegExp::Exec(Isolate* isolate, Handle<JSRegExp> regexp,
Handle<String> subject, int index,
Handle<RegExpMatchInfo> last_match_info) {
@@ -450,9 +476,10 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
compile_data.compilation_target = re->ShouldProduceBytecode()
? RegExpCompilationTarget::kBytecode
: RegExpCompilationTarget::kNative;
+ uint32_t backtrack_limit = re->BacktrackLimit();
const bool compilation_succeeded =
Compile(isolate, &zone, &compile_data, flags, pattern, sample_subject,
- is_one_byte, re->BacktrackLimit());
+ is_one_byte, backtrack_limit);
if (!compilation_succeeded) {
DCHECK(compile_data.error != RegExpError::kNone);
RegExp::ThrowRegExpException(isolate, re, compile_data.error);
@@ -482,6 +509,7 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
if (compile_data.register_count > register_max) {
SetIrregexpMaxRegisterCount(*data, compile_data.register_count);
}
+ data->set(JSRegExp::kIrregexpBacktrackLimit, Smi::FromInt(backtrack_limit));
if (FLAG_trace_regexp_tier_up) {
PrintF("JSRegExp object %p %s size: %d\n",
@@ -595,6 +623,7 @@ int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
case IrregexpInterpreter::SUCCESS:
case IrregexpInterpreter::EXCEPTION:
case IrregexpInterpreter::FAILURE:
+ case IrregexpInterpreter::FALLBACK_TO_EXPERIMENTAL:
return result;
case IrregexpInterpreter::RETRY:
// The string has changed representation, and we must restart the
@@ -665,13 +694,16 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
int capture_count = regexp->CaptureCount();
return RegExp::SetLastMatchInfo(isolate, last_match_info, subject,
capture_count, output_registers);
- }
- if (res == RegExp::RE_EXCEPTION) {
+ } else if (res == RegExp::RE_FALLBACK_TO_EXPERIMENTAL) {
+ return ExperimentalRegExp::OneshotExec(isolate, regexp, subject,
+ previous_index, last_match_info);
+ } else if (res == RegExp::RE_EXCEPTION) {
DCHECK(isolate->has_pending_exception());
return MaybeHandle<Object>();
+ } else {
+ DCHECK(res == RegExp::RE_FAILURE);
+ return isolate->factory()->null_value();
}
- DCHECK(res == RegExp::RE_FAILURE);
- return isolate->factory()->null_value();
}
// static
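
How the new RE_FALLBACK_TO_EXPERIMENTAL result is consumed by IrregexpExec above, reduced to its branch structure; the outcome names are stand-ins for the real SetLastMatchInfo, OneshotExec and exception paths:

    constexpr int RE_FAILURE = 0;
    constexpr int RE_EXCEPTION = -1;
    constexpr int RE_FALLBACK_TO_EXPERIMENTAL = -3;

    enum class Outcome { kMatch, kNull, kException, kExperimentalRerun };

    Outcome Dispatch(int res) {
      if (res > 0) return Outcome::kMatch;  // RE_SUCCESS / match count.
      if (res == RE_FALLBACK_TO_EXPERIMENTAL) return Outcome::kExperimentalRerun;
      if (res == RE_EXCEPTION) return Outcome::kException;
      return Outcome::kNull;                // RE_FAILURE
    }
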
@@ -740,15 +772,15 @@ bool RegExp::CompileForTesting(Isolate* isolate, Zone* zone,
Handle<String> pattern,
Handle<String> sample_subject,
bool is_one_byte) {
+ uint32_t backtrack_limit = JSRegExp::kNoBacktrackLimit;
return RegExpImpl::Compile(isolate, zone, data, flags, pattern,
- sample_subject, is_one_byte,
- JSRegExp::kNoBacktrackLimit);
+ sample_subject, is_one_byte, backtrack_limit);
}
bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
JSRegExp::Flags flags, Handle<String> pattern,
Handle<String> sample_subject, bool is_one_byte,
- uint32_t backtrack_limit) {
+ uint32_t& backtrack_limit) {
if (JSRegExp::RegistersForCaptureCount(data->capture_count) >
RegExpMacroAssembler::kMaxRegisterCount) {
data->error = RegExpError::kTooLarge;
@@ -825,7 +857,21 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
}
macro_assembler->set_slow_safe(TooMuchRegExpCode(isolate, pattern));
- macro_assembler->set_backtrack_limit(backtrack_limit);
+ if (FLAG_enable_experimental_regexp_engine_on_excessive_backtracks &&
+ ExperimentalRegExp::CanBeHandled(data->tree, flags,
+ data->capture_count)) {
+ if (backtrack_limit == JSRegExp::kNoBacktrackLimit) {
+ backtrack_limit = FLAG_regexp_backtracks_before_fallback;
+ } else {
+ backtrack_limit =
+ std::min(backtrack_limit, FLAG_regexp_backtracks_before_fallback);
+ }
+ macro_assembler->set_backtrack_limit(backtrack_limit);
+ macro_assembler->set_can_fallback(true);
+ } else {
+ macro_assembler->set_backtrack_limit(backtrack_limit);
+ macro_assembler->set_can_fallback(false);
+ }
// Inserted here, instead of in Assembler, because it depends on information
// in the AST that isn't replicated in the Node structure.
@@ -1014,8 +1060,8 @@ int32_t* RegExpGlobalCache::FetchNext() {
DCHECK(ExperimentalRegExp::IsCompiled(regexp_, isolate_));
DisallowHeapAllocation no_gc;
num_matches_ = ExperimentalRegExp::ExecRaw(
- isolate_, *regexp_, *subject_, register_array_,
- register_array_size_, last_end_index);
+ isolate_, RegExp::kFromRuntime, *regexp_, *subject_,
+ register_array_, register_array_size_, last_end_index);
break;
}
case JSRegExp::IRREGEXP: {
@@ -1035,7 +1081,16 @@ int32_t* RegExpGlobalCache::FetchNext() {
}
}
- if (num_matches_ <= 0) return nullptr;
+ // Fall back to experimental engine if needed and possible.
+ if (num_matches_ == RegExp::kInternalRegExpFallbackToExperimental) {
+ num_matches_ = ExperimentalRegExp::OneshotExecRaw(
+ isolate_, regexp_, subject_, register_array_, register_array_size_,
+ last_end_index);
+ }
+
+ if (num_matches_ <= 0) {
+ return nullptr;
+ }
current_match_index_ = 0;
return register_array_;
} else {
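
RegExpImpl::Compile now takes backtrack_limit by reference so the clamped value can be written back into the JSRegExp data array (kIrregexpBacktrackLimit). The selection logic it applies when the experimental engine can take over, as a standalone sketch (kNoBacktrackLimit is 0, meaning no user limit):

    #include <algorithm>
    #include <cstdint>

    constexpr uint32_t kNoBacktrackLimit = 0;

    uint32_t EffectiveBacktrackLimit(uint32_t user_limit, uint32_t flag_limit) {
      // flag_limit stands in for FLAG_regexp_backtracks_before_fallback.
      if (user_limit == kNoBacktrackLimit) return flag_limit;
      return std::min(user_limit, flag_limit);
    }
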
diff --git a/deps/v8/src/regexp/regexp.h b/deps/v8/src/regexp/regexp.h
index a6a3a8f003..3e20b5f80c 100644
--- a/deps/v8/src/regexp/regexp.h
+++ b/deps/v8/src/regexp/regexp.h
@@ -92,16 +92,25 @@ class RegExp final : public AllStatic {
Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
int index, Handle<RegExpMatchInfo> last_match_info);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ ExperimentalOneshotExec(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index,
+ Handle<RegExpMatchInfo> last_match_info);
+
// Integral return values used throughout regexp code layers.
static constexpr int kInternalRegExpFailure = 0;
static constexpr int kInternalRegExpSuccess = 1;
static constexpr int kInternalRegExpException = -1;
static constexpr int kInternalRegExpRetry = -2;
+ static constexpr int kInternalRegExpFallbackToExperimental = -3;
+ static constexpr int kInternalRegExpSmallestResult = -3;
enum IrregexpResult : int32_t {
RE_FAILURE = kInternalRegExpFailure,
RE_SUCCESS = kInternalRegExpSuccess,
RE_EXCEPTION = kInternalRegExpException,
+ RE_RETRY = kInternalRegExpRetry,
+ RE_FALLBACK_TO_EXPERIMENTAL = kInternalRegExpFallbackToExperimental,
};
// Set last match info. If match is nullptr, then setting captures is
@@ -129,6 +138,8 @@ class RegExp final : public AllStatic {
RegExpError error);
static void ThrowRegExpException(Isolate* isolate, Handle<JSRegExp> re,
RegExpError error_text);
+
+ static bool IsUnmodifiedRegExp(Isolate* isolate, Handle<JSRegExp> regexp);
};
// Uses a special global mode of irregexp-generated code to perform a global
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index b574be8d74..9d2e62e1cb 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -137,6 +137,7 @@ RegExpMacroAssemblerS390::~RegExpMacroAssemblerS390() {
check_preempt_label_.Unuse();
stack_overflow_label_.Unuse();
internal_failure_label_.Unuse();
+ fallback_label_.Unuse();
}
int RegExpMacroAssemblerS390::stack_limit_slack() {
@@ -174,8 +175,13 @@ void RegExpMacroAssemblerS390::Backtrack() {
__ CmpLogicalP(r2, Operand(backtrack_limit()));
__ bne(&next);
- // Exceeded limits are treated as a failed match.
- Fail();
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ jmp(&fallback_label_);
+ } else {
+ // Can't fall back, so we treat it as a failed match.
+ Fail();
+ }
__ bind(&next);
}
@@ -949,6 +955,12 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ b(&return_r2);
}
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ LoadImmP(r2, Operand(FALLBACK_TO_EXPERIMENTAL));
+ __ b(&return_r2);
+ }
+
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
Handle<Code> code =
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index e4f88f51b9..a01d409279 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -197,6 +197,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
Label check_preempt_label_;
Label stack_overflow_label_;
Label internal_failure_label_;
+ Label fallback_label_;
};
// Set of non-volatile registers saved/restored by generated regexp code.
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index da0397689f..79574ca993 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -125,6 +125,7 @@ RegExpMacroAssemblerX64::~RegExpMacroAssemblerX64() {
exit_label_.Unuse();
check_preempt_label_.Unuse();
stack_overflow_label_.Unuse();
+ fallback_label_.Unuse();
}
@@ -157,8 +158,13 @@ void RegExpMacroAssemblerX64::Backtrack() {
__ cmpq(Operand(rbp, kBacktrackCount), Immediate(backtrack_limit()));
__ j(not_equal, &next);
- // Exceeded limits are treated as a failed match.
- Fail();
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ jmp(&fallback_label_);
+ } else {
+ // Can't fall back, so we treat it as a failed match.
+ Fail();
+ }
__ bind(&next);
}
@@ -1000,6 +1006,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ jmp(&return_rax);
}
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ Set(rax, FALLBACK_TO_EXPERIMENTAL);
+ __ jmp(&return_rax);
+ }
+
FixupCodeRelativePositions();
CodeDesc code_desc;
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index ea4d45edba..517a05d939 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -248,6 +248,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
Label exit_label_;
Label check_preempt_label_;
Label stack_overflow_label_;
+ Label fallback_label_;
};
} // namespace internal
diff --git a/deps/v8/src/roots/DIR_METADATA b/deps/v8/src/roots/DIR_METADATA
new file mode 100644
index 0000000000..ff55846b31
--- /dev/null
+++ b/deps/v8/src/roots/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>GC"
+} \ No newline at end of file
diff --git a/deps/v8/src/roots/OWNERS b/deps/v8/src/roots/OWNERS
index 2d6e1ae7c2..aaffe920bb 100644
--- a/deps/v8/src/roots/OWNERS
+++ b/deps/v8/src/roots/OWNERS
@@ -7,5 +7,3 @@ jkummerow@chromium.org
marja@chromium.org
sigurds@chromium.org
ulan@chromium.org
-
-# COMPONENT: Blink>JavaScript>GC
diff --git a/deps/v8/src/roots/roots.h b/deps/v8/src/roots/roots.h
index 27f2f5792a..744176e35e 100644
--- a/deps/v8/src/roots/roots.h
+++ b/deps/v8/src/roots/roots.h
@@ -85,7 +85,6 @@ class Symbol;
V(Map, bytecode_array_map, BytecodeArrayMap) \
V(Map, code_data_container_map, CodeDataContainerMap) \
V(Map, coverage_info_map, CoverageInfoMap) \
- V(Map, descriptor_array_map, DescriptorArrayMap) \
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
V(Map, global_dictionary_map, GlobalDictionaryMap) \
V(Map, many_closures_cell_map, ManyClosuresCellMap) \
@@ -165,11 +164,13 @@ class Symbol;
EmptyClosureFeedbackCellArray) \
V(NumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
- V(FixedArray, empty_ordered_hash_map, EmptyOrderedHashMap) \
- V(FixedArray, empty_ordered_hash_set, EmptyOrderedHashSet) \
+ V(OrderedHashMap, empty_ordered_hash_map, EmptyOrderedHashMap) \
+ V(OrderedHashSet, empty_ordered_hash_set, EmptyOrderedHashSet) \
V(FeedbackMetadata, empty_feedback_metadata, EmptyFeedbackMetadata) \
V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
V(NameDictionary, empty_property_dictionary, EmptyPropertyDictionary) \
+ V(OrderedNameDictionary, empty_ordered_property_dictionary, \
+ EmptyOrderedPropertyDictionary) \
V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
V(WeakFixedArray, empty_weak_fixed_array, EmptyWeakFixedArray) \
V(WeakArrayList, empty_weak_array_list, EmptyWeakArrayList) \
diff --git a/deps/v8/src/runtime/DIR_METADATA b/deps/v8/src/runtime/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/runtime/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+} \ No newline at end of file
diff --git a/deps/v8/src/runtime/OWNERS b/deps/v8/src/runtime/OWNERS
index f52e1c9ca8..48d72aea5e 100644
--- a/deps/v8/src/runtime/OWNERS
+++ b/deps/v8/src/runtime/OWNERS
@@ -1,3 +1 @@
file:../../COMMON_OWNERS
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index 3e72d5e816..623064fd8a 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -47,13 +47,8 @@ RUNTIME_FUNCTION(Runtime_NewArray) {
DCHECK_LE(3, args.length());
int const argc = args.length() - 3;
// argv points to the arguments constructed by the JavaScript call.
-#ifdef V8_REVERSE_JSARGS
JavaScriptArguments argv(argc, args.address_of_arg_at(0));
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, argc);
-#else
- JavaScriptArguments argv(argc, args.address_of_arg_at(1));
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
-#endif
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, argc + 1);
CONVERT_ARG_HANDLE_CHECKED(HeapObject, type_info, argc + 2);
// TODO(bmeurer): Use MaybeHandle to pass around the AllocationSite.
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 85b4ca767a..fa647b2c04 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
#include <stdlib.h>
+
#include <limits>
#include "src/builtins/accessors.h"
#include "src/common/message-template.h"
#include "src/debug/debug.h"
#include "src/execution/arguments-inl.h"
+#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
@@ -20,6 +20,7 @@
#include "src/objects/lookup-inl.h"
#include "src/objects/smi.h"
#include "src/objects/struct-inl.h"
+#include "src/runtime/runtime-utils.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -138,8 +139,9 @@ inline void SetHomeObject(Isolate* isolate, JSFunction method,
if (method.shared().needs_home_object()) {
const InternalIndex kPropertyIndex(
JSFunction::kMaybeHomeObjectDescriptorIndex);
- CHECK_EQ(method.map().instance_descriptors().GetKey(kPropertyIndex),
- ReadOnlyRoots(isolate).home_object_symbol());
+ CHECK_EQ(
+ method.map().instance_descriptors(kRelaxedLoad).GetKey(kPropertyIndex),
+ ReadOnlyRoots(isolate).home_object_symbol());
FieldIndex field_index =
FieldIndex::ForDescriptor(method.map(), kPropertyIndex);
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index c20d2d69f2..898279cdb6 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -60,8 +60,29 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
namespace {
-inline bool MaybeSpawnNativeContextIndependentCompilationJob() {
- return FLAG_turbo_nci && !FLAG_turbo_nci_as_midtier;
+// Returns false iff an exception was thrown.
+bool MaybeSpawnNativeContextIndependentCompilationJob(
+ Handle<JSFunction> function, ConcurrencyMode mode) {
+ if (!FLAG_turbo_nci || FLAG_turbo_nci_as_midtier) {
+ return true; // Nothing to do.
+ }
+
+ // If delayed codegen is enabled, the first optimization request does not
+ // trigger NCI compilation, since we try to avoid compiling Code that
+ // remains unused in the future. Repeated optimization (possibly in
+ // different native contexts) is taken as a signal that this SFI will
+ // continue to be used in the future, and thus we trigger NCI compilation.
+ if (!FLAG_turbo_nci_delayed_codegen ||
+ function->shared().has_optimized_at_least_once()) {
+ if (!Compiler::CompileOptimized(function, mode,
+ CodeKind::NATIVE_CONTEXT_INDEPENDENT)) {
+ return false;
+ }
+ } else {
+ function->shared().set_has_optimized_at_least_once(true);
+ }
+
+ return true;
}
Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
@@ -77,20 +98,8 @@ Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
}
// Possibly compile for NCI caching.
- if (MaybeSpawnNativeContextIndependentCompilationJob()) {
- // The first optimization request does not trigger NCI compilation,
- // since we try to avoid compiling Code that remains unused in the future.
- // Repeated optimization (possibly in different native contexts) is taken
- // as a signal that this SFI will continue to be used in the future, thus
- // we trigger NCI compilation.
- if (function->shared().has_optimized_at_least_once()) {
- if (!Compiler::CompileOptimized(function, mode,
- CodeKind::NATIVE_CONTEXT_INDEPENDENT)) {
- return ReadOnlyRoots(isolate).exception();
- }
- } else {
- function->shared().set_has_optimized_at_least_once(true);
- }
+ if (!MaybeSpawnNativeContextIndependentCompilationJob(function, mode)) {
+ return ReadOnlyRoots(isolate).exception();
}
DCHECK(function->is_compiled());
@@ -132,7 +141,7 @@ RUNTIME_FUNCTION(Runtime_FunctionFirstExecution) {
return function->code();
}
-RUNTIME_FUNCTION(Runtime_EvictOptimizedCodeSlot) {
+RUNTIME_FUNCTION(Runtime_HealOptimizedCodeSlot) {
SealHandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
@@ -140,7 +149,7 @@ RUNTIME_FUNCTION(Runtime_EvictOptimizedCodeSlot) {
DCHECK(function->shared().is_compiled());
function->feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
- function->shared(), "Runtime_EvictOptimizedCodeSlot");
+ function->shared(), "Runtime_HealOptimizedCodeSlot");
return function->code();
}
@@ -299,6 +308,14 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
PrintF(scope.file(), " at AST id %d]\n", ast_id.ToInt());
}
maybe_result = Compiler::GetOptimizedCodeForOSR(function, ast_id, frame);
+
+ // Possibly compile for NCI caching.
+ if (!MaybeSpawnNativeContextIndependentCompilationJob(
+ function, FLAG_concurrent_recompilation
+ ? ConcurrencyMode::kConcurrent
+ : ConcurrencyMode::kNotConcurrent)) {
+ return Object();
+ }
}
// Check whether we ended up with usable optimized code.
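The extracted helper lets the OSR path above share the delayed-codegen heuristic with CompileOptimized. A standalone sketch of that decision; the flags and the compiler entry point are mocks, only the control flow mirrors the diff:

#include <cstdio>

static bool FLAG_turbo_nci = true;
static bool FLAG_turbo_nci_as_midtier = false;
static bool FLAG_turbo_nci_delayed_codegen = true;

struct SharedFunctionInfo {
  bool has_optimized_at_least_once = false;
};

static int nci_compiles = 0;

// Mock for Compiler::CompileOptimized(..., NATIVE_CONTEXT_INDEPENDENT).
bool CompileNci(SharedFunctionInfo*) {
  ++nci_compiles;
  return true;
}

// Returns false iff "an exception was thrown" (compilation failed).
bool MaybeSpawnNciJob(SharedFunctionInfo* sfi) {
  if (!FLAG_turbo_nci || FLAG_turbo_nci_as_midtier) return true;  // Nothing to do.
  // With delayed codegen, the first request only marks the SFI; a repeated
  // request is taken as the signal that NCI caching will pay off.
  if (!FLAG_turbo_nci_delayed_codegen || sfi->has_optimized_at_least_once) {
    return CompileNci(sfi);
  }
  sfi->has_optimized_at_least_once = true;
  return true;
}

int main() {
  SharedFunctionInfo sfi;
  MaybeSpawnNciJob(&sfi);  // First request: just sets the bit.
  MaybeSpawnNciJob(&sfi);  // Repeated request: actually compiles.
  std::printf("NCI compiles: %d\n", nci_compiles);  // Prints 1.
}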
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 62ec1fdc24..175e81829c 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -859,7 +859,7 @@ RUNTIME_FUNCTION(Runtime_ProfileCreateSnapshotDataBlob) {
{
i::EmbeddedData d = i::EmbeddedData::FromBlob();
PrintF("Embedded blob is %d bytes\n",
- static_cast<int>(d.code_size() + d.metadata_size()));
+ static_cast<int>(d.code_size() + d.data_size()));
}
FreeCurrentEmbeddedBlob();
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index c38f6e1e4c..3a9075fc7d 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -30,7 +30,7 @@ bool HasBoilerplate(Handle<Object> literal_site) {
void PreInitializeLiteralSite(Handle<FeedbackVector> vector,
FeedbackSlot slot) {
- vector->Set(slot, Smi::FromInt(1));
+ vector->SynchronizedSet(slot, Smi::FromInt(1));
}
enum DeepCopyHints { kNoHints = 0, kObjectIsShallow = 1 };
@@ -110,7 +110,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
if (!copy->IsJSArray(isolate)) {
if (copy->HasFastProperties(isolate)) {
Handle<DescriptorArray> descriptors(
- copy->map(isolate).instance_descriptors(isolate), isolate);
+ copy->map(isolate).instance_descriptors(isolate, kRelaxedLoad),
+ isolate);
for (InternalIndex i : copy->map(isolate).IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
DCHECK_EQ(kField, details.location());
@@ -567,7 +568,7 @@ MaybeHandle<JSObject> CreateLiteral(Isolate* isolate,
JSObject);
creation_context.ExitScope(site, boilerplate);
- vector->Set(literals_slot, *site);
+ vector->SynchronizedSet(literals_slot, *site);
}
STATIC_ASSERT(static_cast<int>(ObjectLiteral::kDisableMementos) ==
@@ -677,7 +678,7 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
PreInitializeLiteralSite(vector, literal_slot);
return *boilerplate;
}
- vector->Set(literal_slot, *boilerplate);
+ vector->SynchronizedSet(literal_slot, *boilerplate);
return *JSRegExp::Copy(boilerplate);
}
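The Set-to-SynchronizedSet switches above presumably publish the literal site with release semantics, so a concurrent reader of the feedback vector never observes a half-initialized boilerplate. A hypothetical standalone equivalent using std::atomic (not V8's actual implementation):

#include <atomic>

struct LiteralSlot {
  std::atomic<void*> value{nullptr};

  // Release store: a reader that observes the new value also observes
  // every prior write that initialized the boilerplate object.
  void SynchronizedSet(void* boilerplate) {
    value.store(boilerplate, std::memory_order_release);
  }

  void* SynchronizedGet() const {
    return value.load(std::memory_order_acquire);
  }
};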
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index 04b195b31e..38349bd507 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -13,15 +13,6 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_IsValidSmi) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
-
- CONVERT_NUMBER_CHECKED(int32_t, number, Int32, args[0]);
- return isolate->heap()->ToBoolean(Smi::IsValid(number));
-}
-
-
RUNTIME_FUNCTION(Runtime_StringToNumber) {
HandleScope handle_scope(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 41dea0fe44..993adf47dd 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -108,8 +108,8 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
int nof = receiver_map->NumberOfOwnDescriptors();
if (nof == 0) return false;
InternalIndex descriptor(nof - 1);
- Handle<DescriptorArray> descriptors(receiver_map->instance_descriptors(),
- isolate);
+ Handle<DescriptorArray> descriptors(
+ receiver_map->instance_descriptors(kRelaxedLoad), isolate);
if (descriptors->GetKey(descriptor) != *key) return false;
// (3) The property to be deleted must be deletable.
PropertyDetails details = descriptors->GetDetails(descriptor);
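The precondition visible in this hunk is that the fast delete path only applies when the key matches the receiver map's last own descriptor, which (as I read it) lets deletion roll the object back to its previous map instead of normalizing to dictionary mode. A standalone sketch of just that check, with mocked types:

#include <string>
#include <vector>

bool KeyIsLastDescriptor(const std::vector<std::string>& descriptors,
                         const std::string& key) {
  int nof = static_cast<int>(descriptors.size());
  if (nof == 0) return false;
  // Mirrors: descriptors->GetKey(InternalIndex(nof - 1)) == *key.
  return descriptors[nof - 1] == key;
}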
@@ -859,9 +859,7 @@ RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTrackingForMap) {
RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- if (!object->IsJSObject()) return Smi::zero();
- Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, js_object, 0);
// It could have been a DCHECK but we call this function directly from tests.
if (!js_object->map().is_deprecated()) return Smi::zero();
// This call must not cause lazy deopts, because it's called from deferred
@@ -869,7 +867,7 @@ RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
// ID. So we just try migration and signal failure if necessary,
// which will also trigger a deopt.
if (!JSObject::TryMigrateInstance(isolate, js_object)) return Smi::zero();
- return *object;
+ return *js_object;
}
static bool IsValidAccessor(Isolate* isolate, Handle<Object> obj) {
@@ -1070,7 +1068,8 @@ RUNTIME_FUNCTION(Runtime_CopyDataPropertiesWithExcludedProperties) {
// If source is undefined or null, throw a non-coercible error.
if (source->IsNullOrUndefined(isolate)) {
- return ErrorUtils::ThrowLoadFromNullOrUndefined(isolate, source);
+ return ErrorUtils::ThrowLoadFromNullOrUndefined(isolate, source,
+ MaybeHandle<Object>());
}
ScopedVector<Handle<Object>> excluded_properties(args.length() - 1);
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 994d6e3710..f6d76a1ecc 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -877,6 +877,23 @@ RUNTIME_FUNCTION(Runtime_RegExpExec) {
isolate, RegExp::Exec(isolate, regexp, subject, index, last_match_info));
}
+RUNTIME_FUNCTION(Runtime_RegExpExperimentalOneshotExec) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
+ CONVERT_INT32_ARG_CHECKED(index, 2);
+ CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 3);
+ // Due to the way the JS calls are constructed, this can never exceed the
+ // length of the string, i.e. it is always a Smi. We check anyway for security.
+ CHECK_LE(0, index);
+ CHECK_GE(subject->length(), index);
+ isolate->counters()->regexp_entry_runtime()->Increment();
+ RETURN_RESULT_OR_FAILURE(
+ isolate, RegExp::ExperimentalOneshotExec(isolate, regexp, subject, index,
+ last_match_info));
+}
+
namespace {
class MatchInfoBackedMatch : public String::Match {
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 36a48ae513..ed16900abf 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -14,12 +14,11 @@
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
+#include "src/objects/arguments-inl.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime-utils.h"
-#include "torque-generated/exported-class-definitions-inl.h"
-#include "torque-generated/exported-class-definitions.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 66e522e72e..41e34aaff6 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
#include <memory>
#include <sstream>
@@ -29,6 +27,8 @@
#include "src/objects/js-function-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/smi.h"
+#include "src/regexp/regexp.h"
+#include "src/runtime/runtime-utils.h"
#include "src/snapshot/snapshot.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/ostreams.h"
@@ -254,7 +254,7 @@ RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
RUNTIME_FUNCTION(Runtime_DynamicMapChecksEnabled) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
- return isolate->heap()->ToBoolean(FLAG_dynamic_map_checks);
+ return isolate->heap()->ToBoolean(FLAG_turboprop_dynamic_map_checks);
}
RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
@@ -551,7 +551,7 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
if (function->IsMarkedForOptimization()) {
status |= static_cast<int>(OptimizationStatus::kMarkedForOptimization);
- } else if (function->IsInOptimizationQueue()) {
+ } else if (function->IsMarkedForConcurrentOptimization()) {
status |=
static_cast<int>(OptimizationStatus::kMarkedForConcurrentOptimization);
} else if (function->IsInOptimizationQueue()) {
@@ -1090,6 +1090,16 @@ RUNTIME_FUNCTION(Runtime_HaveSameMap) {
return isolate->heap()->ToBoolean(obj1.map() == obj2.map());
}
+RUNTIME_FUNCTION(Runtime_InLargeObjectSpace) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(HeapObject, obj, 0);
+ return isolate->heap()->ToBoolean(
+ isolate->heap()->new_lo_space()->Contains(obj) ||
+ isolate->heap()->code_lo_space()->Contains(obj) ||
+ isolate->heap()->lo_space()->Contains(obj));
+}
+
RUNTIME_FUNCTION(Runtime_HasElementsInALargeObjectSpace) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
@@ -1125,7 +1135,8 @@ RUNTIME_FUNCTION(Runtime_IsAsmWasmCode) {
namespace {
v8::ModifyCodeGenerationFromStringsResult DisallowCodegenFromStringsCallback(
- v8::Local<v8::Context> context, v8::Local<v8::Value> source) {
+ v8::Local<v8::Context> context, v8::Local<v8::Value> source,
+ bool is_code_kind) {
return {false, {}};
}
@@ -1278,6 +1289,14 @@ RUNTIME_FUNCTION(Runtime_RegexpTypeTag) {
return *isolate->factory()->NewStringFromAsciiChecked(type_str);
}
+RUNTIME_FUNCTION(Runtime_RegexpIsUnmodified) {
+ HandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ return isolate->heap()->ToBoolean(
+ RegExp::IsUnmodifiedRegExp(isolate, regexp));
+}
+
#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
RUNTIME_FUNCTION(Runtime_Has##Name) { \
CONVERT_ARG_CHECKED(JSObject, obj, 0); \
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 04cb59393f..76753b97fb 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -137,7 +137,7 @@ RUNTIME_FUNCTION(Runtime_ThrowWasmStackOverflow) {
return isolate->StackOverflow();
}
-RUNTIME_FUNCTION(Runtime_WasmThrowTypeError) {
+RUNTIME_FUNCTION(Runtime_WasmThrowJSTypeError) {
// This runtime function is called both from wasm and from e.g. js-to-js
// functions. Hence the "thread in wasm" flag can be either set or not. Both
// is OK, since throwing will trigger unwinding anyway, which sets the flag
@@ -145,7 +145,7 @@ RUNTIME_FUNCTION(Runtime_WasmThrowTypeError) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kWasmTrapTypeError));
+ isolate, NewTypeError(MessageTemplate::kWasmTrapJSTypeError));
}
RUNTIME_FUNCTION(Runtime_WasmThrowCreate) {
@@ -213,6 +213,42 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
return Object(entrypoint);
}
+RUNTIME_FUNCTION(Runtime_WasmCompileWrapper) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+ CONVERT_ARG_HANDLE_CHECKED(WasmExportedFunctionData, function_data, 1);
+ DCHECK(isolate->context().is_null());
+ isolate->set_context(instance->native_context());
+
+ const wasm::WasmModule* module = instance->module();
+ const int function_index = function_data->function_index();
+ const wasm::WasmFunction function = module->functions[function_index];
+ const wasm::FunctionSig* sig = function.sig;
+
+ MaybeHandle<WasmExternalFunction> maybe_result =
+ WasmInstanceObject::GetWasmExternalFunction(isolate, instance,
+ function_index);
+
+ Handle<WasmExternalFunction> result;
+ if (!maybe_result.ToHandle(&result)) {
+ // We expect the result to be empty in the case of the start function,
+ // which is not an exported function to begin with.
+ DCHECK_EQ(function_index, module->start_function_index);
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
+ Handle<Code> wrapper =
+ wasm::JSToWasmWrapperCompilationUnit::CompileSpecificJSToWasmWrapper(
+ isolate, sig, module);
+
+ result->set_code(*wrapper);
+
+ function_data->set_wrapper_code(*wrapper);
+
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
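Runtime_WasmCompileWrapper compiles a signature-specific JS-to-wasm wrapper on demand and installs it in both places that reference it; the lookup only comes back empty for the start function, which is not exported. A compressed, fully mocked sketch of that shape (none of these types are V8's):

#include <memory>

struct Code { int id; };
struct ExternalFunction { std::shared_ptr<Code> code; };
struct ExportedFunctionData { std::shared_ptr<Code> wrapper_code; };

// Mock stand-in for CompileSpecificJSToWasmWrapper.
std::shared_ptr<Code> CompileSpecificWrapper() {
  return std::make_shared<Code>(Code{1});
}

void CompileWrapper(ExternalFunction* maybe_fn, ExportedFunctionData* data) {
  // Empty only for the start function, which is not exported to begin
  // with; in that case there is nothing to patch.
  if (maybe_fn == nullptr) return;
  std::shared_ptr<Code> wrapper = CompileSpecificWrapper();
  maybe_fn->code = wrapper;      // Install on the exported function object...
  data->wrapper_code = wrapper;  // ...and on its WasmExportedFunctionData.
}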
RUNTIME_FUNCTION(Runtime_WasmTriggerTierUp) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -227,32 +263,20 @@ RUNTIME_FUNCTION(Runtime_WasmTriggerTierUp) {
return ReadOnlyRoots(isolate).undefined_value();
}
-// Should be called from within a handle scope
-Handle<JSArrayBuffer> GetArrayBuffer(Handle<WasmInstanceObject> instance,
- Isolate* isolate, uint32_t address) {
- DCHECK(instance->has_memory_object());
- Handle<JSArrayBuffer> array_buffer(instance->memory_object().array_buffer(),
- isolate);
-
- // Should have trapped if address was OOB
- DCHECK_LT(address, array_buffer->byte_length());
- return array_buffer;
-}
-
RUNTIME_FUNCTION(Runtime_WasmAtomicNotify) {
ClearThreadInWasmScope clear_wasm_flag;
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(offset_double, 1);
+ uintptr_t offset = static_cast<uintptr_t>(offset_double);
CONVERT_NUMBER_CHECKED(uint32_t, count, Uint32, args[2]);
- Handle<JSArrayBuffer> array_buffer =
- GetArrayBuffer(instance, isolate, address);
- if (array_buffer->is_shared()) {
- return FutexEmulation::Wake(array_buffer, address, count);
- } else {
- return Smi::FromInt(0);
- }
+ Handle<JSArrayBuffer> array_buffer{instance->memory_object().array_buffer(),
+ isolate};
+ // Should have trapped if address was OOB.
+ DCHECK_LT(offset, array_buffer->byte_length());
+ if (!array_buffer->is_shared()) return Smi::FromInt(0);
+ return FutexEmulation::Wake(array_buffer, offset, count);
}
RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
@@ -260,18 +284,21 @@ RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(offset_double, 1);
+ uintptr_t offset = static_cast<uintptr_t>(offset_double);
CONVERT_NUMBER_CHECKED(int32_t, expected_value, Int32, args[2]);
CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
- Handle<JSArrayBuffer> array_buffer =
- GetArrayBuffer(instance, isolate, address);
+ Handle<JSArrayBuffer> array_buffer{instance->memory_object().array_buffer(),
+ isolate};
+ // Should have trapped if address was OOB.
+ DCHECK_LT(offset, array_buffer->byte_length());
- // Trap if memory is not shared
+ // Trap if memory is not shared.
if (!array_buffer->is_shared()) {
return ThrowWasmError(isolate, MessageTemplate::kAtomicsWaitNotAllowed);
}
- return FutexEmulation::WaitWasm32(isolate, array_buffer, address,
+ return FutexEmulation::WaitWasm32(isolate, array_buffer, offset,
expected_value, timeout_ns->AsInt64());
}
@@ -280,18 +307,21 @@ RUNTIME_FUNCTION(Runtime_WasmI64AtomicWait) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(offset_double, 1);
+ uintptr_t offset = static_cast<uintptr_t>(offset_double);
CONVERT_ARG_HANDLE_CHECKED(BigInt, expected_value, 2);
CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
- Handle<JSArrayBuffer> array_buffer =
- GetArrayBuffer(instance, isolate, address);
+ Handle<JSArrayBuffer> array_buffer{instance->memory_object().array_buffer(),
+ isolate};
+ // Should have trapped if address was OOB.
+ DCHECK_LT(offset, array_buffer->byte_length());
- // Trap if memory is not shared
+ // Trap if memory is not shared.
if (!array_buffer->is_shared()) {
return ThrowWasmError(isolate, MessageTemplate::kAtomicsWaitNotAllowed);
}
- return FutexEmulation::WaitWasm64(isolate, array_buffer, address,
+ return FutexEmulation::WaitWasm64(isolate, array_buffer, offset,
expected_value->AsInt64(),
timeout_ns->AsInt64());
}
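All three atomics runtime functions above now receive the memory offset as a double truncated to uintptr_t (presumably because large offsets need not fit in a Smi), drop the shared GetArrayBuffer helper, and keep the out-of-bounds DCHECK inline. A mocked standalone sketch of the notify path:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct ArrayBuffer {
  size_t byte_length;
  bool is_shared;
};

// Mock for FutexEmulation::Wake; returns the number of waiters woken.
int Wake(ArrayBuffer*, uintptr_t, uint32_t count) {
  return static_cast<int>(count);
}

int WasmAtomicNotify(ArrayBuffer* buffer, double offset_double,
                     uint32_t count) {
  uintptr_t offset = static_cast<uintptr_t>(offset_double);
  // Should have trapped already if the offset was OOB.
  assert(offset < buffer->byte_length);
  // Notifying non-shared memory is a no-op: nobody can be waiting.
  if (!buffer->is_shared) return 0;
  return Wake(buffer, offset, count);
}

int main() {
  ArrayBuffer shared{1024, true};
  ArrayBuffer local{1024, false};
  std::printf("%d %d\n", WasmAtomicNotify(&shared, 16.0, 2),
              WasmAtomicNotify(&local, 16.0, 2));  // Prints "2 0".
}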
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 667b1f0045..a0041ec2c0 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -108,7 +108,7 @@ namespace internal {
F(CompileLazy, 1, 1) \
F(CompileOptimized_Concurrent, 1, 1) \
F(CompileOptimized_NotConcurrent, 1, 1) \
- F(EvictOptimizedCodeSlot, 1, 1) \
+ F(HealOptimizedCodeSlot, 1, 1) \
F(FunctionFirstExecution, 1, 1) \
F(InstantiateAsmJs, 4, 1) \
F(NotifyDeoptimized, 0, 1) \
@@ -276,7 +276,6 @@ namespace internal {
F(GetHoleNaNLower, 0, 1) \
F(GetHoleNaNUpper, 0, 1) \
I(IsSmi, 1, 1) \
- F(IsValidSmi, 1, 1) \
F(MaxSmi, 0, 1) \
F(NumberToStringSlow, 1, 1) \
F(StringParseFloat, 1, 1) \
@@ -388,6 +387,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_REGEXP(F, I) \
I(IsRegExp, 1, 1) \
F(RegExpExec, 4, 1) \
+ F(RegExpExperimentalOneshotExec, 4, 1) \
F(RegExpExecMultiple, 4, 1) \
F(RegExpInitializeAndCompile, 3, 1) \
F(RegExpReplaceRT, 3, 1) \
@@ -507,6 +507,7 @@ namespace internal {
F(HaveSameMap, 2, 1) \
F(HeapObjectVerify, 1, 1) \
F(ICsAreEnabled, 0, 1) \
+ F(InLargeObjectSpace, 1, 1) \
F(InYoungGeneration, 1, 1) \
F(IsAsmWasmCode, 1, 1) \
F(IsBeingInterpreted, 0, 1) \
@@ -518,6 +519,7 @@ namespace internal {
F(RegexpHasBytecode, 2, 1) \
F(RegexpHasNativeCode, 2, 1) \
F(RegexpTypeTag, 1, 1) \
+ F(RegexpIsUnmodified, 1, 1) \
F(MapIteratorProtector, 0, 1) \
F(NeverOptimizeFunction, 1, 1) \
F(NotifyContextDisposed, 0, 1) \
@@ -569,7 +571,7 @@ namespace internal {
F(WasmMemoryGrow, 2, 1) \
F(WasmStackGuard, 0, 1) \
F(WasmThrowCreate, 2, 1) \
- F(WasmThrowTypeError, 0, 1) \
+ F(WasmThrowJSTypeError, 0, 1) \
F(WasmRefFunc, 1, 1) \
F(WasmFunctionTableGet, 3, 1) \
F(WasmFunctionTableSet, 4, 1) \
@@ -579,6 +581,7 @@ namespace internal {
F(WasmTableFill, 4, 1) \
F(WasmIsValidRefValue, 3, 1) \
F(WasmCompileLazy, 2, 1) \
+ F(WasmCompileWrapper, 2, 1) \
F(WasmTriggerTierUp, 1, 1) \
F(WasmDebugBreak, 0, 1) \
F(WasmAllocateRtt, 2, 1)
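Each entry in these intrinsic lists reads F(Name, number-of-arguments, result-size), and the F or I macro argument is supplied by whichever site expands the list. A self-contained illustration of the X-macro pattern; the list name and expansions below are illustrative, not V8's actual ones:

#include <cstdio>

// Each entry is F(Name, number of arguments, result size).
#define MY_INTRINSIC_LIST(F)      \
  F(HealOptimizedCodeSlot, 1, 1)  \
  F(WasmCompileWrapper, 2, 1)

// One expansion declares an id per intrinsic...
enum class FunctionId {
#define DECLARE_ID(Name, nargs, ressize) k##Name,
  MY_INTRINSIC_LIST(DECLARE_ID)
#undef DECLARE_ID
};

// ...another walks the same list to emit metadata.
int main() {
#define PRINT_ENTRY(Name, nargs, ressize) \
  std::printf(#Name ": %d args, result size %d\n", nargs, ressize);
  MY_INTRINSIC_LIST(PRINT_ENTRY)
#undef PRINT_ENTRY
}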
diff --git a/deps/v8/src/snapshot/DIR_METADATA b/deps/v8/src/snapshot/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/snapshot/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+} \ No newline at end of file
diff --git a/deps/v8/src/snapshot/OWNERS b/deps/v8/src/snapshot/OWNERS
index 6fa7f3441c..0cf6544300 100644
--- a/deps/v8/src/snapshot/OWNERS
+++ b/deps/v8/src/snapshot/OWNERS
@@ -2,5 +2,3 @@ delphick@chromium.org
jgruber@chromium.org
leszeks@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 5eec7668a2..f90ef62bad 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -36,9 +36,7 @@ ScriptData::ScriptData(const byte* data, int length)
CodeSerializer::CodeSerializer(Isolate* isolate, uint32_t source_hash)
: Serializer(isolate, Snapshot::kDefaultSerializerFlags),
- source_hash_(source_hash) {
- allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size);
-}
+ source_hash_(source_hash) {}
// static
ScriptCompiler::CachedData* CodeSerializer::Serialize(
@@ -64,11 +62,11 @@ ScriptCompiler::CachedData* CodeSerializer::Serialize(
// Serialize code object.
Handle<String> source(String::cast(script->source()), isolate);
+ HandleScope scope(isolate);
CodeSerializer cs(isolate, SerializedCodeData::SourceHash(
source, script->origin_options()));
DisallowGarbageCollection no_gc;
- cs.reference_map()->AddAttachedReference(
- reinterpret_cast<void*>(source->ptr()));
+ cs.reference_map()->AddAttachedReference(*source);
ScriptData* script_data = cs.SerializeSharedFunctionInfo(info);
if (FLAG_profile_deserialization) {
@@ -100,13 +98,13 @@ ScriptData* CodeSerializer::SerializeSharedFunctionInfo(
return data.GetScriptData();
}
-bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
- if (!ReadOnlyHeap::Contains(obj)) return false;
+bool CodeSerializer::SerializeReadOnlyObject(Handle<HeapObject> obj) {
+ if (!ReadOnlyHeap::Contains(*obj)) return false;
// For objects on the read-only heap, never serialize the object, but instead
// create a back reference that encodes the page number as the chunk_index and
// the offset within the page as the chunk_offset.
- Address address = obj.address();
+ Address address = obj->address();
BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(address);
uint32_t chunk_index = 0;
ReadOnlySpace* const read_only_space = isolate()->heap()->read_only_space();
@@ -115,14 +113,13 @@ bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
++chunk_index;
}
uint32_t chunk_offset = static_cast<uint32_t>(chunk->Offset(address));
- SerializerReference back_reference = SerializerReference::BackReference(
- SnapshotSpace::kReadOnlyHeap, chunk_index, chunk_offset);
- reference_map()->Add(reinterpret_cast<void*>(obj.ptr()), back_reference);
- CHECK(SerializeBackReference(obj));
+ sink_.Put(kReadOnlyHeapRef, "ReadOnlyHeapRef");
+ sink_.PutInt(chunk_index, "ReadOnlyHeapRefChunkIndex");
+ sink_.PutInt(chunk_offset, "ReadOnlyHeapRefChunkOffset");
return true;
}
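SerializeReadOnlyObject now emits a kReadOnlyHeapRef opcode followed directly by the page index and the offset within that page, instead of routing through the reference map. The lookup it performs can be sketched standalone (mocked page type; as in the original, the caller guarantees the address lies in some read-only page):

#include <cstdint>
#include <vector>

struct ReadOnlyPage {
  uintptr_t start;
  uintptr_t end;
};

struct ReadOnlyHeapRef {
  uint32_t chunk_index;   // Which read-only page.
  uint32_t chunk_offset;  // Offset of the object within that page.
};

ReadOnlyHeapRef EncodeReadOnlyHeapRef(const std::vector<ReadOnlyPage>& pages,
                                      uintptr_t address) {
  uint32_t chunk_index = 0;
  // Walk the pages until the one containing `address` is found.
  while (!(pages[chunk_index].start <= address &&
           address < pages[chunk_index].end)) {
    ++chunk_index;
  }
  uint32_t chunk_offset =
      static_cast<uint32_t>(address - pages[chunk_index].start);
  return {chunk_index, chunk_offset};
}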
-void CodeSerializer::SerializeObject(HeapObject obj) {
+void CodeSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
if (SerializeHotObject(obj)) return;
if (SerializeRoot(obj)) return;
@@ -131,60 +128,60 @@ void CodeSerializer::SerializeObject(HeapObject obj) {
if (SerializeReadOnlyObject(obj)) return;
- CHECK(!obj.IsCode());
+ CHECK(!obj->IsCode());
ReadOnlyRoots roots(isolate());
- if (ElideObject(obj)) {
- return SerializeObject(roots.undefined_value());
+ if (ElideObject(*obj)) {
+ return SerializeObject(roots.undefined_value_handle());
}
- if (obj.IsScript()) {
- Script script_obj = Script::cast(obj);
- DCHECK_NE(script_obj.compilation_type(), Script::COMPILATION_TYPE_EVAL);
+ if (obj->IsScript()) {
+ Handle<Script> script_obj = Handle<Script>::cast(obj);
+ DCHECK_NE(script_obj->compilation_type(), Script::COMPILATION_TYPE_EVAL);
// We want to differentiate between undefined and uninitialized_symbol for
// context_data for now. It is a hack to allow debugging for scripts that are
// included as a part of custom snapshot. (see debug::Script::IsEmbedded())
- Object context_data = script_obj.context_data();
+ Object context_data = script_obj->context_data();
if (context_data != roots.undefined_value() &&
context_data != roots.uninitialized_symbol()) {
- script_obj.set_context_data(roots.undefined_value());
+ script_obj->set_context_data(roots.undefined_value());
}
// We don't want to serialize host options to avoid serializing unnecessary
// object graph.
- FixedArray host_options = script_obj.host_defined_options();
- script_obj.set_host_defined_options(roots.empty_fixed_array());
+ FixedArray host_options = script_obj->host_defined_options();
+ script_obj->set_host_defined_options(roots.empty_fixed_array());
SerializeGeneric(obj);
- script_obj.set_host_defined_options(host_options);
- script_obj.set_context_data(context_data);
+ script_obj->set_host_defined_options(host_options);
+ script_obj->set_context_data(context_data);
return;
}
- if (obj.IsSharedFunctionInfo()) {
- SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
+ if (obj->IsSharedFunctionInfo()) {
+ Handle<SharedFunctionInfo> sfi = Handle<SharedFunctionInfo>::cast(obj);
// TODO(7110): Enable serializing of Asm modules once the AsmWasmData
// is context independent.
- DCHECK(!sfi.IsApiFunction() && !sfi.HasAsmWasmData());
+ DCHECK(!sfi->IsApiFunction() && !sfi->HasAsmWasmData());
DebugInfo debug_info;
BytecodeArray debug_bytecode_array;
- if (sfi.HasDebugInfo()) {
+ if (sfi->HasDebugInfo()) {
// Clear debug info.
- debug_info = sfi.GetDebugInfo();
+ debug_info = sfi->GetDebugInfo();
if (debug_info.HasInstrumentedBytecodeArray()) {
debug_bytecode_array = debug_info.DebugBytecodeArray();
- sfi.SetDebugBytecodeArray(debug_info.OriginalBytecodeArray());
+ sfi->SetDebugBytecodeArray(debug_info.OriginalBytecodeArray());
}
- sfi.set_script_or_debug_info(debug_info.script());
+ sfi->set_script_or_debug_info(debug_info.script(), kReleaseStore);
}
- DCHECK(!sfi.HasDebugInfo());
+ DCHECK(!sfi->HasDebugInfo());
SerializeGeneric(obj);
// Restore debug info
if (!debug_info.is_null()) {
- sfi.set_script_or_debug_info(debug_info);
+ sfi->set_script_or_debug_info(debug_info, kReleaseStore);
if (!debug_bytecode_array.is_null()) {
- sfi.SetDebugBytecodeArray(debug_bytecode_array);
+ sfi->SetDebugBytecodeArray(debug_bytecode_array);
}
}
return;
@@ -197,24 +194,24 @@ void CodeSerializer::SerializeObject(HeapObject obj) {
// --interpreted-frames-native-stack is on. See v8:9122 for more context
#ifndef V8_TARGET_ARCH_ARM
if (V8_UNLIKELY(FLAG_interpreted_frames_native_stack) &&
- obj.IsInterpreterData()) {
- obj = InterpreterData::cast(obj).bytecode_array();
+ obj->IsInterpreterData()) {
+ obj = handle(InterpreterData::cast(*obj).bytecode_array(), isolate());
}
#endif // V8_TARGET_ARCH_ARM
// Past this point we should not see any (context-specific) maps anymore.
- CHECK(!obj.IsMap());
+ CHECK(!obj->IsMap());
// There should be no references to the global object embedded.
- CHECK(!obj.IsJSGlobalProxy() && !obj.IsJSGlobalObject());
+ CHECK(!obj->IsJSGlobalProxy() && !obj->IsJSGlobalObject());
// Embedded FixedArrays that need rehashing must support rehashing.
- CHECK_IMPLIES(obj.NeedsRehashing(), obj.CanBeRehashed());
+ CHECK_IMPLIES(obj->NeedsRehashing(), obj->CanBeRehashed());
// We expect no instantiated function objects or contexts.
- CHECK(!obj.IsJSFunction() && !obj.IsContext());
+ CHECK(!obj->IsJSFunction() && !obj->IsContext());
SerializeGeneric(obj);
}
-void CodeSerializer::SerializeGeneric(HeapObject heap_object) {
+void CodeSerializer::SerializeGeneric(Handle<HeapObject> heap_object) {
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this, heap_object, &sink_);
serializer.Serialize();
@@ -265,26 +262,27 @@ void CreateInterpreterDataForDeserializedCode(Isolate* isolate,
namespace {
class StressOffThreadDeserializeThread final : public base::Thread {
public:
- explicit StressOffThreadDeserializeThread(LocalIsolate* local_isolate,
+ explicit StressOffThreadDeserializeThread(Isolate* isolate,
const SerializedCodeData* scd)
: Thread(
base::Thread::Options("StressOffThreadDeserializeThread", 2 * MB)),
- local_isolate_(local_isolate),
+ isolate_(isolate),
scd_(scd) {}
MaybeHandle<SharedFunctionInfo> maybe_result() const { return maybe_result_; }
void Run() final {
+ LocalIsolate local_isolate(isolate_, ThreadKind::kBackground);
MaybeHandle<SharedFunctionInfo> local_maybe_result =
ObjectDeserializer::DeserializeSharedFunctionInfoOffThread(
- local_isolate_, scd_, local_isolate_->factory()->empty_string());
+ &local_isolate, scd_, local_isolate.factory()->empty_string());
maybe_result_ =
- local_isolate_->heap()->NewPersistentMaybeHandle(local_maybe_result);
+ local_isolate.heap()->NewPersistentMaybeHandle(local_maybe_result);
}
private:
- LocalIsolate* local_isolate_;
+ Isolate* isolate_;
const SerializedCodeData* scd_;
MaybeHandle<SharedFunctionInfo> maybe_result_;
};
@@ -315,9 +313,7 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
MaybeHandle<SharedFunctionInfo> maybe_result;
// TODO(leszeks): Add LocalHeap support to deserializer
if (false && FLAG_stress_background_compile) {
- LocalIsolate local_isolate(isolate);
-
- StressOffThreadDeserializeThread thread(&local_isolate, &scd);
+ StressOffThreadDeserializeThread thread(isolate, &scd);
CHECK(thread.Start());
thread.Join();
@@ -408,44 +404,29 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
const CodeSerializer* cs) {
DisallowGarbageCollection no_gc;
- std::vector<Reservation> reservations = cs->EncodeReservations();
// Calculate sizes.
- uint32_t reservation_size =
- static_cast<uint32_t>(reservations.size()) * kUInt32Size;
- uint32_t num_stub_keys = 0; // TODO(jgruber): Remove.
- uint32_t stub_keys_size = num_stub_keys * kUInt32Size;
- uint32_t payload_offset = kHeaderSize + reservation_size + stub_keys_size;
- uint32_t padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
- uint32_t size =
- padded_payload_offset + static_cast<uint32_t>(payload->size());
+ uint32_t size = kHeaderSize + static_cast<uint32_t>(payload->size());
DCHECK(IsAligned(size, kPointerAlignment));
// Allocate backing store and create result data.
AllocateData(size);
// Zero out pre-payload data. Part of that is only used for padding.
- memset(data_, 0, padded_payload_offset);
+ memset(data_, 0, kHeaderSize);
// Set header values.
SetMagicNumber();
SetHeaderValue(kVersionHashOffset, Version::Hash());
SetHeaderValue(kSourceHashOffset, cs->source_hash());
SetHeaderValue(kFlagHashOffset, FlagList::Hash());
- SetHeaderValue(kNumReservationsOffset,
- static_cast<uint32_t>(reservations.size()));
SetHeaderValue(kPayloadLengthOffset, static_cast<uint32_t>(payload->size()));
// Zero out any padding in the header.
memset(data_ + kUnalignedHeaderSize, 0, kHeaderSize - kUnalignedHeaderSize);
- // Copy reservation chunk sizes.
- CopyBytes(data_ + kHeaderSize,
- reinterpret_cast<const byte*>(reservations.data()),
- reservation_size);
-
// Copy serialized data.
- CopyBytes(data_ + padded_payload_offset, payload->data(),
+ CopyBytes(data_ + kHeaderSize, payload->data(),
static_cast<size_t>(payload->size()));
SetHeaderValue(kChecksumOffset, Checksum(ChecksummedContent()));
@@ -464,10 +445,7 @@ SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
if (version_hash != Version::Hash()) return VERSION_MISMATCH;
if (source_hash != expected_source_hash) return SOURCE_MISMATCH;
if (flags_hash != FlagList::Hash()) return FLAGS_MISMATCH;
- uint32_t max_payload_length =
- this->size_ -
- POINTER_SIZE_ALIGN(kHeaderSize +
- GetHeaderValue(kNumReservationsOffset) * kInt32Size);
+ uint32_t max_payload_length = this->size_ - kHeaderSize;
if (payload_length > max_payload_length) return LENGTH_MISMATCH;
if (Checksum(ChecksummedContent()) != c) return CHECKSUM_MISMATCH;
return CHECK_SUCCESS;
@@ -494,20 +472,8 @@ ScriptData* SerializedCodeData::GetScriptData() {
return result;
}
-std::vector<SerializedData::Reservation> SerializedCodeData::Reservations()
- const {
- uint32_t size = GetHeaderValue(kNumReservationsOffset);
- std::vector<Reservation> reservations(size);
- memcpy(reservations.data(), data_ + kHeaderSize,
- size * sizeof(SerializedData::Reservation));
- return reservations;
-}
-
Vector<const byte> SerializedCodeData::Payload() const {
- int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
- int payload_offset = kHeaderSize + reservations_size;
- int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
- const byte* payload = data_ + padded_payload_offset;
+ const byte* payload = data_ + kHeaderSize;
DCHECK(IsAligned(reinterpret_cast<intptr_t>(payload), kPointerAlignment));
int length = GetHeaderValue(kPayloadLengthOffset);
DCHECK_EQ(data_ + size_, payload + length);
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index 2daf5200ec..8ca9721d16 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -7,6 +7,7 @@
#include "src/base/macros.h"
#include "src/snapshot/serializer.h"
+#include "src/snapshot/snapshot-data.h"
namespace v8 {
namespace internal {
@@ -17,6 +18,8 @@ class V8_EXPORT_PRIVATE ScriptData {
~ScriptData() {
if (owns_data_) DeleteArray(data_);
}
+ ScriptData(const ScriptData&) = delete;
+ ScriptData& operator=(const ScriptData&) = delete;
const byte* data() const { return data_; }
int length() const { return length_; }
@@ -39,12 +42,12 @@ class V8_EXPORT_PRIVATE ScriptData {
bool rejected_ : 1;
const byte* data_;
int length_;
-
- DISALLOW_COPY_AND_ASSIGN(ScriptData);
};
class CodeSerializer : public Serializer {
public:
+ CodeSerializer(const CodeSerializer&) = delete;
+ CodeSerializer& operator=(const CodeSerializer&) = delete;
V8_EXPORT_PRIVATE static ScriptCompiler::CachedData* Serialize(
Handle<SharedFunctionInfo> info);
@@ -61,16 +64,15 @@ class CodeSerializer : public Serializer {
~CodeSerializer() override { OutputStatistics("CodeSerializer"); }
virtual bool ElideObject(Object obj) { return false; }
- void SerializeGeneric(HeapObject heap_object);
+ void SerializeGeneric(Handle<HeapObject> heap_object);
private:
- void SerializeObject(HeapObject o) override;
+ void SerializeObjectImpl(Handle<HeapObject> o) override;
- bool SerializeReadOnlyObject(HeapObject obj);
+ bool SerializeReadOnlyObject(Handle<HeapObject> obj);
DISALLOW_HEAP_ALLOCATION(no_gc_)
uint32_t source_hash_;
- DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
};
// Wrapper around ScriptData to provide code-serializer-specific functionality.
@@ -92,18 +94,13 @@ class SerializedCodeData : public SerializedData {
// [1] version hash
// [2] source hash
// [3] flag hash
- // [4] number of reservation size entries
- // [5] payload length
- // [6] payload checksum
- // ... reservations
- // ... code stub keys
+ // [4] payload length
+ // [5] payload checksum
// ... serialized payload
static const uint32_t kVersionHashOffset = kMagicNumberOffset + kUInt32Size;
static const uint32_t kSourceHashOffset = kVersionHashOffset + kUInt32Size;
static const uint32_t kFlagHashOffset = kSourceHashOffset + kUInt32Size;
- static const uint32_t kNumReservationsOffset = kFlagHashOffset + kUInt32Size;
- static const uint32_t kPayloadLengthOffset =
- kNumReservationsOffset + kUInt32Size;
+ static const uint32_t kPayloadLengthOffset = kFlagHashOffset + kUInt32Size;
static const uint32_t kChecksumOffset = kPayloadLengthOffset + kUInt32Size;
static const uint32_t kUnalignedHeaderSize = kChecksumOffset + kUInt32Size;
static const uint32_t kHeaderSize = POINTER_SIZE_ALIGN(kUnalignedHeaderSize);
@@ -120,7 +117,6 @@ class SerializedCodeData : public SerializedData {
// Return ScriptData object and relinquish ownership over it to the caller.
ScriptData* GetScriptData();
- std::vector<Reservation> Reservations() const;
Vector<const byte> Payload() const;
static uint32_t SourceHash(Handle<String> source,
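With reservations gone, the header is six consecutive u32 fields and the payload starts at the pointer-aligned header size. A standalone sketch of the offset arithmetic, assuming kMagicNumberOffset is 0 and 8-byte pointers (both assumptions of this sketch, not taken from the diff):

#include <cstdint>

constexpr uint32_t kUInt32Size = 4;
constexpr uint32_t kPointerSize = 8;
constexpr uint32_t PointerSizeAlign(uint32_t x) {
  return (x + kPointerSize - 1) & ~(kPointerSize - 1);
}

constexpr uint32_t kMagicNumberOffset = 0;  // Assumed for this sketch.
constexpr uint32_t kVersionHashOffset = kMagicNumberOffset + kUInt32Size;
constexpr uint32_t kSourceHashOffset = kVersionHashOffset + kUInt32Size;
constexpr uint32_t kFlagHashOffset = kSourceHashOffset + kUInt32Size;
constexpr uint32_t kPayloadLengthOffset = kFlagHashOffset + kUInt32Size;
constexpr uint32_t kChecksumOffset = kPayloadLengthOffset + kUInt32Size;
constexpr uint32_t kUnalignedHeaderSize = kChecksumOffset + kUInt32Size;
constexpr uint32_t kHeaderSize = PointerSizeAlign(kUnalignedHeaderSize);

static_assert(kHeaderSize == 24, "six u32 fields, already 8-byte aligned");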
diff --git a/deps/v8/src/snapshot/context-deserializer.cc b/deps/v8/src/snapshot/context-deserializer.cc
index ae0865ee28..5ae6dcd0eb 100644
--- a/deps/v8/src/snapshot/context-deserializer.cc
+++ b/deps/v8/src/snapshot/context-deserializer.cc
@@ -5,6 +5,7 @@
#include "src/snapshot/context-deserializer.h"
#include "src/api/api-inl.h"
+#include "src/common/assert-scope.h"
#include "src/heap/heap-inl.h"
#include "src/objects/slots.h"
#include "src/snapshot/snapshot.h"
@@ -16,8 +17,7 @@ MaybeHandle<Context> ContextDeserializer::DeserializeContext(
Isolate* isolate, const SnapshotData* data, bool can_rehash,
Handle<JSGlobalProxy> global_proxy,
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
- ContextDeserializer d(data);
- d.SetRehashability(can_rehash);
+ ContextDeserializer d(isolate, data, can_rehash);
MaybeHandle<Object> maybe_result =
d.Deserialize(isolate, global_proxy, embedder_fields_deserializer);
@@ -30,11 +30,6 @@ MaybeHandle<Context> ContextDeserializer::DeserializeContext(
MaybeHandle<Object> ContextDeserializer::Deserialize(
Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
- Initialize(isolate);
- if (!allocator()->ReserveSpace()) {
- V8::FatalProcessOutOfMemory(isolate, "ContextDeserializer");
- }
-
// Replace serialized references to the global proxy and its map with the
// given global proxy and its map.
AddAttachedObject(global_proxy);
@@ -42,26 +37,17 @@ MaybeHandle<Object> ContextDeserializer::Deserialize(
Handle<Object> result;
{
- DisallowGarbageCollection no_gc;
- // Keep track of the code space start and end pointers in case new
- // code objects were unserialized
- CodeSpace* code_space = isolate->heap()->code_space();
- Address start_address = code_space->top();
- Object root;
- VisitRootPointer(Root::kStartupObjectCache, nullptr, FullObjectSlot(&root));
+ // There's no code deserialized here. If this assert fires, that has
+ // changed and logging should be added to notify the profiler et al. of
+ // the new code, which also has to be flushed from the instruction cache.
+ DisallowCodeAllocation no_code_allocation;
+
+ result = ReadObject();
DeserializeDeferredObjects();
DeserializeEmbedderFields(embedder_fields_deserializer);
- allocator()->RegisterDeserializedObjectsForBlackAllocation();
-
- // There's no code deserialized here. If this assert fires then that's
- // changed and logging should be added to notify the profiler et al of the
- // new code, which also has to be flushed from instruction cache.
- CHECK_EQ(start_address, code_space->top());
-
LogNewMapEvents();
-
- result = handle(root, isolate);
+ WeakenDescriptorArrays();
}
if (FLAG_rehash_snapshot && can_rehash()) Rehash();
@@ -74,6 +60,7 @@ void ContextDeserializer::SetupOffHeapArrayBufferBackingStores() {
for (Handle<JSArrayBuffer> buffer : new_off_heap_array_buffers()) {
uint32_t store_index = buffer->GetBackingStoreRefForDeserialization();
auto bs = backing_store(store_index);
+ buffer->AllocateExternalPointerEntries(isolate());
SharedFlag shared =
bs && bs->is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared;
buffer->Setup(shared, bs);
@@ -90,9 +77,7 @@ void ContextDeserializer::DeserializeEmbedderFields(
for (int code = source()->Get(); code != kSynchronize;
code = source()->Get()) {
HandleScope scope(isolate());
- SnapshotSpace space = NewObject::Decode(code);
- Handle<JSObject> obj(JSObject::cast(GetBackReferencedObject(space)),
- isolate());
+ Handle<JSObject> obj = Handle<JSObject>::cast(GetBackReferencedObject());
int index = source()->GetInt();
int size = source()->GetInt();
// TODO(yangguo,jgruber): Turn this into a reusable shared buffer.
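The DisallowCodeAllocation scope used in Deserialize above replaces the old start/end code-space pointer comparison. V8's real scope types differ in detail, but the general debug-scope pattern looks roughly like this:

#include <cassert>

class DisallowCodeAllocation {
 public:
  DisallowCodeAllocation() { ++depth_; }
  ~DisallowCodeAllocation() { --depth_; }
  static bool IsAllowed() { return depth_ == 0; }

 private:
  static thread_local int depth_;
};
thread_local int DisallowCodeAllocation::depth_ = 0;

void AllocateCode() {
  // Fires whenever code is allocated inside an active scope.
  assert(DisallowCodeAllocation::IsAllowed());
}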
diff --git a/deps/v8/src/snapshot/context-deserializer.h b/deps/v8/src/snapshot/context-deserializer.h
index 3854902238..6552a0fe45 100644
--- a/deps/v8/src/snapshot/context-deserializer.h
+++ b/deps/v8/src/snapshot/context-deserializer.h
@@ -6,12 +6,14 @@
#define V8_SNAPSHOT_CONTEXT_DESERIALIZER_H_
#include "src/snapshot/deserializer.h"
+#include "src/snapshot/snapshot-data.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
class Context;
+class Isolate;
// Deserializes the context-dependent object graph rooted at a given object.
// The ContextDeserializer is not expected to deserialize any code objects.
@@ -23,8 +25,10 @@ class V8_EXPORT_PRIVATE ContextDeserializer final : public Deserializer {
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer);
private:
- explicit ContextDeserializer(const SnapshotData* data)
- : Deserializer(data, false) {}
+ explicit ContextDeserializer(Isolate* isolate, const SnapshotData* data,
+ bool can_rehash)
+ : Deserializer(isolate, data->Payload(), data->GetMagicNumber(), false,
+ can_rehash) {}
// Deserialize a single object and the objects reachable from it.
MaybeHandle<Object> Deserialize(
diff --git a/deps/v8/src/snapshot/context-serializer.cc b/deps/v8/src/snapshot/context-serializer.cc
index 931ee64176..8060f2845c 100644
--- a/deps/v8/src/snapshot/context-serializer.cc
+++ b/deps/v8/src/snapshot/context-serializer.cc
@@ -74,7 +74,6 @@ ContextSerializer::ContextSerializer(
serialize_embedder_fields_(callback),
can_be_rehashed_(true) {
InitializeCodeAddressMap();
- allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size);
}
ContextSerializer::~ContextSerializer() {
@@ -88,10 +87,8 @@ void ContextSerializer::Serialize(Context* o,
// Upon deserialization, references to the global proxy and its map will be
// replaced.
- reference_map()->AddAttachedReference(
- reinterpret_cast<void*>(context_.global_proxy().ptr()));
- reference_map()->AddAttachedReference(
- reinterpret_cast<void*>(context_.global_proxy().map().ptr()));
+ reference_map()->AddAttachedReference(context_.global_proxy());
+ reference_map()->AddAttachedReference(context_.global_proxy().map());
// The bootstrap snapshot has a code-stub context. When serializing the
// context snapshot, it is chained into the weak context list on the isolate
@@ -123,7 +120,7 @@ void ContextSerializer::Serialize(Context* o,
Pad();
}
-void ContextSerializer::SerializeObject(HeapObject obj) {
+void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
DCHECK(!ObjectIsBytecodeHandler(obj)); // Only referenced in dispatch table.
if (!allow_active_isolate_for_testing()) {
@@ -132,7 +129,7 @@ void ContextSerializer::SerializeObject(HeapObject obj) {
// But in test scenarios there is no way to avoid this. Since we only
// serialize a single context in these cases, and this context does not
// have to be executable, we can simply ignore this.
- DCHECK_IMPLIES(obj.IsNativeContext(), obj == context_);
+ DCHECK_IMPLIES(obj->IsNativeContext(), *obj == context_);
}
if (SerializeHotObject(obj)) return;
@@ -145,7 +142,7 @@ void ContextSerializer::SerializeObject(HeapObject obj) {
return;
}
- if (ShouldBeInTheStartupObjectCache(obj)) {
+ if (ShouldBeInTheStartupObjectCache(*obj)) {
startup_serializer_->SerializeUsingStartupObjectCache(&sink_, obj);
return;
}
@@ -156,31 +153,33 @@ void ContextSerializer::SerializeObject(HeapObject obj) {
DCHECK(!startup_serializer_->ReferenceMapContains(obj));
// All the internalized strings that the context snapshot needs should be
// either in the root table or in the startup object cache.
- DCHECK(!obj.IsInternalizedString());
+ DCHECK(!obj->IsInternalizedString());
// Function and object templates are not context specific.
- DCHECK(!obj.IsTemplateInfo());
+ DCHECK(!obj->IsTemplateInfo());
// Clear literal boilerplates and feedback.
- if (obj.IsFeedbackVector()) FeedbackVector::cast(obj).ClearSlots(isolate());
+ if (obj->IsFeedbackVector()) {
+ Handle<FeedbackVector>::cast(obj)->ClearSlots(isolate());
+ }
// Clear InterruptBudget when serializing FeedbackCell.
- if (obj.IsFeedbackCell()) {
- FeedbackCell::cast(obj).SetInitialInterruptBudget();
+ if (obj->IsFeedbackCell()) {
+ Handle<FeedbackCell>::cast(obj)->SetInitialInterruptBudget();
}
if (SerializeJSObjectWithEmbedderFields(obj)) {
return;
}
- if (obj.IsJSFunction()) {
+ if (obj->IsJSFunction()) {
// Unconditionally reset the JSFunction to its SFI's code, since we can't
// serialize optimized code anyway.
- JSFunction closure = JSFunction::cast(obj);
- closure.ResetIfBytecodeFlushed();
- if (closure.is_compiled()) closure.set_code(closure.shared().GetCode());
+ Handle<JSFunction> closure = Handle<JSFunction>::cast(obj);
+ closure->ResetIfBytecodeFlushed();
+ if (closure->is_compiled()) closure->set_code(closure->shared().GetCode());
}
- CheckRehashability(obj);
+ CheckRehashability(*obj);
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this, obj, &sink_);
@@ -196,29 +195,27 @@ bool ContextSerializer::ShouldBeInTheStartupObjectCache(HeapObject o) {
return o.IsName() || o.IsSharedFunctionInfo() || o.IsHeapNumber() ||
o.IsCode() || o.IsScopeInfo() || o.IsAccessorInfo() ||
o.IsTemplateInfo() || o.IsClassPositions() ||
- o.map() == ReadOnlyRoots(startup_serializer_->isolate())
- .fixed_cow_array_map();
+ o.map() == ReadOnlyRoots(isolate()).fixed_cow_array_map();
}
namespace {
bool DataIsEmpty(const StartupData& data) { return data.raw_size == 0; }
} // anonymous namespace
-bool ContextSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
- if (!obj.IsJSObject()) return false;
- JSObject js_obj = JSObject::cast(obj);
- int embedder_fields_count = js_obj.GetEmbedderFieldCount();
+bool ContextSerializer::SerializeJSObjectWithEmbedderFields(
+ Handle<HeapObject> obj) {
+ if (!obj->IsJSObject()) return false;
+ Handle<JSObject> js_obj = Handle<JSObject>::cast(obj);
+ int embedder_fields_count = js_obj->GetEmbedderFieldCount();
if (embedder_fields_count == 0) return false;
CHECK_GT(embedder_fields_count, 0);
- DCHECK(!js_obj.NeedsRehashing());
+ DCHECK(!js_obj->NeedsRehashing());
DisallowGarbageCollection no_gc;
DisallowJavascriptExecution no_js(isolate());
DisallowCompilation no_compile(isolate());
- HandleScope scope(isolate());
- Handle<JSObject> obj_handle(js_obj, isolate());
- v8::Local<v8::Object> api_obj = v8::Utils::ToLocal(obj_handle);
+ v8::Local<v8::Object> api_obj = v8::Utils::ToLocal(js_obj);
std::vector<EmbedderDataSlot::RawData> original_embedder_values;
std::vector<StartupData> serialized_data;
@@ -228,7 +225,7 @@ bool ContextSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
// serializer. For aligned pointers, call the serialize callback. Hold
// onto the result.
for (int i = 0; i < embedder_fields_count; i++) {
- EmbedderDataSlot embedder_data_slot(js_obj, i);
+ EmbedderDataSlot embedder_data_slot(*js_obj, i);
original_embedder_values.emplace_back(
embedder_data_slot.load_raw(isolate(), no_gc));
Object object = embedder_data_slot.load_tagged();
@@ -257,7 +254,7 @@ bool ContextSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
// with embedder callbacks.
for (int i = 0; i < embedder_fields_count; i++) {
if (!DataIsEmpty(serialized_data[i])) {
- EmbedderDataSlot(js_obj, i).store_raw(isolate(), kNullAddress, no_gc);
+ EmbedderDataSlot(*js_obj, i).store_raw(isolate(), kNullAddress, no_gc);
}
}
@@ -266,9 +263,10 @@ bool ContextSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
ObjectSerializer(this, js_obj, &sink_).Serialize();
// 4) Obtain back reference for the serialized object.
- SerializerReference reference =
- reference_map()->LookupReference(reinterpret_cast<void*>(js_obj.ptr()));
- DCHECK(reference.is_back_reference());
+ const SerializerReference* reference =
+ reference_map()->LookupReference(js_obj);
+ DCHECK_NOT_NULL(reference);
+ DCHECK(reference->is_back_reference());
// 5) Write data returned by the embedder callbacks into a separate sink,
// headed by the back reference. Restore the original embedder fields.
@@ -276,13 +274,10 @@ bool ContextSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
StartupData data = serialized_data[i];
if (DataIsEmpty(data)) continue;
// Restore original values from cleared fields.
- EmbedderDataSlot(js_obj, i).store_raw(isolate(),
- original_embedder_values[i], no_gc);
- embedder_fields_sink_.Put(kNewObject + static_cast<int>(reference.space()),
- "embedder field holder");
- embedder_fields_sink_.PutInt(reference.chunk_index(), "BackRefChunkIndex");
- embedder_fields_sink_.PutInt(reference.chunk_offset(),
- "BackRefChunkOffset");
+ EmbedderDataSlot(*js_obj, i)
+ .store_raw(isolate(), original_embedder_values[i], no_gc);
+ embedder_fields_sink_.Put(kNewObject, "embedder field holder");
+ embedder_fields_sink_.PutInt(reference->back_ref_index(), "BackRefIndex");
embedder_fields_sink_.PutInt(i, "embedder field index");
embedder_fields_sink_.PutInt(data.raw_size, "embedder fields data size");
embedder_fields_sink_.PutRaw(reinterpret_cast<const byte*>(data.data),
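The numbered steps 1-5 in SerializeJSObjectWithEmbedderFields amount to: snapshot the raw slot values, clear every slot the embedder callback handled, serialize the object with those slots cleared, write the callback payloads keyed by the object's back-reference index, and restore the live object's original values. A compressed, fully mocked sketch of that round trip:

#include <cstdint>
#include <vector>

struct StartupData {
  const char* data;
  int raw_size;
};

bool DataIsEmpty(const StartupData& data) { return data.raw_size == 0; }

struct MockJSObject {
  std::vector<uintptr_t> embedder_fields;
};

using SerializeCallback = StartupData (*)(const MockJSObject&, int index);

// Returns the callback payloads to write after the serialized object.
std::vector<StartupData> SerializeWithEmbedderFields(MockJSObject& obj,
                                                     SerializeCallback cb) {
  // 1) Save the raw slot values so the live object can be restored.
  std::vector<uintptr_t> original = obj.embedder_fields;
  std::vector<StartupData> payloads;
  for (int i = 0; i < static_cast<int>(obj.embedder_fields.size()); i++) {
    payloads.push_back(cb(obj, i));
    // 2) Clear slots the callback handled, so the main serializer never
    //    sees embedder pointers it cannot understand.
    if (!DataIsEmpty(payloads.back())) obj.embedder_fields[i] = 0;
  }
  // 3) ... serialize `obj` here, with the cleared fields ...
  // 4/5) Payloads are emitted keyed by the object's back-ref index, and
  //      the original raw values are restored on the live object.
  obj.embedder_fields = original;
  return payloads;
}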
diff --git a/deps/v8/src/snapshot/context-serializer.h b/deps/v8/src/snapshot/context-serializer.h
index af8de77d80..03f195f3be 100644
--- a/deps/v8/src/snapshot/context-serializer.h
+++ b/deps/v8/src/snapshot/context-serializer.h
@@ -21,6 +21,8 @@ class V8_EXPORT_PRIVATE ContextSerializer : public Serializer {
v8::SerializeEmbedderFieldsCallback callback);
~ContextSerializer() override;
+ ContextSerializer(const ContextSerializer&) = delete;
+ ContextSerializer& operator=(const ContextSerializer&) = delete;
// Serialize the objects reachable from a single object pointer.
void Serialize(Context* o, const DisallowGarbageCollection& no_gc);
@@ -28,9 +30,9 @@ class V8_EXPORT_PRIVATE ContextSerializer : public Serializer {
bool can_be_rehashed() const { return can_be_rehashed_; }
private:
- void SerializeObject(HeapObject o) override;
+ void SerializeObjectImpl(Handle<HeapObject> o) override;
bool ShouldBeInTheStartupObjectCache(HeapObject o);
- bool SerializeJSObjectWithEmbedderFields(Object obj);
+ bool SerializeJSObjectWithEmbedderFields(Handle<HeapObject> obj);
void CheckRehashability(HeapObject obj);
StartupSerializer* startup_serializer_;
@@ -42,7 +44,6 @@ class V8_EXPORT_PRIVATE ContextSerializer : public Serializer {
// Used to store serialized data for embedder fields.
SnapshotByteSink embedder_fields_sink_;
- DISALLOW_COPY_AND_ASSIGN(ContextSerializer);
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/deserializer-allocator.cc b/deps/v8/src/snapshot/deserializer-allocator.cc
deleted file mode 100644
index 7ad49b0867..0000000000
--- a/deps/v8/src/snapshot/deserializer-allocator.cc
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/snapshot/deserializer-allocator.h"
-
-#include "src/heap/heap-inl.h" // crbug.com/v8/8499
-#include "src/heap/memory-chunk.h"
-#include "src/roots/roots.h"
-
-namespace v8 {
-namespace internal {
-
-void DeserializerAllocator::Initialize(Heap* heap) {
- heap_ = heap;
- roots_ = ReadOnlyRoots(heap);
-}
-
-// We know the space requirements before deserialization and can
-// pre-allocate that reserved space. During deserialization, all we need
-// to do is to bump up the pointer for each space in the reserved
-// space. This is also used for fixing back references.
-// We may have to split up the pre-allocation into several chunks
-// because it would not fit onto a single page. We do not have to keep
-// track of when to move to the next chunk. An opcode will signal this.
-// Since multiple large objects cannot be folded into one large object
-// space allocation, we have to do an actual allocation when deserializing
-// each large object. Instead of tracking offset for back references, we
-// reference large objects by index.
-Address DeserializerAllocator::AllocateRaw(SnapshotSpace space, int size) {
- const int space_number = static_cast<int>(space);
- if (space == SnapshotSpace::kLargeObject) {
- // Note that we currently do not support deserialization of large code
- // objects.
- HeapObject obj;
- AlwaysAllocateScope scope(heap_);
- OldLargeObjectSpace* lo_space = heap_->lo_space();
- AllocationResult result = lo_space->AllocateRaw(size);
- obj = result.ToObjectChecked();
- deserialized_large_objects_.push_back(obj);
- return obj.address();
- } else if (space == SnapshotSpace::kMap) {
- DCHECK_EQ(Map::kSize, size);
- return allocated_maps_[next_map_index_++];
- } else {
- DCHECK(IsPreAllocatedSpace(space));
- Address address = high_water_[space_number];
- DCHECK_NE(address, kNullAddress);
- high_water_[space_number] += size;
-#ifdef DEBUG
- // Assert that the current reserved chunk is still big enough.
- const Heap::Reservation& reservation = reservations_[space_number];
- int chunk_index = current_chunk_[space_number];
- DCHECK_LE(high_water_[space_number], reservation[chunk_index].end);
-#endif
-#ifndef V8_ENABLE_THIRD_PARTY_HEAP
- if (space == SnapshotSpace::kCode)
- MemoryChunk::FromAddress(address)
- ->GetCodeObjectRegistry()
- ->RegisterNewlyAllocatedCodeObject(address);
-#endif
- return address;
- }
-}
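The comment above documents the (now removed) bump-pointer discipline. A hedged, self-contained sketch of that scheme, with illustrative names rather than V8's own:

    // Each space owns GC-reserved chunks; allocating is a pointer bump, and
    // a dedicated opcode (not shown) signals moving to the next chunk.
    struct Chunk {
      Address start;
      Address end;
    };

    Address BumpAllocate(const Chunk& chunk, Address* high_water, int size) {
      Address result = *high_water;
      *high_water += size;
      DCHECK_LE(*high_water, chunk.end);  // reserved chunk must still fit
      return result;
    }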
-
-Address DeserializerAllocator::Allocate(SnapshotSpace space, int size) {
-#ifdef DEBUG
- if (previous_allocation_start_ != kNullAddress) {
- // Make sure that the previous allocation is initialized sufficiently to
- // be iterated over by the GC.
- Address object_address = previous_allocation_start_;
- Address previous_allocation_end =
- previous_allocation_start_ + previous_allocation_size_;
- while (object_address != previous_allocation_end) {
- int object_size = HeapObject::FromAddress(object_address).Size();
- DCHECK_GT(object_size, 0);
- DCHECK_LE(object_address + object_size, previous_allocation_end);
- object_address += object_size;
- }
- }
-#endif
-
- Address address;
- HeapObject obj;
- // TODO(steveblackburn) Note that the third party heap allocates objects
- // at reservation time, which means alignment must be acted on at
- // reservation time, not here. Since the current encoding does not
- // inform the reservation of the alignment, it must be conservatively
- // aligned.
- //
- // A more general approach will be to avoid reservation altogether, and
- // instead of chunk index/offset encoding, simply encode backreferences
- // by index (this can be optimized by applying something like register
- // allocation to keep the metadata needed to record the in-flight
- // backreferences minimal). This has the significant advantage of
- // abstracting away the details of the memory allocator from this code.
- // At each allocation, the regular allocator performs allocation,
- // and a fixed-sized table is used to track and fix all back references.
- if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- address = AllocateRaw(space, size);
- } else if (next_alignment_ != kWordAligned) {
- const int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
- address = AllocateRaw(space, reserved);
- obj = HeapObject::FromAddress(address);
- // If one of the following assertions fails, then we are deserializing an
- // aligned object when the filler maps have not been deserialized yet.
- // We require filler maps as padding to align the object.
- DCHECK(roots_.free_space_map().IsMap());
- DCHECK(roots_.one_pointer_filler_map().IsMap());
- DCHECK(roots_.two_pointer_filler_map().IsMap());
- obj = Heap::AlignWithFiller(roots_, obj, size, reserved, next_alignment_);
- address = obj.address();
- next_alignment_ = kWordAligned;
- } else {
- address = AllocateRaw(space, size);
- }
-
-#ifdef DEBUG
- previous_allocation_start_ = address;
- previous_allocation_size_ = size;
-#endif
-
- return address;
-}
-
-void DeserializerAllocator::MoveToNextChunk(SnapshotSpace space) {
- DCHECK(IsPreAllocatedSpace(space));
- const int space_number = static_cast<int>(space);
- uint32_t chunk_index = current_chunk_[space_number];
- const Heap::Reservation& reservation = reservations_[space_number];
- // Make sure the current chunk is indeed exhausted.
- CHECK_EQ(reservation[chunk_index].end, high_water_[space_number]);
- // Move to next reserved chunk.
- chunk_index = ++current_chunk_[space_number];
- CHECK_LT(chunk_index, reservation.size());
- high_water_[space_number] = reservation[chunk_index].start;
-}
-
-HeapObject DeserializerAllocator::GetMap(uint32_t index) {
- DCHECK_LT(index, next_map_index_);
- return HeapObject::FromAddress(allocated_maps_[index]);
-}
-
-HeapObject DeserializerAllocator::GetLargeObject(uint32_t index) {
- DCHECK_LT(index, deserialized_large_objects_.size());
- return deserialized_large_objects_[index];
-}
-
-HeapObject DeserializerAllocator::GetObject(SnapshotSpace space,
- uint32_t chunk_index,
- uint32_t chunk_offset) {
- DCHECK(IsPreAllocatedSpace(space));
- const int space_number = static_cast<int>(space);
- DCHECK_LE(chunk_index, current_chunk_[space_number]);
- Address address =
- reservations_[space_number][chunk_index].start + chunk_offset;
- if (next_alignment_ != kWordAligned) {
- int padding = Heap::GetFillToAlign(address, next_alignment_);
- next_alignment_ = kWordAligned;
- DCHECK(padding == 0 ||
- HeapObject::FromAddress(address).IsFreeSpaceOrFiller());
- address += padding;
- }
- return HeapObject::FromAddress(address);
-}
-
-void DeserializerAllocator::DecodeReservation(
- const std::vector<SerializedData::Reservation>& res) {
- DCHECK_EQ(0, reservations_[0].size());
- int current_space = 0;
- for (auto& r : res) {
- reservations_[current_space].push_back(
- {r.chunk_size(), kNullAddress, kNullAddress});
- if (r.is_last()) current_space++;
- }
- DCHECK_EQ(kNumberOfSpaces, current_space);
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
-}
-
-bool DeserializerAllocator::ReserveSpace() {
-#ifdef DEBUG
- for (int i = 0; i < kNumberOfSpaces; ++i) {
- DCHECK_GT(reservations_[i].size(), 0);
- }
-#endif // DEBUG
- DCHECK(allocated_maps_.empty());
- // TODO(v8:7464): Allocate using the off-heap ReadOnlySpace here once
- // implemented.
- if (!heap_->ReserveSpace(reservations_, &allocated_maps_)) {
- return false;
- }
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- high_water_[i] = reservations_[i][0].start;
- }
- return true;
-}
-
-bool DeserializerAllocator::ReservationsAreFullyUsed() const {
- for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
- const uint32_t chunk_index = current_chunk_[space];
- if (reservations_[space].size() != chunk_index + 1) {
- return false;
- }
- if (reservations_[space][chunk_index].end != high_water_[space]) {
- return false;
- }
- }
- return (allocated_maps_.size() == next_map_index_);
-}
-
-void DeserializerAllocator::RegisterDeserializedObjectsForBlackAllocation() {
- heap_->RegisterDeserializedObjectsForBlackAllocation(
- reservations_, deserialized_large_objects_, allocated_maps_);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/snapshot/deserializer-allocator.h b/deps/v8/src/snapshot/deserializer-allocator.h
deleted file mode 100644
index 403e386fda..0000000000
--- a/deps/v8/src/snapshot/deserializer-allocator.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SNAPSHOT_DESERIALIZER_ALLOCATOR_H_
-#define V8_SNAPSHOT_DESERIALIZER_ALLOCATOR_H_
-
-#include "src/common/globals.h"
-#include "src/heap/heap.h"
-#include "src/objects/heap-object.h"
-#include "src/roots/roots.h"
-#include "src/snapshot/references.h"
-#include "src/snapshot/snapshot-data.h"
-
-namespace v8 {
-namespace internal {
-
-class Deserializer;
-class StartupDeserializer;
-
-class DeserializerAllocator final {
- public:
- DeserializerAllocator() = default;
-
- void Initialize(Heap* heap);
-
- // ------- Allocation Methods -------
- // Methods related to memory allocation during deserialization.
-
- Address Allocate(SnapshotSpace space, int size);
-
- void MoveToNextChunk(SnapshotSpace space);
- void SetAlignment(AllocationAlignment alignment) {
- DCHECK_EQ(kWordAligned, next_alignment_);
- DCHECK_LE(kWordAligned, alignment);
- DCHECK_LE(alignment, kDoubleUnaligned);
- next_alignment_ = static_cast<AllocationAlignment>(alignment);
- }
-
- HeapObject GetMap(uint32_t index);
- HeapObject GetLargeObject(uint32_t index);
- HeapObject GetObject(SnapshotSpace space, uint32_t chunk_index,
- uint32_t chunk_offset);
-
- // ------- Reservation Methods -------
- // Methods related to memory reservations (prior to deserialization).
-
- V8_EXPORT_PRIVATE void DecodeReservation(
- const std::vector<SerializedData::Reservation>& res);
- bool ReserveSpace();
-
- bool ReservationsAreFullyUsed() const;
-
- // ------- Misc Utility Methods -------
-
- void RegisterDeserializedObjectsForBlackAllocation();
-
- private:
- // Raw allocation without considering alignment.
- Address AllocateRaw(SnapshotSpace space, int size);
-
- private:
- static constexpr int kNumberOfPreallocatedSpaces =
- static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
- static constexpr int kNumberOfSpaces =
- static_cast<int>(SnapshotSpace::kNumberOfSpaces);
-
- // The address of the next object that will be allocated in each space.
- // Each space has a number of chunks reserved by the GC, with each chunk
- // fitting into a page. Deserialized objects are allocated into the
- // current chunk of the target space by bumping up high water mark.
- Heap::Reservation reservations_[kNumberOfSpaces];
- uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
- Address high_water_[kNumberOfPreallocatedSpaces];
-
-#ifdef DEBUG
- // Record the previous object allocated for DCHECKs.
- Address previous_allocation_start_ = kNullAddress;
- int previous_allocation_size_ = 0;
-#endif
-
- // The alignment of the next allocation.
- AllocationAlignment next_alignment_ = kWordAligned;
-
- // All required maps are pre-allocated during reservation. {next_map_index_}
- // stores the index of the next map to return from allocation.
- uint32_t next_map_index_ = 0;
- std::vector<Address> allocated_maps_;
-
- // Allocated large objects are kept in this map and may be fetched later as
- // back-references.
- std::vector<HeapObject> deserialized_large_objects_;
-
- // ReadOnlyRoots and heap are null until Initialize is called.
- Heap* heap_ = nullptr;
- ReadOnlyRoots roots_ = ReadOnlyRoots(static_cast<Address*>(nullptr));
-
- DISALLOW_COPY_AND_ASSIGN(DeserializerAllocator);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SNAPSHOT_DESERIALIZER_ALLOCATOR_H_
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 132af570b3..5a729b35d3 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -6,63 +6,221 @@
#include "src/base/logging.h"
#include "src/codegen/assembler-inl.h"
+#include "src/common/assert-scope.h"
#include "src/common/external-pointer.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/heap/heap-write-barrier.h"
#include "src/heap/read-only-heap.h"
#include "src/interpreter/interpreter.h"
#include "src/logging/log.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/cell-inl.h"
+#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/objects-body-descriptors-inl.h"
+#include "src/objects/objects.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/objects/string.h"
#include "src/roots/roots.h"
#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/references.h"
#include "src/snapshot/serializer-deserializer.h"
+#include "src/snapshot/snapshot-data.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"
+#include "src/utils/memcopy.h"
namespace v8 {
namespace internal {
-template <typename TSlot>
-TSlot Deserializer::Write(TSlot dest, MaybeObject value) {
- DCHECK(!next_reference_is_weak_);
- dest.store(value);
- return dest + 1;
-}
+// A SlotAccessor for a slot in a HeapObject, which abstracts the slot
+// operations done by the deserializer in a way which is GC-safe. In particular,
+// rather than an absolute slot address, this accessor holds a Handle to the
+// HeapObject, which is updated if the HeapObject moves.
+class SlotAccessorForHeapObject {
+ public:
+ static SlotAccessorForHeapObject ForSlotIndex(Handle<HeapObject> object,
+ int index) {
+ return SlotAccessorForHeapObject(object, index * kTaggedSize);
+ }
+ static SlotAccessorForHeapObject ForSlotOffset(Handle<HeapObject> object,
+ int offset) {
+ return SlotAccessorForHeapObject(object, offset);
+ }
+
+ MaybeObjectSlot slot() const { return object_->RawMaybeWeakField(offset_); }
+ Handle<HeapObject> object() const { return object_; }
+ int offset() const { return offset_; }
+
+ // Writes the given value to this slot, optionally with an offset (e.g. for
+ // repeat writes). Returns the number of slots written (which is one).
+ int Write(MaybeObject value, int slot_offset = 0) {
+ MaybeObjectSlot current_slot = slot() + slot_offset;
+ current_slot.Relaxed_Store(value);
+ WriteBarrier::Marking(*object_, current_slot, value);
+ // No need for a generational write barrier.
+ DCHECK(!Heap::InYoungGeneration(value));
+ return 1;
+ }
+ int Write(HeapObject value, HeapObjectReferenceType ref_type,
+ int slot_offset = 0) {
+ return Write(HeapObjectReference::From(value, ref_type), slot_offset);
+ }
+ int Write(Handle<HeapObject> value, HeapObjectReferenceType ref_type,
+ int slot_offset = 0) {
+ return Write(*value, ref_type, slot_offset);
+ }
+
+ // Same as Write, but additionally with a generational barrier.
+ int WriteWithGenerationalBarrier(MaybeObject value) {
+ MaybeObjectSlot current_slot = slot();
+ current_slot.Relaxed_Store(value);
+ WriteBarrier::Marking(*object_, current_slot, value);
+ if (Heap::InYoungGeneration(value)) {
+ GenerationalBarrier(*object_, current_slot, value);
+ }
+ return 1;
+ }
+ int WriteWithGenerationalBarrier(HeapObject value,
+ HeapObjectReferenceType ref_type) {
+ return WriteWithGenerationalBarrier(
+ HeapObjectReference::From(value, ref_type));
+ }
+ int WriteWithGenerationalBarrier(Handle<HeapObject> value,
+ HeapObjectReferenceType ref_type) {
+ return WriteWithGenerationalBarrier(*value, ref_type);
+ }
+
+ private:
+ SlotAccessorForHeapObject(Handle<HeapObject> object, int offset)
+ : object_(object), offset_(offset) {}
+
+ const Handle<HeapObject> object_;
+ const int offset_;
+};
+
+// A SlotAccessor for absolute full slot addresses.
+class SlotAccessorForRootSlots {
+ public:
+ explicit SlotAccessorForRootSlots(FullMaybeObjectSlot slot) : slot_(slot) {}
+
+ FullMaybeObjectSlot slot() const { return slot_; }
+ Handle<HeapObject> object() const { UNREACHABLE(); }
+ int offset() const { UNREACHABLE(); }
+
+ // Writes the given value to this slot, optionally with an offset (e.g. for
+ // repeat writes). Returns the number of slots written (which is one).
+ int Write(MaybeObject value, int slot_offset = 0) {
+ FullMaybeObjectSlot current_slot = slot() + slot_offset;
+ current_slot.Relaxed_Store(value);
+ return 1;
+ }
+ int Write(HeapObject value, HeapObjectReferenceType ref_type,
+ int slot_offset = 0) {
+ return Write(HeapObjectReference::From(value, ref_type), slot_offset);
+ }
+ int Write(Handle<HeapObject> value, HeapObjectReferenceType ref_type,
+ int slot_offset = 0) {
+ return Write(*value, ref_type, slot_offset);
+ }
+
+ int WriteWithGenerationalBarrier(MaybeObject value) { return Write(value); }
+ int WriteWithGenerationalBarrier(HeapObject value,
+ HeapObjectReferenceType ref_type) {
+ return WriteWithGenerationalBarrier(
+ HeapObjectReference::From(value, ref_type));
+ }
+ int WriteWithGenerationalBarrier(Handle<HeapObject> value,
+ HeapObjectReferenceType ref_type) {
+ return WriteWithGenerationalBarrier(*value, ref_type);
+ }
+
+ private:
+ const FullMaybeObjectSlot slot_;
+};
+
+// A SlotAccessor for creating a Handle, which saves a Handle allocation when
+// a Handle already exists.
+class SlotAccessorForHandle {
+ public:
+ SlotAccessorForHandle(Handle<HeapObject>* handle, Isolate* isolate)
+ : handle_(handle), isolate_(isolate) {}
+
+ MaybeObjectSlot slot() const { UNREACHABLE(); }
+ Handle<HeapObject> object() const { UNREACHABLE(); }
+ int offset() const { UNREACHABLE(); }
+
+ int Write(MaybeObject value, int slot_offset = 0) { UNREACHABLE(); }
+ int Write(HeapObject value, HeapObjectReferenceType ref_type,
+ int slot_offset = 0) {
+ DCHECK_EQ(slot_offset, 0);
+ DCHECK_EQ(ref_type, HeapObjectReferenceType::STRONG);
+ *handle_ = handle(value, isolate_);
+ return 1;
+ }
+ int Write(Handle<HeapObject> value, HeapObjectReferenceType ref_type,
+ int slot_offset = 0) {
+ DCHECK_EQ(slot_offset, 0);
+ DCHECK_EQ(ref_type, HeapObjectReferenceType::STRONG);
+ *handle_ = value;
+ return 1;
+ }
+
+ int WriteWithGenerationalBarrier(HeapObject value,
+ HeapObjectReferenceType ref_type) {
+ return Write(value, ref_type);
+ }
+ int WriteWithGenerationalBarrier(Handle<HeapObject> value,
+ HeapObjectReferenceType ref_type) {
+ return Write(value, ref_type);
+ }
+
+ private:
+ Handle<HeapObject>* handle_;
+ Isolate* isolate_;
+};
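The three accessors above share a duck-typed interface (slot(), object(), offset(), Write(), WriteWithGenerationalBarrier()), which lets a single templated bytecode handler target in-object slots, root slots, or a bare Handle. A hedged illustration of that dispatch (the helper name is invented):

    // One handler body, three destinations: which store (and which write
    // barrier) happens is decided by the accessor type at compile time.
    template <typename SlotAccessor>
    int WriteStrongRef(SlotAccessor slot_accessor, Handle<HeapObject> value) {
      return slot_accessor.Write(value, HeapObjectReferenceType::STRONG);
    }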
template <typename TSlot>
-TSlot Deserializer::WriteAddress(TSlot dest, Address value) {
+int Deserializer::WriteAddress(TSlot dest, Address value) {
DCHECK(!next_reference_is_weak_);
memcpy(dest.ToVoidPtr(), &value, kSystemPointerSize);
STATIC_ASSERT(IsAligned(kSystemPointerSize, TSlot::kSlotDataSize));
- return dest + (kSystemPointerSize / TSlot::kSlotDataSize);
+ return (kSystemPointerSize / TSlot::kSlotDataSize);
}
template <typename TSlot>
-TSlot Deserializer::WriteExternalPointer(TSlot dest, Address value) {
- value = EncodeExternalPointer(isolate(), value);
+int Deserializer::WriteExternalPointer(TSlot dest, Address value,
+ ExternalPointerTag tag) {
DCHECK(!next_reference_is_weak_);
- memcpy(dest.ToVoidPtr(), &value, kExternalPointerSize);
+ InitExternalPointerField(dest.address(), isolate(), value, tag);
STATIC_ASSERT(IsAligned(kExternalPointerSize, TSlot::kSlotDataSize));
- return dest + (kExternalPointerSize / TSlot::kSlotDataSize);
+ return (kExternalPointerSize / TSlot::kSlotDataSize);
}
-void Deserializer::Initialize(Isolate* isolate) {
- DCHECK_NULL(isolate_);
+Deserializer::Deserializer(Isolate* isolate, Vector<const byte> payload,
+ uint32_t magic_number, bool deserializing_user_code,
+ bool can_rehash)
+ : isolate_(isolate),
+ source_(payload),
+ magic_number_(magic_number),
+ deserializing_user_code_(deserializing_user_code),
+ can_rehash_(can_rehash) {
DCHECK_NOT_NULL(isolate);
- isolate_ = isolate;
- allocator()->Initialize(isolate->heap());
+ isolate_->RegisterDeserializerStarted();
+
+ // We start the indices here at 1, so that we can distinguish between an
+ // actual index and a nullptr (serialized as kNullRefSentinel) in a
+ // deserialized object requiring fix-up.
+ STATIC_ASSERT(kNullRefSentinel == 0);
+ backing_stores_.push_back({});
#ifdef DEBUG
num_api_references_ = 0;
@@ -83,8 +241,8 @@ void Deserializer::Initialize(Isolate* isolate) {
void Deserializer::Rehash() {
DCHECK(can_rehash() || deserializing_user_code());
- for (HeapObject item : to_rehash_) {
- item.RehashBasedOnMap(isolate());
+ for (Handle<HeapObject> item : to_rehash_) {
+ item->RehashBasedOnMap(isolate());
}
}
@@ -94,16 +252,18 @@ Deserializer::~Deserializer() {
if (source_.position() == 0) return;
// Check that we only have padding bytes remaining.
while (source_.HasMore()) DCHECK_EQ(kNop, source_.Get());
- // Check that we've fully used all reserved space.
- DCHECK(allocator()->ReservationsAreFullyUsed());
+ // Check that there are no remaining forward refs.
+ DCHECK_EQ(num_unresolved_forward_refs_, 0);
+ DCHECK(unresolved_forward_refs_.empty());
#endif // DEBUG
+ isolate_->RegisterDeserializerFinished();
}
// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
void Deserializer::VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) {
- ReadData(FullMaybeObjectSlot(start), FullMaybeObjectSlot(end), kNullAddress);
+ ReadData(FullMaybeObjectSlot(start), FullMaybeObjectSlot(end));
}
void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
@@ -112,8 +272,6 @@ void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
}
void Deserializer::DeserializeDeferredObjects() {
- DisallowGarbageCollection no_gc;
-
for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
SnapshotSpace space = NewObject::Decode(code);
ReadObject(space);
@@ -122,10 +280,20 @@ void Deserializer::DeserializeDeferredObjects() {
void Deserializer::LogNewMapEvents() {
DisallowGarbageCollection no_gc;
- for (Map map : new_maps_) {
+ for (Handle<Map> map : new_maps_) {
DCHECK(FLAG_trace_maps);
- LOG(isolate(), MapCreate(map));
- LOG(isolate(), MapDetails(map));
+ LOG(isolate(), MapCreate(*map));
+ LOG(isolate(), MapDetails(*map));
+ }
+}
+
+void Deserializer::WeakenDescriptorArrays() {
+ DisallowHeapAllocation no_gc;
+ for (Handle<DescriptorArray> descriptor_array : new_descriptor_arrays_) {
+ DCHECK(descriptor_array->IsStrongDescriptorArray());
+ descriptor_array->set_map(ReadOnlyRoots(isolate()).descriptor_array_map());
+ WriteBarrier::Marking(*descriptor_array,
+ descriptor_array->number_of_descriptors());
}
}
@@ -157,141 +325,149 @@ uint32_t StringTableInsertionKey::ComputeHashField(String string) {
return string.hash_field();
}
-HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
- SnapshotSpace space) {
+void Deserializer::PostProcessNewObject(Handle<Map> map, Handle<HeapObject> obj,
+ SnapshotSpace space) {
+ DCHECK_EQ(*map, obj->map());
DisallowGarbageCollection no_gc;
+ InstanceType instance_type = map->instance_type();
if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
- if (obj.IsString()) {
+ if (InstanceTypeChecker::IsString(instance_type)) {
// Uninitialize hash field as we need to recompute the hash.
- String string = String::cast(obj);
- string.set_hash_field(String::kEmptyHashField);
+ Handle<String> string = Handle<String>::cast(obj);
+ string->set_hash_field(String::kEmptyHashField);
// Rehash strings before read-only space is sealed. Strings outside
// read-only space are rehashed lazily. (e.g. when rehashing dictionaries)
if (space == SnapshotSpace::kReadOnlyHeap) {
to_rehash_.push_back(obj);
}
- } else if (obj.NeedsRehashing()) {
+ } else if (obj->NeedsRehashing(instance_type)) {
to_rehash_.push_back(obj);
}
}
if (deserializing_user_code()) {
- if (obj.IsString()) {
- String string = String::cast(obj);
- if (string.IsInternalizedString()) {
- // Canonicalize the internalized string. If it already exists in the
- // string table, set it to forward to the existing one.
-
- // Create storage for a fake handle -- this only needs to be valid until
- // the end of LookupKey.
- Address handle_storage = string.ptr();
- Handle<String> handle(&handle_storage);
- StringTableInsertionKey key(handle);
- String result = *isolate()->string_table()->LookupKey(isolate(), &key);
-
- if (FLAG_thin_strings && result != string) {
- string.MakeThin(isolate(), result);
- }
- return result;
+ if (InstanceTypeChecker::IsInternalizedString(instance_type)) {
+ // Canonicalize the internalized string. If it already exists in the
+ // string table, set it to forward to the existing one.
+ Handle<String> string = Handle<String>::cast(obj);
+
+ StringTableInsertionKey key(string);
+ Handle<String> result =
+ isolate()->string_table()->LookupKey(isolate(), &key);
+
+ if (FLAG_thin_strings && *result != *string) {
+ string->MakeThin(isolate(), *result);
+ // Mutate the given object handle so that the backreference entry is
+ // also updated.
+ obj.PatchValue(*result);
}
- } else if (obj.IsScript()) {
- new_scripts_.push_back(handle(Script::cast(obj), isolate()));
- } else if (obj.IsAllocationSite()) {
+ return;
+ } else if (InstanceTypeChecker::IsScript(instance_type)) {
+ new_scripts_.push_back(Handle<Script>::cast(obj));
+ } else if (InstanceTypeChecker::IsAllocationSite(instance_type)) {
// We should link new allocation sites, but we can't do this immediately
// because |AllocationSite::HasWeakNext()| internally accesses
// |Heap::roots_| that may not have been initialized yet. So defer this to
// |ObjectDeserializer::CommitPostProcessedObjects()|.
- new_allocation_sites_.push_back(AllocationSite::cast(obj));
+ new_allocation_sites_.push_back(Handle<AllocationSite>::cast(obj));
} else {
- DCHECK(CanBeDeferred(obj));
+ DCHECK(CanBeDeferred(*obj));
}
}
- if (obj.IsScript()) {
- LogScriptEvents(Script::cast(obj));
- } else if (obj.IsCode()) {
+
+ if (InstanceTypeChecker::IsScript(instance_type)) {
+ LogScriptEvents(Script::cast(*obj));
+ } else if (InstanceTypeChecker::IsCode(instance_type)) {
// We flush all code pages after deserializing the startup snapshot.
// Hence we only remember each individual code object when deserializing
// user code.
- if (deserializing_user_code() || space == SnapshotSpace::kLargeObject) {
- new_code_objects_.push_back(Code::cast(obj));
- }
- } else if (FLAG_trace_maps && obj.IsMap()) {
- // Keep track of all seen Maps to log them later since they might be only
- // partially initialized at this point.
- new_maps_.push_back(Map::cast(obj));
- } else if (obj.IsAccessorInfo()) {
+ if (deserializing_user_code()) {
+ new_code_objects_.push_back(Handle<Code>::cast(obj));
+ }
+ } else if (InstanceTypeChecker::IsMap(instance_type)) {
+ if (FLAG_trace_maps) {
+ // Keep track of all seen Maps to log them later since they might be only
+ // partially initialized at this point.
+ new_maps_.push_back(Handle<Map>::cast(obj));
+ }
+ } else if (InstanceTypeChecker::IsAccessorInfo(instance_type)) {
#ifdef USE_SIMULATOR
- accessor_infos_.push_back(AccessorInfo::cast(obj));
+ accessor_infos_.push_back(Handle<AccessorInfo>::cast(obj));
#endif
- } else if (obj.IsCallHandlerInfo()) {
+ } else if (InstanceTypeChecker::IsCallHandlerInfo(instance_type)) {
#ifdef USE_SIMULATOR
- call_handler_infos_.push_back(CallHandlerInfo::cast(obj));
+ call_handler_infos_.push_back(Handle<CallHandlerInfo>::cast(obj));
#endif
- } else if (obj.IsExternalString()) {
- ExternalString string = ExternalString::cast(obj);
- uint32_t index = string.resource_as_uint32();
+ } else if (InstanceTypeChecker::IsExternalString(instance_type)) {
+ Handle<ExternalString> string = Handle<ExternalString>::cast(obj);
+ uint32_t index = string->GetResourceRefForDeserialization();
Address address =
static_cast<Address>(isolate()->api_external_references()[index]);
- string.set_address_as_resource(isolate(), address);
- isolate()->heap()->UpdateExternalString(string, 0,
- string.ExternalPayloadSize());
- isolate()->heap()->RegisterExternalString(String::cast(obj));
- } else if (obj.IsJSDataView()) {
- JSDataView data_view = JSDataView::cast(obj);
- JSArrayBuffer buffer = JSArrayBuffer::cast(data_view.buffer());
+ string->AllocateExternalPointerEntries(isolate());
+ string->set_address_as_resource(isolate(), address);
+ isolate()->heap()->UpdateExternalString(*string, 0,
+ string->ExternalPayloadSize());
+ isolate()->heap()->RegisterExternalString(*string);
+ } else if (InstanceTypeChecker::IsJSDataView(instance_type)) {
+ Handle<JSDataView> data_view = Handle<JSDataView>::cast(obj);
+ JSArrayBuffer buffer = JSArrayBuffer::cast(data_view->buffer());
void* backing_store = nullptr;
- if (buffer.backing_store() != nullptr) {
+ uint32_t store_index = buffer.GetBackingStoreRefForDeserialization();
+ if (store_index != kNullRefSentinel) {
// The backing store of the JSArrayBuffer has not been correctly restored
// yet, as that may trigger GC. The backing_store field currently contains
// a numbered reference to an already deserialized backing store.
- uint32_t store_index = buffer.GetBackingStoreRefForDeserialization();
backing_store = backing_stores_[store_index]->buffer_start();
}
- data_view.set_data_pointer(
+ data_view->AllocateExternalPointerEntries(isolate());
+ data_view->set_data_pointer(
isolate(),
- reinterpret_cast<uint8_t*>(backing_store) + data_view.byte_offset());
- } else if (obj.IsJSTypedArray()) {
- JSTypedArray typed_array = JSTypedArray::cast(obj);
+ reinterpret_cast<uint8_t*>(backing_store) + data_view->byte_offset());
+ } else if (InstanceTypeChecker::IsJSTypedArray(instance_type)) {
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(obj);
// Fixup typed array pointers.
- if (typed_array.is_on_heap()) {
- typed_array.SetOnHeapDataPtr(isolate(),
- HeapObject::cast(typed_array.base_pointer()),
- typed_array.external_pointer());
+ if (typed_array->is_on_heap()) {
+ Address raw_external_pointer = typed_array->external_pointer_raw();
+ typed_array->AllocateExternalPointerEntries(isolate());
+ typed_array->SetOnHeapDataPtr(
+ isolate(), HeapObject::cast(typed_array->base_pointer()),
+ raw_external_pointer);
} else {
// Serializer writes backing store ref as a DataPtr() value.
uint32_t store_index =
- typed_array.GetExternalBackingStoreRefForDeserialization();
+ typed_array->GetExternalBackingStoreRefForDeserialization();
auto backing_store = backing_stores_[store_index];
auto start = backing_store
? reinterpret_cast<byte*>(backing_store->buffer_start())
: nullptr;
- typed_array.SetOffHeapDataPtr(isolate(), start,
- typed_array.byte_offset());
+ typed_array->AllocateExternalPointerEntries(isolate());
+ typed_array->SetOffHeapDataPtr(isolate(), start,
+ typed_array->byte_offset());
}
- } else if (obj.IsJSArrayBuffer()) {
- JSArrayBuffer buffer = JSArrayBuffer::cast(obj);
+ } else if (InstanceTypeChecker::IsJSArrayBuffer(instance_type)) {
+ Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(obj);
// Postpone allocation of backing store to avoid triggering the GC.
- if (buffer.backing_store() != nullptr) {
- new_off_heap_array_buffers_.push_back(handle(buffer, isolate()));
+ if (buffer->GetBackingStoreRefForDeserialization() != kNullRefSentinel) {
+ new_off_heap_array_buffers_.push_back(buffer);
+ } else {
+ buffer->AllocateExternalPointerEntries(isolate());
+ buffer->set_backing_store(isolate(), nullptr);
}
- } else if (obj.IsBytecodeArray()) {
+ } else if (InstanceTypeChecker::IsBytecodeArray(instance_type)) {
// TODO(mythria): Remove these once we store the default values for these
// fields in the serializer.
- BytecodeArray bytecode_array = BytecodeArray::cast(obj);
- bytecode_array.set_osr_loop_nesting_level(0);
- }
-#ifdef DEBUG
- if (obj.IsDescriptorArray()) {
- DescriptorArray descriptor_array = DescriptorArray::cast(obj);
- DCHECK_EQ(0, descriptor_array.raw_number_of_marked_descriptors());
+ Handle<BytecodeArray> bytecode_array = Handle<BytecodeArray>::cast(obj);
+ bytecode_array->set_osr_loop_nesting_level(0);
+ } else if (InstanceTypeChecker::IsDescriptorArray(instance_type)) {
+ DCHECK(InstanceTypeChecker::IsStrongDescriptorArray(instance_type));
+ Handle<DescriptorArray> descriptors = Handle<DescriptorArray>::cast(obj);
+ new_descriptor_arrays_.push_back(descriptors);
}
-#endif
// Check alignment.
- DCHECK_EQ(0, Heap::GetFillToAlign(obj.address(),
- HeapObject::RequiredAlignment(obj.map())));
- return obj;
+ DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
+ HeapObject::RequiredAlignment(*map)));
}
HeapObjectReferenceType Deserializer::GetAndResetNextReferenceType() {
@@ -302,162 +478,180 @@ HeapObjectReferenceType Deserializer::GetAndResetNextReferenceType() {
return type;
}
-HeapObject Deserializer::GetBackReferencedObject(SnapshotSpace space) {
- HeapObject obj;
- switch (space) {
- case SnapshotSpace::kLargeObject:
- obj = allocator()->GetLargeObject(source_.GetInt());
- break;
- case SnapshotSpace::kMap:
- obj = allocator()->GetMap(source_.GetInt());
- break;
- case SnapshotSpace::kReadOnlyHeap: {
- uint32_t chunk_index = source_.GetInt();
- uint32_t chunk_offset = source_.GetInt();
- if (isolate()->heap()->deserialization_complete()) {
- ReadOnlySpace* read_only_space = isolate()->heap()->read_only_space();
- ReadOnlyPage* page = read_only_space->pages()[chunk_index];
- Address address = page->OffsetToAddress(chunk_offset);
- obj = HeapObject::FromAddress(address);
- } else {
- obj = allocator()->GetObject(space, chunk_index, chunk_offset);
- }
- break;
- }
- default: {
- uint32_t chunk_index = source_.GetInt();
- uint32_t chunk_offset = source_.GetInt();
- obj = allocator()->GetObject(space, chunk_index, chunk_offset);
- break;
- }
- }
+Handle<HeapObject> Deserializer::GetBackReferencedObject() {
+ Handle<HeapObject> obj = back_refs_[source_.GetInt()];
- if (deserializing_user_code() && obj.IsThinString()) {
- obj = ThinString::cast(obj).actual();
- }
+ // We don't allow ThinStrings in backreferences -- if internalization produces
+ // a thin string, then it should also update the backref handle.
+ DCHECK(!obj->IsThinString());
hot_objects_.Add(obj);
- DCHECK(!HasWeakHeapObjectTag(obj));
+ DCHECK(!HasWeakHeapObjectTag(*obj));
return obj;
}
-HeapObject Deserializer::ReadObject() {
- MaybeObject object;
- ReadData(FullMaybeObjectSlot(&object), FullMaybeObjectSlot(&object + 1),
- kNullAddress);
- return object.GetHeapObjectAssumeStrong();
+Handle<HeapObject> Deserializer::ReadObject() {
+ Handle<HeapObject> ret;
+ CHECK_EQ(ReadSingleBytecodeData(source_.Get(),
+ SlotAccessorForHandle(&ret, isolate())),
+ 1);
+ return ret;
}
-HeapObject Deserializer::ReadObject(SnapshotSpace space) {
- DisallowGarbageCollection no_gc;
-
- const int size = source_.GetInt() << kObjectAlignmentBits;
+Handle<HeapObject> Deserializer::ReadObject(SnapshotSpace space) {
+ const int size_in_tagged = source_.GetInt();
+ const int size_in_bytes = size_in_tagged * kTaggedSize;
// The map can't be a forward ref. If you want the map to be a forward ref,
// then you're probably serializing the meta-map, in which case you want to
// use the kNewMetaMap bytecode.
DCHECK_NE(source()->Peek(), kRegisterPendingForwardRef);
- Map map = Map::cast(ReadObject());
-
- // The serializer allocated the object now, so the next bytecodes might be an
- // alignment prefix and/or a next chunk
- if (base::IsInRange<byte, byte>(source()->Peek(), kAlignmentPrefix,
- kAlignmentPrefix + 2)) {
- int alignment = source()->Get() - (kAlignmentPrefix - 1);
- allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
+ Handle<Map> map = Handle<Map>::cast(ReadObject());
+
+ // Filling an object's fields can cause GCs and heap walks, so this object has
+ // to be in a 'sufficiently initialized' state by the time the next allocation
+ // can happen. For this to be the case, the object is carefully deserialized
+ // as follows:
+ // * The space for the object is allocated.
+ // * The map is set on the object so that the GC knows what type the object
+ // has.
+ // * The rest of the object is filled with a fixed Smi value
+ // - This is a Smi so that tagged fields become initialized to a valid
+ // tagged value.
+ // - It's a fixed value, "uninitialized_field_value", so that we can
+ // DCHECK for it when reading objects that are assumed to be partially
+ // initialized objects.
+ // * The fields of the object are deserialized in order, under the
+ // assumption that objects are laid out in such a way that any fields
+ // required for object iteration (e.g. length fields) are deserialized
+ // before fields with objects.
+ // - We ensure this is the case by DCHECKing on object allocation that the
+ // previously allocated object has a valid size (see `Allocate`).
+ HeapObject raw_obj =
+ Allocate(space, size_in_bytes, HeapObject::RequiredAlignment(*map));
+ raw_obj.set_map_after_allocation(*map);
+ MemsetTagged(raw_obj.RawField(kTaggedSize), uninitialized_field_value(),
+ size_in_tagged - 1);
+
+ // Make sure BytecodeArrays have a valid age, so that the marker doesn't
+ // break when making them older.
+ if (raw_obj.IsBytecodeArray(isolate())) {
+ BytecodeArray::cast(raw_obj).set_bytecode_age(
+ BytecodeArray::kFirstBytecodeAge);
}
- if (source()->Peek() == kNextChunk) {
- source()->Advance(1);
- // The next byte is the space for the next chunk -- it should match the
- // current space.
- // TODO(leszeks): Remove the next chunk space entirely.
- DCHECK_EQ(static_cast<SnapshotSpace>(source()->Peek()), space);
- source()->Advance(1);
- allocator()->MoveToNextChunk(space);
+
+#ifdef DEBUG
+ // We want to make sure that all embedder pointers are initialized to null.
+ if (raw_obj.IsJSObject() && JSObject::cast(raw_obj).IsApiWrapper()) {
+ JSObject js_obj = JSObject::cast(raw_obj);
+ for (int i = 0; i < js_obj.GetEmbedderFieldCount(); ++i) {
+ void* pointer;
+ CHECK(EmbedderDataSlot(js_obj, i).ToAlignedPointerSafe(isolate(),
+ &pointer));
+ CHECK_NULL(pointer);
+ }
+ } else if (raw_obj.IsEmbedderDataArray()) {
+ EmbedderDataArray array = EmbedderDataArray::cast(raw_obj);
+ EmbedderDataSlot start(array, 0);
+ EmbedderDataSlot end(array, array.length());
+ for (EmbedderDataSlot slot = start; slot < end; ++slot) {
+ void* pointer;
+ CHECK(slot.ToAlignedPointerSafe(isolate(), &pointer));
+ CHECK_NULL(pointer);
+ }
}
+#endif
- Address address = allocator()->Allocate(space, size);
- HeapObject obj = HeapObject::FromAddress(address);
+ Handle<HeapObject> obj = handle(raw_obj, isolate());
+ back_refs_.push_back(obj);
- isolate()->heap()->OnAllocationEvent(obj, size);
- MaybeObjectSlot current(address);
- MaybeObjectSlot limit(address + size);
+ ReadData(obj, 1, size_in_tagged);
+ PostProcessNewObject(map, obj, space);
- current.store(MaybeObject::FromObject(map));
- ReadData(current + 1, limit, address);
- obj = PostProcessNewObject(obj, space);
+ DCHECK(!obj->IsThinString(isolate()));
#ifdef DEBUG
- if (obj.IsCode()) {
+ if (obj->IsCode()) {
DCHECK(space == SnapshotSpace::kCode ||
space == SnapshotSpace::kReadOnlyHeap);
} else {
DCHECK_NE(space, SnapshotSpace::kCode);
}
#endif // DEBUG
+
return obj;
}
-HeapObject Deserializer::ReadMetaMap() {
- DisallowHeapAllocation no_gc;
-
+Handle<HeapObject> Deserializer::ReadMetaMap() {
const SnapshotSpace space = SnapshotSpace::kReadOnlyHeap;
- const int size = Map::kSize;
+ const int size_in_bytes = Map::kSize;
+ const int size_in_tagged = size_in_bytes / kTaggedSize;
- Address address = allocator()->Allocate(space, size);
- HeapObject obj = HeapObject::FromAddress(address);
+ HeapObject raw_obj = Allocate(space, size_in_bytes, kWordAligned);
+ raw_obj.set_map_after_allocation(Map::unchecked_cast(raw_obj));
+ MemsetTagged(raw_obj.RawField(kTaggedSize), uninitialized_field_value(),
+ size_in_tagged - 1);
- isolate()->heap()->OnAllocationEvent(obj, size);
- MaybeObjectSlot current(address);
- MaybeObjectSlot limit(address + size);
+ Handle<HeapObject> obj = handle(raw_obj, isolate());
+ back_refs_.push_back(obj);
- current.store(MaybeObject(current.address() + kHeapObjectTag));
// Set the instance-type manually, to allow backrefs to read it.
- Map::unchecked_cast(obj).set_instance_type(MAP_TYPE);
- ReadData(current + 1, limit, address);
+ Map::unchecked_cast(*obj).set_instance_type(MAP_TYPE);
- return obj;
-}
+ ReadData(obj, 1, size_in_tagged);
+ PostProcessNewObject(Handle<Map>::cast(obj), obj, space);
-void Deserializer::ReadCodeObjectBody(Address code_object_address) {
- // At this point the code object is already allocated, its map field is
- // initialized and its raw data fields and code stream are also read.
- // Now we read the rest of code header's fields.
- MaybeObjectSlot current(code_object_address + HeapObject::kHeaderSize);
- MaybeObjectSlot limit(code_object_address + Code::kDataStart);
- ReadData(current, limit, code_object_address);
-
- // Now iterate RelocInfos the same way it was done by the serializer and
- // deserialize respective data into RelocInfos.
- Code code = Code::cast(HeapObject::FromAddress(code_object_address));
- RelocIterator it(code, Code::BodyDescriptor::kRelocModeMask);
- for (; !it.done(); it.next()) {
- RelocInfo rinfo = *it.rinfo();
- rinfo.Visit(this);
- }
+ return obj;
}
-void Deserializer::VisitCodeTarget(Code host, RelocInfo* rinfo) {
- HeapObject object = ReadObject();
+class Deserializer::RelocInfoVisitor {
+ public:
+ RelocInfoVisitor(Deserializer* deserializer,
+ const std::vector<Handle<HeapObject>>* objects)
+ : deserializer_(deserializer), objects_(objects), current_object_(0) {}
+ ~RelocInfoVisitor() { DCHECK_EQ(current_object_, objects_->size()); }
+
+ void VisitCodeTarget(Code host, RelocInfo* rinfo);
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo);
+ void VisitRuntimeEntry(Code host, RelocInfo* rinfo);
+ void VisitExternalReference(Code host, RelocInfo* rinfo);
+ void VisitInternalReference(Code host, RelocInfo* rinfo);
+ void VisitOffHeapTarget(Code host, RelocInfo* rinfo);
+
+ private:
+ Isolate* isolate() { return deserializer_->isolate(); }
+ SnapshotByteSource& source() { return deserializer_->source_; }
+
+ Deserializer* deserializer_;
+ const std::vector<Handle<HeapObject>>* objects_;
+ int current_object_;
+};
+
+void Deserializer::RelocInfoVisitor::VisitCodeTarget(Code host,
+ RelocInfo* rinfo) {
+ HeapObject object = *objects_->at(current_object_++);
rinfo->set_target_address(Code::cast(object).raw_instruction_start());
}
-void Deserializer::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
- HeapObject object = ReadObject();
+void Deserializer::RelocInfoVisitor::VisitEmbeddedPointer(Code host,
+ RelocInfo* rinfo) {
+ HeapObject object = *objects_->at(current_object_++);
// Embedded object reference must be a strong one.
rinfo->set_target_object(isolate()->heap(), object);
}
-void Deserializer::VisitRuntimeEntry(Code host, RelocInfo* rinfo) {
+void Deserializer::RelocInfoVisitor::VisitRuntimeEntry(Code host,
+ RelocInfo* rinfo) {
// We no longer serialize code that contains runtime entries.
UNREACHABLE();
}
-void Deserializer::VisitExternalReference(Code host, RelocInfo* rinfo) {
- byte data = source_.Get();
+void Deserializer::RelocInfoVisitor::VisitExternalReference(Code host,
+ RelocInfo* rinfo) {
+ byte data = source().Get();
CHECK_EQ(data, kExternalReference);
- Address address = ReadExternalReferenceCase();
+ Address address = deserializer_->ReadExternalReferenceCase();
if (rinfo->IsCodedSpecially()) {
Address location_of_branch_data = rinfo->pc();
@@ -468,24 +662,30 @@ void Deserializer::VisitExternalReference(Code host, RelocInfo* rinfo) {
}
}
-void Deserializer::VisitInternalReference(Code host, RelocInfo* rinfo) {
- byte data = source_.Get();
+void Deserializer::RelocInfoVisitor::VisitInternalReference(Code host,
+ RelocInfo* rinfo) {
+ byte data = source().Get();
CHECK_EQ(data, kInternalReference);
// Internal reference target is encoded as an offset from code entry.
- int target_offset = source_.GetInt();
+ int target_offset = source().GetInt();
+ // TODO(jgruber,v8:11036): We are being permissive for this DCHECK, but
+ // consider using raw_instruction_size() instead of raw_body_size() in the
+ // future.
+ STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
DCHECK_LT(static_cast<unsigned>(target_offset),
- static_cast<unsigned>(host.raw_instruction_size()));
+ static_cast<unsigned>(host.raw_body_size()));
Address target = host.entry() + target_offset;
Assembler::deserialization_set_target_internal_reference_at(
rinfo->pc(), target, rinfo->rmode());
}
-void Deserializer::VisitOffHeapTarget(Code host, RelocInfo* rinfo) {
- byte data = source_.Get();
+void Deserializer::RelocInfoVisitor::VisitOffHeapTarget(Code host,
+ RelocInfo* rinfo) {
+ byte data = source().Get();
CHECK_EQ(data, kOffHeapTarget);
- int builtin_index = source_.GetInt();
+ int builtin_index = source().GetInt();
DCHECK(Builtins::IsBuiltinId(builtin_index));
CHECK_NOT_NULL(isolate()->embedded_blob_code());
@@ -503,18 +703,18 @@ void Deserializer::VisitOffHeapTarget(Code host, RelocInfo* rinfo) {
}
}
-template <typename TSlot>
-TSlot Deserializer::ReadRepeatedObject(TSlot current, int repeat_count) {
+template <typename SlotAccessor>
+int Deserializer::ReadRepeatedObject(SlotAccessor slot_accessor,
+ int repeat_count) {
CHECK_LE(2, repeat_count);
- HeapObject heap_object = ReadObject();
- DCHECK(!Heap::InYoungGeneration(heap_object));
+ Handle<HeapObject> heap_object = ReadObject();
+ DCHECK(!Heap::InYoungGeneration(*heap_object));
for (int i = 0; i < repeat_count; i++) {
- // Repeated values are not subject to the write barrier so we don't need
- // to trigger it.
- current = Write(current, MaybeObject::FromObject(heap_object));
+ // TODO(leszeks): Use a ranged barrier here.
+ slot_accessor.Write(heap_object, HeapObjectReferenceType::STRONG, i);
}
- return current;
+ return repeat_count;
}
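For context, this is how the handler above is driven from the bytecode loop; the stream layout (varint repeat count, then one serialized object) is inferred from this patch:

    // kVariableRepeat: read one object, then write it into `repeats`
    // consecutive tagged slots of the current object.
    int repeats = VariableRepeatCount::Decode(source_.GetInt());
    current += ReadRepeatedObject(
        SlotAccessorForHeapObject::ForSlotIndex(object, current), repeats);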
namespace {
@@ -551,26 +751,38 @@ constexpr byte VerifyBytecodeCount(byte bytecode) {
#define CASE_R32(byte_code) CASE_R16(byte_code) : case CASE_R16(byte_code + 16)
// This generates a case range for all the spaces.
-#define CASE_RANGE_ALL_SPACES(bytecode) \
- SpaceEncoder<bytecode>::Encode(SnapshotSpace::kOld) \
- : case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kCode) \
- : case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kMap) \
- : case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kLargeObject) \
+#define CASE_RANGE_ALL_SPACES(bytecode) \
+ SpaceEncoder<bytecode>::Encode(SnapshotSpace::kOld) \
+ : case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kCode) \
+ : case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kMap) \
: case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kReadOnlyHeap)
-template <typename TSlot>
-void Deserializer::ReadData(TSlot current, TSlot limit,
- Address current_object_address) {
- while (current < limit) {
+void Deserializer::ReadData(Handle<HeapObject> object, int start_slot_index,
+ int end_slot_index) {
+ int current = start_slot_index;
+ while (current < end_slot_index) {
byte data = source_.Get();
- current = ReadSingleBytecodeData(data, current, current_object_address);
+ current += ReadSingleBytecodeData(
+ data, SlotAccessorForHeapObject::ForSlotIndex(object, current));
}
- CHECK_EQ(limit, current);
+ CHECK_EQ(current, end_slot_index);
}
-template <typename TSlot>
-TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
- Address current_object_address) {
+void Deserializer::ReadData(FullMaybeObjectSlot start,
+ FullMaybeObjectSlot end) {
+ FullMaybeObjectSlot current = start;
+ while (current < end) {
+ byte data = source_.Get();
+ current += ReadSingleBytecodeData(data, SlotAccessorForRootSlots(current));
+ }
+ CHECK_EQ(current, end);
+}
+
+template <typename SlotAccessor>
+int Deserializer::ReadSingleBytecodeData(byte data,
+ SlotAccessor slot_accessor) {
+ using TSlot = decltype(slot_accessor.slot());
+
switch (data) {
// Deserialize a new object and write a pointer to it to the current
// object.
@@ -578,19 +790,30 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
SnapshotSpace space = NewObject::Decode(data);
// Save the reference type before recursing down into reading the object.
HeapObjectReferenceType ref_type = GetAndResetNextReferenceType();
- HeapObject heap_object = ReadObject(space);
- DCHECK(!Heap::InYoungGeneration(heap_object));
- return Write(current, HeapObjectReference::From(heap_object, ref_type));
+ Handle<HeapObject> heap_object = ReadObject(space);
+ return slot_accessor.Write(heap_object, ref_type);
}
// Find a recently deserialized object using its offset from the current
// allocation point and write a pointer to it to the current object.
- case CASE_RANGE_ALL_SPACES(kBackref): {
- SnapshotSpace space = BackRef::Decode(data);
- HeapObject heap_object = GetBackReferencedObject(space);
- DCHECK(!Heap::InYoungGeneration(heap_object));
- return Write(current, HeapObjectReference::From(
- heap_object, GetAndResetNextReferenceType()));
+ case kBackref: {
+ Handle<HeapObject> heap_object = GetBackReferencedObject();
+ return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
+ }
+
+ // Reference an object in the read-only heap. This should be used when an
+ // object is read-only, but is not a root.
+ case kReadOnlyHeapRef: {
+ DCHECK(isolate()->heap()->deserialization_complete());
+ uint32_t chunk_index = source_.GetInt();
+ uint32_t chunk_offset = source_.GetInt();
+
+ ReadOnlySpace* read_only_space = isolate()->heap()->read_only_space();
+ ReadOnlyPage* page = read_only_space->pages()[chunk_index];
+ Address address = page->OffsetToAddress(chunk_offset);
+ HeapObject heap_object = HeapObject::FromAddress(address);
+
+ return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
}
// Find an object in the roots array and write a pointer to it to the
@@ -598,41 +821,39 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
case kRootArray: {
int id = source_.GetInt();
RootIndex root_index = static_cast<RootIndex>(id);
- HeapObject heap_object = HeapObject::cast(isolate()->root(root_index));
- DCHECK(!Heap::InYoungGeneration(heap_object));
+ Handle<HeapObject> heap_object =
+ Handle<HeapObject>::cast(isolate()->root_handle(root_index));
hot_objects_.Add(heap_object);
- return Write(current, HeapObjectReference::From(
- heap_object, GetAndResetNextReferenceType()));
+ return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
}
// Find an object in the startup object cache and write a pointer to it to
// the current object.
case kStartupObjectCache: {
int cache_index = source_.GetInt();
+ // TODO(leszeks): Could we use the address of the startup_object_cache
+ // entry as a Handle backing?
HeapObject heap_object =
HeapObject::cast(isolate()->startup_object_cache()->at(cache_index));
- DCHECK(!Heap::InYoungGeneration(heap_object));
- return Write(current, HeapObjectReference::From(
- heap_object, GetAndResetNextReferenceType()));
+ return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
}
// Find an object in the read-only object cache and write a pointer to it
// to the current object.
case kReadOnlyObjectCache: {
int cache_index = source_.GetInt();
+ // TODO(leszeks): Could we use the address of the cached_read_only_object
+ // entry as a Handle backing?
HeapObject heap_object = HeapObject::cast(
isolate()->read_only_heap()->cached_read_only_object(cache_index));
- DCHECK(!Heap::InYoungGeneration(heap_object));
- return Write(current, HeapObjectReference::From(
- heap_object, GetAndResetNextReferenceType()));
+ return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
}
// Deserialize a new meta-map and write a pointer to it to the current
// object.
case kNewMetaMap: {
- HeapObject heap_object = ReadMetaMap();
- DCHECK(!Heap::InYoungGeneration(heap_object));
- return Write(current, HeapObjectReference::Strong(heap_object));
+ Handle<HeapObject> heap_object = ReadMetaMap();
+ return slot_accessor.Write(heap_object, HeapObjectReferenceType::STRONG);
}
// Find an external reference and write a pointer to it to the current
@@ -641,10 +862,11 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
case kExternalReference: {
Address address = ReadExternalReferenceCase();
if (V8_HEAP_SANDBOX_BOOL && data == kSandboxedExternalReference) {
- return WriteExternalPointer(current, address);
+ return WriteExternalPointer(slot_accessor.slot(), address,
+ kForeignForeignAddressTag);
} else {
DCHECK(!V8_HEAP_SANDBOX_BOOL);
- return WriteAddress(current, address);
+ return WriteAddress(slot_accessor.slot(), address);
}
}
@@ -657,49 +879,36 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
// the current object.
case kAttachedReference: {
int index = source_.GetInt();
- HeapObjectReference ref = HeapObjectReference::From(
- *attached_objects_[index], GetAndResetNextReferenceType());
+ Handle<HeapObject> heap_object = attached_objects_[index];
// This is the only case where we might encounter new space objects, so
- // maybe emit a write barrier before returning the updated slot.
- TSlot ret = Write(current, ref);
- if (Heap::InYoungGeneration(ref)) {
- HeapObject current_object =
- HeapObject::FromAddress(current_object_address);
- GenerationalBarrier(current_object, MaybeObjectSlot(current.address()),
- ref);
- }
- return ret;
+ // maybe emit a generational write barrier.
+ return slot_accessor.WriteWithGenerationalBarrier(
+ heap_object, GetAndResetNextReferenceType());
}
case kNop:
- return current;
-
- // NextChunk should only be seen during object allocation.
- case kNextChunk:
- UNREACHABLE();
+ return 0;
case kRegisterPendingForwardRef: {
- DCHECK_NE(current_object_address, kNullAddress);
- HeapObject obj = HeapObject::FromAddress(current_object_address);
HeapObjectReferenceType ref_type = GetAndResetNextReferenceType();
- unresolved_forward_refs_.emplace_back(
- obj, current.address() - current_object_address, ref_type);
+ unresolved_forward_refs_.emplace_back(slot_accessor.object(),
+ slot_accessor.offset(), ref_type);
num_unresolved_forward_refs_++;
- return current + 1;
+ return 1;
}
case kResolvePendingForwardRef: {
// Pending forward refs can only be resolved after the heap object's map
// field is deserialized; currently they only appear immediately after
// the map field.
- DCHECK_EQ(current.address(), current_object_address + kTaggedSize);
- HeapObject obj = HeapObject::FromAddress(current_object_address);
+ DCHECK_EQ(slot_accessor.offset(), HeapObject::kHeaderSize);
+ Handle<HeapObject> obj = slot_accessor.object();
int index = source_.GetInt();
auto& forward_ref = unresolved_forward_refs_[index];
- TaggedField<MaybeObject>::store(
- forward_ref.object, forward_ref.offset,
- HeapObjectReference::From(obj, forward_ref.ref_type));
+ SlotAccessorForHeapObject::ForSlotOffset(forward_ref.object,
+ forward_ref.offset)
+ .Write(*obj, forward_ref.ref_type);
num_unresolved_forward_refs_--;
if (num_unresolved_forward_refs_ == 0) {
// If there are no more pending fields, clear the entire pending field
@@ -707,9 +916,9 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
unresolved_forward_refs_.clear();
} else {
// Otherwise, at least clear the pending field.
- forward_ref.object = HeapObject();
+ forward_ref.object = Handle<HeapObject>();
}
- return current;
+ return 0;
}
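Taken together, the two cases above implement a two-phase fix-up for slots whose target object appears later in the stream; a condensed sketch using only types from this patch:

    // Phase 1 (kRegisterPendingForwardRef): remember where the pointer goes.
    unresolved_forward_refs_.emplace_back(holder, offset, ref_type);
    // Phase 2 (kResolvePendingForwardRef): once the target exists, patch the
    // recorded slot through a GC-safe accessor.
    auto& ref = unresolved_forward_refs_[index];
    SlotAccessorForHeapObject::ForSlotOffset(ref.object, ref.offset)
        .Write(*target, ref.ref_type);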
case kSynchronize:
@@ -719,31 +928,74 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
// Deserialize raw data of variable length.
case kVariableRawData: {
- int size_in_bytes = source_.GetInt();
- DCHECK(IsAligned(size_in_bytes, kTaggedSize));
- source_.CopyRaw(current.ToVoidPtr(), size_in_bytes);
- return TSlot(current.address() + size_in_bytes);
+ // This operation is only supported for tagged-size slots, else we might
+ // become misaligned.
+ DCHECK_EQ(TSlot::kSlotDataSize, kTaggedSize);
+ int size_in_tagged = source_.GetInt();
+ // TODO(leszeks): Only copy slots when there are Smis in the serialized
+ // data.
+ source_.CopySlots(slot_accessor.slot().location(), size_in_tagged);
+ return size_in_tagged;
}
// Deserialize raw code directly into the body of the code object.
- case kVariableRawCode: {
- // VariableRawCode can only occur right after the heap object header.
- DCHECK_EQ(current.address(), current_object_address + kTaggedSize);
- int size_in_bytes = source_.GetInt();
- DCHECK(IsAligned(size_in_bytes, kTaggedSize));
- source_.CopyRaw(
- reinterpret_cast<void*>(current_object_address + Code::kDataStart),
- size_in_bytes);
- // Deserialize tagged fields in the code object header and reloc infos.
- ReadCodeObjectBody(current_object_address);
- // Set current to the code object end.
- return TSlot(current.address() + Code::kDataStart -
- HeapObject::kHeaderSize + size_in_bytes);
+ case kCodeBody: {
+ // This operation is only supported for tagged-size slots; otherwise we
+ // might become misaligned.
+ DCHECK_EQ(TSlot::kSlotDataSize, kTaggedSize);
+ // CodeBody can only occur right after the heap object header.
+ DCHECK_EQ(slot_accessor.offset(), HeapObject::kHeaderSize);
+
+ int size_in_tagged = source_.GetInt();
+ int size_in_bytes = size_in_tagged * kTaggedSize;
+
+ {
+ DisallowGarbageCollection no_gc;
+ Code code = Code::cast(*slot_accessor.object());
+
+ // First deserialize the code itself.
+ source_.CopyRaw(
+ reinterpret_cast<void*>(code.address() + Code::kDataStart),
+ size_in_bytes);
+ }
+
+ // Then deserialize the code header.
+ ReadData(slot_accessor.object(), HeapObject::kHeaderSize / kTaggedSize,
+ Code::kDataStart / kTaggedSize);
+
+ // Then deserialize the pre-serialized RelocInfo objects.
+ std::vector<Handle<HeapObject>> preserialized_objects;
+ while (source_.Peek() != kSynchronize) {
+ Handle<HeapObject> obj = ReadObject();
+ preserialized_objects.push_back(obj);
+ }
+ // Skip the synchronize bytecode.
+ source_.Advance(1);
+
+ // Finally, iterate the RelocInfos (in the same order the serializer did)
+ // and deserialize the respective data into each one. The RelocIterator
+ // holds a raw pointer to the code, so we have to disable garbage
+ // collection here. That is safe, though: any objects it would have
+ // needed are in the preserialized_objects vector.
+ {
+ DisallowGarbageCollection no_gc;
+
+ Code code = Code::cast(*slot_accessor.object());
+ RelocInfoVisitor visitor(this, &preserialized_objects);
+ for (RelocIterator it(code, Code::BodyDescriptor::kRelocModeMask);
+ !it.done(); it.next()) {
+ it.rinfo()->Visit(&visitor);
+ }
+ }
+
+ // Advance to the end of the code object.
+ return (Code::kDataStart - HeapObject::kHeaderSize) / kTaggedSize +
+ size_in_tagged;
}
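The value returned by kCodeBody is plain slot arithmetic: everything between the end of the object header and the end of the copied body has been consumed. A back-of-the-envelope check with assumed layout constants (the real HeapObject::kHeaderSize and Code::kDataStart are target-dependent):

#include <cassert>

constexpr int kTaggedSize = 8;   // assumption: 64-bit, no pointer compression
constexpr int kHeaderSize = 8;   // stand-in for HeapObject::kHeaderSize
constexpr int kDataStart = 64;   // stand-in for Code::kDataStart

// Slots consumed by kCodeBody: the header tail plus the raw body.
constexpr int SlotsAdvanced(int size_in_tagged) {
  return (kDataStart - kHeaderSize) / kTaggedSize + size_in_tagged;
}

int main() {
  // A 10-slot body consumes 7 header-tail slots plus 10 body slots.
  assert(SlotsAdvanced(10) == 17);
}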
case kVariableRepeat: {
int repeats = VariableRepeatCount::Decode(source_.GetInt());
- return ReadRepeatedObject(current, repeats);
+ return ReadRepeatedObject(slot_accessor, repeats);
}
case kOffHeapBackingStore: {
@@ -755,7 +1007,7 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
CHECK_NOT_NULL(backing_store);
source_.CopyRaw(backing_store->buffer_start(), byte_length);
backing_stores_.push_back(std::move(backing_store));
- return current;
+ return 0;
}
case kSandboxedApiReference:
@@ -771,29 +1023,24 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
address = reinterpret_cast<Address>(NoExternalReferencesCallback);
}
if (V8_HEAP_SANDBOX_BOOL && data == kSandboxedApiReference) {
- return WriteExternalPointer(current, address);
+ return WriteExternalPointer(slot_accessor.slot(), address,
+ kForeignForeignAddressTag);
} else {
DCHECK(!V8_HEAP_SANDBOX_BOOL);
- return WriteAddress(current, address);
+ return WriteAddress(slot_accessor.slot(), address);
}
}
case kClearedWeakReference:
- return Write(current, HeapObjectReference::ClearedValue(isolate()));
+ return slot_accessor.Write(HeapObjectReference::ClearedValue(isolate()));
case kWeakPrefix: {
// We shouldn't have two weak prefixes in a row.
DCHECK(!next_reference_is_weak_);
// We shouldn't have weak refs without a current object.
- DCHECK_NE(current_object_address, kNullAddress);
+ DCHECK_NE(slot_accessor.object()->address(), kNullAddress);
next_reference_is_weak_ = true;
- return current;
- }
-
- case CASE_RANGE(kAlignmentPrefix, 3): {
- int alignment = data - (SerializerDeserializer::kAlignmentPrefix - 1);
- allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
- return current;
+ return 0;
}
case CASE_RANGE(kRootArrayConstants, 32): {
@@ -805,33 +1052,36 @@ TSlot Deserializer::ReadSingleBytecodeData(byte data, TSlot current,
static_cast<int>(RootIndex::kLastImmortalImmovableRoot));
RootIndex root_index = RootArrayConstant::Decode(data);
- MaybeObject object = MaybeObject(ReadOnlyRoots(isolate()).at(root_index));
- DCHECK(!Heap::InYoungGeneration(object));
- return Write(current, object);
+ Handle<HeapObject> heap_object =
+ Handle<HeapObject>::cast(isolate()->root_handle(root_index));
+ return slot_accessor.Write(heap_object, HeapObjectReferenceType::STRONG);
}
case CASE_RANGE(kHotObject, 8): {
int index = HotObject::Decode(data);
- HeapObject hot_object = hot_objects_.Get(index);
- DCHECK(!Heap::InYoungGeneration(hot_object));
- return Write(current, HeapObjectReference::From(
- hot_object, GetAndResetNextReferenceType()));
+ Handle<HeapObject> hot_object = hot_objects_.Get(index);
+ return slot_accessor.Write(hot_object, GetAndResetNextReferenceType());
}
case CASE_RANGE(kFixedRawData, 32): {
// Deserialize raw data of fixed length from 1 to 32 times kTaggedSize.
int size_in_tagged = FixedRawDataWithSize::Decode(data);
- source_.CopyRaw(current.ToVoidPtr(), size_in_tagged * kTaggedSize);
-
- int size_in_bytes = size_in_tagged * kTaggedSize;
- int size_in_slots = size_in_bytes / TSlot::kSlotDataSize;
- DCHECK(IsAligned(size_in_bytes, TSlot::kSlotDataSize));
- return current + size_in_slots;
+ STATIC_ASSERT(TSlot::kSlotDataSize == kTaggedSize ||
+ TSlot::kSlotDataSize == 2 * kTaggedSize);
+ int size_in_slots = size_in_tagged / (TSlot::kSlotDataSize / kTaggedSize);
+ // kFixedRawData can have kTaggedSize != TSlot::kSlotDataSize when
+ // serializing Smi roots in pointer-compressed builds. In this case, the
+ // size in bytes is unconditionally the (full) slot size.
+ DCHECK_IMPLIES(kTaggedSize != TSlot::kSlotDataSize, size_in_slots == 1);
+ // TODO(leszeks): Only copy slots when there are Smis in the serialized
+ // data.
+ source_.CopySlots(slot_accessor.slot().location(), size_in_slots);
+ return size_in_slots;
}
case CASE_RANGE(kFixedRepeat, 16): {
int repeats = FixedRepeatWithCount::Decode(data);
- return ReadRepeatedObject(current, repeats);
+ return ReadRepeatedObject(slot_accessor, repeats);
}
#ifdef DEBUG
@@ -864,5 +1114,42 @@ Address Deserializer::ReadExternalReferenceCase() {
return isolate()->external_reference_table()->address(reference_id);
}
+namespace {
+AllocationType SpaceToType(SnapshotSpace space) {
+ switch (space) {
+ case SnapshotSpace::kCode:
+ return AllocationType::kCode;
+ case SnapshotSpace::kMap:
+ return AllocationType::kMap;
+ case SnapshotSpace::kOld:
+ return AllocationType::kOld;
+ case SnapshotSpace::kReadOnlyHeap:
+ return AllocationType::kReadOnly;
+ }
+}
+} // namespace
+
+HeapObject Deserializer::Allocate(SnapshotSpace space, int size,
+ AllocationAlignment alignment) {
+#ifdef DEBUG
+ if (!previous_allocation_obj_.is_null()) {
+ // Make sure that the previous object is initialized sufficiently to
+ // be iterated over by the GC.
+ int object_size = previous_allocation_obj_->Size();
+ DCHECK_LE(object_size, previous_allocation_size_);
+ }
+#endif
+
+ HeapObject obj = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
+ size, SpaceToType(space), AllocationOrigin::kRuntime, alignment);
+
+#ifdef DEBUG
+ previous_allocation_obj_ = handle(obj, isolate());
+ previous_allocation_size_ = size;
+#endif
+
+ return obj;
+}
+
} // namespace internal
} // namespace v8
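The new Allocate path folds the old DeserializerAllocator into a direct heap allocation plus, in DEBUG builds, a check that the previously returned object was initialized far enough that its self-reported size does not exceed what was allocated. A standalone sketch of that bookkeeping (toy byte-buffer allocator, illustrative only):

#include <cassert>
#include <cstddef>
#include <deque>
#include <vector>

class ToyAllocator {
 public:
  std::vector<unsigned char>* Allocate(size_t size) {
    if (previous_ != nullptr) {
      // The "object size" here is simply how many bytes were written so far;
      // mirrors DCHECK_LE(object_size, previous_allocation_size_).
      assert(previous_->size() <= previous_size_);
    }
    storage_.emplace_back();  // deque: no reference invalidation on growth
    storage_.back().reserve(size);
    previous_ = &storage_.back();
    previous_size_ = size;
    return previous_;
  }

 private:
  std::deque<std::vector<unsigned char>> storage_;
  std::vector<unsigned char>* previous_ = nullptr;
  size_t previous_size_ = 0;
};

int main() {
  ToyAllocator alloc;
  std::vector<unsigned char>* obj = alloc.Allocate(16);
  obj->assign(16, 0);  // initialize the object before the next allocation
  alloc.Allocate(8);   // the sanity check on the previous object passes
}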
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index ae410bacd3..62f7ea39e0 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -17,7 +17,6 @@
#include "src/objects/map.h"
#include "src/objects/string-table.h"
#include "src/objects/string.h"
-#include "src/snapshot/deserializer-allocator.h"
#include "src/snapshot/serializer-deserializer.h"
#include "src/snapshot/snapshot-source-sink.h"
@@ -40,27 +39,31 @@ class Object;
// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
public:
+ // Smi value for filling in not-yet initialized tagged field values with a
+ // valid tagged pointer. A field value equal to this doesn't necessarily
+ // indicate that a field is uninitialized, but an uninitialized field should
+ // definitely equal this value.
+ //
+ // This _has_ to be kNullAddress, so that an uninitialized_field_value read as
+ // an embedded pointer field is interpreted as nullptr. This is so that
+ // uninitialized embedded pointers are not forwarded to the embedder as part
+ // of embedder tracing (and similar mechanisms), as nullptrs are skipped for
+ // those cases and otherwise the embedder would try to dereference the
+ // uninitialized pointer value.
+ static constexpr Smi uninitialized_field_value() { return Smi(kNullAddress); }
+
~Deserializer() override;
+ Deserializer(const Deserializer&) = delete;
+ Deserializer& operator=(const Deserializer&) = delete;
- void SetRehashability(bool v) { can_rehash_ = v; }
uint32_t GetChecksum() const { return source_.GetChecksum(); }
protected:
// Create a deserializer from a snapshot byte source.
- template <class Data>
- Deserializer(Data* data, bool deserializing_user_code)
- : isolate_(nullptr),
- source_(data->Payload()),
- magic_number_(data->GetMagicNumber()),
- deserializing_user_code_(deserializing_user_code),
- can_rehash_(false) {
- allocator()->DecodeReservation(data->Reservations());
- // We start the indices here at 1, so that we can distinguish between an
- // actual index and a nullptr in a deserialized object requiring fix-up.
- backing_stores_.push_back({});
- }
+ Deserializer(Isolate* isolate, Vector<const byte> payload,
+ uint32_t magic_number, bool deserializing_user_code,
+ bool can_rehash);
- void Initialize(Isolate* isolate);
void DeserializeDeferredObjects();
// Create Log events for newly deserialized objects.
@@ -68,9 +71,14 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
void LogScriptEvents(Script script);
void LogNewMapEvents();
+ // Descriptor arrays are deserialized as "strong", so that there is no risk of
+ // them getting trimmed during a partial deserialization. This method makes
+ // them "weak" again after deserialization completes.
+ void WeakenDescriptorArrays();
+
// This returns the address of an object that has been described in the
- // snapshot by chunk index and offset.
- HeapObject GetBackReferencedObject(SnapshotSpace space);
+ // snapshot by object vector index.
+ Handle<HeapObject> GetBackReferencedObject();
// Add an object to back an attached reference. The order to add objects must
// mirror the order they are added in the serializer.
@@ -85,17 +93,17 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
Isolate* isolate() const { return isolate_; }
SnapshotByteSource* source() { return &source_; }
- const std::vector<AllocationSite>& new_allocation_sites() const {
+ const std::vector<Handle<AllocationSite>>& new_allocation_sites() const {
return new_allocation_sites_;
}
- const std::vector<Code>& new_code_objects() const {
+ const std::vector<Handle<Code>>& new_code_objects() const {
return new_code_objects_;
}
- const std::vector<Map>& new_maps() const { return new_maps_; }
- const std::vector<AccessorInfo>& accessor_infos() const {
+ const std::vector<Handle<Map>>& new_maps() const { return new_maps_; }
+ const std::vector<Handle<AccessorInfo>>& accessor_infos() const {
return accessor_infos_;
}
- const std::vector<CallHandlerInfo>& call_handler_infos() const {
+ const std::vector<Handle<CallHandlerInfo>>& call_handler_infos() const {
return call_handler_infos_;
}
const std::vector<Handle<Script>>& new_scripts() const {
@@ -106,76 +114,97 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
return new_off_heap_array_buffers_;
}
+ const std::vector<Handle<DescriptorArray>>& new_descriptor_arrays() const {
+ return new_descriptor_arrays_;
+ }
+
std::shared_ptr<BackingStore> backing_store(size_t i) {
DCHECK_LT(i, backing_stores_.size());
return backing_stores_[i];
}
- DeserializerAllocator* allocator() { return &allocator_; }
bool deserializing_user_code() const { return deserializing_user_code_; }
bool can_rehash() const { return can_rehash_; }
void Rehash();
+ Handle<HeapObject> ReadObject();
+
private:
+ class RelocInfoVisitor;
+ // A circular queue of hot objects. This is added to in the same order as in
+ // Serializer::HotObjectsList, but this stores the objects as a vector of
+ // existing handles. This allows us to add Handles to the queue without having
+ // to create new handles. Note that this depends on those Handles staying
+ // valid as long as the HotObjectsList is alive.
+ class HotObjectsList {
+ public:
+ HotObjectsList() = default;
+ HotObjectsList(const HotObjectsList&) = delete;
+ HotObjectsList& operator=(const HotObjectsList&) = delete;
+
+ void Add(Handle<HeapObject> object) {
+ circular_queue_[index_] = object;
+ index_ = (index_ + 1) & kSizeMask;
+ }
+
+ Handle<HeapObject> Get(int index) {
+ DCHECK(!circular_queue_[index].is_null());
+ return circular_queue_[index];
+ }
+
+ private:
+ static const int kSize = kHotObjectCount;
+ static const int kSizeMask = kSize - 1;
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(kSize));
+ Handle<HeapObject> circular_queue_[kSize];
+ int index_ = 0;
+ };
+
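A standalone analogue of HotObjectsList: a fixed, power-of-two-sized ring where Add() overwrites the oldest entry and the index wraps via a bitmask (generic container for illustration, not the V8 class):

#include <cassert>

template <typename T, int kSize>
class RingBuffer {
  static_assert((kSize & (kSize - 1)) == 0, "kSize must be a power of two");

 public:
  void Add(const T& value) {
    entries_[index_] = value;
    index_ = (index_ + 1) & (kSize - 1);  // cheap modulo via bitmask
  }
  const T& Get(int i) const { return entries_[i]; }

 private:
  T entries_[kSize] = {};
  int index_ = 0;
};

int main() {
  RingBuffer<int, 8> hot;
  for (int i = 0; i < 10; i++) hot.Add(i);  // entries 8 and 9 wrap around
  assert(hot.Get(0) == 8 && hot.Get(1) == 9 && hot.Get(2) == 2);
}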
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override;
void Synchronize(VisitorSynchronization::SyncTag tag) override;
template <typename TSlot>
- inline TSlot Write(TSlot dest, MaybeObject value);
-
- template <typename TSlot>
- inline TSlot Write(TSlot dest, HeapObject value,
- HeapObjectReferenceType type);
+ inline int WriteAddress(TSlot dest, Address value);
template <typename TSlot>
- inline TSlot WriteAddress(TSlot dest, Address value);
+ inline int WriteExternalPointer(TSlot dest, Address value,
+ ExternalPointerTag tag);
- template <typename TSlot>
- inline TSlot WriteExternalPointer(TSlot dest, Address value);
+ // Fills in a heap object's data from start to end (exclusive). Start and end
+ // are slot indices within the object.
+ void ReadData(Handle<HeapObject> object, int start_slot_index,
+ int end_slot_index);
- // Fills in some heap data in an area from start to end (non-inclusive). The
- // object_address is the address of the object we are writing into, or nullptr
- // if we are not writing into an object, i.e. if we are writing a series of
- // tagged values that are not on the heap.
- template <typename TSlot>
- void ReadData(TSlot start, TSlot end, Address object_address);
+ // Fills in a contiguous range of full object slots (e.g. root pointers) from
+ // start to end (exclusive).
+ void ReadData(FullMaybeObjectSlot start, FullMaybeObjectSlot end);
// Helper for ReadData which reads the given bytecode and fills in some heap
// data into the given slot. May fill in zero or multiple slots, so it returns
- // the next unfilled slot.
- template <typename TSlot>
- TSlot ReadSingleBytecodeData(byte data, TSlot current,
- Address object_address);
+ // the number of slots filled.
+ template <typename SlotAccessor>
+ int ReadSingleBytecodeData(byte data, SlotAccessor slot_accessor);
// A helper function for ReadData for reading external references.
inline Address ReadExternalReferenceCase();
- HeapObject ReadObject(SnapshotSpace space_number);
- HeapObject ReadMetaMap();
- void ReadCodeObjectBody(Address code_object_address);
+ Handle<HeapObject> ReadObject(SnapshotSpace space_number);
+ Handle<HeapObject> ReadMetaMap();
HeapObjectReferenceType GetAndResetNextReferenceType();
- protected:
- HeapObject ReadObject();
-
- public:
- void VisitCodeTarget(Code host, RelocInfo* rinfo);
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo);
- void VisitRuntimeEntry(Code host, RelocInfo* rinfo);
- void VisitExternalReference(Code host, RelocInfo* rinfo);
- void VisitInternalReference(Code host, RelocInfo* rinfo);
- void VisitOffHeapTarget(Code host, RelocInfo* rinfo);
-
- private:
- template <typename TSlot>
- TSlot ReadRepeatedObject(TSlot current, int repeat_count);
+ template <typename SlotGetter>
+ int ReadRepeatedObject(SlotGetter slot_getter, int repeat_count);
// Special handling for serialized code like hooking up internalized strings.
- HeapObject PostProcessNewObject(HeapObject obj, SnapshotSpace space);
+ void PostProcessNewObject(Handle<Map> map, Handle<HeapObject> obj,
+ SnapshotSpace space);
+
+ HeapObject Allocate(SnapshotSpace space, int size,
+ AllocationAlignment alignment);
// Cached current isolate.
Isolate* isolate_;
@@ -186,15 +215,20 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
SnapshotByteSource source_;
uint32_t magic_number_;
- std::vector<Map> new_maps_;
- std::vector<AllocationSite> new_allocation_sites_;
- std::vector<Code> new_code_objects_;
- std::vector<AccessorInfo> accessor_infos_;
- std::vector<CallHandlerInfo> call_handler_infos_;
+ HotObjectsList hot_objects_;
+ std::vector<Handle<Map>> new_maps_;
+ std::vector<Handle<AllocationSite>> new_allocation_sites_;
+ std::vector<Handle<Code>> new_code_objects_;
+ std::vector<Handle<AccessorInfo>> accessor_infos_;
+ std::vector<Handle<CallHandlerInfo>> call_handler_infos_;
std::vector<Handle<Script>> new_scripts_;
std::vector<Handle<JSArrayBuffer>> new_off_heap_array_buffers_;
+ std::vector<Handle<DescriptorArray>> new_descriptor_arrays_;
std::vector<std::shared_ptr<BackingStore>> backing_stores_;
+ // Vector of allocated objects that can be accessed by a backref, by index.
+ std::vector<Handle<HeapObject>> back_refs_;
+
// Unresolved forward references (registered with kRegisterPendingForwardRef)
// are collected in order as (object, field offset) pairs. The subsequent
// forward ref resolution (with kResolvePendingForwardRef) accesses this
@@ -202,34 +236,32 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
//
// The vector is cleared when there are no more unresolved forward refs.
struct UnresolvedForwardRef {
- UnresolvedForwardRef(HeapObject object, int offset,
+ UnresolvedForwardRef(Handle<HeapObject> object, int offset,
HeapObjectReferenceType ref_type)
: object(object), offset(offset), ref_type(ref_type) {}
- HeapObject object;
+ Handle<HeapObject> object;
int offset;
HeapObjectReferenceType ref_type;
};
std::vector<UnresolvedForwardRef> unresolved_forward_refs_;
int num_unresolved_forward_refs_ = 0;
- DeserializerAllocator allocator_;
const bool deserializing_user_code_;
bool next_reference_is_weak_ = false;
// TODO(6593): generalize rehashing, and remove this flag.
bool can_rehash_;
- std::vector<HeapObject> to_rehash_;
+ std::vector<Handle<HeapObject>> to_rehash_;
#ifdef DEBUG
uint32_t num_api_references_;
-#endif // DEBUG
-
- // For source(), isolate(), and allocator().
- friend class DeserializerAllocator;
- DISALLOW_COPY_AND_ASSIGN(Deserializer);
+ // Record the previous object allocated for DCHECKs.
+ Handle<HeapObject> previous_allocation_obj_;
+ int previous_allocation_size_ = 0;
+#endif // DEBUG
};
// Used to insert a deserialized internalized string into the string table.
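The wholesale switch of these member vectors from raw objects to Handles is what lets the deserializer tolerate GC between reads. A toy model of the difference (illustrative only; real V8 handles are indirections through handle scopes, not plain indices):

#include <cassert>
#include <vector>

struct ToyHeap {
  std::vector<int> storage;  // objects may move when storage grows ("GC")
  int& Deref(size_t handle) { return storage[handle]; }
};

int main() {
  ToyHeap heap;
  heap.storage.push_back(41);
  size_t handle = 0;             // stable indirection into the heap
  int* raw = &heap.storage[0];   // may dangle after reallocation
  heap.storage.resize(4096);     // "GC moves objects"
  (void)raw;                     // dereferencing raw here could be UB
  heap.Deref(handle) = 42;       // handle access is still valid
  assert(heap.Deref(handle) == 42);
}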
diff --git a/deps/v8/src/snapshot/embedded/embedded-data.cc b/deps/v8/src/snapshot/embedded/embedded-data.cc
index 3496a613f2..03702bf331 100644
--- a/deps/v8/src/snapshot/embedded/embedded-data.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-data.cc
@@ -50,9 +50,11 @@ Code InstructionStream::TryLookupCode(Isolate* isolate, Address address) {
}
// static
-void InstructionStream::CreateOffHeapInstructionStream(
- Isolate* isolate, uint8_t** code, uint32_t* code_size, uint8_t** metadata,
- uint32_t* metadata_size) {
+void InstructionStream::CreateOffHeapInstructionStream(Isolate* isolate,
+ uint8_t** code,
+ uint32_t* code_size,
+ uint8_t** data,
+ uint32_t* data_size) {
// Create the embedded blob from scratch using the current Isolate's heap.
EmbeddedData d = EmbeddedData::FromIsolate(isolate);
@@ -71,14 +73,13 @@ void InstructionStream::CreateOffHeapInstructionStream(
alignment, PageAllocator::kReadWrite));
CHECK_NOT_NULL(allocated_code_bytes);
- void* const requested_allocation_metadata_address =
+ void* const requested_allocation_data_address =
AlignedAddress(isolate->heap()->GetRandomMmapAddr(), alignment);
- const uint32_t allocation_metadata_size =
- RoundUp(d.metadata_size(), alignment);
- uint8_t* allocated_metadata_bytes = static_cast<uint8_t*>(AllocatePages(
- page_allocator, requested_allocation_metadata_address,
- allocation_metadata_size, alignment, PageAllocator::kReadWrite));
- CHECK_NOT_NULL(allocated_metadata_bytes);
+ const uint32_t allocation_data_size = RoundUp(d.data_size(), alignment);
+ uint8_t* allocated_data_bytes = static_cast<uint8_t*>(AllocatePages(
+ page_allocator, requested_allocation_data_address, allocation_data_size,
+ alignment, PageAllocator::kReadWrite));
+ CHECK_NOT_NULL(allocated_data_bytes);
// Copy the embedded blob into the newly allocated backing store. Switch
// permissions to read-execute since builtin code is immutable from now on
@@ -92,14 +93,14 @@ void InstructionStream::CreateOffHeapInstructionStream(
CHECK(SetPermissions(page_allocator, allocated_code_bytes,
allocation_code_size, PageAllocator::kReadExecute));
- std::memcpy(allocated_metadata_bytes, d.metadata(), d.metadata_size());
- CHECK(SetPermissions(page_allocator, allocated_metadata_bytes,
- allocation_metadata_size, PageAllocator::kRead));
+ std::memcpy(allocated_data_bytes, d.data(), d.data_size());
+ CHECK(SetPermissions(page_allocator, allocated_data_bytes,
+ allocation_data_size, PageAllocator::kRead));
*code = allocated_code_bytes;
*code_size = d.code_size();
- *metadata = allocated_metadata_bytes;
- *metadata_size = d.metadata_size();
+ *data = allocated_data_bytes;
+ *data_size = d.data_size();
d.Dispose();
}
@@ -107,13 +108,13 @@ void InstructionStream::CreateOffHeapInstructionStream(
// static
void InstructionStream::FreeOffHeapInstructionStream(uint8_t* code,
uint32_t code_size,
- uint8_t* metadata,
- uint32_t metadata_size) {
+ uint8_t* data,
+ uint32_t data_size) {
v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
const uint32_t page_size =
static_cast<uint32_t>(page_allocator->AllocatePageSize());
CHECK(FreePages(page_allocator, code, RoundUp(code_size, page_size)));
- CHECK(FreePages(page_allocator, metadata, RoundUp(metadata_size, page_size)));
+ CHECK(FreePages(page_allocator, data, RoundUp(data_size, page_size)));
}
namespace {
@@ -157,9 +158,8 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
for (int i = 0; i < Builtins::builtin_count; i++) {
- if (!Builtins::IsIsolateIndependent(i)) continue;
-
Code code = isolate->builtins()->builtin(i);
RelocIterator on_heap_it(code, kRelocMask);
RelocIterator off_heap_it(blob, code, kRelocMask);
@@ -205,37 +205,40 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
Builtins* builtins = isolate->builtins();
// Store instruction stream lengths and offsets.
- std::vector<struct Metadata> metadata(kTableSize);
+ std::vector<struct LayoutDescription> layout_descriptions(kTableSize);
bool saw_unsafe_builtin = false;
uint32_t raw_code_size = 0;
+ uint32_t raw_data_size = 0;
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
for (int i = 0; i < Builtins::builtin_count; i++) {
Code code = builtins->builtin(i);
- if (Builtins::IsIsolateIndependent(i)) {
- // Sanity-check that the given builtin is isolate-independent and does not
- // use the trampoline register in its calling convention.
- if (!code.IsIsolateIndependent(isolate)) {
- saw_unsafe_builtin = true;
- fprintf(stderr, "%s is not isolate-independent.\n", Builtins::name(i));
- }
- if (BuiltinAliasesOffHeapTrampolineRegister(isolate, code)) {
- saw_unsafe_builtin = true;
- fprintf(stderr, "%s aliases the off-heap trampoline register.\n",
- Builtins::name(i));
- }
-
- uint32_t length = static_cast<uint32_t>(code.raw_instruction_size());
-
- DCHECK_EQ(0, raw_code_size % kCodeAlignment);
- metadata[i].instructions_offset = raw_code_size;
- metadata[i].instructions_length = length;
-
- // Align the start of each instruction stream.
- raw_code_size += PadAndAlign(length);
- } else {
- metadata[i].instructions_offset = raw_code_size;
+ // Sanity-check that the given builtin is isolate-independent and does not
+ // use the trampoline register in its calling convention.
+ if (!code.IsIsolateIndependent(isolate)) {
+ saw_unsafe_builtin = true;
+ fprintf(stderr, "%s is not isolate-independent.\n", Builtins::name(i));
+ }
+ if (BuiltinAliasesOffHeapTrampolineRegister(isolate, code)) {
+ saw_unsafe_builtin = true;
+ fprintf(stderr, "%s aliases the off-heap trampoline register.\n",
+ Builtins::name(i));
}
+
+ uint32_t instruction_size =
+ static_cast<uint32_t>(code.raw_instruction_size());
+ uint32_t metadata_size = static_cast<uint32_t>(code.raw_metadata_size());
+
+ DCHECK_EQ(0, raw_code_size % kCodeAlignment);
+ layout_descriptions[i].instruction_offset = raw_code_size;
+ layout_descriptions[i].instruction_length = instruction_size;
+ layout_descriptions[i].metadata_offset = raw_data_size;
+ layout_descriptions[i].metadata_length = metadata_size;
+
+ // Align the start of each section.
+ raw_code_size += PadAndAlignCode(instruction_size);
+ raw_data_size += PadAndAlignData(metadata_size);
}
CHECK_WITH_MSG(
!saw_unsafe_builtin,
@@ -243,12 +246,15 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
"isolate-dependent code or aliases the off-heap trampoline register. "
"If in doubt, ask jgruber@");
+ // Allocate space for the code section, value-initialized to 0.
+ STATIC_ASSERT(RawCodeOffset() == 0);
const uint32_t blob_code_size = RawCodeOffset() + raw_code_size;
- uint8_t* const blob_code = new uint8_t[blob_code_size];
- uint8_t* const raw_code_start = blob_code + RawCodeOffset();
- const uint32_t blob_metadata_size =
- MetadataTableOffset() + MetadataTableSize();
- uint8_t* const blob_metadata = new uint8_t[blob_metadata_size];
+ uint8_t* const blob_code = new uint8_t[blob_code_size]();
+
+ // Allocate space for the data section, value-initialized to 0.
+ STATIC_ASSERT(IsAligned(FixedDataSize(), Code::kMetadataAlignment));
+ const uint32_t blob_data_size = FixedDataSize() + raw_data_size;
+ uint8_t* const blob_data = new uint8_t[blob_data_size]();
// Initially zap the entire blob, effectively padding the alignment area
// between two builtins with int3's (on x64/ia32).
@@ -258,19 +264,34 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
{
STATIC_ASSERT(IsolateHashSize() == kSizetSize);
const size_t hash = isolate->HashIsolateForEmbeddedBlob();
- std::memcpy(blob_metadata + IsolateHashOffset(), &hash, IsolateHashSize());
+ std::memcpy(blob_data + IsolateHashOffset(), &hash, IsolateHashSize());
}
- // Write the metadata tables.
- DCHECK_EQ(MetadataTableSize(), sizeof(metadata[0]) * metadata.size());
- std::memcpy(blob_metadata + MetadataTableOffset(), metadata.data(),
- MetadataTableSize());
+ // Write the layout_descriptions tables.
+ DCHECK_EQ(LayoutDescriptionTableSize(),
+ sizeof(layout_descriptions[0]) * layout_descriptions.size());
+ std::memcpy(blob_data + LayoutDescriptionTableOffset(),
+ layout_descriptions.data(), LayoutDescriptionTableSize());
- // Write the raw data section.
+ // .. and the variable-size data section.
+ uint8_t* const raw_metadata_start = blob_data + RawMetadataOffset();
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
for (int i = 0; i < Builtins::builtin_count; i++) {
- if (!Builtins::IsIsolateIndependent(i)) continue;
Code code = builtins->builtin(i);
- uint32_t offset = metadata[i].instructions_offset;
+ uint32_t offset = layout_descriptions[i].metadata_offset;
+ uint8_t* dst = raw_metadata_start + offset;
+ DCHECK_LE(RawMetadataOffset() + offset + code.raw_metadata_size(),
+ blob_data_size);
+ std::memcpy(dst, reinterpret_cast<uint8_t*>(code.raw_metadata_start()),
+ code.raw_metadata_size());
+ }
+
+ // .. and the variable-size code section.
+ uint8_t* const raw_code_start = blob_code + RawCodeOffset();
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ Code code = builtins->builtin(i);
+ uint32_t offset = layout_descriptions[i].instruction_offset;
uint8_t* dst = raw_code_start + offset;
DCHECK_LE(RawCodeOffset() + offset + code.raw_instruction_size(),
blob_code_size);
@@ -278,20 +299,27 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
code.raw_instruction_size());
}
- EmbeddedData d(blob_code, blob_code_size, blob_metadata, blob_metadata_size);
+ EmbeddedData d(blob_code, blob_code_size, blob_data, blob_data_size);
// Fix up call targets that point to other embedded builtins.
FinalizeEmbeddedCodeTargets(isolate, &d);
// Hash the blob and store the result.
{
- STATIC_ASSERT(EmbeddedBlobHashSize() == kSizetSize);
- const size_t hash = d.CreateEmbeddedBlobHash();
- std::memcpy(blob_metadata + EmbeddedBlobHashOffset(), &hash,
- EmbeddedBlobHashSize());
-
- DCHECK_EQ(hash, d.CreateEmbeddedBlobHash());
- DCHECK_EQ(hash, d.EmbeddedBlobHash());
+ STATIC_ASSERT(EmbeddedBlobDataHashSize() == kSizetSize);
+ const size_t data_hash = d.CreateEmbeddedBlobDataHash();
+ std::memcpy(blob_data + EmbeddedBlobDataHashOffset(), &data_hash,
+ EmbeddedBlobDataHashSize());
+
+ STATIC_ASSERT(EmbeddedBlobCodeHashSize() == kSizetSize);
+ const size_t code_hash = d.CreateEmbeddedBlobCodeHash();
+ std::memcpy(blob_data + EmbeddedBlobCodeHashOffset(), &code_hash,
+ EmbeddedBlobCodeHashSize());
+
+ DCHECK_EQ(data_hash, d.CreateEmbeddedBlobDataHash());
+ DCHECK_EQ(data_hash, d.EmbeddedBlobDataHash());
+ DCHECK_EQ(code_hash, d.CreateEmbeddedBlobCodeHash());
+ DCHECK_EQ(code_hash, d.EmbeddedBlobCodeHash());
}
if (FLAG_serialization_statistics) d.PrintStatistics();
@@ -301,18 +329,30 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
Address EmbeddedData::InstructionStartOfBuiltin(int i) const {
DCHECK(Builtins::IsBuiltinId(i));
- const struct Metadata* metadata = Metadata();
- const uint8_t* result = RawCode() + metadata[i].instructions_offset;
- DCHECK_LE(result, code_ + code_size_);
- DCHECK_IMPLIES(result == code_ + code_size_,
- InstructionSizeOfBuiltin(i) == 0);
+ const struct LayoutDescription* descs = LayoutDescription();
+ const uint8_t* result = RawCode() + descs[i].instruction_offset;
+ DCHECK_LT(result, code_ + code_size_);
return reinterpret_cast<Address>(result);
}
uint32_t EmbeddedData::InstructionSizeOfBuiltin(int i) const {
DCHECK(Builtins::IsBuiltinId(i));
- const struct Metadata* metadata = Metadata();
- return metadata[i].instructions_length;
+ const struct LayoutDescription* descs = LayoutDescription();
+ return descs[i].instruction_length;
+}
+
+Address EmbeddedData::MetadataStartOfBuiltin(int i) const {
+ DCHECK(Builtins::IsBuiltinId(i));
+ const struct LayoutDescription* descs = LayoutDescription();
+ const uint8_t* result = RawMetadata() + descs[i].metadata_offset;
+ DCHECK_LE(descs[i].metadata_offset, data_size_);
+ return reinterpret_cast<Address>(result);
+}
+
+uint32_t EmbeddedData::MetadataSizeOfBuiltin(int i) const {
+ DCHECK(Builtins::IsBuiltinId(i));
+ const struct LayoutDescription* descs = LayoutDescription();
+ return descs[i].metadata_length;
}
Address EmbeddedData::InstructionStartOfBytecodeHandlers() const {
@@ -328,49 +368,49 @@ Address EmbeddedData::InstructionEndOfBytecodeHandlers() const {
InstructionSizeOfBuiltin(lastBytecodeHandler);
}
-size_t EmbeddedData::CreateEmbeddedBlobHash() const {
- STATIC_ASSERT(EmbeddedBlobHashOffset() == 0);
- STATIC_ASSERT(EmbeddedBlobHashSize() == kSizetSize);
- // Hash the entire blob except the hash field itself.
- Vector<const byte> payload1(metadata_ + EmbeddedBlobHashSize(),
- metadata_size_ - EmbeddedBlobHashSize());
- Vector<const byte> payload2(code_, code_size_);
- return Checksum(payload1, payload2);
+size_t EmbeddedData::CreateEmbeddedBlobDataHash() const {
+ STATIC_ASSERT(EmbeddedBlobDataHashOffset() == 0);
+ STATIC_ASSERT(EmbeddedBlobCodeHashOffset() == EmbeddedBlobDataHashSize());
+ STATIC_ASSERT(IsolateHashOffset() ==
+ EmbeddedBlobCodeHashOffset() + EmbeddedBlobCodeHashSize());
+ static constexpr uint32_t kFirstHashedDataOffset = IsolateHashOffset();
+ // Hash the entire data section except the embedded blob hash fields
+ // themselves.
+ Vector<const byte> payload(data_ + kFirstHashedDataOffset,
+ data_size_ - kFirstHashedDataOffset);
+ return Checksum(payload);
+}
+
+size_t EmbeddedData::CreateEmbeddedBlobCodeHash() const {
+ CHECK(FLAG_text_is_readable);
+ Vector<const byte> payload(code_, code_size_);
+ return Checksum(payload);
}
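The single blob hash is thus split in two: the data hash covers everything after the two leading hash fields, and the code hash covers the code section separately, computable only when the text section is readable (hence the CHECK on FLAG_text_is_readable). A sketch with a toy checksum and an assumed field layout:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Toy stand-in for V8's Checksum() helper (FNV-1a over the payload).
uint64_t Checksum(const uint8_t* p, size_t n) {
  uint64_t h = 14695981039346656037ull;
  for (size_t i = 0; i < n; i++) h = (h ^ p[i]) * 1099511628211ull;
  return h;
}

int main() {
  constexpr size_t kHashSize = sizeof(uint64_t);
  // Both hash fields sit at the front of the data section and are skipped.
  constexpr size_t kFirstHashedDataOffset = 2 * kHashSize;
  std::vector<uint8_t> data(64, 0xab), code(128, 0xcd);

  const uint64_t data_hash = Checksum(data.data() + kFirstHashedDataOffset,
                                      data.size() - kFirstHashedDataOffset);
  const uint64_t code_hash = Checksum(code.data(), code.size());

  // Storing the hashes in the skipped fields leaves both reproducible.
  std::memcpy(data.data(), &data_hash, kHashSize);
  std::memcpy(data.data() + kHashSize, &code_hash, kHashSize);
  assert(data_hash == Checksum(data.data() + kFirstHashedDataOffset,
                               data.size() - kFirstHashedDataOffset));
  assert(code_hash == Checksum(code.data(), code.size()));
}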
void EmbeddedData::PrintStatistics() const {
DCHECK(FLAG_serialization_statistics);
constexpr int kCount = Builtins::builtin_count;
-
- int embedded_count = 0;
- int instruction_size = 0;
int sizes[kCount];
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
for (int i = 0; i < kCount; i++) {
- if (!Builtins::IsIsolateIndependent(i)) continue;
- const int size = InstructionSizeOfBuiltin(i);
- instruction_size += size;
- sizes[embedded_count] = size;
- embedded_count++;
+ sizes[i] = InstructionSizeOfBuiltin(i);
}
// Sort for percentiles.
- std::sort(&sizes[0], &sizes[embedded_count]);
+ std::sort(&sizes[0], &sizes[kCount]);
- const int k50th = embedded_count * 0.5;
- const int k75th = embedded_count * 0.75;
- const int k90th = embedded_count * 0.90;
- const int k99th = embedded_count * 0.99;
+ const int k50th = kCount * 0.5;
+ const int k75th = kCount * 0.75;
+ const int k90th = kCount * 0.90;
+ const int k99th = kCount * 0.99;
PrintF("EmbeddedData:\n");
PrintF(" Total size: %d\n",
- static_cast<int>(code_size() + metadata_size()));
- PrintF(" Metadata size: %d\n",
- static_cast<int>(metadata_size()));
- PrintF(" Instruction size: %d\n", instruction_size);
- PrintF(" Padding: %d\n",
- static_cast<int>(code_size() - instruction_size));
- PrintF(" Embedded builtin count: %d\n", embedded_count);
+ static_cast<int>(code_size() + data_size()));
+ PrintF(" Data size: %d\n",
+ static_cast<int>(data_size()));
+ PrintF(" Code size: %d\n", static_cast<int>(code_size()));
PrintF(" Instruction size (50th percentile): %d\n", sizes[k50th]);
PrintF(" Instruction size (75th percentile): %d\n", sizes[k75th]);
PrintF(" Instruction size (90th percentile): %d\n", sizes[k90th]);
diff --git a/deps/v8/src/snapshot/embedded/embedded-data.h b/deps/v8/src/snapshot/embedded/embedded-data.h
index d568be83f6..d8d2dd822d 100644
--- a/deps/v8/src/snapshot/embedded/embedded-data.h
+++ b/deps/v8/src/snapshot/embedded/embedded-data.h
@@ -32,11 +32,10 @@ class InstructionStream final : public AllStatic {
// mksnapshot. Otherwise, off-heap code is embedded directly into the binary.
static void CreateOffHeapInstructionStream(Isolate* isolate, uint8_t** code,
uint32_t* code_size,
- uint8_t** metadata,
- uint32_t* metadata_size);
+ uint8_t** data,
+ uint32_t* data_size);
static void FreeOffHeapInstructionStream(uint8_t* code, uint32_t code_size,
- uint8_t* metadata,
- uint32_t metadata_size);
+ uint8_t* data, uint32_t data_size);
};
class EmbeddedData final {
@@ -46,27 +45,26 @@ class EmbeddedData final {
static EmbeddedData FromBlob() {
return EmbeddedData(Isolate::CurrentEmbeddedBlobCode(),
Isolate::CurrentEmbeddedBlobCodeSize(),
- Isolate::CurrentEmbeddedBlobMetadata(),
- Isolate::CurrentEmbeddedBlobMetadataSize());
+ Isolate::CurrentEmbeddedBlobData(),
+ Isolate::CurrentEmbeddedBlobDataSize());
}
static EmbeddedData FromBlob(Isolate* isolate) {
- return EmbeddedData(isolate->embedded_blob_code(),
- isolate->embedded_blob_code_size(),
- isolate->embedded_blob_metadata(),
- isolate->embedded_blob_metadata_size());
+ return EmbeddedData(
+ isolate->embedded_blob_code(), isolate->embedded_blob_code_size(),
+ isolate->embedded_blob_data(), isolate->embedded_blob_data_size());
}
const uint8_t* code() const { return code_; }
uint32_t code_size() const { return code_size_; }
- const uint8_t* metadata() const { return metadata_; }
- uint32_t metadata_size() const { return metadata_size_; }
+ const uint8_t* data() const { return data_; }
+ uint32_t data_size() const { return data_size_; }
void Dispose() {
delete[] code_;
code_ = nullptr;
- delete[] metadata_;
- metadata_ = nullptr;
+ delete[] data_;
+ data_ = nullptr;
}
Address InstructionStartOfBuiltin(int i) const;
@@ -75,7 +73,8 @@ class EmbeddedData final {
Address InstructionStartOfBytecodeHandlers() const;
Address InstructionEndOfBytecodeHandlers() const;
- bool ContainsBuiltin(int i) const { return InstructionSizeOfBuiltin(i) > 0; }
+ Address MetadataStartOfBuiltin(int i) const;
+ uint32_t MetadataSizeOfBuiltin(int i) const;
uint32_t AddressForHashing(Address addr) {
Address start = reinterpret_cast<Address>(code_);
@@ -84,92 +83,132 @@ class EmbeddedData final {
}
// Padded with kCodeAlignment.
+ // TODO(v8:11045): Consider removing code alignment.
uint32_t PaddedInstructionSizeOfBuiltin(int i) const {
uint32_t size = InstructionSizeOfBuiltin(i);
- return (size == 0) ? 0 : PadAndAlign(size);
+ CHECK_NE(size, 0);
+ return PadAndAlignCode(size);
}
- size_t CreateEmbeddedBlobHash() const;
- size_t EmbeddedBlobHash() const {
- return *reinterpret_cast<const size_t*>(metadata_ +
- EmbeddedBlobHashOffset());
+ size_t CreateEmbeddedBlobDataHash() const;
+ size_t CreateEmbeddedBlobCodeHash() const;
+ size_t EmbeddedBlobDataHash() const {
+ return *reinterpret_cast<const size_t*>(data_ +
+ EmbeddedBlobDataHashOffset());
+ }
+ size_t EmbeddedBlobCodeHash() const {
+ return *reinterpret_cast<const size_t*>(data_ +
+ EmbeddedBlobCodeHashOffset());
}
size_t IsolateHash() const {
- return *reinterpret_cast<const size_t*>(metadata_ + IsolateHashOffset());
+ return *reinterpret_cast<const size_t*>(data_ + IsolateHashOffset());
}
- struct Metadata {
- // Blob layout information.
- uint32_t instructions_offset;
- uint32_t instructions_length;
+ // Blob layout information for a single instruction stream. Corresponds
+ // roughly to Code object layout (see the instruction and metadata area).
+ struct LayoutDescription {
+ // The offset and (unpadded) length of this builtin's instruction area
+ // from the start of the embedded code section.
+ uint32_t instruction_offset;
+ uint32_t instruction_length;
+ // The offset and (unpadded) length of this builtin's metadata area
+ // from the start of the embedded data section.
+ uint32_t metadata_offset;
+ uint32_t metadata_length;
};
- STATIC_ASSERT(offsetof(Metadata, instructions_offset) == 0);
- STATIC_ASSERT(offsetof(Metadata, instructions_length) == kUInt32Size);
- STATIC_ASSERT(sizeof(Metadata) == kUInt32Size + kUInt32Size);
+ STATIC_ASSERT(offsetof(LayoutDescription, instruction_offset) ==
+ 0 * kUInt32Size);
+ STATIC_ASSERT(offsetof(LayoutDescription, instruction_length) ==
+ 1 * kUInt32Size);
+ STATIC_ASSERT(offsetof(LayoutDescription, metadata_offset) ==
+ 2 * kUInt32Size);
+ STATIC_ASSERT(offsetof(LayoutDescription, metadata_length) ==
+ 3 * kUInt32Size);
+ STATIC_ASSERT(sizeof(LayoutDescription) == 4 * kUInt32Size);
// The layout of the blob is as follows:
//
- // metadata:
- // [0] hash of the remaining blob
- // [1] hash of embedded-blob-relevant heap objects
- // [2] metadata of instruction stream 0
- // ... metadata
+ // data:
+ // [0] hash of the data section
+ // [1] hash of the code section
+ // [2] hash of embedded-blob-relevant heap objects
+ // [3] layout description of instruction stream 0
+ // ... layout descriptions
+ // [x] metadata section of builtin 0
+ // ... metadata sections
//
// code:
- // [0] instruction streams 0
- // ... instruction streams
+ // [0] instruction section of builtin 0
+ // ... instruction sections
static constexpr uint32_t kTableSize = Builtins::builtin_count;
- static constexpr uint32_t EmbeddedBlobHashOffset() { return 0; }
- static constexpr uint32_t EmbeddedBlobHashSize() { return kSizetSize; }
+ static constexpr uint32_t EmbeddedBlobDataHashOffset() { return 0; }
+ static constexpr uint32_t EmbeddedBlobDataHashSize() { return kSizetSize; }
+ static constexpr uint32_t EmbeddedBlobCodeHashOffset() {
+ return EmbeddedBlobDataHashOffset() + EmbeddedBlobDataHashSize();
+ }
+ static constexpr uint32_t EmbeddedBlobCodeHashSize() { return kSizetSize; }
static constexpr uint32_t IsolateHashOffset() {
- return EmbeddedBlobHashOffset() + EmbeddedBlobHashSize();
+ return EmbeddedBlobCodeHashOffset() + EmbeddedBlobCodeHashSize();
}
static constexpr uint32_t IsolateHashSize() { return kSizetSize; }
- static constexpr uint32_t MetadataTableOffset() {
+ static constexpr uint32_t LayoutDescriptionTableOffset() {
return IsolateHashOffset() + IsolateHashSize();
}
- static constexpr uint32_t MetadataTableSize() {
- return sizeof(struct Metadata) * kTableSize;
+ static constexpr uint32_t LayoutDescriptionTableSize() {
+ return sizeof(struct LayoutDescription) * kTableSize;
+ }
+ static constexpr uint32_t FixedDataSize() {
+ return LayoutDescriptionTableOffset() + LayoutDescriptionTableSize();
}
+ // The variable-size data section starts here.
+ static constexpr uint32_t RawMetadataOffset() { return FixedDataSize(); }
+
+ // Code is in its own dedicated section.
static constexpr uint32_t RawCodeOffset() { return 0; }
private:
- EmbeddedData(const uint8_t* code, uint32_t code_size, const uint8_t* metadata,
- uint32_t metadata_size)
- : code_(code),
- code_size_(code_size),
- metadata_(metadata),
- metadata_size_(metadata_size) {
+ EmbeddedData(const uint8_t* code, uint32_t code_size, const uint8_t* data,
+ uint32_t data_size)
+ : code_(code), code_size_(code_size), data_(data), data_size_(data_size) {
DCHECK_NOT_NULL(code);
DCHECK_LT(0, code_size);
- DCHECK_NOT_NULL(metadata);
- DCHECK_LT(0, metadata_size);
+ DCHECK_NOT_NULL(data);
+ DCHECK_LT(0, data_size);
}
- const Metadata* Metadata() const {
- return reinterpret_cast<const struct Metadata*>(metadata_ +
- MetadataTableOffset());
- }
const uint8_t* RawCode() const { return code_ + RawCodeOffset(); }
- static constexpr int PadAndAlign(int size) {
+ const struct LayoutDescription* LayoutDescription() const {
+ return reinterpret_cast<const struct LayoutDescription*>(
+ data_ + LayoutDescriptionTableOffset());
+ }
+ const uint8_t* RawMetadata() const { return data_ + RawMetadataOffset(); }
+
+ static constexpr int PadAndAlignCode(int size) {
// Ensure we have at least one byte trailing the actual builtin
// instructions which we can later fill with int3.
return RoundUp<kCodeAlignment>(size + 1);
}
+ static constexpr int PadAndAlignData(int size) {
+ // Unlike code, the data section needs no trailing int3 pad byte; metadata
+ // is only rounded up to its required alignment.
+ return RoundUp<Code::kMetadataAlignment>(size);
+ }
void PrintStatistics() const;
- // This points to code for builtins. The contents are potentially unreadable
- // on platforms that disallow reads from the .text section.
+ // The code section contains instruction streams. It is guaranteed to have
+ // execute permissions, and may have read permissions.
const uint8_t* code_;
uint32_t code_size_;
- // This is metadata for the code.
- const uint8_t* metadata_;
- uint32_t metadata_size_;
+ // The data section contains both descriptions of the code section (hashes,
+ // offsets, sizes) and metadata describing Code objects (see
+ // Code::MetadataStart()). It is guaranteed to have read permissions.
+ const uint8_t* data_;
+ uint32_t data_size_;
};
} // namespace internal
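The constexpr offset chain above pins the data-section layout at compile time. An illustrative recomputation with toy sizes (kTableSize stands in for Builtins::builtin_count; hash fields assumed 8 bytes), plus the padding asymmetry between code and data:

#include <cstdint>

constexpr uint32_t kSizetSize = sizeof(uint64_t);  // assumed hash field size
constexpr uint32_t kTableSize = 4;                 // assumption for the sketch
constexpr uint32_t kLayoutDescriptionSize = 4 * sizeof(uint32_t);

constexpr uint32_t DataHashOffset() { return 0; }
constexpr uint32_t CodeHashOffset() { return DataHashOffset() + kSizetSize; }
constexpr uint32_t IsolateHashOffset() { return CodeHashOffset() + kSizetSize; }
constexpr uint32_t LayoutTableOffset() { return IsolateHashOffset() + kSizetSize; }
constexpr uint32_t FixedDataSize() {
  return LayoutTableOffset() + kTableSize * kLayoutDescriptionSize;
}
// The variable-size metadata area starts right after the fixed-size part,
// while code keeps its own dedicated section starting at offset 0.
constexpr uint32_t RawMetadataOffset() { return FixedDataSize(); }
static_assert(RawMetadataOffset() == 3 * kSizetSize + kTableSize * 16,
              "hash fields, then the layout table, then raw metadata");

// Code padding reserves one trailing int3 byte before rounding up; data is
// only rounded up to its alignment.
constexpr int kCodeAlignment = 32;
constexpr int RoundUp(int x, int a) { return (x + a - 1) & ~(a - 1); }
constexpr int PadAndAlignCode(int size) { return RoundUp(size + 1, kCodeAlignment); }
static_assert(PadAndAlignCode(32) == 64,
              "even an exactly-aligned stream gets a pad byte");

int main() {}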
diff --git a/deps/v8/src/snapshot/embedded/embedded-empty.cc b/deps/v8/src/snapshot/embedded/embedded-empty.cc
index a407f904ed..c32b459d9d 100644
--- a/deps/v8/src/snapshot/embedded/embedded-empty.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-empty.cc
@@ -10,22 +10,22 @@
extern "C" const uint8_t* v8_Default_embedded_blob_code_;
extern "C" uint32_t v8_Default_embedded_blob_code_size_;
-extern "C" const uint8_t* v8_Default_embedded_blob_metadata_;
-extern "C" uint32_t v8_Default_embedded_blob_metadata_size_;
+extern "C" const uint8_t* v8_Default_embedded_blob_data_;
+extern "C" uint32_t v8_Default_embedded_blob_data_size_;
const uint8_t* v8_Default_embedded_blob_code_ = nullptr;
uint32_t v8_Default_embedded_blob_code_size_ = 0;
-const uint8_t* v8_Default_embedded_blob_metadata_ = nullptr;
-uint32_t v8_Default_embedded_blob_metadata_size_ = 0;
+const uint8_t* v8_Default_embedded_blob_data_ = nullptr;
+uint32_t v8_Default_embedded_blob_data_size_ = 0;
#ifdef V8_MULTI_SNAPSHOTS
extern "C" const uint8_t* v8_Trusted_embedded_blob_code_;
extern "C" uint32_t v8_Trusted_embedded_blob_code_size_;
-extern "C" const uint8_t* v8_Trusted_embedded_blob_metadata_;
-extern "C" uint32_t v8_Trusted_embedded_blob_metadata_size_;
+extern "C" const uint8_t* v8_Trusted_embedded_blob_data_;
+extern "C" uint32_t v8_Trusted_embedded_blob_data_size_;
const uint8_t* v8_Trusted_embedded_blob_code_ = nullptr;
uint32_t v8_Trusted_embedded_blob_code_size_ = 0;
-const uint8_t* v8_Trusted_embedded_blob_metadata_ = nullptr;
-uint32_t v8_Trusted_embedded_blob_metadata_size_ = 0;
+const uint8_t* v8_Trusted_embedded_blob_data_ = nullptr;
+uint32_t v8_Trusted_embedded_blob_data_size_ = 0;
#endif
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer.cc b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
index 8a3e248d6b..b472841cc6 100644
--- a/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
@@ -14,6 +14,38 @@
namespace v8 {
namespace internal {
+namespace {
+
+int WriteDirectiveOrSeparator(PlatformEmbeddedFileWriterBase* w,
+ int current_line_length,
+ DataDirective directive) {
+ int printed_chars;
+ if (current_line_length == 0) {
+ printed_chars = w->IndentedDataDirective(directive);
+ DCHECK_LT(0, printed_chars);
+ } else {
+ printed_chars = fprintf(w->fp(), ",");
+ DCHECK_EQ(1, printed_chars);
+ }
+ return current_line_length + printed_chars;
+}
+
+int WriteLineEndIfNeeded(PlatformEmbeddedFileWriterBase* w,
+ int current_line_length, int write_size) {
+ static const int kTextWidth = 100;
+ // Check if adding ',0xFF...FF\n' would force a line wrap. This doesn't use
+ // the actual size of the string to be written to determine this so it's
+ // more conservative than strictly needed.
+ if (current_line_length + strlen(",0x") + write_size * 2 > kTextWidth) {
+ fprintf(w->fp(), "\n");
+ return 0;
+ } else {
+ return current_line_length;
+ }
+}
+
+} // namespace
+
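These two helpers (moved up so WriteCodeSection can use them) implement simple line budgeting for the emitted assembly. A runnable simplification that prints to stdout and assumes an ".octa"-style directive with 8-byte literals:

#include <cstdio>
#include <cstring>

static int WriteDirectiveOrSeparator(int current_line_length) {
  // A fresh line starts with the data directive; otherwise just a comma.
  if (current_line_length == 0) return printf("  .octa ");
  printf(",");
  return current_line_length + 1;
}

static int WriteLineEndIfNeeded(int current_line_length, int write_size) {
  static const int kTextWidth = 100;
  // Conservative estimate: ",0x" plus two hex chars per byte of the literal.
  if (current_line_length + static_cast<int>(strlen(",0x")) + write_size * 2 >
      kTextWidth) {
    printf("\n");
    return 0;  // the next literal starts a new directive line
  }
  return current_line_length;
}

int main() {
  int len = 0;
  for (int i = 0; i < 64; i++) {
    len = WriteDirectiveOrSeparator(len);
    len += printf("0x%016llx", static_cast<unsigned long long>(i));
    len = WriteLineEndIfNeeded(len, 8);  // wrap before overflowing 100 cols
  }
  printf("\n");
}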
void EmbeddedFileWriter::WriteBuiltin(PlatformEmbeddedFileWriterBase* w,
const i::EmbeddedData* blob,
const int builtin_id) const {
@@ -98,6 +130,40 @@ void EmbeddedFileWriter::WriteBuiltinLabels(PlatformEmbeddedFileWriterBase* w,
w->DeclareLabel(name.c_str());
}
+void EmbeddedFileWriter::WriteCodeSection(PlatformEmbeddedFileWriterBase* w,
+ const i::EmbeddedData* blob) const {
+ w->Comment(
+ "The embedded blob code section starts here. It contains the builtin");
+ w->Comment("instruction streams.");
+ w->SectionText();
+
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+ // UMA needs an exposed function-type label at the start of the embedded
+ // code section.
+ static const char* kCodeStartForProfilerSymbolName =
+ "v8_code_start_for_profiler_";
+ static constexpr int kDummyFunctionLength = 1;
+ static constexpr int kDummyFunctionData = 0xcc;
+ w->DeclareFunctionBegin(kCodeStartForProfilerSymbolName,
+ kDummyFunctionLength);
+ // The label must not be at the same address as the first builtin; insert
+ // padding bytes.
+ WriteDirectiveOrSeparator(w, 0, kByte);
+ w->HexLiteral(kDummyFunctionData);
+ w->Newline();
+ w->DeclareFunctionEnd(kCodeStartForProfilerSymbolName);
+#endif
+
+ w->AlignToCodeAlignment();
+ w->DeclareLabel(EmbeddedBlobCodeDataSymbol().c_str());
+
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
+ for (int i = 0; i < i::Builtins::builtin_count; i++) {
+ WriteBuiltin(w, blob, i);
+ }
+ w->Newline();
+}
+
void EmbeddedFileWriter::WriteFileEpilogue(PlatformEmbeddedFileWriterBase* w,
const i::EmbeddedData* blob) const {
{
@@ -112,15 +178,14 @@ void EmbeddedFileWriter::WriteFileEpilogue(PlatformEmbeddedFileWriterBase* w,
EmbeddedBlobCodeDataSymbol().c_str());
w->Newline();
- i::EmbeddedVector<char, kTemporaryStringLength>
- embedded_blob_metadata_symbol;
- i::SNPrintF(embedded_blob_metadata_symbol, "v8_%s_embedded_blob_metadata_",
+ i::EmbeddedVector<char, kTemporaryStringLength> embedded_blob_data_symbol;
+ i::SNPrintF(embedded_blob_data_symbol, "v8_%s_embedded_blob_data_",
embedded_variant_);
- w->Comment("Pointer to the beginning of the embedded blob metadata.");
+ w->Comment("Pointer to the beginning of the embedded blob data section.");
w->AlignToDataAlignment();
- w->DeclarePointerToSymbol(embedded_blob_metadata_symbol.begin(),
- EmbeddedBlobMetadataDataSymbol().c_str());
+ w->DeclarePointerToSymbol(embedded_blob_data_symbol.begin(),
+ EmbeddedBlobDataDataSymbol().c_str());
w->Newline();
}
@@ -137,13 +202,12 @@ void EmbeddedFileWriter::WriteFileEpilogue(PlatformEmbeddedFileWriterBase* w,
w->Newline();
i::EmbeddedVector<char, kTemporaryStringLength>
- embedded_blob_metadata_size_symbol;
- i::SNPrintF(embedded_blob_metadata_size_symbol,
- "v8_%s_embedded_blob_metadata_size_", embedded_variant_);
+ embedded_blob_data_size_symbol;
+ i::SNPrintF(embedded_blob_data_size_symbol,
+ "v8_%s_embedded_blob_data_size_", embedded_variant_);
- w->Comment("The size of the embedded blob metadata in bytes.");
- w->DeclareUint32(embedded_blob_metadata_size_symbol.begin(),
- blob->metadata_size());
+ w->Comment("The size of the embedded blob data section in bytes.");
+ w->DeclareUint32(embedded_blob_data_size_symbol.begin(), blob->data_size());
w->Newline();
}
@@ -162,38 +226,6 @@ void EmbeddedFileWriter::WriteFileEpilogue(PlatformEmbeddedFileWriterBase* w,
w->FileEpilogue();
}
-namespace {
-
-int WriteDirectiveOrSeparator(PlatformEmbeddedFileWriterBase* w,
- int current_line_length,
- DataDirective directive) {
- int printed_chars;
- if (current_line_length == 0) {
- printed_chars = w->IndentedDataDirective(directive);
- DCHECK_LT(0, printed_chars);
- } else {
- printed_chars = fprintf(w->fp(), ",");
- DCHECK_EQ(1, printed_chars);
- }
- return current_line_length + printed_chars;
-}
-
-int WriteLineEndIfNeeded(PlatformEmbeddedFileWriterBase* w,
- int current_line_length, int write_size) {
- static const int kTextWidth = 100;
- // Check if adding ',0xFF...FF\n"' would force a line wrap. This doesn't use
- // the actual size of the string to be written to determine this so it's
- // more conservative than strictly needed.
- if (current_line_length + strlen(",0x") + write_size * 2 > kTextWidth) {
- fprintf(w->fp(), "\n");
- return 0;
- } else {
- return current_line_length;
- }
-}
-
-} // namespace
-
// static
void EmbeddedFileWriter::WriteBinaryContentsAsInlineAssembly(
PlatformEmbeddedFileWriterBase* w, const uint8_t* data, uint32_t size) {
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer.h b/deps/v8/src/snapshot/embedded/embedded-file-writer.h
index f1ca04170a..6a90e5c685 100644
--- a/deps/v8/src/snapshot/embedded/embedded-file-writer.h
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer.h
@@ -109,8 +109,8 @@ class EmbeddedFileWriter : public EmbeddedFileWriterInterface {
WriteFilePrologue(writer.get());
WriteExternalFilenames(writer.get());
- WriteMetadataSection(writer.get(), blob);
- WriteInstructionStreams(writer.get(), blob);
+ WriteDataSection(writer.get(), blob);
+ WriteCodeSection(writer.get(), blob);
WriteFileEpilogue(writer.get(), blob);
fclose(fp);
@@ -161,23 +161,22 @@ class EmbeddedFileWriter : public EmbeddedFileWriterInterface {
return std::string{embedded_blob_code_data_symbol.begin()};
}
- std::string EmbeddedBlobMetadataDataSymbol() const {
+ std::string EmbeddedBlobDataDataSymbol() const {
i::EmbeddedVector<char, kTemporaryStringLength>
- embedded_blob_metadata_data_symbol;
- i::SNPrintF(embedded_blob_metadata_data_symbol,
- "v8_%s_embedded_blob_metadata_data_", embedded_variant_);
- return std::string{embedded_blob_metadata_data_symbol.begin()};
+ embedded_blob_data_data_symbol;
+ i::SNPrintF(embedded_blob_data_data_symbol,
+ "v8_%s_embedded_blob_data_data_", embedded_variant_);
+ return std::string{embedded_blob_data_data_symbol.begin()};
}
- void WriteMetadataSection(PlatformEmbeddedFileWriterBase* w,
- const i::EmbeddedData* blob) const {
- w->Comment("The embedded blob metadata starts here.");
+ void WriteDataSection(PlatformEmbeddedFileWriterBase* w,
+ const i::EmbeddedData* blob) const {
+ w->Comment("The embedded blob data section starts here.");
w->SectionRoData();
w->AlignToDataAlignment();
- w->DeclareLabel(EmbeddedBlobMetadataDataSymbol().c_str());
+ w->DeclareLabel(EmbeddedBlobDataDataSymbol().c_str());
- WriteBinaryContentsAsInlineAssembly(w, blob->metadata(),
- blob->metadata_size());
+ WriteBinaryContentsAsInlineAssembly(w, blob->data(), blob->data_size());
}
void WriteBuiltin(PlatformEmbeddedFileWriterBase* w,
@@ -186,21 +185,8 @@ class EmbeddedFileWriter : public EmbeddedFileWriterInterface {
void WriteBuiltinLabels(PlatformEmbeddedFileWriterBase* w,
std::string name) const;
- void WriteInstructionStreams(PlatformEmbeddedFileWriterBase* w,
- const i::EmbeddedData* blob) const {
- w->Comment("The embedded blob data starts here. It contains the builtin");
- w->Comment("instruction streams.");
- w->SectionText();
- w->AlignToCodeAlignment();
- w->DeclareLabel(EmbeddedBlobCodeDataSymbol().c_str());
-
- for (int i = 0; i < i::Builtins::builtin_count; i++) {
- if (!blob->ContainsBuiltin(i)) continue;
-
- WriteBuiltin(w, blob, i);
- }
- w->Newline();
- }
+ void WriteCodeSection(PlatformEmbeddedFileWriterBase* w,
+ const i::EmbeddedData* blob) const;
void WriteFileEpilogue(PlatformEmbeddedFileWriterBase* w,
const i::EmbeddedData* blob) const;
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
index 1c823ef421..4065e4a7eb 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
@@ -4,6 +4,8 @@
#include "src/snapshot/embedded/platform-embedded-file-writer-aix.h"
+#include "src/objects/code.h"
+
namespace v8 {
namespace internal {
@@ -63,10 +65,12 @@ void PlatformEmbeddedFileWriterAIX::DeclareSymbolGlobal(const char* name) {
}
void PlatformEmbeddedFileWriterAIX::AlignToCodeAlignment() {
+ STATIC_ASSERT((1 << 5) >= kCodeAlignment);
fprintf(fp_, ".align 5\n");
}
void PlatformEmbeddedFileWriterAIX::AlignToDataAlignment() {
+ STATIC_ASSERT((1 << 3) >= Code::kMetadataAlignment);
fprintf(fp_, ".align 3\n");
}
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
index 070aaf51b6..8acfd0d176 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
@@ -8,6 +8,7 @@
#include <cinttypes>
#include "src/common/globals.h"
+#include "src/objects/code.h"
namespace v8 {
namespace internal {
@@ -73,6 +74,7 @@ void PlatformEmbeddedFileWriterGeneric::DeclareSymbolGlobal(const char* name) {
}
void PlatformEmbeddedFileWriterGeneric::AlignToCodeAlignment() {
+ STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".balign 32\n");
}
@@ -81,6 +83,7 @@ void PlatformEmbeddedFileWriterGeneric::AlignToDataAlignment() {
// instructions are used to retrieve v8_Default_embedded_blob_ and/or
// v8_Default_embedded_blob_size_. The generated instructions require the
// load target to be aligned at 8 bytes (2^3).
+ STATIC_ASSERT(8 >= Code::kMetadataAlignment);
fprintf(fp_, ".balign 8\n");
}
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
index 9c5bf7049d..5fa12ec6ea 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
@@ -4,6 +4,8 @@
#include "src/snapshot/embedded/platform-embedded-file-writer-mac.h"
+#include "src/objects/code.h"
+
namespace v8 {
namespace internal {
@@ -54,6 +56,7 @@ void PlatformEmbeddedFileWriterMac::DeclareSymbolGlobal(const char* name) {
// prevents something along the compilation chain from messing with the
// embedded blob. Using .global here causes embedded blob hash verification
// failures at runtime.
+ STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".private_extern _%s\n", name);
}
@@ -62,6 +65,7 @@ void PlatformEmbeddedFileWriterMac::AlignToCodeAlignment() {
}
void PlatformEmbeddedFileWriterMac::AlignToDataAlignment() {
+ STATIC_ASSERT(8 >= Code::kMetadataAlignment);
fprintf(fp_, ".balign 8\n");
}
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
index e3250084c4..891dbd94d3 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
@@ -109,12 +109,12 @@ void EmitUnwindData(PlatformEmbeddedFileWriterWin* w,
w->Comment(" UnwindInfoAddress");
w->StartPdataSection();
{
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
Address prev_builtin_end_offset = 0;
for (int i = 0; i < Builtins::builtin_count; i++) {
// Some builtins are leaf functions from the point of view of Win64 stack
// walking: they do not move the stack pointer and do not require a PDATA
// entry because the return address can be retrieved from [rsp].
- if (!blob->ContainsBuiltin(i)) continue;
if (unwind_infos[i].is_leaf_function()) continue;
uint64_t builtin_start_offset = blob->InstructionStartOfBuiltin(i) -
@@ -193,8 +193,8 @@ void EmitUnwindData(PlatformEmbeddedFileWriterWin* w,
std::vector<int> code_chunks;
std::vector<win64_unwindinfo::FrameOffsets> fp_adjustments;
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
for (int i = 0; i < Builtins::builtin_count; i++) {
- if (!blob->ContainsBuiltin(i)) continue;
if (unwind_infos[i].is_leaf_function()) continue;
uint64_t builtin_start_offset = blob->InstructionStartOfBuiltin(i) -
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index e8bc45a4a2..5747f705ae 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -15,18 +15,20 @@
namespace v8 {
namespace internal {
-ObjectDeserializer::ObjectDeserializer(const SerializedCodeData* data)
- : Deserializer(data, true) {}
+ObjectDeserializer::ObjectDeserializer(Isolate* isolate,
+ const SerializedCodeData* data)
+ : Deserializer(isolate, data->Payload(), data->GetMagicNumber(), true,
+ false) {}
MaybeHandle<SharedFunctionInfo>
ObjectDeserializer::DeserializeSharedFunctionInfo(
Isolate* isolate, const SerializedCodeData* data, Handle<String> source) {
- ObjectDeserializer d(data);
+ ObjectDeserializer d(isolate, data);
d.AddAttachedObject(source);
Handle<HeapObject> result;
- return d.Deserialize(isolate).ToHandle(&result)
+ return d.Deserialize().ToHandle(&result)
? Handle<SharedFunctionInfo>::cast(result)
: MaybeHandle<SharedFunctionInfo>();
}
@@ -39,23 +41,17 @@ ObjectDeserializer::DeserializeSharedFunctionInfoOffThread(
UNREACHABLE();
}
-MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
- Initialize(isolate);
- if (!allocator()->ReserveSpace()) return MaybeHandle<HeapObject>();
-
+MaybeHandle<HeapObject> ObjectDeserializer::Deserialize() {
DCHECK(deserializing_user_code());
- HandleScope scope(isolate);
+ HandleScope scope(isolate());
Handle<HeapObject> result;
{
- DisallowGarbageCollection no_gc;
- Object root;
- VisitRootPointer(Root::kStartupObjectCache, nullptr, FullObjectSlot(&root));
+ result = ReadObject();
DeserializeDeferredObjects();
CHECK(new_code_objects().empty());
LinkAllocationSites();
- LogNewMapEvents();
- result = handle(HeapObject::cast(root), isolate);
- allocator()->RegisterDeserializedObjectsForBlackAllocation();
+ CHECK(new_maps().empty());
+ WeakenDescriptorArrays();
}
Rehash();
@@ -77,10 +73,10 @@ void ObjectDeserializer::CommitPostProcessedObjects() {
script->set_id(isolate()->GetNextScriptId());
LogScriptEvents(*script);
// Add script to list.
- Handle<WeakArrayList> list = isolate()->factory()->script_list();
- list = WeakArrayList::AddToEnd(isolate(), list,
- MaybeObjectHandle::Weak(script));
- isolate()->heap()->SetRootScriptList(*list);
+ Handle<WeakArrayList> list = isolate()->factory()->script_list();
+ list = WeakArrayList::AddToEnd(isolate(), list,
+ MaybeObjectHandle::Weak(script));
+ isolate()->heap()->SetRootScriptList(*list);
}
}
@@ -89,17 +85,17 @@ void ObjectDeserializer::LinkAllocationSites() {
Heap* heap = isolate()->heap();
// Allocation sites are present in the snapshot, and must be linked into
// a list at deserialization time.
- for (AllocationSite site : new_allocation_sites()) {
- if (!site.HasWeakNext()) continue;
+ for (Handle<AllocationSite> site : new_allocation_sites()) {
+ if (!site->HasWeakNext()) continue;
// TODO(mvstanton): consider treating the heap()->allocation_sites_list()
// as a (weak) root. If this root is relocated correctly, this becomes
// unnecessary.
if (heap->allocation_sites_list() == Smi::zero()) {
- site.set_weak_next(ReadOnlyRoots(heap).undefined_value());
+ site->set_weak_next(ReadOnlyRoots(heap).undefined_value());
} else {
- site.set_weak_next(heap->allocation_sites_list());
+ site->set_weak_next(heap->allocation_sites_list());
}
- heap->set_allocation_sites_list(site);
+ heap->set_allocation_sites_list(*site);
}
}
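
The LinkAllocationSites hunk above moves from raw AllocationSite values to Handle<AllocationSite>, since handles stay valid if a GC moves objects mid-loop; the list itself is threaded through each site's weak_next field. A simplified stand-in for that intrusive prepend, using plain pointers in place of handles and omitting the HasWeakNext and empty-list special cases:

#include <vector>

struct Site {
  Site* weak_next = nullptr;  // stand-in for AllocationSite::weak_next
};

struct Heap {
  Site* allocation_sites_list = nullptr;  // stand-in for the heap-owned list head
};

// Mirrors the loop above: each new site is pushed onto the front of the
// heap's intrusive list.
void LinkSites(Heap* heap, const std::vector<Site*>& new_sites) {
  for (Site* site : new_sites) {
    site->weak_next = heap->allocation_sites_list;
    heap->allocation_sites_list = site;
  }
}
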
diff --git a/deps/v8/src/snapshot/object-deserializer.h b/deps/v8/src/snapshot/object-deserializer.h
index f155ca8d07..6ba79147f5 100644
--- a/deps/v8/src/snapshot/object-deserializer.h
+++ b/deps/v8/src/snapshot/object-deserializer.h
@@ -23,10 +23,10 @@ class ObjectDeserializer final : public Deserializer {
Handle<String> source);
private:
- explicit ObjectDeserializer(const SerializedCodeData* data);
+ explicit ObjectDeserializer(Isolate* isolate, const SerializedCodeData* data);
// Deserialize an object graph. Fail gracefully.
- MaybeHandle<HeapObject> Deserialize(Isolate* isolate);
+ MaybeHandle<HeapObject> Deserialize();
void LinkAllocationSites();
void CommitPostProcessedObjects();
diff --git a/deps/v8/src/snapshot/read-only-deserializer.cc b/deps/v8/src/snapshot/read-only-deserializer.cc
index 7d1ff90b8c..c8a6651eb7 100644
--- a/deps/v8/src/snapshot/read-only-deserializer.cc
+++ b/deps/v8/src/snapshot/read-only-deserializer.cc
@@ -14,29 +14,24 @@
namespace v8 {
namespace internal {
-void ReadOnlyDeserializer::DeserializeInto(Isolate* isolate) {
- Initialize(isolate);
+void ReadOnlyDeserializer::DeserializeIntoIsolate() {
+ HandleScope scope(isolate());
- if (!allocator()->ReserveSpace()) {
- V8::FatalProcessOutOfMemory(isolate, "ReadOnlyDeserializer");
- }
-
- ReadOnlyHeap* ro_heap = isolate->read_only_heap();
+ ReadOnlyHeap* ro_heap = isolate()->read_only_heap();
// No active threads.
- DCHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
+ DCHECK_NULL(isolate()->thread_manager()->FirstThreadStateInUse());
// No active handles.
- DCHECK(isolate->handle_scope_implementer()->blocks()->empty());
+ DCHECK(isolate()->handle_scope_implementer()->blocks()->empty());
// Read-only object cache is not yet populated.
DCHECK(!ro_heap->read_only_object_cache_is_initialized());
// Startup object cache is not yet populated.
- DCHECK(isolate->startup_object_cache()->empty());
+ DCHECK(isolate()->startup_object_cache()->empty());
// Builtins are not yet created.
- DCHECK(!isolate->builtins()->is_initialized());
+ DCHECK(!isolate()->builtins()->is_initialized());
{
- DisallowGarbageCollection no_gc;
- ReadOnlyRoots roots(isolate);
+ ReadOnlyRoots roots(isolate());
roots.Iterate(this);
ro_heap->read_only_space()->RepairFreeSpacesAfterDeserialization();
@@ -55,7 +50,7 @@ void ReadOnlyDeserializer::DeserializeInto(Isolate* isolate) {
}
if (FLAG_rehash_snapshot && can_rehash()) {
- isolate->heap()->InitializeHashSeed();
+ isolate()->heap()->InitializeHashSeed();
Rehash();
}
}
diff --git a/deps/v8/src/snapshot/read-only-deserializer.h b/deps/v8/src/snapshot/read-only-deserializer.h
index 08443766c2..c546c234ff 100644
--- a/deps/v8/src/snapshot/read-only-deserializer.h
+++ b/deps/v8/src/snapshot/read-only-deserializer.h
@@ -6,6 +6,7 @@
#define V8_SNAPSHOT_READ_ONLY_DESERIALIZER_H_
#include "src/snapshot/deserializer.h"
+#include "src/snapshot/snapshot-data.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
@@ -15,11 +16,13 @@ namespace internal {
// Read-only object cache used by the other deserializers.
class ReadOnlyDeserializer final : public Deserializer {
public:
- explicit ReadOnlyDeserializer(const SnapshotData* data)
- : Deserializer(data, false) {}
+ explicit ReadOnlyDeserializer(Isolate* isolate, const SnapshotData* data,
+ bool can_rehash)
+ : Deserializer(isolate, data->Payload(), data->GetMagicNumber(), false,
+ can_rehash) {}
// Deserialize the snapshot into an empty heap.
- void DeserializeInto(Isolate* isolate);
+ void DeserializeIntoIsolate();
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/read-only-serializer.cc b/deps/v8/src/snapshot/read-only-serializer.cc
index 4b852c0656..06c5094782 100644
--- a/deps/v8/src/snapshot/read-only-serializer.cc
+++ b/deps/v8/src/snapshot/read-only-serializer.cc
@@ -18,32 +18,51 @@ namespace internal {
ReadOnlySerializer::ReadOnlySerializer(Isolate* isolate,
Snapshot::SerializerFlags flags)
- : RootsSerializer(isolate, flags, RootIndex::kFirstReadOnlyRoot) {
+ : RootsSerializer(isolate, flags, RootIndex::kFirstReadOnlyRoot)
+#ifdef DEBUG
+ ,
+ serialized_objects_(isolate->heap()),
+ did_serialize_not_mapped_symbol_(false)
+#endif
+{
STATIC_ASSERT(RootIndex::kFirstReadOnlyRoot == RootIndex::kFirstRoot);
- allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size);
}
ReadOnlySerializer::~ReadOnlySerializer() {
OutputStatistics("ReadOnlySerializer");
}
-void ReadOnlySerializer::SerializeObject(HeapObject obj) {
- CHECK(ReadOnlyHeap::Contains(obj));
- CHECK_IMPLIES(obj.IsString(), obj.IsInternalizedString());
-
- if (SerializeHotObject(obj)) return;
- if (IsRootAndHasBeenSerialized(obj) && SerializeRoot(obj)) {
- return;
+void ReadOnlySerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
+ CHECK(ReadOnlyHeap::Contains(*obj));
+ CHECK_IMPLIES(obj->IsString(), obj->IsInternalizedString());
+
+ // There should be no references to the not_mapped_symbol except for the entry
+ // in the root table, so don't try to serialize a reference and rely on the
+ // below CHECK(!did_serialize_not_mapped_symbol_) to make sure it doesn't
+ // serialize twice.
+ if (*obj != ReadOnlyRoots(isolate()).not_mapped_symbol()) {
+ if (SerializeHotObject(obj)) return;
+ if (IsRootAndHasBeenSerialized(*obj) && SerializeRoot(obj)) {
+ return;
+ }
+ if (SerializeBackReference(obj)) return;
}
- if (SerializeBackReference(obj)) return;
- CheckRehashability(obj);
+ CheckRehashability(*obj);
// Object has not yet been serialized. Serialize it here.
ObjectSerializer object_serializer(this, obj, &sink_);
object_serializer.Serialize();
#ifdef DEBUG
- serialized_objects_.insert(obj);
+ if (*obj == ReadOnlyRoots(isolate()).not_mapped_symbol()) {
+ CHECK(!did_serialize_not_mapped_symbol_);
+ did_serialize_not_mapped_symbol_ = true;
+ } else {
+ CHECK_NULL(serialized_objects_.Find(obj));
+ // There's no "IdentitySet", so use an IdentityMap with a value that is
+ // later ignored.
+ serialized_objects_.Insert(obj, 0);
+ }
#endif
}
@@ -73,7 +92,11 @@ void ReadOnlySerializer::FinalizeSerialization() {
ReadOnlyHeapObjectIterator iterator(isolate()->read_only_heap());
for (HeapObject object = iterator.Next(); !object.is_null();
object = iterator.Next()) {
- CHECK(serialized_objects_.count(object));
+ if (object == ReadOnlyRoots(isolate()).not_mapped_symbol()) {
+ CHECK(did_serialize_not_mapped_symbol_);
+ } else {
+ CHECK_NOT_NULL(serialized_objects_.Find(object));
+ }
}
#endif
}
@@ -92,8 +115,8 @@ bool ReadOnlySerializer::MustBeDeferred(HeapObject object) {
}
bool ReadOnlySerializer::SerializeUsingReadOnlyObjectCache(
- SnapshotByteSink* sink, HeapObject obj) {
- if (!ReadOnlyHeap::Contains(obj)) return false;
+ SnapshotByteSink* sink, Handle<HeapObject> obj) {
+ if (!ReadOnlyHeap::Contains(*obj)) return false;
// Get the cache index and serialize it into the read-only snapshot if
// necessary.
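
The DEBUG-only bookkeeping in this file works around the lack of an "IdentitySet" by inserting a throwaway value into an IdentityMap and treating a non-null Find() as membership. A hedged sketch of the same idiom, with std::unordered_map standing in for V8's IdentityMap:

#include <cassert>
#include <unordered_map>

class IdentitySet {
 public:
  void Insert(void* object) { map_[object] = 0; }  // the value 0 is ignored
  bool Contains(void* object) const {
    return map_.find(object) != map_.end();  // cf. Find(obj) != nullptr
  }

 private:
  std::unordered_map<void*, int> map_;
};

int main() {
  int a = 0, b = 0;
  IdentitySet set;
  set.Insert(&a);
  assert(set.Contains(&a) && !set.Contains(&b));
}
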
diff --git a/deps/v8/src/snapshot/read-only-serializer.h b/deps/v8/src/snapshot/read-only-serializer.h
index f30b2c30ba..fd88b9f7b6 100644
--- a/deps/v8/src/snapshot/read-only-serializer.h
+++ b/deps/v8/src/snapshot/read-only-serializer.h
@@ -7,6 +7,7 @@
#include <unordered_set>
+#include "src/base/hashmap.h"
#include "src/snapshot/roots-serializer.h"
namespace v8 {
@@ -19,6 +20,8 @@ class V8_EXPORT_PRIVATE ReadOnlySerializer : public RootsSerializer {
public:
ReadOnlySerializer(Isolate* isolate, Snapshot::SerializerFlags flags);
~ReadOnlySerializer() override;
+ ReadOnlySerializer(const ReadOnlySerializer&) = delete;
+ ReadOnlySerializer& operator=(const ReadOnlySerializer&) = delete;
void SerializeReadOnlyRoots();
@@ -31,16 +34,16 @@ class V8_EXPORT_PRIVATE ReadOnlySerializer : public RootsSerializer {
// ReadOnlyObjectCache bytecode into |sink|. Returns whether this was
// successful.
bool SerializeUsingReadOnlyObjectCache(SnapshotByteSink* sink,
- HeapObject obj);
+ Handle<HeapObject> obj);
private:
- void SerializeObject(HeapObject o) override;
+ void SerializeObjectImpl(Handle<HeapObject> o) override;
bool MustBeDeferred(HeapObject object) override;
#ifdef DEBUG
- std::unordered_set<HeapObject, Object::Hasher> serialized_objects_;
+ IdentityMap<int, base::DefaultAllocationPolicy> serialized_objects_;
+ bool did_serialize_not_mapped_symbol_;
#endif
- DISALLOW_COPY_AND_ASSIGN(ReadOnlySerializer);
};
} // namespace internal
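
Several headers in this patch replace the DISALLOW_COPY_AND_ASSIGN macro with explicitly deleted members, which expresses the same intent without a macro and yields clearer compiler diagnostics. The shape of the migration, in isolation:

class NonCopyable {
 public:
  NonCopyable() = default;
  // Previously: DISALLOW_COPY_AND_ASSIGN(NonCopyable) in a private section.
  NonCopyable(const NonCopyable&) = delete;
  NonCopyable& operator=(const NonCopyable&) = delete;
};
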
diff --git a/deps/v8/src/snapshot/references.h b/deps/v8/src/snapshot/references.h
index eed4def1ef..ecaedc41d4 100644
--- a/deps/v8/src/snapshot/references.h
+++ b/deps/v8/src/snapshot/references.h
@@ -8,78 +8,42 @@
#include "src/base/bit-field.h"
#include "src/base/hashmap.h"
#include "src/common/assert-scope.h"
+#include "src/execution/isolate.h"
+#include "src/utils/identity-map.h"
namespace v8 {
namespace internal {
-// TODO(goszczycki): Move this somewhere every file in src/snapshot can use it.
-// The spaces suported by the serializer. Spaces after LO_SPACE (NEW_LO_SPACE
-// and CODE_LO_SPACE) are not supported.
enum class SnapshotSpace : byte {
- kReadOnlyHeap = RO_SPACE,
- kOld = OLD_SPACE,
- kCode = CODE_SPACE,
- kMap = MAP_SPACE,
- kLargeObject = LO_SPACE,
- kNumberOfPreallocatedSpaces = kCode + 1,
- kNumberOfSpaces = kLargeObject + 1,
- kSpecialValueSpace = kNumberOfSpaces,
- // Number of spaces which should be allocated by the heap. Eventually
- // kReadOnlyHeap will move to the end of this enum and this will be equal to
- // it.
- kNumberOfHeapSpaces = kNumberOfSpaces,
+ kReadOnlyHeap,
+ kOld,
+ kCode,
+ kMap,
};
-
-constexpr bool IsPreAllocatedSpace(SnapshotSpace space) {
- return static_cast<int>(space) <
- static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
-}
+static constexpr int kNumberOfSnapshotSpaces =
+ static_cast<int>(SnapshotSpace::kMap) + 1;
class SerializerReference {
private:
enum SpecialValueType {
- kInvalidValue,
+ kBackReference,
kAttachedReference,
kOffHeapBackingStore,
kBuiltinReference,
};
- STATIC_ASSERT(static_cast<int>(SnapshotSpace::kSpecialValueSpace) <
- (1 << kSpaceTagSize));
-
SerializerReference(SpecialValueType type, uint32_t value)
- : bitfield_(SpaceBits::encode(SnapshotSpace::kSpecialValueSpace) |
- SpecialValueTypeBits::encode(type)),
- value_(value) {}
+ : bit_field_(TypeBits::encode(type) | ValueBits::encode(value)) {}
public:
- SerializerReference() : SerializerReference(kInvalidValue, 0) {}
-
- SerializerReference(SnapshotSpace space, uint32_t chunk_index,
- uint32_t chunk_offset)
- : bitfield_(SpaceBits::encode(space) |
- ChunkIndexBits::encode(chunk_index)),
- value_(chunk_offset) {}
-
- static SerializerReference BackReference(SnapshotSpace space,
- uint32_t chunk_index,
- uint32_t chunk_offset) {
- DCHECK(IsAligned(chunk_offset, kObjectAlignment));
- return SerializerReference(space, chunk_index, chunk_offset);
- }
-
- static SerializerReference MapReference(uint32_t index) {
- return SerializerReference(SnapshotSpace::kMap, 0, index);
+ static SerializerReference BackReference(uint32_t index) {
+ return SerializerReference(kBackReference, index);
}
static SerializerReference OffHeapBackingStoreReference(uint32_t index) {
return SerializerReference(kOffHeapBackingStore, index);
}
- static SerializerReference LargeObjectReference(uint32_t index) {
- return SerializerReference(SnapshotSpace::kLargeObject, 0, index);
- }
-
static SerializerReference AttachedReference(uint32_t index) {
return SerializerReference(kAttachedReference, index);
}
@@ -88,127 +52,94 @@ class SerializerReference {
return SerializerReference(kBuiltinReference, index);
}
- bool is_valid() const {
- return SpaceBits::decode(bitfield_) != SnapshotSpace::kSpecialValueSpace ||
- SpecialValueTypeBits::decode(bitfield_) != kInvalidValue;
- }
-
bool is_back_reference() const {
- return SpaceBits::decode(bitfield_) != SnapshotSpace::kSpecialValueSpace;
+ return TypeBits::decode(bit_field_) == kBackReference;
}
- SnapshotSpace space() const {
+ uint32_t back_ref_index() const {
DCHECK(is_back_reference());
- return SpaceBits::decode(bitfield_);
- }
-
- uint32_t chunk_offset() const {
- DCHECK(is_back_reference());
- return value_;
- }
-
- uint32_t chunk_index() const {
- DCHECK(IsPreAllocatedSpace(space()));
- return ChunkIndexBits::decode(bitfield_);
- }
-
- uint32_t map_index() const {
- DCHECK_EQ(SnapshotSpace::kMap, SpaceBits::decode(bitfield_));
- return value_;
+ return ValueBits::decode(bit_field_);
}
bool is_off_heap_backing_store_reference() const {
- return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
- SpecialValueTypeBits::decode(bitfield_) == kOffHeapBackingStore;
+ return TypeBits::decode(bit_field_) == kOffHeapBackingStore;
}
uint32_t off_heap_backing_store_index() const {
DCHECK(is_off_heap_backing_store_reference());
- return value_;
- }
-
- uint32_t large_object_index() const {
- DCHECK_EQ(SnapshotSpace::kLargeObject, SpaceBits::decode(bitfield_));
- return value_;
+ return ValueBits::decode(bit_field_);
}
bool is_attached_reference() const {
- return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
- SpecialValueTypeBits::decode(bitfield_) == kAttachedReference;
+ return TypeBits::decode(bit_field_) == kAttachedReference;
}
uint32_t attached_reference_index() const {
DCHECK(is_attached_reference());
- return value_;
+ return ValueBits::decode(bit_field_);
}
bool is_builtin_reference() const {
- return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
- SpecialValueTypeBits::decode(bitfield_) == kBuiltinReference;
+ return TypeBits::decode(bit_field_) == kBuiltinReference;
}
uint32_t builtin_index() const {
DCHECK(is_builtin_reference());
- return value_;
+ return ValueBits::decode(bit_field_);
}
private:
- using SpaceBits = base::BitField<SnapshotSpace, 0, kSpaceTagSize>;
- using ChunkIndexBits = SpaceBits::Next<uint32_t, 32 - kSpaceTagSize>;
- using SpecialValueTypeBits =
- SpaceBits::Next<SpecialValueType, 32 - kSpaceTagSize>;
-
- // We use two fields to store a reference.
- // In case of a normal back reference, the bitfield_ stores the space and
- // the chunk index. In case of special references, it uses a special value
- // for space and stores the special value type.
- uint32_t bitfield_;
- // value_ stores either chunk offset or special value.
- uint32_t value_;
+ using TypeBits = base::BitField<SpecialValueType, 0, 2>;
+ using ValueBits = TypeBits::Next<uint32_t, 32 - TypeBits::kSize>;
+
+ uint32_t bit_field_;
friend class SerializerReferenceMap;
};
-class SerializerReferenceMap
- : public base::TemplateHashMapImpl<uintptr_t, SerializerReference,
- base::KeyEqualityMatcher<intptr_t>,
- base::DefaultAllocationPolicy> {
+// SerializerReference has to fit in an IdentityMap value field.
+STATIC_ASSERT(sizeof(SerializerReference) <= sizeof(void*));
+
+class SerializerReferenceMap {
public:
- using Entry = base::TemplateHashMapEntry<uintptr_t, SerializerReference>;
+ explicit SerializerReferenceMap(Isolate* isolate)
+ : map_(isolate->heap()), attached_reference_index_(0) {}
+
+ const SerializerReference* LookupReference(HeapObject object) const {
+ return map_.Find(object);
+ }
+
+ const SerializerReference* LookupReference(Handle<HeapObject> object) const {
+ return map_.Find(object);
+ }
- SerializerReferenceMap() : attached_reference_index_(0) {}
+ const SerializerReference* LookupBackingStore(void* backing_store) const {
+ auto it = backing_store_map_.find(backing_store);
+ if (it == backing_store_map_.end()) return nullptr;
+ return &it->second;
+ }
- SerializerReference LookupReference(void* value) const {
- uintptr_t key = Key(value);
- Entry* entry = Lookup(key, Hash(key));
- if (entry == nullptr) return SerializerReference();
- return entry->value;
+ void Add(HeapObject object, SerializerReference reference) {
+ DCHECK_NULL(LookupReference(object));
+ map_.Insert(object, reference);
}
- void Add(void* obj, SerializerReference reference) {
- DCHECK(reference.is_valid());
- DCHECK(!LookupReference(obj).is_valid());
- uintptr_t key = Key(obj);
- LookupOrInsert(key, Hash(key))->value = reference;
+ void AddBackingStore(void* backing_store, SerializerReference reference) {
+ DCHECK(backing_store_map_.find(backing_store) == backing_store_map_.end());
+ backing_store_map_.emplace(backing_store, reference);
}
- SerializerReference AddAttachedReference(void* attached_reference) {
+ SerializerReference AddAttachedReference(HeapObject object) {
SerializerReference reference =
SerializerReference::AttachedReference(attached_reference_index_++);
- Add(attached_reference, reference);
+ map_.Insert(object, reference);
return reference;
}
private:
- static inline uintptr_t Key(void* value) {
- return reinterpret_cast<uintptr_t>(value);
- }
-
- static uint32_t Hash(uintptr_t key) { return static_cast<uint32_t>(key); }
-
- DISALLOW_HEAP_ALLOCATION(no_allocation_)
+ IdentityMap<SerializerReference, base::DefaultAllocationPolicy> map_;
+ std::unordered_map<void*, SerializerReference> backing_store_map_;
int attached_reference_index_;
- DISALLOW_COPY_AND_ASSIGN(SerializerReferenceMap);
};
} // namespace internal
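
The rewritten SerializerReference above packs a 2-bit SpecialValueType tag and a 30-bit payload into a single uint32_t via base::BitField, which is what lets the STATIC_ASSERT guarantee it fits in an IdentityMap value slot. A hand-rolled sketch of that encode/decode arithmetic (illustrative only; V8's BitField template does the same with more compile-time checking):

#include <cassert>
#include <cstdint>

enum SpecialValueType : uint32_t {
  kBackReference,
  kAttachedReference,
  kOffHeapBackingStore,
  kBuiltinReference,
};

constexpr uint32_t kTypeBits = 2;  // four types fit in two bits

constexpr uint32_t Encode(SpecialValueType type, uint32_t value) {
  return static_cast<uint32_t>(type) | (value << kTypeBits);
}
constexpr SpecialValueType DecodeType(uint32_t bit_field) {
  return static_cast<SpecialValueType>(bit_field & ((1u << kTypeBits) - 1));
}
constexpr uint32_t DecodeValue(uint32_t bit_field) {
  return bit_field >> kTypeBits;
}

int main() {
  uint32_t ref = Encode(kBackReference, 12345);
  assert(DecodeType(ref) == kBackReference);
  assert(DecodeValue(ref) == 12345);
}
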
diff --git a/deps/v8/src/snapshot/roots-serializer.cc b/deps/v8/src/snapshot/roots-serializer.cc
index 6a8f2bb05e..7e459ee811 100644
--- a/deps/v8/src/snapshot/roots-serializer.cc
+++ b/deps/v8/src/snapshot/roots-serializer.cc
@@ -17,6 +17,7 @@ RootsSerializer::RootsSerializer(Isolate* isolate,
RootIndex first_root_to_be_serialized)
: Serializer(isolate, flags),
first_root_to_be_serialized_(first_root_to_be_serialized),
+ object_cache_index_map_(isolate->heap()),
can_be_rehashed_(true) {
for (size_t i = 0; i < static_cast<size_t>(first_root_to_be_serialized);
++i) {
@@ -24,7 +25,7 @@ RootsSerializer::RootsSerializer(Isolate* isolate,
}
}
-int RootsSerializer::SerializeInObjectCache(HeapObject heap_object) {
+int RootsSerializer::SerializeInObjectCache(Handle<HeapObject> heap_object) {
int index;
if (!object_cache_index_map_.LookupOrInsert(heap_object, &index)) {
// This object is not part of the object cache yet. Add it to the cache so
diff --git a/deps/v8/src/snapshot/roots-serializer.h b/deps/v8/src/snapshot/roots-serializer.h
index be41d7220f..7a699a7645 100644
--- a/deps/v8/src/snapshot/roots-serializer.h
+++ b/deps/v8/src/snapshot/roots-serializer.h
@@ -26,6 +26,8 @@ class RootsSerializer : public Serializer {
// are already serialized.
RootsSerializer(Isolate* isolate, Snapshot::SerializerFlags flags,
RootIndex first_root_to_be_serialized);
+ RootsSerializer(const RootsSerializer&) = delete;
+ RootsSerializer& operator=(const RootsSerializer&) = delete;
bool can_be_rehashed() const { return can_be_rehashed_; }
bool root_has_been_serialized(RootIndex root_index) const {
@@ -42,7 +44,7 @@ class RootsSerializer : public Serializer {
void CheckRehashability(HeapObject obj);
// Serializes |object| if not previously seen and returns its cache index.
- int SerializeInObjectCache(HeapObject object);
+ int SerializeInObjectCache(Handle<HeapObject> object);
private:
void VisitRootPointers(Root root, const char* description,
@@ -55,8 +57,6 @@ class RootsSerializer : public Serializer {
// Indicates whether we only serialized hash tables that we can rehash.
// TODO(yangguo): generalize rehashing, and remove this flag.
bool can_be_rehashed_;
-
- DISALLOW_COPY_AND_ASSIGN(RootsSerializer);
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/serializer-allocator.cc b/deps/v8/src/snapshot/serializer-allocator.cc
deleted file mode 100644
index a1bd9f43eb..0000000000
--- a/deps/v8/src/snapshot/serializer-allocator.cc
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/snapshot/serializer-allocator.h"
-
-#include "src/heap/heap-inl.h" // crbug.com/v8/8499
-#include "src/snapshot/references.h"
-#include "src/snapshot/serializer.h"
-#include "src/snapshot/snapshot-source-sink.h"
-
-namespace v8 {
-namespace internal {
-
-SerializerAllocator::SerializerAllocator(Serializer* serializer)
- : serializer_(serializer) {
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- pending_chunk_[i] = 0;
- }
-}
-
-void SerializerAllocator::UseCustomChunkSize(uint32_t chunk_size) {
- custom_chunk_size_ = chunk_size;
-}
-
-static uint32_t PageSizeOfSpace(SnapshotSpace space) {
- return static_cast<uint32_t>(
- MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
- static_cast<AllocationSpace>(space)));
-}
-
-uint32_t SerializerAllocator::TargetChunkSize(SnapshotSpace space) {
- if (custom_chunk_size_ == 0) return PageSizeOfSpace(space);
- DCHECK_LE(custom_chunk_size_, PageSizeOfSpace(space));
- return custom_chunk_size_;
-}
-
-SerializerReference SerializerAllocator::Allocate(SnapshotSpace space,
- uint32_t size) {
- const int space_number = static_cast<int>(space);
- DCHECK(IsPreAllocatedSpace(space));
- DCHECK(size > 0 && size <= PageSizeOfSpace(space));
-
- // Maps are allocated through AllocateMap.
- DCHECK_NE(SnapshotSpace::kMap, space);
-
- uint32_t old_chunk_size = pending_chunk_[space_number];
- uint32_t new_chunk_size = old_chunk_size + size;
- // Start a new chunk if the new size exceeds the target chunk size.
- // We may exceed the target chunk size if the single object size does.
- if (new_chunk_size > TargetChunkSize(space) && old_chunk_size != 0) {
- serializer_->PutNextChunk(space);
- completed_chunks_[space_number].push_back(pending_chunk_[space_number]);
- pending_chunk_[space_number] = 0;
- new_chunk_size = size;
- }
- uint32_t offset = pending_chunk_[space_number];
- pending_chunk_[space_number] = new_chunk_size;
- return SerializerReference::BackReference(
- space, static_cast<uint32_t>(completed_chunks_[space_number].size()),
- offset);
-}
-
-SerializerReference SerializerAllocator::AllocateMap() {
- // Maps are allocated one-by-one when deserializing.
- return SerializerReference::MapReference(num_maps_++);
-}
-
-SerializerReference SerializerAllocator::AllocateLargeObject(uint32_t size) {
- // Large objects are allocated one-by-one when deserializing. We do not
- // have to keep track of multiple chunks.
- large_objects_total_size_ += size;
- return SerializerReference::LargeObjectReference(seen_large_objects_index_++);
-}
-
-SerializerReference SerializerAllocator::AllocateOffHeapBackingStore() {
- DCHECK_NE(0, seen_backing_stores_index_);
- return SerializerReference::OffHeapBackingStoreReference(
- seen_backing_stores_index_++);
-}
-
-#ifdef DEBUG
-bool SerializerAllocator::BackReferenceIsAlreadyAllocated(
- SerializerReference reference) const {
- DCHECK(reference.is_back_reference());
- SnapshotSpace space = reference.space();
- if (space == SnapshotSpace::kLargeObject) {
- return reference.large_object_index() < seen_large_objects_index_;
- } else if (space == SnapshotSpace::kMap) {
- return reference.map_index() < num_maps_;
- } else if (space == SnapshotSpace::kReadOnlyHeap &&
- serializer_->isolate()->heap()->deserialization_complete()) {
- // If not deserializing the isolate itself, then we create BackReferences
- // for all read-only heap objects without ever allocating.
- return true;
- } else {
- const int space_number = static_cast<int>(space);
- size_t chunk_index = reference.chunk_index();
- if (chunk_index == completed_chunks_[space_number].size()) {
- return reference.chunk_offset() < pending_chunk_[space_number];
- } else {
- return chunk_index < completed_chunks_[space_number].size() &&
- reference.chunk_offset() <
- completed_chunks_[space_number][chunk_index];
- }
- }
-}
-#endif
-
-std::vector<SerializedData::Reservation>
-SerializerAllocator::EncodeReservations() const {
- std::vector<SerializedData::Reservation> out;
-
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- for (size_t j = 0; j < completed_chunks_[i].size(); j++) {
- out.emplace_back(completed_chunks_[i][j]);
- }
-
- if (pending_chunk_[i] > 0 || completed_chunks_[i].size() == 0) {
- out.emplace_back(pending_chunk_[i]);
- }
- out.back().mark_as_last();
- }
-
- STATIC_ASSERT(SnapshotSpace::kMap ==
- SnapshotSpace::kNumberOfPreallocatedSpaces);
- out.emplace_back(num_maps_ * Map::kSize);
- out.back().mark_as_last();
-
- STATIC_ASSERT(static_cast<int>(SnapshotSpace::kLargeObject) ==
- static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces) +
- 1);
- out.emplace_back(large_objects_total_size_);
- out.back().mark_as_last();
-
- return out;
-}
-
-void SerializerAllocator::OutputStatistics() {
- DCHECK(FLAG_serialization_statistics);
-
- PrintF(" Spaces (bytes):\n");
-
- for (int space = 0; space < kNumberOfSpaces; space++) {
- PrintF("%16s",
- BaseSpace::GetSpaceName(static_cast<AllocationSpace>(space)));
- }
- PrintF("\n");
-
- for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
- size_t s = pending_chunk_[space];
- for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
- PrintF("%16zu", s);
- }
-
- STATIC_ASSERT(SnapshotSpace::kMap ==
- SnapshotSpace::kNumberOfPreallocatedSpaces);
- PrintF("%16d", num_maps_ * Map::kSize);
-
- STATIC_ASSERT(static_cast<int>(SnapshotSpace::kLargeObject) ==
- static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces) +
- 1);
- PrintF("%16d\n", large_objects_total_size_);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/snapshot/serializer-allocator.h b/deps/v8/src/snapshot/serializer-allocator.h
deleted file mode 100644
index 51264961cd..0000000000
--- a/deps/v8/src/snapshot/serializer-allocator.h
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SNAPSHOT_SERIALIZER_ALLOCATOR_H_
-#define V8_SNAPSHOT_SERIALIZER_ALLOCATOR_H_
-
-#include "src/snapshot/references.h"
-#include "src/snapshot/snapshot-data.h"
-
-namespace v8 {
-namespace internal {
-
-class Serializer;
-
-class SerializerAllocator final {
- public:
- explicit SerializerAllocator(Serializer* serializer);
-
- SerializerReference Allocate(SnapshotSpace space, uint32_t size);
- SerializerReference AllocateMap();
- SerializerReference AllocateLargeObject(uint32_t size);
- SerializerReference AllocateOffHeapBackingStore();
-
- void UseCustomChunkSize(uint32_t chunk_size);
-
-#ifdef DEBUG
- bool BackReferenceIsAlreadyAllocated(
- SerializerReference back_reference) const;
-#endif
-
- std::vector<SerializedData::Reservation> EncodeReservations() const;
-
- void OutputStatistics();
-
- private:
- // We try to not exceed this size for every chunk. We will not succeed for
- // larger objects though.
- uint32_t TargetChunkSize(SnapshotSpace space);
-
- static constexpr int kNumberOfPreallocatedSpaces =
- static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
- static constexpr int kNumberOfSpaces =
- static_cast<int>(SnapshotSpace::kNumberOfSpaces);
-
- // Objects from the same space are put into chunks for bulk-allocation
- // when deserializing. We have to make sure that each chunk fits into a
- // page. So we track the chunk size in pending_chunk_ of a space, but
- // when it exceeds a page, we complete the current chunk and start a new one.
- uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
- std::vector<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
-
- // Number of maps that we need to allocate.
- uint32_t num_maps_ = 0;
-
- // We map serialized large objects to indexes for back-referencing.
- uint32_t large_objects_total_size_ = 0;
- uint32_t seen_large_objects_index_ = 0;
-
- // Used to keep track of the off-heap backing stores used by TypedArrays/
- // ArrayBuffers. Note that the index begins at 1 and not 0, because when a
- // TypedArray has an on-heap backing store, the backing_store pointer in the
- // corresponding ArrayBuffer will be null, which makes it indistinguishable
- // from index 0.
- uint32_t seen_backing_stores_index_ = 1;
-
- uint32_t custom_chunk_size_ = 0;
-
- // The current serializer.
- Serializer* const serializer_;
-
- DISALLOW_COPY_AND_ASSIGN(SerializerAllocator);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SNAPSHOT_SERIALIZER_ALLOCATOR_H_
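
With the chunk-based SerializerAllocator deleted, back references collapse to a single monotonically increasing index: each object that finishes serializing claims the next slot, and later occurrences emit kBackref plus that index. A rough stand-in for the new bookkeeping (the names and the pointer-keyed map are illustrative, not V8's types):

#include <cassert>
#include <cstdint>
#include <unordered_map>

class BackRefRegistry {
 public:
  // Called once per object as it finishes serializing (cf. SerializePrologue).
  uint32_t Register(const void* object) {
    uint32_t index = num_back_refs_++;
    map_[object] = index;
    return index;
  }

  // Called on a repeat encounter (cf. SerializeBackReference).
  bool Lookup(const void* object, uint32_t* index) const {
    auto it = map_.find(object);
    if (it == map_.end()) return false;
    *index = it->second;
    return true;
  }

 private:
  uint32_t num_back_refs_ = 0;
  std::unordered_map<const void*, uint32_t> map_;
};

int main() {
  int a = 0;
  BackRefRegistry refs;
  uint32_t idx = refs.Register(&a);
  uint32_t found = 0;
  assert(refs.Lookup(&a, &found) && found == idx);
}
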
diff --git a/deps/v8/src/snapshot/serializer-deserializer.cc b/deps/v8/src/snapshot/serializer-deserializer.cc
index 4055d4cca3..afa41e7d03 100644
--- a/deps/v8/src/snapshot/serializer-deserializer.cc
+++ b/deps/v8/src/snapshot/serializer-deserializer.cc
@@ -30,35 +30,26 @@ void SerializerDeserializer::Iterate(Isolate* isolate, RootVisitor* visitor) {
}
bool SerializerDeserializer::CanBeDeferred(HeapObject o) {
- // 1. Maps cannot be deferred as objects are expected to have a valid map
- // immediately.
- // 2. Internalized strings cannot be deferred as they might be
+ // Maps cannot be deferred as objects are expected to have a valid map
+ // immediately. Internalized strings cannot be deferred as they might be
// converted to thin strings during post processing, at which point forward
// references to the now-thin string will already have been written.
- // 3. JS objects with embedder fields cannot be deferred because the
- // serialize/deserialize callbacks need the back reference immediately to
- // identify the object.
// TODO(leszeks): Could we defer string serialization if forward references
// were resolved after object post processing?
- return !o.IsMap() && !o.IsInternalizedString() &&
- !(o.IsJSObject() && JSObject::cast(o).GetEmbedderFieldCount() > 0);
+ return !o.IsMap() && !o.IsInternalizedString();
}
-void SerializerDeserializer::RestoreExternalReferenceRedirectors(
- Isolate* isolate, const std::vector<AccessorInfo>& accessor_infos) {
+void SerializerDeserializer::RestoreExternalReferenceRedirector(
+ Isolate* isolate, Handle<AccessorInfo> accessor_info) {
// Restore wiped accessor infos.
- for (AccessorInfo info : accessor_infos) {
- Foreign::cast(info.js_getter())
- .set_foreign_address(isolate, info.redirected_getter());
- }
+ Foreign::cast(accessor_info->js_getter())
+ .set_foreign_address(isolate, accessor_info->redirected_getter());
}
-void SerializerDeserializer::RestoreExternalReferenceRedirectors(
- Isolate* isolate, const std::vector<CallHandlerInfo>& call_handler_infos) {
- for (CallHandlerInfo info : call_handler_infos) {
- Foreign::cast(info.js_callback())
- .set_foreign_address(isolate, info.redirected_callback());
- }
+void SerializerDeserializer::RestoreExternalReferenceRedirector(
+ Isolate* isolate, Handle<CallHandlerInfo> call_handler_info) {
+ Foreign::cast(call_handler_info->js_callback())
+ .set_foreign_address(isolate, call_handler_info->redirected_callback());
}
} // namespace internal
diff --git a/deps/v8/src/snapshot/serializer-deserializer.h b/deps/v8/src/snapshot/serializer-deserializer.h
index c6c381192e..0e156f75a0 100644
--- a/deps/v8/src/snapshot/serializer-deserializer.h
+++ b/deps/v8/src/snapshot/serializer-deserializer.h
@@ -23,58 +23,20 @@ class SerializerDeserializer : public RootVisitor {
static void Iterate(Isolate* isolate, RootVisitor* visitor);
protected:
- class HotObjectsList {
- public:
- HotObjectsList() = default;
-
- void Add(HeapObject object) {
- DCHECK(!AllowGarbageCollection::IsAllowed());
- circular_queue_[index_] = object;
- index_ = (index_ + 1) & kSizeMask;
- }
-
- HeapObject Get(int index) {
- DCHECK(!AllowGarbageCollection::IsAllowed());
- DCHECK(!circular_queue_[index].is_null());
- return circular_queue_[index];
- }
-
- static const int kNotFound = -1;
-
- int Find(HeapObject object) {
- DCHECK(!AllowGarbageCollection::IsAllowed());
- for (int i = 0; i < kSize; i++) {
- if (circular_queue_[i] == object) return i;
- }
- return kNotFound;
- }
-
- static const int kSize = 8;
-
- private:
- STATIC_ASSERT(base::bits::IsPowerOfTwo(kSize));
- static const int kSizeMask = kSize - 1;
- HeapObject circular_queue_[kSize];
- int index_ = 0;
-
- DISALLOW_COPY_AND_ASSIGN(HotObjectsList);
- };
-
static bool CanBeDeferred(HeapObject o);
- void RestoreExternalReferenceRedirectors(
- Isolate* isolate, const std::vector<AccessorInfo>& accessor_infos);
- void RestoreExternalReferenceRedirectors(
- Isolate* isolate, const std::vector<CallHandlerInfo>& call_handler_infos);
-
- static const int kNumberOfSpaces =
- static_cast<int>(SnapshotSpace::kNumberOfSpaces);
+ void RestoreExternalReferenceRedirector(Isolate* isolate,
+ Handle<AccessorInfo> accessor_info);
+ void RestoreExternalReferenceRedirector(
+ Isolate* isolate, Handle<CallHandlerInfo> call_handler_info);
// clang-format off
#define UNUSED_SERIALIZER_BYTE_CODES(V) \
- V(0x05) V(0x06) V(0x07) V(0x0d) V(0x0e) V(0x0f) \
- /* Free range 0x2a..0x2f */ \
- V(0x2a) V(0x2b) V(0x2c) V(0x2d) V(0x2e) V(0x2f) \
+ /* Free range 0x1c..0x1f */ \
+ V(0x1c) V(0x1d) V(0x1e) V(0x1f) \
+ /* Free range 0x20..0x2f */ \
+ V(0x20) V(0x21) V(0x22) V(0x23) V(0x24) V(0x25) V(0x26) V(0x27) \
+ V(0x28) V(0x29) V(0x2a) V(0x2b) V(0x2c) V(0x2d) V(0x2e) V(0x2f) \
/* Free range 0x30..0x3f */ \
V(0x30) V(0x31) V(0x32) V(0x33) V(0x34) V(0x35) V(0x36) V(0x37) \
V(0x38) V(0x39) V(0x3a) V(0x3b) V(0x3c) V(0x3d) V(0x3e) V(0x3f) \
@@ -103,7 +65,7 @@ class SerializerDeserializer : public RootVisitor {
// The static assert below will trigger when the number of preallocated spaces
// changed. If that happens, update the kNewObject and kBackref bytecode
// ranges in the comments below.
- STATIC_ASSERT(5 == kNumberOfSpaces);
+ STATIC_ASSERT(4 == kNumberOfSnapshotSpaces);
// First 32 root array items.
static const int kRootArrayConstantsCount = 0x20;
@@ -115,27 +77,20 @@ class SerializerDeserializer : public RootVisitor {
// 8 hot (recently seen or back-referenced) objects with optional skip.
static const int kHotObjectCount = 8;
- STATIC_ASSERT(kHotObjectCount == HotObjectsList::kSize);
-
- // 3 alignment prefixes
- static const int kAlignmentPrefixCount = 3;
enum Bytecode : byte {
//
- // ---------- byte code range 0x00..0x0f ----------
+ // ---------- byte code range 0x00..0x1b ----------
//
- // 0x00..0x04 Allocate new object, in specified space.
+ // 0x00..0x03 Allocate new object, in specified space.
kNewObject = 0x00,
- // 0x08..0x0c Reference to previous object from specified space.
- kBackref = 0x08,
-
- //
- // ---------- byte code range 0x10..0x27 ----------
- //
-
+ // Reference to previously allocated object.
+ kBackref = 0x04,
+ // Reference to an object in the read only heap.
+ kReadOnlyHeapRef,
// Object in the startup object cache.
- kStartupObjectCache = 0x10,
+ kStartupObjectCache,
// Root array item.
kRootArray,
// Object provided in the attached list.
@@ -144,16 +99,12 @@ class SerializerDeserializer : public RootVisitor {
kReadOnlyObjectCache,
// Do nothing, used for padding.
kNop,
- // Move to next reserved chunk.
- kNextChunk,
- // 3 alignment prefixes 0x16..0x18
- kAlignmentPrefix = 0x16,
// A tag emitted at strategic points in the snapshot to delineate sections.
// If the deserializer does not find these at the expected moments then it
// is an indication that the snapshot and the VM do not fit together.
// Examine the build process for architecture, version or configuration
// mismatches.
- kSynchronize = 0x19,
+ kSynchronize,
// Repeats of variable length.
kVariableRepeat,
// Used for embedder-allocated backing stores for TypedArrays.
@@ -161,7 +112,6 @@ class SerializerDeserializer : public RootVisitor {
// Used for embedder-provided serialization data for embedder fields.
kEmbedderFieldsData,
// Raw data of variable length.
- kVariableRawCode,
kVariableRawData,
// Used to encode external references provided through the API.
kApiReference,
@@ -193,6 +143,9 @@ class SerializerDeserializer : public RootVisitor {
// register as the pending field. We could either hack around this, or
// simply introduce this new bytecode.
kNewMetaMap,
+ // Special construction bytecode for Code object bodies, which have a more
+ // complex deserialization ordering and RelocInfo processing.
+ kCodeBody,
//
// ---------- byte code range 0x40..0x7f ----------
@@ -248,15 +201,14 @@ class SerializerDeserializer : public RootVisitor {
template <Bytecode bytecode>
using SpaceEncoder =
- BytecodeValueEncoder<bytecode, 0, kNumberOfSpaces - 1, SnapshotSpace>;
+ BytecodeValueEncoder<bytecode, 0, kNumberOfSnapshotSpaces - 1,
+ SnapshotSpace>;
using NewObject = SpaceEncoder<kNewObject>;
- using BackRef = SpaceEncoder<kBackref>;
//
// Some other constants.
//
- static const SnapshotSpace kAnyOldSpace = SnapshotSpace::kNumberOfSpaces;
// Sentinel after a new object to indicate that double alignment is needed.
static const int kDoubleAlignmentSentinel = 0;
@@ -303,8 +255,9 @@ class SerializerDeserializer : public RootVisitor {
RootIndex>;
using HotObject = BytecodeValueEncoder<kHotObject, 0, kHotObjectCount - 1>;
- // ---------- member variable ----------
- HotObjectsList hot_objects_;
+ // This backing store reference value represents nullptr values during
+ // serialization/deserialization.
+ static const uint32_t kNullRefSentinel = 0;
};
} // namespace internal
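
The HotObjectsList deleted from this header is a fixed-size circular queue: with a power-of-two capacity, the insert cursor wraps with a bit mask rather than a modulo, and lookups linearly scan all eight slots. A simplified reimplementation of that idiom (the value type and test values are chosen for illustration):

#include <cassert>
#include <cstdint>

template <typename T, int kSize = 8>
class HotList {
  static_assert((kSize & (kSize - 1)) == 0, "kSize must be a power of two");

 public:
  static const int kNotFound = -1;

  void Add(T value) {
    queue_[index_] = value;
    index_ = (index_ + 1) & (kSize - 1);  // wrap without a division
  }

  int Find(T value) const {
    for (int i = 0; i < kSize; i++) {
      if (queue_[i] == value) return i;
    }
    return kNotFound;
  }

 private:
  T queue_[kSize] = {};
  int index_ = 0;
};

int main() {
  HotList<uint32_t> hot;
  for (uint32_t v = 1; v <= 10; v++) hot.Add(v);  // 9 and 10 evict 1 and 2
  assert(hot.Find(10) != HotList<uint32_t>::kNotFound);
  assert(hot.Find(1) == HotList<uint32_t>::kNotFound);
}
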
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 4a18383e45..a0088315d3 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -14,21 +14,32 @@
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/map.h"
+#include "src/objects/objects-body-descriptors-inl.h"
#include "src/objects/slots-inl.h"
#include "src/objects/smi.h"
+#include "src/snapshot/serializer-deserializer.h"
namespace v8 {
namespace internal {
Serializer::Serializer(Isolate* isolate, Snapshot::SerializerFlags flags)
: isolate_(isolate),
+ hot_objects_(isolate->heap()),
+ reference_map_(isolate),
external_reference_encoder_(isolate),
root_index_map_(isolate),
- flags_(flags),
- allocator_(this) {
+ deferred_objects_(isolate->heap()),
+ forward_refs_per_pending_object_(isolate->heap()),
+ flags_(flags)
+#ifdef DEBUG
+ ,
+ back_refs_(isolate->heap()),
+ stack_(isolate->heap())
+#endif
+{
#ifdef OBJECT_PRINT
if (FLAG_serialization_statistics) {
- for (int space = 0; space < kNumberOfSpaces; ++space) {
+ for (int space = 0; space < kNumberOfSnapshotSpaces; ++space) {
// Value-initialized to 0.
instance_type_count_[space] = std::make_unique<int[]>(kInstanceTypes);
instance_type_size_[space] = std::make_unique<size_t[]>(kInstanceTypes);
@@ -37,25 +48,47 @@ Serializer::Serializer(Isolate* isolate, Snapshot::SerializerFlags flags)
#endif // OBJECT_PRINT
}
-#ifdef OBJECT_PRINT
-void Serializer::CountInstanceType(Map map, int size, SnapshotSpace space) {
+void Serializer::CountAllocation(Map map, int size, SnapshotSpace space) {
+ DCHECK(FLAG_serialization_statistics);
+
const int space_number = static_cast<int>(space);
+ allocation_size_[space_number] += size;
+#ifdef OBJECT_PRINT
int instance_type = map.instance_type();
instance_type_count_[space_number][instance_type]++;
instance_type_size_[space_number][instance_type] += size;
-}
#endif // OBJECT_PRINT
+}
+
+int Serializer::TotalAllocationSize() const {
+ int sum = 0;
+ for (int space = 0; space < kNumberOfSnapshotSpaces; space++) {
+ sum += allocation_size_[space];
+ }
+ return sum;
+}
void Serializer::OutputStatistics(const char* name) {
if (!FLAG_serialization_statistics) return;
PrintF("%s:\n", name);
- allocator()->OutputStatistics();
+
+ PrintF(" Spaces (bytes):\n");
+
+ for (int space = 0; space < kNumberOfSnapshotSpaces; space++) {
+ PrintF("%16s",
+ BaseSpace::GetSpaceName(static_cast<AllocationSpace>(space)));
+ }
+ PrintF("\n");
+
+ for (int space = 0; space < kNumberOfSnapshotSpaces; space++) {
+ PrintF("%16zu", allocation_size_[space]);
+ }
#ifdef OBJECT_PRINT
PrintF(" Instance types (count and bytes):\n");
#define PRINT_INSTANCE_TYPE(Name) \
- for (int space = 0; space < kNumberOfSpaces; ++space) { \
+ for (int space = 0; space < kNumberOfSnapshotSpaces; ++space) { \
if (instance_type_count_[space][Name]) { \
PrintF("%10d %10zu %-10s %s\n", instance_type_count_[space][Name], \
instance_type_size_[space][Name], \
@@ -74,15 +107,24 @@ void Serializer::SerializeDeferredObjects() {
if (FLAG_trace_serializer) {
PrintF("Serializing deferred objects\n");
}
- while (!deferred_objects_.empty()) {
- HeapObject obj = deferred_objects_.back();
- deferred_objects_.pop_back();
+ WHILE_WITH_HANDLE_SCOPE(isolate(), !deferred_objects_.empty(), {
+ Handle<HeapObject> obj = handle(deferred_objects_.Pop(), isolate());
+
ObjectSerializer obj_serializer(this, obj, &sink_);
obj_serializer.SerializeDeferred();
- }
+ });
sink_.Put(kSynchronize, "Finished with deferred objects");
}
+void Serializer::SerializeObject(Handle<HeapObject> obj) {
+ // ThinStrings are just an indirection to an internalized string, so elide the
+ // indirection and serialize the actual string directly.
+ if (obj->IsThinString(isolate())) {
+ obj = handle(ThinString::cast(*obj).actual(isolate()), isolate());
+ }
+ SerializeObjectImpl(obj);
+}
+
bool Serializer::MustBeDeferred(HeapObject object) { return false; }
void Serializer::VisitRootPointers(Root root, const char* description,
@@ -97,7 +139,7 @@ void Serializer::SerializeRootObject(FullObjectSlot slot) {
if (o.IsSmi()) {
PutSmiRoot(slot);
} else {
- SerializeObject(HeapObject::cast(o));
+ SerializeObject(Handle<HeapObject>(slot.location()));
}
}
@@ -106,88 +148,87 @@ void Serializer::PrintStack() { PrintStack(std::cout); }
void Serializer::PrintStack(std::ostream& out) {
for (const auto o : stack_) {
- o.Print(out);
+ o->Print(out);
out << "\n";
}
}
#endif // DEBUG
-bool Serializer::SerializeRoot(HeapObject obj) {
+bool Serializer::SerializeRoot(Handle<HeapObject> obj) {
RootIndex root_index;
// Derived serializers are responsible for determining if the root has
// actually been serialized before calling this.
- if (root_index_map()->Lookup(obj, &root_index)) {
- PutRoot(root_index, obj);
+ if (root_index_map()->Lookup(*obj, &root_index)) {
+ PutRoot(root_index);
return true;
}
return false;
}
-bool Serializer::SerializeHotObject(HeapObject obj) {
+bool Serializer::SerializeHotObject(Handle<HeapObject> obj) {
// Encode a reference to a hot object by its index in the working set.
- int index = hot_objects_.Find(obj);
+ int index = hot_objects_.Find(*obj);
if (index == HotObjectsList::kNotFound) return false;
DCHECK(index >= 0 && index < kHotObjectCount);
if (FLAG_trace_serializer) {
PrintF(" Encoding hot object %d:", index);
- obj.ShortPrint();
+ obj->ShortPrint();
PrintF("\n");
}
sink_.Put(HotObject::Encode(index), "HotObject");
return true;
}
-bool Serializer::SerializeBackReference(HeapObject obj) {
- SerializerReference reference =
- reference_map_.LookupReference(reinterpret_cast<void*>(obj.ptr()));
- if (!reference.is_valid()) return false;
+bool Serializer::SerializeBackReference(Handle<HeapObject> obj) {
+ const SerializerReference* reference = reference_map_.LookupReference(obj);
+ if (reference == nullptr) return false;
// Encode the location of an already deserialized object in order to write
// its location into a later object. We can encode the location as an
// offset from the start of the deserialized objects or as an offset
// backwards from the current allocation pointer.
- if (reference.is_attached_reference()) {
+ if (reference->is_attached_reference()) {
if (FLAG_trace_serializer) {
PrintF(" Encoding attached reference %d\n",
- reference.attached_reference_index());
+ reference->attached_reference_index());
}
- PutAttachedReference(reference);
+ PutAttachedReference(*reference);
} else {
- DCHECK(reference.is_back_reference());
+ DCHECK(reference->is_back_reference());
if (FLAG_trace_serializer) {
PrintF(" Encoding back reference to: ");
- obj.ShortPrint();
+ obj->ShortPrint();
PrintF("\n");
}
- PutAlignmentPrefix(obj);
- SnapshotSpace space = reference.space();
- sink_.Put(BackRef::Encode(space), "BackRef");
- PutBackReference(obj, reference);
+ sink_.Put(kBackref, "Backref");
+ PutBackReference(obj, *reference);
}
return true;
}
-bool Serializer::SerializePendingObject(HeapObject obj) {
- PendingObjectReference pending_obj =
- forward_refs_per_pending_object_.find(obj);
- if (pending_obj == forward_refs_per_pending_object_.end()) {
+bool Serializer::SerializePendingObject(Handle<HeapObject> obj) {
+ PendingObjectReferences* refs_to_object =
+ forward_refs_per_pending_object_.Find(obj);
+ if (refs_to_object == nullptr) {
return false;
}
- PutPendingForwardReferenceTo(pending_obj);
+ PutPendingForwardReference(*refs_to_object);
return true;
}
-bool Serializer::ObjectIsBytecodeHandler(HeapObject obj) const {
- if (!obj.IsCode()) return false;
- return (Code::cast(obj).kind() == CodeKind::BYTECODE_HANDLER);
+bool Serializer::ObjectIsBytecodeHandler(Handle<HeapObject> obj) const {
+ if (!obj->IsCode()) return false;
+ return (Code::cast(*obj).kind() == CodeKind::BYTECODE_HANDLER);
}
-void Serializer::PutRoot(RootIndex root, HeapObject object) {
+void Serializer::PutRoot(RootIndex root) {
int root_index = static_cast<int>(root);
+ Handle<HeapObject> object =
+ Handle<HeapObject>::cast(isolate()->root_handle(root));
if (FLAG_trace_serializer) {
PrintF(" Encoding root %d:", root_index);
- object.ShortPrint();
+ object->ShortPrint();
PrintF("\n");
}
@@ -198,12 +239,12 @@ void Serializer::PutRoot(RootIndex root, HeapObject object) {
// TODO(ulan): Check that it works with young large objects.
if (root_index < kRootArrayConstantsCount &&
- !Heap::InYoungGeneration(object)) {
+ !Heap::InYoungGeneration(*object)) {
sink_.Put(RootArrayConstant::Encode(root), "RootConstant");
} else {
sink_.Put(kRootArray, "RootSerialization");
sink_.PutInt(root_index, "root_index");
- hot_objects_.Add(object);
+ hot_objects_.Add(*object);
}
}
@@ -222,25 +263,11 @@ void Serializer::PutSmiRoot(FullObjectSlot slot) {
sink_.PutRaw(raw_value_as_bytes, bytes_to_output, "Bytes");
}
-void Serializer::PutBackReference(HeapObject object,
+void Serializer::PutBackReference(Handle<HeapObject> object,
SerializerReference reference) {
- DCHECK(allocator()->BackReferenceIsAlreadyAllocated(reference));
- switch (reference.space()) {
- case SnapshotSpace::kMap:
- sink_.PutInt(reference.map_index(), "BackRefMapIndex");
- break;
-
- case SnapshotSpace::kLargeObject:
- sink_.PutInt(reference.large_object_index(), "BackRefLargeObjectIndex");
- break;
-
- default:
- sink_.PutInt(reference.chunk_index(), "BackRefChunkIndex");
- sink_.PutInt(reference.chunk_offset(), "BackRefChunkOffset");
- break;
- }
-
- hot_objects_.Add(object);
+ DCHECK_EQ(*object, *back_refs_[reference.back_ref_index()]);
+ sink_.PutInt(reference.back_ref_index(), "BackRefIndex");
+ hot_objects_.Add(*object);
}
void Serializer::PutAttachedReference(SerializerReference reference) {
@@ -249,22 +276,6 @@ void Serializer::PutAttachedReference(SerializerReference reference) {
sink_.PutInt(reference.attached_reference_index(), "AttachedRefIndex");
}
-int Serializer::PutAlignmentPrefix(HeapObject object) {
- AllocationAlignment alignment = HeapObject::RequiredAlignment(object.map());
- if (alignment != kWordAligned) {
- DCHECK(1 <= alignment && alignment <= 3);
- byte prefix = (kAlignmentPrefix - 1) + alignment;
- sink_.Put(prefix, "Alignment");
- return Heap::GetMaximumFillToAlign(alignment);
- }
- return 0;
-}
-
-void Serializer::PutNextChunk(SnapshotSpace space) {
- sink_.Put(kNextChunk, "NextChunk");
- sink_.Put(static_cast<byte>(space), "NextChunkSpace");
-}
-
void Serializer::PutRepeat(int repeat_count) {
if (repeat_count <= kLastEncodableFixedRepeatCount) {
sink_.Put(FixedRepeatWithCount::Encode(repeat_count), "FixedRepeat");
@@ -274,13 +285,19 @@ void Serializer::PutRepeat(int repeat_count) {
}
}
-void Serializer::PutPendingForwardReferenceTo(
- PendingObjectReference reference) {
+void Serializer::PutPendingForwardReference(PendingObjectReferences& refs) {
sink_.Put(kRegisterPendingForwardRef, "RegisterPendingForwardRef");
unresolved_forward_refs_++;
// Register the current slot with the pending object.
int forward_ref_id = next_forward_ref_id_++;
- reference->second.push_back(forward_ref_id);
+ if (refs == nullptr) {
+ // The IdentityMap holding the pending object reference vectors does not
+ // support non-trivial types; in particular it doesn't support destructors
+ // on values. So, we manually allocate a vector with new, and delete it when
+ // resolving the pending object.
+ refs = new std::vector<int>();
+ }
+ refs->push_back(forward_ref_id);
}
void Serializer::ResolvePendingForwardReference(int forward_reference_id) {
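
The comment added in PutPendingForwardReference explains why the forward-ref vectors are newed and deleted by hand: the IdentityMap value slot only holds trivially destructible data. A reduced model of that ownership dance, with std::unordered_map standing in for the IdentityMap and integer ids standing in for heap objects:

#include <cassert>
#include <unordered_map>
#include <vector>

std::unordered_map<int, std::vector<int>*> pending;  // object id -> owned vector

void AddForwardRef(int object_id, int forward_ref_id) {
  std::vector<int>*& refs = pending[object_id];
  if (refs == nullptr) refs = new std::vector<int>();  // lazily allocated
  refs->push_back(forward_ref_id);
}

// Mirrors ResolvePendingObject: drain the refs, then free them manually.
int ResolveObject(int object_id) {
  auto it = pending.find(object_id);
  if (it == pending.end()) return 0;
  int resolved = static_cast<int>(it->second->size());
  delete it->second;
  pending.erase(it);
  return resolved;
}

int main() {
  AddForwardRef(7, 0);
  AddForwardRef(7, 1);
  assert(ResolveObject(7) == 2);
}
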
@@ -295,27 +312,34 @@ void Serializer::ResolvePendingForwardReference(int forward_reference_id) {
}
}
-Serializer::PendingObjectReference Serializer::RegisterObjectIsPending(
- HeapObject obj) {
+void Serializer::RegisterObjectIsPending(Handle<HeapObject> obj) {
+ if (*obj == ReadOnlyRoots(isolate()).not_mapped_symbol()) return;
+
// Add the given object to the pending objects -> forward refs map.
- auto forward_refs_entry_insertion =
- forward_refs_per_pending_object_.emplace(obj, std::vector<int>());
+ auto find_result = forward_refs_per_pending_object_.FindOrInsert(obj);
+ USE(find_result);
// If the above emplace didn't actually add the object, then the object must
// already have been registered pending by deferring. It might not be in the
// deferred objects queue though, since it may be the very object we just
// popped off that queue, so just check that it can be deferred.
- DCHECK_IMPLIES(!forward_refs_entry_insertion.second, CanBeDeferred(obj));
-
- // return the iterator into the map as the reference.
- return forward_refs_entry_insertion.first;
+ DCHECK_IMPLIES(find_result.already_exists, *find_result.entry != nullptr);
+ DCHECK_IMPLIES(find_result.already_exists, CanBeDeferred(*obj));
}
-void Serializer::ResolvePendingObject(Serializer::PendingObjectReference ref) {
- for (int index : ref->second) {
- ResolvePendingForwardReference(index);
+void Serializer::ResolvePendingObject(Handle<HeapObject> obj) {
+ if (*obj == ReadOnlyRoots(isolate()).not_mapped_symbol()) return;
+
+ std::vector<int>* refs;
+ CHECK(forward_refs_per_pending_object_.Delete(obj, &refs));
+ if (refs) {
+ for (int index : *refs) {
+ ResolvePendingForwardReference(index);
+ }
+ // See PutPendingForwardReference -- we have to manually manage the memory
+ // of non-trivial IdentityMap values.
+ delete refs;
}
- forward_refs_per_pending_object_.erase(ref);
}
void Serializer::Pad(int padding_offset) {
@@ -351,19 +375,17 @@ void Serializer::ObjectSerializer::SerializePrologue(SnapshotSpace space,
int size, Map map) {
if (serializer_->code_address_map_) {
const char* code_name =
- serializer_->code_address_map_->Lookup(object_.address());
+ serializer_->code_address_map_->Lookup(object_->address());
LOG(serializer_->isolate_,
- CodeNameEvent(object_.address(), sink_->Position(), code_name));
+ CodeNameEvent(object_->address(), sink_->Position(), code_name));
}
- SerializerReference back_reference;
- if (map == object_) {
- DCHECK_EQ(object_, ReadOnlyRoots(serializer_->isolate()).meta_map());
+ if (map == *object_) {
+ DCHECK_EQ(*object_, ReadOnlyRoots(isolate()).meta_map());
DCHECK_EQ(space, SnapshotSpace::kReadOnlyHeap);
sink_->Put(kNewMetaMap, "NewMetaMap");
DCHECK_EQ(size, Map::kSize);
- back_reference = serializer_->allocator()->Allocate(space, size);
} else {
sink_->Put(NewObject::Encode(space), "NewObject");
@@ -371,133 +393,157 @@ void Serializer::ObjectSerializer::SerializePrologue(SnapshotSpace space,
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
// Until the space for the object is allocated, it is considered "pending".
- auto pending_object_ref = serializer_->RegisterObjectIsPending(object_);
+ serializer_->RegisterObjectIsPending(object_);
// Serialize map (first word of the object) before anything else, so that
// the deserializer can access it when allocating. Make sure that the map
// isn't a pending object.
- DCHECK_EQ(serializer_->forward_refs_per_pending_object_.count(map), 0);
+ DCHECK_NULL(serializer_->forward_refs_per_pending_object_.Find(map));
DCHECK(map.IsMap());
- serializer_->SerializeObject(map);
+ serializer_->SerializeObject(handle(map, isolate()));
// Make sure the map serialization didn't accidentally recursively serialize
// this object.
- DCHECK(!serializer_->reference_map()
- ->LookupReference(reinterpret_cast<void*>(object_.ptr()))
- .is_valid());
-
- // Allocate the object after the map is serialized.
- if (space == SnapshotSpace::kLargeObject) {
- CHECK(!object_.IsCode());
- back_reference = serializer_->allocator()->AllocateLargeObject(size);
- } else if (space == SnapshotSpace::kMap) {
- back_reference = serializer_->allocator()->AllocateMap();
- DCHECK_EQ(Map::kSize, size);
- } else {
- int fill = serializer_->PutAlignmentPrefix(object_);
- back_reference = serializer_->allocator()->Allocate(space, size + fill);
- }
+ DCHECK_IMPLIES(
+ *object_ != ReadOnlyRoots(isolate()).not_mapped_symbol(),
+ serializer_->reference_map()->LookupReference(object_) == nullptr);
// Now that the object is allocated, we can resolve pending references to
// it.
- serializer_->ResolvePendingObject(pending_object_ref);
+ serializer_->ResolvePendingObject(object_);
}
-#ifdef OBJECT_PRINT
if (FLAG_serialization_statistics) {
- serializer_->CountInstanceType(map, size, space);
+ serializer_->CountAllocation(object_->map(), size, space);
}
-#endif // OBJECT_PRINT
- // Mark this object as already serialized.
- serializer_->reference_map()->Add(reinterpret_cast<void*>(object_.ptr()),
- back_reference);
+ // Mark this object as already serialized, and add it to the reference map so
+ // that it can be accessed by backreference by future objects.
+ serializer_->num_back_refs_++;
+#ifdef DEBUG
+ serializer_->back_refs_.Push(*object_);
+ DCHECK_EQ(serializer_->back_refs_.size(), serializer_->num_back_refs_);
+#endif
+ if (*object_ != ReadOnlyRoots(isolate()).not_mapped_symbol()) {
+ // Only add the object to the map if it's not not_mapped_symbol, else
+ // the reference IdentityMap has issues. We don't expect to have back
+ // references to the not_mapped_symbol anyway, so it's fine.
+ SerializerReference back_reference =
+ SerializerReference::BackReference(serializer_->num_back_refs_ - 1);
+ serializer_->reference_map()->Add(*object_, back_reference);
+ DCHECK_EQ(*object_,
+ *serializer_->back_refs_[back_reference.back_ref_index()]);
+ DCHECK_EQ(back_reference.back_ref_index(), serializer_->reference_map()
+ ->LookupReference(object_)
+ ->back_ref_index());
+ }
}
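
With the allocator removed, a back reference is now nothing more than the object's position in serialization order (num_back_refs_ - 1 at registration time). A toy model of the scheme, with standard containers standing in for V8's SerializerReferenceMap:

    // Toy model: each fully serialized object gets the next index in
    // serialization order; later occurrences are emitted as that index
    // instead of being serialized again.
    #include <cstdint>
    #include <unordered_map>

    struct BackRefTable {
      std::unordered_map<const void*, uint32_t> index_of;
      uint32_t next = 0;

      // True if |obj| was serialized before; *out then holds its back-ref.
      bool TryGetBackRef(const void* obj, uint32_t* out) {
        auto it = index_of.find(obj);
        if (it == index_of.end()) return false;
        *out = it->second;
        return true;
      }

      // Called once an object has been fully emitted to the sink.
      void Register(const void* obj) { index_of[obj] = next++; }
    };
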
uint32_t Serializer::ObjectSerializer::SerializeBackingStore(
void* backing_store, int32_t byte_length) {
- SerializerReference reference =
- serializer_->reference_map()->LookupReference(backing_store);
+ const SerializerReference* reference_ptr =
+ serializer_->reference_map()->LookupBackingStore(backing_store);
// Serialize the off-heap backing store.
- if (!reference.is_valid()) {
+ if (!reference_ptr) {
sink_->Put(kOffHeapBackingStore, "Off-heap backing store");
sink_->PutInt(byte_length, "length");
sink_->PutRaw(static_cast<byte*>(backing_store), byte_length,
"BackingStore");
- reference = serializer_->allocator()->AllocateOffHeapBackingStore();
+ DCHECK_NE(0, serializer_->seen_backing_stores_index_);
+ SerializerReference reference =
+ SerializerReference::OffHeapBackingStoreReference(
+ serializer_->seen_backing_stores_index_++);
// Mark this backing store as already serialized.
- serializer_->reference_map()->Add(backing_store, reference);
+ serializer_->reference_map()->AddBackingStore(backing_store, reference);
+ return reference.off_heap_backing_store_index();
+ } else {
+ return reference_ptr->off_heap_backing_store_index();
}
-
- return reference.off_heap_backing_store_index();
}
void Serializer::ObjectSerializer::SerializeJSTypedArray() {
- JSTypedArray typed_array = JSTypedArray::cast(object_);
- if (typed_array.is_on_heap()) {
- typed_array.RemoveExternalPointerCompensationForSerialization(
- serializer_->isolate());
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object_);
+ if (typed_array->is_on_heap()) {
+ typed_array->RemoveExternalPointerCompensationForSerialization(isolate());
} else {
- if (!typed_array.WasDetached()) {
+ if (!typed_array->WasDetached()) {
// Explicitly serialize the backing store now.
- JSArrayBuffer buffer = JSArrayBuffer::cast(typed_array.buffer());
+ JSArrayBuffer buffer = JSArrayBuffer::cast(typed_array->buffer());
// We cannot store byte_length larger than int32 range in the snapshot.
CHECK_LE(buffer.byte_length(), std::numeric_limits<int32_t>::max());
int32_t byte_length = static_cast<int32_t>(buffer.byte_length());
- size_t byte_offset = typed_array.byte_offset();
+ size_t byte_offset = typed_array->byte_offset();
// We need to calculate the backing store from the data pointer
// because the ArrayBuffer may already have been serialized.
void* backing_store = reinterpret_cast<void*>(
- reinterpret_cast<Address>(typed_array.DataPtr()) - byte_offset);
+ reinterpret_cast<Address>(typed_array->DataPtr()) - byte_offset);
uint32_t ref = SerializeBackingStore(backing_store, byte_length);
- typed_array.SetExternalBackingStoreRefForSerialization(ref);
+ typed_array->SetExternalBackingStoreRefForSerialization(ref);
} else {
- typed_array.SetExternalBackingStoreRefForSerialization(0);
+ typed_array->SetExternalBackingStoreRefForSerialization(0);
}
}
SerializeObject();
}
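
The backing-store recovery above hinges on one invariant: a typed array's data pointer is byte_offset bytes past the start of its buffer's backing store. Stated as a self-contained helper (illustrative, not a V8 API):

    // The invariant in isolation: DataPtr points byte_offset bytes into the
    // buffer's backing store, so the store's base is DataPtr - byte_offset.
    #include <cstddef>
    #include <cstdint>

    void* BackingStoreBase(void* data_ptr, size_t byte_offset) {
      return reinterpret_cast<void*>(
          reinterpret_cast<uintptr_t>(data_ptr) - byte_offset);
    }
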
void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
- JSArrayBuffer buffer = JSArrayBuffer::cast(object_);
- void* backing_store = buffer.backing_store();
+ Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(object_);
+ void* backing_store = buffer->backing_store();
// We cannot store byte_length larger than int32 range in the snapshot.
- CHECK_LE(buffer.byte_length(), std::numeric_limits<int32_t>::max());
- int32_t byte_length = static_cast<int32_t>(buffer.byte_length());
- ArrayBufferExtension* extension = buffer.extension();
+ CHECK_LE(buffer->byte_length(), std::numeric_limits<int32_t>::max());
+ int32_t byte_length = static_cast<int32_t>(buffer->byte_length());
+ ArrayBufferExtension* extension = buffer->extension();
// The embedder-allocated backing store only exists for the off-heap case.
+#ifdef V8_HEAP_SANDBOX
+ uint32_t external_pointer_entry =
+ buffer->GetBackingStoreRefForDeserialization();
+#endif
if (backing_store != nullptr) {
uint32_t ref = SerializeBackingStore(backing_store, byte_length);
- buffer.SetBackingStoreRefForSerialization(ref);
+ buffer->SetBackingStoreRefForSerialization(ref);
// Ensure deterministic output by setting extension to null during
// serialization.
- buffer.set_extension(nullptr);
+ buffer->set_extension(nullptr);
+ } else {
+ buffer->SetBackingStoreRefForSerialization(kNullRefSentinel);
}
SerializeObject();
- buffer.set_backing_store(serializer_->isolate(), backing_store);
- buffer.set_extension(extension);
+#ifdef V8_HEAP_SANDBOX
+ buffer->SetBackingStoreRefForSerialization(external_pointer_entry);
+#else
+ buffer->set_backing_store(isolate(), backing_store);
+#endif
+ buffer->set_extension(extension);
}
void Serializer::ObjectSerializer::SerializeExternalString() {
// For external strings with known resources, we replace the resource field
// with the encoded external reference, which we restore upon deserialize.
// For the rest we serialize them to look like ordinary sequential strings.
- ExternalString string = ExternalString::cast(object_);
- Address resource = string.resource_as_address();
+ Handle<ExternalString> string = Handle<ExternalString>::cast(object_);
+ Address resource = string->resource_as_address();
ExternalReferenceEncoder::Value reference;
if (serializer_->external_reference_encoder_.TryEncode(resource).To(
&reference)) {
DCHECK(reference.is_from_api());
- string.set_uint32_as_resource(serializer_->isolate(), reference.index());
+#ifdef V8_HEAP_SANDBOX
+ uint32_t external_pointer_entry =
+ string->GetResourceRefForDeserialization();
+#endif
+ string->SetResourceRefForSerialization(reference.index());
SerializeObject();
- string.set_address_as_resource(serializer_->isolate(), resource);
+#ifdef V8_HEAP_SANDBOX
+ string->SetResourceRefForSerialization(external_pointer_entry);
+#else
+ string->set_address_as_resource(isolate(), resource);
+#endif
} else {
SerializeExternalStringAsSequentialString();
}
@@ -506,46 +552,45 @@ void Serializer::ObjectSerializer::SerializeExternalString() {
void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
// Instead of serializing this as an external string, we serialize
// an imaginary sequential string with the same content.
- ReadOnlyRoots roots(serializer_->isolate());
- DCHECK(object_.IsExternalString());
- ExternalString string = ExternalString::cast(object_);
- int length = string.length();
+ ReadOnlyRoots roots(isolate());
+ DCHECK(object_->IsExternalString());
+ Handle<ExternalString> string = Handle<ExternalString>::cast(object_);
+ int length = string->length();
Map map;
int content_size;
int allocation_size;
const byte* resource;
// Find the map and size for the imaginary sequential string.
- bool internalized = object_.IsInternalizedString();
- if (object_.IsExternalOneByteString()) {
+ bool internalized = object_->IsInternalizedString();
+ if (object_->IsExternalOneByteString()) {
map = internalized ? roots.one_byte_internalized_string_map()
: roots.one_byte_string_map();
allocation_size = SeqOneByteString::SizeFor(length);
content_size = length * kCharSize;
resource = reinterpret_cast<const byte*>(
- ExternalOneByteString::cast(string).resource()->data());
+ Handle<ExternalOneByteString>::cast(string)->resource()->data());
} else {
map = internalized ? roots.internalized_string_map() : roots.string_map();
allocation_size = SeqTwoByteString::SizeFor(length);
content_size = length * kShortSize;
resource = reinterpret_cast<const byte*>(
- ExternalTwoByteString::cast(string).resource()->data());
+ Handle<ExternalTwoByteString>::cast(string)->resource()->data());
}
- SnapshotSpace space = (allocation_size > kMaxRegularHeapObjectSize)
- ? SnapshotSpace::kLargeObject
- : SnapshotSpace::kOld;
+ SnapshotSpace space = SnapshotSpace::kOld;
SerializePrologue(space, allocation_size, map);
// Output the rest of the imaginary string.
int bytes_to_output = allocation_size - HeapObject::kHeaderSize;
DCHECK(IsAligned(bytes_to_output, kTaggedSize));
+ int slots_to_output = bytes_to_output >> kTaggedSizeLog2;
// Output raw data header. Do not bother with common raw length cases here.
sink_->Put(kVariableRawData, "RawDataForString");
- sink_->PutInt(bytes_to_output, "length");
+ sink_->PutInt(slots_to_output, "length");
// Serialize string header (except for map).
- byte* string_start = reinterpret_cast<byte*>(string.address());
+ byte* string_start = reinterpret_cast<byte*>(string->address());
for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
sink_->Put(string_start[i], "StringHeader");
}
@@ -565,26 +610,27 @@ void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
// TODO(all): replace this with proper iteration of weak slots in serializer.
class UnlinkWeakNextScope {
public:
- explicit UnlinkWeakNextScope(Heap* heap, HeapObject object) {
- if (object.IsAllocationSite() &&
- AllocationSite::cast(object).HasWeakNext()) {
+ explicit UnlinkWeakNextScope(Heap* heap, Handle<HeapObject> object) {
+ if (object->IsAllocationSite() &&
+ Handle<AllocationSite>::cast(object)->HasWeakNext()) {
object_ = object;
- next_ = AllocationSite::cast(object).weak_next();
- AllocationSite::cast(object).set_weak_next(
+ next_ =
+ handle(AllocationSite::cast(*object).weak_next(), heap->isolate());
+ Handle<AllocationSite>::cast(object)->set_weak_next(
ReadOnlyRoots(heap).undefined_value());
}
}
~UnlinkWeakNextScope() {
if (!object_.is_null()) {
- AllocationSite::cast(object_).set_weak_next(next_,
- UPDATE_WEAK_WRITE_BARRIER);
+ Handle<AllocationSite>::cast(object_)->set_weak_next(
+ *next_, UPDATE_WEAK_WRITE_BARRIER);
}
}
private:
- HeapObject object_;
- Object next_;
+ Handle<HeapObject> object_;
+ Handle<Object> next_;
DISALLOW_HEAP_ALLOCATION(no_gc_)
};
@@ -593,103 +639,120 @@ void Serializer::ObjectSerializer::Serialize() {
// Defer objects as "pending" if they cannot be serialized now, or if we
// exceed a certain recursion depth. Some objects cannot be deferred
- if ((recursion.ExceedsMaximum() && CanBeDeferred(object_)) ||
- serializer_->MustBeDeferred(object_)) {
- DCHECK(CanBeDeferred(object_));
+ if ((recursion.ExceedsMaximum() && CanBeDeferred(*object_)) ||
+ serializer_->MustBeDeferred(*object_)) {
+ DCHECK(CanBeDeferred(*object_));
if (FLAG_trace_serializer) {
PrintF(" Deferring heap object: ");
- object_.ShortPrint();
+ object_->ShortPrint();
PrintF("\n");
}
// Deferred objects are considered "pending".
- PendingObjectReference pending_obj =
- serializer_->RegisterObjectIsPending(object_);
- serializer_->PutPendingForwardReferenceTo(pending_obj);
+ serializer_->RegisterObjectIsPending(object_);
+ serializer_->PutPendingForwardReference(
+ *serializer_->forward_refs_per_pending_object_.Find(object_));
serializer_->QueueDeferredObject(object_);
return;
}
if (FLAG_trace_serializer) {
PrintF(" Encoding heap object: ");
- object_.ShortPrint();
+ object_->ShortPrint();
PrintF("\n");
}
- if (object_.IsExternalString()) {
+ if (object_->IsExternalString()) {
SerializeExternalString();
return;
- } else if (!ReadOnlyHeap::Contains(object_)) {
+ } else if (!ReadOnlyHeap::Contains(*object_)) {
// Only clear padding for strings outside the read-only heap. Read-only heap
// should have been cleared elsewhere.
- if (object_.IsSeqOneByteString()) {
+ if (object_->IsSeqOneByteString()) {
// Clear padding bytes at the end. Done here to avoid having to do this
// at allocation sites in generated code.
- SeqOneByteString::cast(object_).clear_padding();
- } else if (object_.IsSeqTwoByteString()) {
- SeqTwoByteString::cast(object_).clear_padding();
+ Handle<SeqOneByteString>::cast(object_)->clear_padding();
+ } else if (object_->IsSeqTwoByteString()) {
+ Handle<SeqTwoByteString>::cast(object_)->clear_padding();
}
}
- if (object_.IsJSTypedArray()) {
+ if (object_->IsJSTypedArray()) {
SerializeJSTypedArray();
return;
- }
- if (object_.IsJSArrayBuffer()) {
+ } else if (object_->IsJSArrayBuffer()) {
SerializeJSArrayBuffer();
return;
}
// We don't expect fillers.
- DCHECK(!object_.IsFreeSpaceOrFiller());
+ DCHECK(!object_->IsFreeSpaceOrFiller());
- if (object_.IsScript()) {
+ if (object_->IsScript()) {
// Clear cached line ends.
- Oddball undefined = ReadOnlyRoots(serializer_->isolate()).undefined_value();
- Script::cast(object_).set_line_ends(undefined);
+ Oddball undefined = ReadOnlyRoots(isolate()).undefined_value();
+ Handle<Script>::cast(object_)->set_line_ends(undefined);
}
SerializeObject();
}
namespace {
-SnapshotSpace GetSnapshotSpace(HeapObject object) {
+SnapshotSpace GetSnapshotSpace(Handle<HeapObject> object) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- if (third_party_heap::Heap::InCodeSpace(object.address())) {
+ if (object->IsCode()) {
return SnapshotSpace::kCode;
- } else if (ReadOnlyHeap::Contains(object)) {
+ } else if (ReadOnlyHeap::Contains(*object)) {
return SnapshotSpace::kReadOnlyHeap;
- } else if (object.Size() > kMaxRegularHeapObjectSize) {
- return SnapshotSpace::kLargeObject;
- } else if (object.IsMap()) {
+ } else if (object->IsMap()) {
return SnapshotSpace::kMap;
} else {
- return SnapshotSpace::kOld; // avoid new/young distinction in TPH
+ return SnapshotSpace::kOld;
}
- } else if (ReadOnlyHeap::Contains(object)) {
+ } else if (ReadOnlyHeap::Contains(*object)) {
return SnapshotSpace::kReadOnlyHeap;
} else {
AllocationSpace heap_space =
- MemoryChunk::FromHeapObject(object)->owner_identity();
+ MemoryChunk::FromHeapObject(*object)->owner_identity();
// Large code objects are not supported and cannot be expressed by
// SnapshotSpace.
DCHECK_NE(heap_space, CODE_LO_SPACE);
switch (heap_space) {
+ case OLD_SPACE:
// Young generation objects are tenured, as objects that have survived
// until snapshot building probably deserve to be considered 'old'.
case NEW_SPACE:
- return SnapshotSpace::kOld;
+ // Large objects (young and old) are encoded as simply 'old' snapshot
+      // objects, as "normal" objects vs. large objects is a heap implementation
+ // detail and isn't relevant to the snapshot.
case NEW_LO_SPACE:
- return SnapshotSpace::kLargeObject;
-
- default:
- return static_cast<SnapshotSpace>(heap_space);
+ case LO_SPACE:
+ return SnapshotSpace::kOld;
+ case CODE_SPACE:
+ return SnapshotSpace::kCode;
+ case MAP_SPACE:
+ return SnapshotSpace::kMap;
+ case CODE_LO_SPACE:
+ case RO_SPACE:
+ UNREACHABLE();
}
}
}
} // namespace
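
The rewritten GetSnapshotSpace collapses young and large-object spaces into the old snapshot space and makes the remaining mapping explicit. A standalone sketch of the mapping with stand-in enums (not V8's AllocationSpace/SnapshotSpace):

    // Stand-in enums, not V8's AllocationSpace/SnapshotSpace.
    enum class HeapSpace { kNew, kNewLarge, kOld, kLarge, kCode, kMap };
    enum class SnapSpace { kOld, kCode, kMap, kReadOnly };

    SnapSpace ToSnapshotSpace(HeapSpace space) {
      switch (space) {
        case HeapSpace::kCode:
          return SnapSpace::kCode;
        case HeapSpace::kMap:
          return SnapSpace::kMap;
        case HeapSpace::kNew:       // survivors are tenured into the snapshot
        case HeapSpace::kNewLarge:  // large-vs-regular is a heap detail the
        case HeapSpace::kLarge:     // snapshot no longer encodes
        case HeapSpace::kOld:
        default:
          return SnapSpace::kOld;
      }
    }
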
void Serializer::ObjectSerializer::SerializeObject() {
- int size = object_.Size();
- Map map = object_.map();
+ int size = object_->Size();
+ Map map = object_->map();
+
+  // Descriptor arrays have complex element weakness that depends on the maps
+  // pointing to them. During deserialization, this can cause them to get
+  // prematurely trimmed when one of their owners isn't deserialized yet. We work
+ // around this by forcing all descriptor arrays to be serialized as "strong",
+ // i.e. no custom weakness, and "re-weaken" them in the deserializer once
+ // deserialization completes.
+ //
+ // See also `Deserializer::WeakenDescriptorArrays`.
+ if (map == ReadOnlyRoots(isolate()).descriptor_array_map()) {
+ map = ReadOnlyRoots(isolate()).strong_descriptor_array_map();
+ }
SnapshotSpace space = GetSnapshotSpace(object_);
SerializePrologue(space, size, map);
@@ -701,14 +764,13 @@ void Serializer::ObjectSerializer::SerializeObject() {
}
void Serializer::ObjectSerializer::SerializeDeferred() {
- SerializerReference back_reference =
- serializer_->reference_map()->LookupReference(
- reinterpret_cast<void*>(object_.ptr()));
+ const SerializerReference* back_reference =
+ serializer_->reference_map()->LookupReference(object_);
- if (back_reference.is_valid()) {
+ if (back_reference != nullptr) {
if (FLAG_trace_serializer) {
PrintF(" Deferred heap object ");
- object_.ShortPrint();
+ object_->ShortPrint();
PrintF(" was already serialized\n");
}
return;
@@ -721,17 +783,15 @@ void Serializer::ObjectSerializer::SerializeDeferred() {
}
void Serializer::ObjectSerializer::SerializeContent(Map map, int size) {
- UnlinkWeakNextScope unlink_weak_next(serializer_->isolate()->heap(), object_);
- if (object_.IsCode()) {
- // For code objects, output raw bytes first.
- OutputCode(size);
- // Then iterate references via reloc info.
- object_.IterateBody(map, size, this);
+ UnlinkWeakNextScope unlink_weak_next(isolate()->heap(), object_);
+ if (object_->IsCode()) {
+ // For code objects, perform a custom serialization.
+ SerializeCode(map, size);
} else {
// For other objects, iterate references first.
- object_.IterateBody(map, size, this);
+ object_->IterateBody(map, size, this);
// Then output data payload, if any.
- OutputRawData(object_.address() + size);
+ OutputRawData(object_->address() + size);
}
}
@@ -744,6 +804,7 @@ void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
MaybeObjectSlot start,
MaybeObjectSlot end) {
+ HandleScope scope(isolate());
DisallowGarbageCollection no_gc;
MaybeObjectSlot current = start;
@@ -771,7 +832,8 @@ void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
sink_->Put(kWeakPrefix, "WeakReference");
}
- if (serializer_->SerializePendingObject(current_contents)) {
+ Handle<HeapObject> obj = handle(current_contents, isolate());
+ if (serializer_->SerializePendingObject(obj)) {
bytes_processed_so_far_ += kTaggedSize;
++current;
continue;
@@ -783,12 +845,11 @@ void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
// immortal immovable root members.
MaybeObjectSlot repeat_end = current + 1;
if (repeat_end < end &&
- serializer_->root_index_map()->Lookup(current_contents,
- &root_index) &&
+ serializer_->root_index_map()->Lookup(*obj, &root_index) &&
RootsTable::IsImmortalImmovable(root_index) &&
*current == *repeat_end) {
DCHECK_EQ(reference_type, HeapObjectReferenceType::STRONG);
- DCHECK(!Heap::InYoungGeneration(current_contents));
+ DCHECK(!Heap::InYoungGeneration(*obj));
while (repeat_end < end && *repeat_end == *current) {
repeat_end++;
}
@@ -801,18 +862,11 @@ void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
++current;
}
// Now write the object itself.
- serializer_->SerializeObject(current_contents);
+ serializer_->SerializeObject(obj);
}
}
}
-void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code host,
- RelocInfo* rinfo) {
- Object object = rinfo->target_object();
- serializer_->SerializeObject(HeapObject::cast(object));
- bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
void Serializer::ObjectSerializer::OutputExternalReference(Address target,
int target_size,
bool sandboxify) {
@@ -834,7 +888,7 @@ void Serializer::ObjectSerializer::OutputExternalReference(Address target,
// serialization and deserialization. We can serialize seen external
// references verbatim.
CHECK(serializer_->allow_unknown_external_references_for_testing());
- CHECK(IsAligned(target_size, kObjectAlignment));
+ CHECK(IsAligned(target_size, kTaggedSize));
CHECK_LE(target_size, kFixedRawDataCount * kTaggedSize);
int size_in_tagged = target_size >> kTaggedSizeLog2;
sink_->Put(FixedRawDataWithSize::Encode(size_in_tagged), "FixedRawData");
@@ -854,13 +908,56 @@ void Serializer::ObjectSerializer::OutputExternalReference(Address target,
}
sink_->PutInt(encoded_reference.index(), "reference index");
}
- bytes_processed_so_far_ += target_size;
}
void Serializer::ObjectSerializer::VisitExternalReference(Foreign host,
Address* p) {
// "Sandboxify" external reference.
OutputExternalReference(host.foreign_address(), kExternalPointerSize, true);
+ bytes_processed_so_far_ += kExternalPointerSize;
+}
+
+class Serializer::ObjectSerializer::RelocInfoObjectPreSerializer {
+ public:
+ explicit RelocInfoObjectPreSerializer(Serializer* serializer)
+ : serializer_(serializer) {}
+
+ void VisitEmbeddedPointer(Code host, RelocInfo* target) {
+ Object object = target->target_object();
+ serializer_->SerializeObject(handle(HeapObject::cast(object), isolate()));
+ num_serialized_objects_++;
+ }
+ void VisitCodeTarget(Code host, RelocInfo* target) {
+#ifdef V8_TARGET_ARCH_ARM
+ DCHECK(!RelocInfo::IsRelativeCodeTarget(target->rmode()));
+#endif
+ Code object = Code::GetCodeFromTargetAddress(target->target_address());
+ serializer_->SerializeObject(handle(object, isolate()));
+ num_serialized_objects_++;
+ }
+
+ void VisitExternalReference(Code host, RelocInfo* rinfo) {}
+ void VisitInternalReference(Code host, RelocInfo* rinfo) {}
+ void VisitRuntimeEntry(Code host, RelocInfo* reloc) { UNREACHABLE(); }
+ void VisitOffHeapTarget(Code host, RelocInfo* target) {}
+
+ int num_serialized_objects() const { return num_serialized_objects_; }
+
+ Isolate* isolate() { return serializer_->isolate(); }
+
+ private:
+ Serializer* serializer_;
+ int num_serialized_objects_ = 0;
+};
+
+void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code host,
+ RelocInfo* rinfo) {
+ // Target object should be pre-serialized by RelocInfoObjectPreSerializer, so
+ // just track the pointer's existence as kTaggedSize in
+ // bytes_processed_so_far_.
+ // TODO(leszeks): DCHECK that RelocInfoObjectPreSerializer serialized this
+ // specific object already.
+ bytes_processed_so_far_ += kTaggedSize;
}
void Serializer::ObjectSerializer::VisitExternalReference(Code host,
@@ -875,10 +972,14 @@ void Serializer::ObjectSerializer::VisitExternalReference(Code host,
void Serializer::ObjectSerializer::VisitInternalReference(Code host,
RelocInfo* rinfo) {
- Address entry = Code::cast(object_).entry();
+ Address entry = Handle<Code>::cast(object_)->entry();
DCHECK_GE(rinfo->target_internal_reference(), entry);
uintptr_t target_offset = rinfo->target_internal_reference() - entry;
- DCHECK_LE(target_offset, Code::cast(object_).raw_instruction_size());
+ // TODO(jgruber,v8:11036): We are being permissive for this DCHECK, but
+ // consider using raw_instruction_size() instead of raw_body_size() in the
+ // future.
+ STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
+ DCHECK_LE(target_offset, Handle<Code>::cast(object_)->raw_body_size());
sink_->Put(kInternalReference, "InternalRef");
sink_->PutInt(target_offset, "internal ref value");
}
@@ -896,22 +997,21 @@ void Serializer::ObjectSerializer::VisitOffHeapTarget(Code host,
Address addr = rinfo->target_off_heap_target();
CHECK_NE(kNullAddress, addr);
- Code target = InstructionStream::TryLookupCode(serializer_->isolate(), addr);
+ Code target = InstructionStream::TryLookupCode(isolate(), addr);
CHECK(Builtins::IsIsolateIndependentBuiltin(target));
sink_->Put(kOffHeapTarget, "OffHeapTarget");
sink_->PutInt(target.builtin_index(), "builtin index");
- bytes_processed_so_far_ += rinfo->target_address_size();
}
void Serializer::ObjectSerializer::VisitCodeTarget(Code host,
RelocInfo* rinfo) {
-#ifdef V8_TARGET_ARCH_ARM
- DCHECK(!RelocInfo::IsRelativeCodeTarget(rinfo->rmode()));
-#endif
- Code object = Code::GetCodeFromTargetAddress(rinfo->target_address());
- serializer_->SerializeObject(object);
- bytes_processed_so_far_ += rinfo->target_address_size();
+ // Target object should be pre-serialized by RelocInfoObjectPreSerializer, so
+ // just track the pointer's existence as kTaggedSize in
+ // bytes_processed_so_far_.
+ // TODO(leszeks): DCHECK that RelocInfoObjectPreSerializer serialized this
+ // specific object already.
+ bytes_processed_so_far_ += kTaggedSize;
}
namespace {
@@ -940,35 +1040,36 @@ void OutputRawWithCustomField(SnapshotByteSink* sink, Address object_start,
} // anonymous namespace
void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
- Address object_start = object_.address();
+ Address object_start = object_->address();
int base = bytes_processed_so_far_;
int up_to_offset = static_cast<int>(up_to - object_start);
int to_skip = up_to_offset - bytes_processed_so_far_;
int bytes_to_output = to_skip;
+ DCHECK(IsAligned(bytes_to_output, kTaggedSize));
+ int tagged_to_output = bytes_to_output / kTaggedSize;
bytes_processed_so_far_ += to_skip;
DCHECK_GE(to_skip, 0);
if (bytes_to_output != 0) {
DCHECK(to_skip == bytes_to_output);
- if (IsAligned(bytes_to_output, kObjectAlignment) &&
- bytes_to_output <= kFixedRawDataCount * kTaggedSize) {
- int size_in_tagged = bytes_to_output >> kTaggedSizeLog2;
- sink_->Put(FixedRawDataWithSize::Encode(size_in_tagged), "FixedRawData");
+ if (tagged_to_output <= kFixedRawDataCount) {
+ sink_->Put(FixedRawDataWithSize::Encode(tagged_to_output),
+ "FixedRawData");
} else {
sink_->Put(kVariableRawData, "VariableRawData");
- sink_->PutInt(bytes_to_output, "length");
+ sink_->PutInt(tagged_to_output, "length");
}
#ifdef MEMORY_SANITIZER
// Check that we do not serialize uninitialized memory.
__msan_check_mem_is_initialized(
reinterpret_cast<void*>(object_start + base), bytes_to_output);
#endif // MEMORY_SANITIZER
- if (object_.IsBytecodeArray()) {
+ if (object_->IsBytecodeArray()) {
// The bytecode age field can be changed by GC concurrently.
byte field_value = BytecodeArray::kNoAgeBytecodeAge;
OutputRawWithCustomField(sink_, object_start, base, bytes_to_output,
BytecodeArray::kBytecodeAgeOffset,
sizeof(field_value), &field_value);
- } else if (object_.IsDescriptorArray()) {
+ } else if (object_->IsDescriptorArray()) {
// The number of marked descriptors field can be changed by GC
// concurrently.
byte field_value[2];
@@ -985,26 +1086,30 @@ void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
}
}
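
OutputRawData now measures lengths in tagged slots rather than bytes: short runs fit into a single FixedRawDataWithSize bytecode, longer ones use kVariableRawData plus an explicit length. A compact sketch of that decision; the cutoff value here is assumed, not V8's, and printf stands in for the sink:

    #include <cstdio>

    constexpr int kTaggedSize = 8;          // 64-bit, no pointer compression
    constexpr int kFixedRawDataCount = 32;  // assumed cutoff, not V8's value

    void EmitRawLength(int bytes_to_output) {
      // Callers guarantee tagged alignment (the diff DCHECKs this).
      int tagged_to_output = bytes_to_output / kTaggedSize;
      if (tagged_to_output <= kFixedRawDataCount) {
        // One bytecode encodes both "raw data follows" and the slot count.
        std::printf("FixedRawDataWithSize(%d)\n", tagged_to_output);
      } else {
        // Otherwise the slot count is emitted as a separate varint.
        std::printf("kVariableRawData length=%d\n", tagged_to_output);
      }
    }
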
-void Serializer::ObjectSerializer::OutputCode(int size) {
- DCHECK_EQ(kTaggedSize, bytes_processed_so_far_);
- Code on_heap_code = Code::cast(object_);
- // To make snapshots reproducible, we make a copy of the code object
- // and wipe all pointers in the copy, which we then serialize.
- Code off_heap_code = serializer_->CopyCode(on_heap_code);
- int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
- RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+void Serializer::ObjectSerializer::SerializeCode(Map map, int size) {
+ static const int kWipeOutModeMask =
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+ RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+ DCHECK_EQ(HeapObject::kHeaderSize, bytes_processed_so_far_);
+ Handle<Code> on_heap_code = Handle<Code>::cast(object_);
+
// With enabled pointer compression normal accessors no longer work for
// off-heap objects, so we have to get the relocation info data via the
// on-heap code object.
- ByteArray relocation_info = on_heap_code.unchecked_relocation_info();
- for (RelocIterator it(off_heap_code, relocation_info, mode_mask); !it.done();
- it.next()) {
+ ByteArray relocation_info = on_heap_code->unchecked_relocation_info();
+
+ // To make snapshots reproducible, we make a copy of the code object
+ // and wipe all pointers in the copy, which we then serialize.
+ Code off_heap_code = serializer_->CopyCode(*on_heap_code);
+ for (RelocIterator it(off_heap_code, relocation_info, kWipeOutModeMask);
+ !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
rinfo->WipeOut();
}
@@ -1012,12 +1117,18 @@ void Serializer::ObjectSerializer::OutputCode(int size) {
// relocations, because some of these fields are needed for the latter.
off_heap_code.WipeOutHeader();
+ // Initially skip serializing the code header. We'll serialize it after the
+ // Code body, so that the various fields the Code needs for iteration are
+ // already valid.
+ sink_->Put(kCodeBody, "kCodeBody");
+
+ // Now serialize the wiped off-heap Code, as length + data.
Address start = off_heap_code.address() + Code::kDataStart;
int bytes_to_output = size - Code::kDataStart;
DCHECK(IsAligned(bytes_to_output, kTaggedSize));
+ int tagged_to_output = bytes_to_output / kTaggedSize;
- sink_->Put(kVariableRawCode, "VariableRawCode");
- sink_->PutInt(bytes_to_output, "length");
+ sink_->PutInt(tagged_to_output, "length");
#ifdef MEMORY_SANITIZER
// Check that we do not serialize uninitialized memory.
@@ -1025,6 +1136,59 @@ void Serializer::ObjectSerializer::OutputCode(int size) {
bytes_to_output);
#endif // MEMORY_SANITIZER
sink_->PutRaw(reinterpret_cast<byte*>(start), bytes_to_output, "Code");
+
+ // Manually serialize the code header. We don't use Code::BodyDescriptor
+ // here as we don't yet want to walk the RelocInfos.
+ DCHECK_EQ(HeapObject::kHeaderSize, bytes_processed_so_far_);
+ VisitPointers(*on_heap_code, on_heap_code->RawField(HeapObject::kHeaderSize),
+ on_heap_code->RawField(Code::kDataStart));
+ DCHECK_EQ(bytes_processed_so_far_, Code::kDataStart);
+
+  // Now serialize RelocInfos. We can't allocate during a RelocInfo walk at
+  // deserialization time, so we have two passes for RelocInfo serialization:
+  //  1. A pre-serializer which serializes all allocatable objects in the
+  //     RelocInfo, followed by a kSynchronize bytecode, and
+  //  2. A walk over the RelocInfo with this serializer, serializing any
+  //     objects implicitly as offsets into the pre-serializer's object array.
+  // This way, the deserializer can deserialize the allocatable objects first,
+  // without walking RelocInfo, rebuild the pre-serializer's object array, and
+  // only then walk the RelocInfo itself.
+ // TODO(leszeks): We only really need to pre-serialize objects which need
+ // serialization, i.e. no backrefs or roots.
+ RelocInfoObjectPreSerializer pre_serializer(serializer_);
+ for (RelocIterator it(*on_heap_code, relocation_info,
+ Code::BodyDescriptor::kRelocModeMask);
+ !it.done(); it.next()) {
+ it.rinfo()->Visit(&pre_serializer);
+ }
+ // Mark that the pre-serialization finished with a kSynchronize bytecode.
+ sink_->Put(kSynchronize, "PreSerializationFinished");
+
+ // Finally serialize all RelocInfo objects in the on-heap Code, knowing that
+ // we will not do a recursive serialization.
+ // TODO(leszeks): Add a scope that DCHECKs this.
+ for (RelocIterator it(*on_heap_code, relocation_info,
+ Code::BodyDescriptor::kRelocModeMask);
+ !it.done(); it.next()) {
+ it.rinfo()->Visit(this);
+ }
+
+ // We record a kTaggedSize for every object encountered during the
+ // serialization, so DCHECK that bytes_processed_so_far_ matches the expected
+ // number of bytes (i.e. the code header + a tagged size per pre-serialized
+ // object).
+ DCHECK_EQ(
+ bytes_processed_so_far_,
+ Code::kDataStart + kTaggedSize * pre_serializer.num_serialized_objects());
+}
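
The two-pass RelocInfo scheme described above can be reduced to the following shape: pass one serializes every referenced object and ends with a synchronization marker, pass two re-walks the same references and emits them as indices into the pass-one array. All types below are toy stand-ins for RelocIterator and the sink:

    #include <cstdio>
    #include <vector>

    void SerializeCodeReferences(const std::vector<int>& referenced_objects) {
      // Pass 1: pre-serialize every referenced object, in a fixed order.
      for (int obj : referenced_objects) {
        std::printf("serialize object %d\n", obj);
      }
      std::printf("kSynchronize\n");  // end-of-pre-serialization marker

      // Pass 2: re-walk the same references; each is now just an index into
      // the array the deserializer rebuilt in pass 1, so patching the code
      // body needs no allocation.
      for (size_t i = 0; i < referenced_objects.size(); ++i) {
        std::printf("patch slot from pre-serialized object #%zu\n", i);
      }
    }
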
+
+Serializer::HotObjectsList::HotObjectsList(Heap* heap) : heap_(heap) {
+ strong_roots_entry_ =
+ heap->RegisterStrongRoots(FullObjectSlot(&circular_queue_[0]),
+ FullObjectSlot(&circular_queue_[kSize]));
+}
+Serializer::HotObjectsList::~HotObjectsList() {
+ heap_->UnregisterStrongRoots(strong_roots_entry_);
}
} // namespace internal
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index 87f02f5c42..e04d08b256 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -8,14 +8,16 @@
#include <map>
#include "src/codegen/external-reference-encoder.h"
+#include "src/common/assert-scope.h"
#include "src/execution/isolate.h"
+#include "src/handles/global-handles.h"
#include "src/logging/log.h"
#include "src/objects/objects.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/snapshot/serializer-allocator.h"
#include "src/snapshot/serializer-deserializer.h"
#include "src/snapshot/snapshot-source-sink.h"
#include "src/snapshot/snapshot.h"
+#include "src/utils/identity-map.h"
namespace v8 {
namespace internal {
@@ -45,6 +47,8 @@ class CodeAddressMap : public CodeEventLogger {
class NameMap {
public:
NameMap() : impl_() {}
+ NameMap(const NameMap&) = delete;
+ NameMap& operator=(const NameMap&) = delete;
~NameMap() {
for (base::HashMap::Entry* p = impl_.Start(); p != nullptr;
@@ -112,8 +116,6 @@ class CodeAddressMap : public CodeEventLogger {
}
base::HashMap impl_;
-
- DISALLOW_COPY_AND_ASSIGN(NameMap);
};
void LogRecordedBuffer(Handle<AbstractCode> code,
@@ -132,52 +134,48 @@ class CodeAddressMap : public CodeEventLogger {
class ObjectCacheIndexMap {
public:
- ObjectCacheIndexMap() : map_(), next_index_(0) {}
+ explicit ObjectCacheIndexMap(Heap* heap) : map_(heap), next_index_(0) {}
+ ObjectCacheIndexMap(const ObjectCacheIndexMap&) = delete;
+ ObjectCacheIndexMap& operator=(const ObjectCacheIndexMap&) = delete;
// If |obj| is in the map, immediately return true. Otherwise add it to the
  // map and return false. In either case set |*index_out| to the index
  // associated with the object.
- bool LookupOrInsert(HeapObject obj, int* index_out) {
- Maybe<uint32_t> maybe_index = map_.Get(obj);
- if (maybe_index.IsJust()) {
- *index_out = maybe_index.FromJust();
- return true;
+ bool LookupOrInsert(Handle<HeapObject> obj, int* index_out) {
+ auto find_result = map_.FindOrInsert(obj);
+ if (!find_result.already_exists) {
+ *find_result.entry = next_index_++;
}
- *index_out = next_index_;
- map_.Set(obj, next_index_++);
- return false;
+ *index_out = *find_result.entry;
+ return find_result.already_exists;
}
private:
DisallowHeapAllocation no_allocation_;
- HeapObjectToIndexHashMap map_;
+ IdentityMap<int, base::DefaultAllocationPolicy> map_;
int next_index_;
-
- DISALLOW_COPY_AND_ASSIGN(ObjectCacheIndexMap);
};
class Serializer : public SerializerDeserializer {
public:
Serializer(Isolate* isolate, Snapshot::SerializerFlags flags);
-
- std::vector<SerializedData::Reservation> EncodeReservations() const {
- return allocator_.EncodeReservations();
- }
+ ~Serializer() override { DCHECK_EQ(unresolved_forward_refs_, 0); }
+ Serializer(const Serializer&) = delete;
+ Serializer& operator=(const Serializer&) = delete;
const std::vector<byte>* Payload() const { return sink_.data(); }
- bool ReferenceMapContains(HeapObject o) {
- return reference_map()
- ->LookupReference(reinterpret_cast<void*>(o.ptr()))
- .is_valid();
+ bool ReferenceMapContains(Handle<HeapObject> o) {
+ return reference_map()->LookupReference(o) != nullptr;
}
Isolate* isolate() const { return isolate_; }
+ int TotalAllocationSize() const;
+
protected:
- using PendingObjectReference =
- std::map<HeapObject, std::vector<int>>::iterator;
+ using PendingObjectReferences = std::vector<int>*;
class ObjectSerializer;
class RecursionScope {
@@ -196,7 +194,8 @@ class Serializer : public SerializerDeserializer {
};
void SerializeDeferredObjects();
- virtual void SerializeObject(HeapObject o) = 0;
+ void SerializeObject(Handle<HeapObject> o);
+ virtual void SerializeObjectImpl(Handle<HeapObject> o) = 0;
virtual bool MustBeDeferred(HeapObject object);
@@ -204,36 +203,35 @@ class Serializer : public SerializerDeserializer {
FullObjectSlot start, FullObjectSlot end) override;
void SerializeRootObject(FullObjectSlot slot);
- void PutRoot(RootIndex root_index, HeapObject object);
+ void PutRoot(RootIndex root_index);
void PutSmiRoot(FullObjectSlot slot);
- void PutBackReference(HeapObject object, SerializerReference reference);
+ void PutBackReference(Handle<HeapObject> object,
+ SerializerReference reference);
void PutAttachedReference(SerializerReference reference);
- // Emit alignment prefix if necessary, return required padding space in bytes.
- int PutAlignmentPrefix(HeapObject object);
void PutNextChunk(SnapshotSpace space);
void PutRepeat(int repeat_count);
  // Emit a marker noting that this slot is a forward reference to an
// object which has not yet been serialized.
- void PutPendingForwardReferenceTo(PendingObjectReference reference);
+ void PutPendingForwardReference(PendingObjectReferences& ref);
// Resolve the given previously registered forward reference to the current
// object.
void ResolvePendingForwardReference(int obj);
// Returns true if the object was successfully serialized as a root.
- bool SerializeRoot(HeapObject obj);
+ bool SerializeRoot(Handle<HeapObject> obj);
// Returns true if the object was successfully serialized as hot object.
- bool SerializeHotObject(HeapObject obj);
+ bool SerializeHotObject(Handle<HeapObject> obj);
// Returns true if the object was successfully serialized as back reference.
- bool SerializeBackReference(HeapObject obj);
+ bool SerializeBackReference(Handle<HeapObject> obj);
// Returns true if the object was successfully serialized as pending object.
- bool SerializePendingObject(HeapObject obj);
+ bool SerializePendingObject(Handle<HeapObject> obj);
// Returns true if the given heap object is a bytecode handler code object.
- bool ObjectIsBytecodeHandler(HeapObject obj) const;
+ bool ObjectIsBytecodeHandler(Handle<HeapObject> obj) const;
ExternalReferenceEncoder::Value EncodeExternalReference(Address addr) {
return external_reference_encoder_.Encode(addr);
@@ -253,36 +251,32 @@ class Serializer : public SerializerDeserializer {
Code CopyCode(Code code);
- void QueueDeferredObject(HeapObject obj) {
- DCHECK(!reference_map_.LookupReference(reinterpret_cast<void*>(obj.ptr()))
- .is_valid());
- deferred_objects_.push_back(obj);
+ void QueueDeferredObject(Handle<HeapObject> obj) {
+ DCHECK_NULL(reference_map_.LookupReference(obj));
+ deferred_objects_.Push(*obj);
}
  // Register that the given object shouldn't be immediately serialized, but
// will be serialized later and any references to it should be pending forward
// references.
- PendingObjectReference RegisterObjectIsPending(HeapObject obj);
+ void RegisterObjectIsPending(Handle<HeapObject> obj);
// Resolve the given pending object reference with the current object.
- void ResolvePendingObject(PendingObjectReference ref);
+ void ResolvePendingObject(Handle<HeapObject> obj);
void OutputStatistics(const char* name);
-#ifdef OBJECT_PRINT
- void CountInstanceType(Map map, int size, SnapshotSpace space);
-#endif // OBJECT_PRINT
+ void CountAllocation(Map map, int size, SnapshotSpace space);
#ifdef DEBUG
- void PushStack(HeapObject o) { stack_.push_back(o); }
- void PopStack() { stack_.pop_back(); }
+ void PushStack(Handle<HeapObject> o) { stack_.Push(*o); }
+ void PopStack() { stack_.Pop(); }
void PrintStack();
void PrintStack(std::ostream&);
#endif // DEBUG
SerializerReferenceMap* reference_map() { return &reference_map_; }
const RootIndexMap* root_index_map() const { return &root_index_map_; }
- SerializerAllocator* allocator() { return &allocator_; }
SnapshotByteSink sink_; // Used directly by subclasses.
@@ -294,17 +288,62 @@ class Serializer : public SerializerDeserializer {
}
private:
+ // A circular queue of hot objects. This is added to in the same order as in
+ // Deserializer::HotObjectsList, but this stores the objects as an array of
+ // raw addresses that are considered strong roots. This allows objects to be
+ // added to the list without having to extend their handle's lifetime.
+ //
+ // We should never allow this class to return Handles to objects in the queue,
+ // as the object in the queue may change if kSize other objects are added to
+ // the queue during that Handle's lifetime.
+ class HotObjectsList {
+ public:
+ explicit HotObjectsList(Heap* heap);
+ ~HotObjectsList();
+ HotObjectsList(const HotObjectsList&) = delete;
+ HotObjectsList& operator=(const HotObjectsList&) = delete;
+
+ void Add(HeapObject object) {
+ circular_queue_[index_] = object.ptr();
+ index_ = (index_ + 1) & kSizeMask;
+ }
+
+ static const int kNotFound = -1;
+
+ int Find(HeapObject object) {
+ DCHECK(!AllowGarbageCollection::IsAllowed());
+ for (int i = 0; i < kSize; i++) {
+ if (circular_queue_[i] == object.ptr()) {
+ return i;
+ }
+ }
+ return kNotFound;
+ }
+
+ private:
+ static const int kSize = kHotObjectCount;
+ static const int kSizeMask = kSize - 1;
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(kSize));
+ Heap* heap_;
+ StrongRootsEntry* strong_roots_entry_;
+ Address circular_queue_[kSize] = {kNullAddress};
+ int index_ = 0;
+ };
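
HotObjectsList relies on kSize being a power of two so the wrap-around is a single mask. A toy version of the circular queue, with plain ints instead of strong-rooted raw addresses:

    class HotList {
     public:
      void Add(int value) {
        queue_[index_] = value;
        index_ = (index_ + 1) & kSizeMask;  // wrap with a mask, no division
      }
      int Find(int value) const {
        for (int i = 0; i < kSize; i++) {
          if (queue_[i] == value) return i;
        }
        return -1;  // kNotFound
      }

     private:
      static const int kSize = 8;  // must be a power of two for the mask
      static const int kSizeMask = kSize - 1;
      int queue_[kSize] = {0};
      int index_ = 0;
    };
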
+
// Disallow GC during serialization.
// TODO(leszeks, v8:10815): Remove this constraint.
- DisallowHeapAllocation no_gc;
+ DISALLOW_HEAP_ALLOCATION(no_gc)
Isolate* isolate_;
+ HotObjectsList hot_objects_;
SerializerReferenceMap reference_map_;
ExternalReferenceEncoder external_reference_encoder_;
RootIndexMap root_index_map_;
std::unique_ptr<CodeAddressMap> code_address_map_;
std::vector<byte> code_buffer_;
- std::vector<HeapObject> deferred_objects_; // To handle stack overflow.
+ GlobalHandleVector<HeapObject>
+ deferred_objects_; // To handle stack overflow.
+ int num_back_refs_ = 0;
// Objects which have started being serialized, but haven't yet been allocated
// with the allocator, are considered "pending". References to them don't have
@@ -319,34 +358,40 @@ class Serializer : public SerializerDeserializer {
// forward refs remaining.
int next_forward_ref_id_ = 0;
int unresolved_forward_refs_ = 0;
- std::map<HeapObject, std::vector<int>> forward_refs_per_pending_object_;
+ IdentityMap<PendingObjectReferences, base::DefaultAllocationPolicy>
+ forward_refs_per_pending_object_;
+
+ // Used to keep track of the off-heap backing stores used by TypedArrays/
+ // ArrayBuffers. Note that the index begins at 1 and not 0, because when a
+ // TypedArray has an on-heap backing store, the backing_store pointer in the
+ // corresponding ArrayBuffer will be null, which makes it indistinguishable
+ // from index 0.
+ uint32_t seen_backing_stores_index_ = 1;
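
The comment's reason for starting at 1 can be made concrete: index 0 is reserved as the "no backing store" value, so a null backing_store pointer stays distinguishable from the first real off-heap store. An illustrative sketch, not V8's types:

    #include <cstdint>

    constexpr uint32_t kNullBackingStoreRef = 0;  // on-heap / null store

    struct BackingStoreRefs {
      uint32_t next_index = 1;  // 0 is never handed out
      uint32_t Register() { return next_index++; }
    };
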
int recursion_depth_ = 0;
const Snapshot::SerializerFlags flags_;
- SerializerAllocator allocator_;
+ size_t allocation_size_[kNumberOfSnapshotSpaces] = {0};
#ifdef OBJECT_PRINT
static constexpr int kInstanceTypes = LAST_TYPE + 1;
- std::unique_ptr<int[]> instance_type_count_[kNumberOfSpaces];
- std::unique_ptr<size_t[]> instance_type_size_[kNumberOfSpaces];
+ std::unique_ptr<int[]> instance_type_count_[kNumberOfSnapshotSpaces];
+ std::unique_ptr<size_t[]> instance_type_size_[kNumberOfSnapshotSpaces];
#endif // OBJECT_PRINT
#ifdef DEBUG
- std::vector<HeapObject> stack_;
+ GlobalHandleVector<HeapObject> back_refs_;
+ GlobalHandleVector<HeapObject> stack_;
#endif // DEBUG
-
- friend class SerializerAllocator;
-
- DISALLOW_COPY_AND_ASSIGN(Serializer);
};
class RelocInfoIterator;
class Serializer::ObjectSerializer : public ObjectVisitor {
public:
- ObjectSerializer(Serializer* serializer, HeapObject obj,
+ ObjectSerializer(Serializer* serializer, Handle<HeapObject> obj,
SnapshotByteSink* sink)
- : serializer_(serializer),
+ : isolate_(serializer->isolate()),
+ serializer_(serializer),
object_(obj),
sink_(sink),
bytes_processed_so_far_(0) {
@@ -375,7 +420,11 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
void VisitRuntimeEntry(Code host, RelocInfo* reloc) override;
void VisitOffHeapTarget(Code host, RelocInfo* target) override;
+ Isolate* isolate() { return isolate_; }
+
private:
+ class RelocInfoObjectPreSerializer;
+
void SerializePrologue(SnapshotSpace space, int size, Map map);
// This function outputs or skips the raw data between the last pointer and
@@ -384,15 +433,16 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
void OutputExternalReference(Address target, int target_size,
bool sandboxify);
void OutputRawData(Address up_to);
- void OutputCode(int size);
+ void SerializeCode(Map map, int size);
uint32_t SerializeBackingStore(void* backing_store, int32_t byte_length);
void SerializeJSTypedArray();
void SerializeJSArrayBuffer();
void SerializeExternalString();
void SerializeExternalStringAsSequentialString();
+ Isolate* isolate_;
Serializer* serializer_;
- HeapObject object_;
+ Handle<HeapObject> object_;
SnapshotByteSink* sink_;
int bytes_processed_so_far_;
};
diff --git a/deps/v8/src/snapshot/snapshot-data.cc b/deps/v8/src/snapshot/snapshot-data.cc
index 870945cdce..0a5bbaaf2a 100644
--- a/deps/v8/src/snapshot/snapshot-data.cc
+++ b/deps/v8/src/snapshot/snapshot-data.cc
@@ -26,51 +26,28 @@ constexpr uint32_t SerializedData::kMagicNumber;
SnapshotData::SnapshotData(const Serializer* serializer) {
DisallowGarbageCollection no_gc;
- std::vector<Reservation> reservations = serializer->EncodeReservations();
const std::vector<byte>* payload = serializer->Payload();
// Calculate sizes.
- uint32_t reservation_size =
- static_cast<uint32_t>(reservations.size()) * kUInt32Size;
- uint32_t payload_offset = kHeaderSize + reservation_size;
- uint32_t padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
- uint32_t size =
- padded_payload_offset + static_cast<uint32_t>(payload->size());
+ uint32_t size = kHeaderSize + static_cast<uint32_t>(payload->size());
// Allocate backing store and create result data.
AllocateData(size);
// Zero out pre-payload data. Part of that is only used for padding.
- memset(data_, 0, padded_payload_offset);
+ memset(data_, 0, kHeaderSize);
// Set header values.
SetMagicNumber();
- SetHeaderValue(kNumReservationsOffset, static_cast<int>(reservations.size()));
SetHeaderValue(kPayloadLengthOffset, static_cast<int>(payload->size()));
- // Copy reservation chunk sizes.
- CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.data()),
- reservation_size);
-
// Copy serialized data.
- CopyBytes(data_ + padded_payload_offset, payload->data(),
+ CopyBytes(data_ + kHeaderSize, payload->data(),
static_cast<size_t>(payload->size()));
}
-std::vector<SerializedData::Reservation> SnapshotData::Reservations() const {
- uint32_t size = GetHeaderValue(kNumReservationsOffset);
- std::vector<SerializedData::Reservation> reservations(size);
- memcpy(reservations.data(), data_ + kHeaderSize,
- size * sizeof(SerializedData::Reservation));
- return reservations;
-}
-
Vector<const byte> SnapshotData::Payload() const {
- uint32_t reservations_size =
- GetHeaderValue(kNumReservationsOffset) * kUInt32Size;
- uint32_t padded_payload_offset =
- POINTER_SIZE_ALIGN(kHeaderSize + reservations_size);
- const byte* payload = data_ + padded_payload_offset;
+ const byte* payload = data_ + kHeaderSize;
uint32_t length = GetHeaderValue(kPayloadLengthOffset);
DCHECK_EQ(data_ + size_, payload + length);
return Vector<const byte>(payload, length);
diff --git a/deps/v8/src/snapshot/snapshot-data.h b/deps/v8/src/snapshot/snapshot-data.h
index b8a9133e7f..a7d6872bc6 100644
--- a/deps/v8/src/snapshot/snapshot-data.h
+++ b/deps/v8/src/snapshot/snapshot-data.h
@@ -20,21 +20,6 @@ class Serializer;
class SerializedData {
public:
- class Reservation {
- public:
- Reservation() : reservation_(0) {}
- explicit Reservation(uint32_t size)
- : reservation_(ChunkSizeBits::encode(size)) {}
-
- uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation_); }
- bool is_last() const { return IsLastChunkBits::decode(reservation_); }
-
- void mark_as_last() { reservation_ |= IsLastChunkBits::encode(true); }
-
- private:
- uint32_t reservation_;
- };
-
SerializedData(byte* data, int size)
: data_(data), size_(size), owns_data_(false) {}
SerializedData() : data_(nullptr), size_(0), owns_data_(false) {}
@@ -45,6 +30,8 @@ class SerializedData {
// Ensure |other| will not attempt to destroy our data in destructor.
other.owns_data_ = false;
}
+ SerializedData(const SerializedData&) = delete;
+ SerializedData& operator=(const SerializedData&) = delete;
virtual ~SerializedData() {
if (owns_data_) DeleteArray<byte>(data_);
@@ -77,9 +64,6 @@ class SerializedData {
byte* data_;
uint32_t size_;
bool owns_data_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SerializedData);
};
// Wrapper around the serialization payload.
@@ -93,7 +77,6 @@ class V8_EXPORT_PRIVATE SnapshotData : public SerializedData {
: SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
}
- std::vector<Reservation> Reservations() const;
virtual Vector<const byte> Payload() const;
Vector<const byte> RawData() const {
@@ -112,14 +95,9 @@ class V8_EXPORT_PRIVATE SnapshotData : public SerializedData {
// The data header consists of uint32_t-sized entries:
// [0] magic number and (internal) external reference count
- // [1] number of reservation size entries
- // [2] payload length
- // ... reservations
+ // [1] payload length
// ... serialized payload
- static const uint32_t kNumReservationsOffset =
- kMagicNumberOffset + kUInt32Size;
- static const uint32_t kPayloadLengthOffset =
- kNumReservationsOffset + kUInt32Size;
+ static const uint32_t kPayloadLengthOffset = kMagicNumberOffset + kUInt32Size;
static const uint32_t kHeaderSize = kPayloadLengthOffset + kUInt32Size;
};
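
After dropping the reservation table, the snapshot header is just two uint32 fields followed directly by the payload. The layout restated as self-contained constants; kUInt32Size is assumed to be 4:

    #include <cstdint>

    constexpr uint32_t kUInt32Size = 4;
    constexpr uint32_t kMagicNumberOffset = 0;
    constexpr uint32_t kPayloadLengthOffset = kMagicNumberOffset + kUInt32Size;
    constexpr uint32_t kHeaderSize = kPayloadLengthOffset + kUInt32Size;  // 8

    // The payload begins immediately after the 8-byte header; there is no
    // reservation table and no pointer-size padding to skip anymore.
    constexpr uint32_t PayloadStart() { return kHeaderSize; }
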
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index 9d44678679..f0686af3c0 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -7,7 +7,9 @@
#include <utility>
+#include "src/base/atomicops.h"
#include "src/base/logging.h"
+#include "src/common/globals.h"
#include "src/snapshot/snapshot-utils.h"
#include "src/utils/utils.h"
@@ -31,6 +33,8 @@ class SnapshotByteSource final {
: data_(payload.begin()), length_(payload.length()), position_(0) {}
~SnapshotByteSource() = default;
+ SnapshotByteSource(const SnapshotByteSource&) = delete;
+ SnapshotByteSource& operator=(const SnapshotByteSource&) = delete;
bool HasMore() { return position_ < length_; }
@@ -51,6 +55,30 @@ class SnapshotByteSource final {
position_ += number_of_bytes;
}
+ void CopySlots(Address* dest, int number_of_slots) {
+ base::AtomicWord* start = reinterpret_cast<base::AtomicWord*>(dest);
+ base::AtomicWord* end = start + number_of_slots;
+ for (base::AtomicWord* p = start; p < end;
+ ++p, position_ += sizeof(base::AtomicWord)) {
+ base::AtomicWord val;
+ memcpy(&val, data_ + position_, sizeof(base::AtomicWord));
+ base::Relaxed_Store(p, val);
+ }
+ }
+
+#ifdef V8_COMPRESS_POINTERS
+ void CopySlots(Tagged_t* dest, int number_of_slots) {
+ AtomicTagged_t* start = reinterpret_cast<AtomicTagged_t*>(dest);
+ AtomicTagged_t* end = start + number_of_slots;
+ for (AtomicTagged_t* p = start; p < end;
+ ++p, position_ += sizeof(AtomicTagged_t)) {
+ AtomicTagged_t val;
+ memcpy(&val, data_ + position_, sizeof(AtomicTagged_t));
+ base::Relaxed_Store(p, val);
+ }
+ }
+#endif
+
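
The CopySlots overloads above publish each deserialized word with a relaxed atomic store so concurrent heap visitors never observe a torn slot, while memcpy keeps the read from the byte stream alignment-safe. The same idiom with standard C++11 atomics in place of base::Relaxed_Store:

    #include <atomic>
    #include <cstdint>
    #include <cstring>

    void CopySlotsRelaxed(std::atomic<std::uintptr_t>* dest,
                          const unsigned char* src, int number_of_slots) {
      for (int i = 0; i < number_of_slots; ++i) {
        std::uintptr_t val;
        std::memcpy(&val, src + i * sizeof(val), sizeof(val));  // unaligned-safe read
        dest[i].store(val, std::memory_order_relaxed);          // untorn publish
      }
    }
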
inline int GetInt() {
// This way of decoding variable-length encoded integers does not
// suffer from branch mispredictions.
@@ -82,8 +110,6 @@ class SnapshotByteSource final {
const byte* data_;
int length_;
int position_;
-
- DISALLOW_COPY_AND_ASSIGN(SnapshotByteSource);
};
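
GetInt's body is elided by the hunk above, but its comment describes a branch-free decode. One plausible reconstruction, not necessarily V8's exact code: always read a fixed four-byte window (the caller must guarantee four readable bytes), take the byte count from the low two bits, mask off the unused tail, then drop the two tag bits.

    #include <cstdint>

    int DecodeVarInt(const unsigned char* data, int* position) {
      uint32_t answer = static_cast<uint32_t>(data[*position]) |
                        static_cast<uint32_t>(data[*position + 1]) << 8 |
                        static_cast<uint32_t>(data[*position + 2]) << 16 |
                        static_cast<uint32_t>(data[*position + 3]) << 24;
      int bytes = (answer & 3) + 1;  // 1..4 bytes were actually used
      *position += bytes;
      uint32_t mask = 0xffffffffu >> (32 - (bytes << 3));  // keep used bytes
      return static_cast<int>((answer & mask) >> 2);       // strip tag bits
    }
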
/**
diff --git a/deps/v8/src/snapshot/snapshot-utils.cc b/deps/v8/src/snapshot/snapshot-utils.cc
index 319b828446..eb2372372c 100644
--- a/deps/v8/src/snapshot/snapshot-utils.cc
+++ b/deps/v8/src/snapshot/snapshot-utils.cc
@@ -21,20 +21,5 @@ uint32_t Checksum(Vector<const byte> payload) {
return static_cast<uint32_t>(adler32(0, payload.begin(), payload.length()));
}
-V8_EXPORT_PRIVATE uint32_t Checksum(Vector<const byte> payload1,
- Vector<const byte> payload2) {
-#ifdef MEMORY_SANITIZER
- // Computing the checksum includes padding bytes for objects like strings.
- // Mark every object as initialized in the code serializer.
- MSAN_MEMORY_IS_INITIALIZED(payload1.begin(), payload1.length());
- MSAN_MEMORY_IS_INITIALIZED(payload2.begin(), payload2.length());
-#endif // MEMORY_SANITIZER
- // Priming the adler32 call so it can see what CPU features are available.
- adler32(0, nullptr, 0);
- auto sum = adler32(0, payload1.begin(), payload1.length());
- sum = adler32(sum, payload2.begin(), payload2.length());
- return static_cast<uint32_t>(sum);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/snapshot-utils.h b/deps/v8/src/snapshot/snapshot-utils.h
index 284bbcd4a5..045813b139 100644
--- a/deps/v8/src/snapshot/snapshot-utils.h
+++ b/deps/v8/src/snapshot/snapshot-utils.h
@@ -11,8 +11,6 @@ namespace v8 {
namespace internal {
V8_EXPORT_PRIVATE uint32_t Checksum(Vector<const byte> payload);
-V8_EXPORT_PRIVATE uint32_t Checksum(Vector<const byte> payload1,
- Vector<const byte> payload2);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/snapshot.cc b/deps/v8/src/snapshot/snapshot.cc
index 7e3f072659..86d0544667 100644
--- a/deps/v8/src/snapshot/snapshot.cc
+++ b/deps/v8/src/snapshot/snapshot.cc
@@ -158,12 +158,9 @@ bool Snapshot::Initialize(Isolate* isolate) {
SnapshotData startup_snapshot_data(MaybeDecompress(startup_data));
SnapshotData read_only_snapshot_data(MaybeDecompress(read_only_data));
- StartupDeserializer startup_deserializer(&startup_snapshot_data);
- ReadOnlyDeserializer read_only_deserializer(&read_only_snapshot_data);
- startup_deserializer.SetRehashability(ExtractRehashability(blob));
- read_only_deserializer.SetRehashability(ExtractRehashability(blob));
- bool success =
- isolate->InitWithSnapshot(&read_only_deserializer, &startup_deserializer);
+ bool success = isolate->InitWithSnapshot(&startup_snapshot_data,
+ &read_only_snapshot_data,
+ ExtractRehashability(blob));
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
int bytes = startup_data.length();
@@ -317,30 +314,6 @@ void Snapshot::SerializeDeserializeAndVerifyForTesting(
Isolate::Delete(new_isolate);
}
-void ProfileDeserialization(
- const SnapshotData* read_only_snapshot,
- const SnapshotData* startup_snapshot,
- const std::vector<SnapshotData*>& context_snapshots) {
- if (FLAG_profile_deserialization) {
- int startup_total = 0;
- PrintF("Deserialization will reserve:\n");
- for (const auto& reservation : read_only_snapshot->Reservations()) {
- startup_total += reservation.chunk_size();
- }
- for (const auto& reservation : startup_snapshot->Reservations()) {
- startup_total += reservation.chunk_size();
- }
- PrintF("%10d bytes per isolate\n", startup_total);
- for (size_t i = 0; i < context_snapshots.size(); i++) {
- int context_total = 0;
- for (const auto& reservation : context_snapshots[i]->Reservations()) {
- context_total += reservation.chunk_size();
- }
- PrintF("%10d bytes per context #%zu\n", context_total, i);
- }
- }
-}
-
// static
constexpr Snapshot::SerializerFlags Snapshot::kDefaultSerializerFlags;
@@ -352,6 +325,7 @@ v8::StartupData Snapshot::Create(
const DisallowGarbageCollection& no_gc, SerializerFlags flags) {
DCHECK_EQ(contexts->size(), embedder_fields_serializers.size());
DCHECK_GT(contexts->size(), 0);
+ HandleScope scope(isolate);
// Enter a safepoint so that the heap is safe to iterate.
// TODO(leszeks): This safepoint's scope could be tightened to just string
@@ -374,12 +348,17 @@ v8::StartupData Snapshot::Create(
// TODO(v8:6593): generalize rehashing, and remove this flag.
bool can_be_rehashed = true;
+ std::vector<int> context_allocation_sizes;
for (int i = 0; i < num_contexts; i++) {
ContextSerializer context_serializer(isolate, flags, &startup_serializer,
embedder_fields_serializers[i]);
context_serializer.Serialize(&contexts->at(i), no_gc);
can_be_rehashed = can_be_rehashed && context_serializer.can_be_rehashed();
context_snapshots.push_back(new SnapshotData(&context_serializer));
+ if (FLAG_profile_deserialization) {
+ context_allocation_sizes.push_back(
+ context_serializer.TotalAllocationSize());
+ }
}
startup_serializer.SerializeWeakReferencesAndDeferred();
@@ -390,6 +369,17 @@ v8::StartupData Snapshot::Create(
read_only_serializer.FinalizeSerialization();
can_be_rehashed = can_be_rehashed && read_only_serializer.can_be_rehashed();
+ if (FLAG_profile_deserialization) {
+ // These prints should match the regexp in test/memory/Memory.json
+ PrintF("Deserialization will allocate:\n");
+ PrintF("%10d bytes per isolate\n",
+ read_only_serializer.TotalAllocationSize() +
+ startup_serializer.TotalAllocationSize());
+ for (int i = 0; i < num_contexts; i++) {
+ PrintF("%10d bytes per context #%d\n", context_allocation_sizes[i], i);
+ }
+ }
+
SnapshotData read_only_snapshot(&read_only_serializer);
SnapshotData startup_snapshot(&startup_serializer);
v8::StartupData result =
@@ -454,9 +444,6 @@ v8::StartupData SnapshotImpl::CreateSnapshotBlob(
total_length += static_cast<uint32_t>(context_snapshot->RawData().length());
}
- ProfileDeserialization(read_only_snapshot_in, startup_snapshot_in,
- context_snapshots_in);
-
char* data = new char[total_length];
// Zero out pre-payload data. Part of that is only used for padding.
memset(data, 0, SnapshotImpl::StartupSnapshotOffset(num_contexts));
@@ -480,9 +467,8 @@ v8::StartupData SnapshotImpl::CreateSnapshotBlob(
reinterpret_cast<const char*>(startup_snapshot->RawData().begin()),
payload_length);
if (FLAG_profile_deserialization) {
- PrintF("Snapshot blob consists of:\n%10d bytes in %d chunks for startup\n",
- payload_length,
- static_cast<uint32_t>(startup_snapshot_in->Reservations().size()));
+ PrintF("Snapshot blob consists of:\n%10d bytes for startup\n",
+ payload_length);
}
payload_offset += payload_length;
@@ -510,10 +496,7 @@ v8::StartupData SnapshotImpl::CreateSnapshotBlob(
reinterpret_cast<const char*>(context_snapshot->RawData().begin()),
payload_length);
if (FLAG_profile_deserialization) {
- PrintF(
- "%10d bytes in %d chunks for context #%d\n", payload_length,
- static_cast<uint32_t>(context_snapshots_in[i]->Reservations().size()),
- i);
+ PrintF("%10d bytes for context #%d\n", payload_length, i);
}
payload_offset += payload_length;
}
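
Note: with --profile-deserialization, blob creation now reports serializer allocation totals rather than per-chunk reservation counts. Using the format strings above (byte counts made up for illustration), the output takes this shape:

    Deserialization will allocate:
       1234567 bytes per isolate
         65536 bytes per context #0
    Snapshot blob consists of:
        234567 bytes for startup
         45678 bytes for context #0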
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index 3288aff509..b019091ee9 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -14,36 +14,35 @@
namespace v8 {
namespace internal {
-void StartupDeserializer::DeserializeInto(Isolate* isolate) {
- Initialize(isolate);
-
- if (!allocator()->ReserveSpace()) {
- V8::FatalProcessOutOfMemory(isolate, "StartupDeserializer");
- }
+void StartupDeserializer::DeserializeIntoIsolate() {
+ HandleScope scope(isolate());
// No active threads.
- DCHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
+ DCHECK_NULL(isolate()->thread_manager()->FirstThreadStateInUse());
// No active handles.
- DCHECK(isolate->handle_scope_implementer()->blocks()->empty());
+ DCHECK(isolate()->handle_scope_implementer()->blocks()->empty());
// Startup object cache is not yet populated.
- DCHECK(isolate->startup_object_cache()->empty());
+ DCHECK(isolate()->startup_object_cache()->empty());
// Builtins are not yet created.
- DCHECK(!isolate->builtins()->is_initialized());
+ DCHECK(!isolate()->builtins()->is_initialized());
{
- DisallowGarbageCollection no_gc;
- isolate->heap()->IterateSmiRoots(this);
- isolate->heap()->IterateRoots(
+ isolate()->heap()->IterateSmiRoots(this);
+ isolate()->heap()->IterateRoots(
this,
base::EnumSet<SkipRoot>{SkipRoot::kUnserializable, SkipRoot::kWeak});
- Iterate(isolate, this);
+ Iterate(isolate(), this);
DeserializeStringTable();
- isolate->heap()->IterateWeakRoots(
+ isolate()->heap()->IterateWeakRoots(
this, base::EnumSet<SkipRoot>{SkipRoot::kUnserializable});
DeserializeDeferredObjects();
- RestoreExternalReferenceRedirectors(isolate, accessor_infos());
- RestoreExternalReferenceRedirectors(isolate, call_handler_infos());
+ for (Handle<AccessorInfo> info : accessor_infos()) {
+ RestoreExternalReferenceRedirector(isolate(), info);
+ }
+ for (Handle<CallHandlerInfo> info : call_handler_infos()) {
+ RestoreExternalReferenceRedirector(isolate(), info);
+ }
// Flush the instruction cache for the entire code-space. Must happen after
// builtins deserialization.
@@ -52,22 +51,23 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
CheckNoArrayBufferBackingStores();
- isolate->heap()->set_native_contexts_list(
- ReadOnlyRoots(isolate).undefined_value());
+ isolate()->heap()->set_native_contexts_list(
+ ReadOnlyRoots(isolate()).undefined_value());
// The allocation site list is built during root iteration, but if no sites
// were encountered then it needs to be initialized to undefined.
- if (isolate->heap()->allocation_sites_list() == Smi::zero()) {
- isolate->heap()->set_allocation_sites_list(
- ReadOnlyRoots(isolate).undefined_value());
+ if (isolate()->heap()->allocation_sites_list() == Smi::zero()) {
+ isolate()->heap()->set_allocation_sites_list(
+ ReadOnlyRoots(isolate()).undefined_value());
}
- isolate->heap()->set_dirty_js_finalization_registries_list(
- ReadOnlyRoots(isolate).undefined_value());
- isolate->heap()->set_dirty_js_finalization_registries_list_tail(
- ReadOnlyRoots(isolate).undefined_value());
+ isolate()->heap()->set_dirty_js_finalization_registries_list(
+ ReadOnlyRoots(isolate()).undefined_value());
+ isolate()->heap()->set_dirty_js_finalization_registries_list_tail(
+ ReadOnlyRoots(isolate()).undefined_value());
- isolate->builtins()->MarkInitialized();
+ isolate()->builtins()->MarkInitialized();
LogNewMapEvents();
+ WeakenDescriptorArrays();
if (FLAG_rehash_snapshot && can_rehash()) {
// Hash seed was initialized in ReadOnlyDeserializer.
@@ -84,16 +84,15 @@ void StartupDeserializer::DeserializeStringTable() {
// Add each string to the Isolate's string table.
// TODO(leszeks): Consider pre-sizing the string table.
for (int i = 0; i < string_table_size; ++i) {
- String string = String::cast(ReadObject());
- Address handle_storage = string.ptr();
- Handle<String> handle(&handle_storage);
- StringTableInsertionKey key(handle);
- String result = *isolate()->string_table()->LookupKey(isolate(), &key);
+ Handle<String> string = Handle<String>::cast(ReadObject());
+ StringTableInsertionKey key(string);
+ Handle<String> result =
+ isolate()->string_table()->LookupKey(isolate(), &key);
USE(result);
// This is startup, so there should be no duplicate entries in the string
// table, and the lookup should unconditionally add the given string.
- DCHECK_EQ(result, string);
+ DCHECK_EQ(*result, *string);
}
DCHECK_EQ(string_table_size, isolate()->string_table()->NumberOfElements());
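
Note: the string-table loop now keeps deserialized strings in real Handles instead of faking a handle from a stack slot. A toy sketch (hypothetical types, not V8's API) of why handles matter under a moving GC:

    struct ToyObject { int payload; };

    struct ToyHandle {
      // The GC knows about this slot and rewrites it when the object moves,
      // so the reference stays valid across any allocation the loop triggers.
      ToyObject** slot;
      ToyObject* operator->() const { return *slot; }
    };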
diff --git a/deps/v8/src/snapshot/startup-deserializer.h b/deps/v8/src/snapshot/startup-deserializer.h
index 59533de8de..f744efc193 100644
--- a/deps/v8/src/snapshot/startup-deserializer.h
+++ b/deps/v8/src/snapshot/startup-deserializer.h
@@ -6,6 +6,7 @@
#define V8_SNAPSHOT_STARTUP_DESERIALIZER_H_
#include "src/snapshot/deserializer.h"
+#include "src/snapshot/snapshot-data.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
@@ -14,11 +15,14 @@ namespace internal {
// Initializes an isolate with context-independent data from a given snapshot.
class StartupDeserializer final : public Deserializer {
public:
- explicit StartupDeserializer(const SnapshotData* startup_data)
- : Deserializer(startup_data, false) {}
+ explicit StartupDeserializer(Isolate* isolate,
+ const SnapshotData* startup_data,
+ bool can_rehash)
+ : Deserializer(isolate, startup_data->Payload(),
+ startup_data->GetMagicNumber(), false, can_rehash) {}
// Deserialize the snapshot into an empty heap.
- void DeserializeInto(Isolate* isolate);
+ void DeserializeIntoIsolate();
private:
void DeserializeStringTable();
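
Note: a sketch of the call site implied by the new interface (the actual wiring lives inside Isolate::InitWithSnapshot, which is outside this hunk):

    // can_rehash comes from ExtractRehashability(blob) in snapshot.cc above.
    StartupDeserializer startup_deserializer(isolate, &startup_snapshot_data,
                                             can_rehash);
    startup_deserializer.DeserializeIntoIsolate();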
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 8606f6a019..88d3c77c66 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -66,14 +66,19 @@ StartupSerializer::StartupSerializer(Isolate* isolate,
Snapshot::SerializerFlags flags,
ReadOnlySerializer* read_only_serializer)
: RootsSerializer(isolate, flags, RootIndex::kFirstStrongRoot),
- read_only_serializer_(read_only_serializer) {
- allocator()->UseCustomChunkSize(FLAG_serialization_chunk_size);
+ read_only_serializer_(read_only_serializer),
+ accessor_infos_(isolate->heap()),
+ call_handler_infos_(isolate->heap()) {
InitializeCodeAddressMap();
}
StartupSerializer::~StartupSerializer() {
- RestoreExternalReferenceRedirectors(isolate(), accessor_infos_);
- RestoreExternalReferenceRedirectors(isolate(), call_handler_infos_);
+ for (Handle<AccessorInfo> info : accessor_infos_) {
+ RestoreExternalReferenceRedirector(isolate(), info);
+ }
+ for (Handle<CallHandlerInfo> info : call_handler_infos_) {
+ RestoreExternalReferenceRedirector(isolate(), info);
+ }
OutputStatistics("StartupSerializer");
}
@@ -84,12 +89,6 @@ bool IsUnexpectedCodeObject(Isolate* isolate, HeapObject obj) {
if (!obj.IsCode()) return false;
Code code = Code::cast(obj);
-
- // TODO(v8:8768): Deopt entry code should not be serialized.
- if (code.kind() == CodeKind::STUB && isolate->deoptimizer_data() != nullptr) {
- if (isolate->deoptimizer_data()->IsDeoptEntryCode(code)) return false;
- }
-
if (code.kind() == CodeKind::REGEXP) return false;
if (!code.is_builtin()) return true;
if (code.is_off_heap_trampoline()) return false;
@@ -114,21 +113,21 @@ bool IsUnexpectedCodeObject(Isolate* isolate, HeapObject obj) {
} // namespace
#endif // DEBUG
-void StartupSerializer::SerializeObject(HeapObject obj) {
+void StartupSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
#ifdef DEBUG
- if (obj.IsJSFunction()) {
+ if (obj->IsJSFunction()) {
v8::base::OS::PrintError("Reference stack:\n");
PrintStack(std::cerr);
- obj.Print(std::cerr);
+ obj->Print(std::cerr);
FATAL(
"JSFunction should be added through the context snapshot instead of "
"the isolate snapshot");
}
#endif // DEBUG
- DCHECK(!IsUnexpectedCodeObject(isolate(), obj));
+ DCHECK(!IsUnexpectedCodeObject(isolate(), *obj));
if (SerializeHotObject(obj)) return;
- if (IsRootAndHasBeenSerialized(obj) && SerializeRoot(obj)) return;
+ if (IsRootAndHasBeenSerialized(*obj) && SerializeRoot(obj)) return;
if (SerializeUsingReadOnlyObjectCache(&sink_, obj)) return;
if (SerializeBackReference(obj)) return;
@@ -137,37 +136,37 @@ void StartupSerializer::SerializeObject(HeapObject obj) {
use_simulator = true;
#endif
- if (use_simulator && obj.IsAccessorInfo()) {
+ if (use_simulator && obj->IsAccessorInfo()) {
// Wipe external reference redirects in the accessor info.
- AccessorInfo info = AccessorInfo::cast(obj);
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(obj);
Address original_address =
- Foreign::cast(info.getter()).foreign_address(isolate());
- Foreign::cast(info.js_getter())
+ Foreign::cast(info->getter()).foreign_address(isolate());
+ Foreign::cast(info->js_getter())
.set_foreign_address(isolate(), original_address);
- accessor_infos_.push_back(info);
- } else if (use_simulator && obj.IsCallHandlerInfo()) {
- CallHandlerInfo info = CallHandlerInfo::cast(obj);
+ accessor_infos_.Push(*info);
+ } else if (use_simulator && obj->IsCallHandlerInfo()) {
+ Handle<CallHandlerInfo> info = Handle<CallHandlerInfo>::cast(obj);
Address original_address =
- Foreign::cast(info.callback()).foreign_address(isolate());
- Foreign::cast(info.js_callback())
+ Foreign::cast(info->callback()).foreign_address(isolate());
+ Foreign::cast(info->js_callback())
.set_foreign_address(isolate(), original_address);
- call_handler_infos_.push_back(info);
- } else if (obj.IsScript() && Script::cast(obj).IsUserJavaScript()) {
- Script::cast(obj).set_context_data(
+ call_handler_infos_.Push(*info);
+ } else if (obj->IsScript() && Handle<Script>::cast(obj)->IsUserJavaScript()) {
+ Handle<Script>::cast(obj)->set_context_data(
ReadOnlyRoots(isolate()).uninitialized_symbol());
- } else if (obj.IsSharedFunctionInfo()) {
+ } else if (obj->IsSharedFunctionInfo()) {
// Clear inferred name for native functions.
- SharedFunctionInfo shared = SharedFunctionInfo::cast(obj);
- if (!shared.IsSubjectToDebugging() && shared.HasUncompiledData()) {
- shared.uncompiled_data().set_inferred_name(
+ Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast(obj);
+ if (!shared->IsSubjectToDebugging() && shared->HasUncompiledData()) {
+ shared->uncompiled_data().set_inferred_name(
ReadOnlyRoots(isolate()).empty_string());
}
}
- CheckRehashability(obj);
+ CheckRehashability(*obj);
// Object has not yet been serialized. Serialize it here.
- DCHECK(!ReadOnlyHeap::Contains(obj));
+ DCHECK(!ReadOnlyHeap::Contains(*obj));
ObjectSerializer object_serializer(this, obj, &sink_);
object_serializer.Serialize();
}
@@ -225,7 +224,7 @@ void StartupSerializer::SerializeStringTable(StringTable* string_table) {
Object obj = current.load(isolate);
if (obj.IsHeapObject()) {
DCHECK(obj.IsInternalizedString());
- serializer_->SerializeObject(HeapObject::cast(obj));
+ serializer_->SerializeObject(handle(HeapObject::cast(obj), isolate));
}
}
}
@@ -243,9 +242,6 @@ void StartupSerializer::SerializeStrongReferences(
Isolate* isolate = this->isolate();
// No active threads.
CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
- // No active or weak handles.
- CHECK_IMPLIES(!allow_active_isolate_for_testing(),
- isolate->handle_scope_implementer()->blocks()->empty());
SanitizeIsolateScope sanitize_isolate(
isolate, allow_active_isolate_for_testing(), no_gc);
@@ -268,12 +264,12 @@ SerializedHandleChecker::SerializedHandleChecker(Isolate* isolate,
}
bool StartupSerializer::SerializeUsingReadOnlyObjectCache(
- SnapshotByteSink* sink, HeapObject obj) {
+ SnapshotByteSink* sink, Handle<HeapObject> obj) {
return read_only_serializer_->SerializeUsingReadOnlyObjectCache(sink, obj);
}
-void StartupSerializer::SerializeUsingStartupObjectCache(SnapshotByteSink* sink,
- HeapObject obj) {
+void StartupSerializer::SerializeUsingStartupObjectCache(
+ SnapshotByteSink* sink, Handle<HeapObject> obj) {
int cache_index = SerializeInObjectCache(obj);
sink->Put(kStartupObjectCache, "StartupObjectCache");
sink->PutInt(cache_index, "startup_object_cache_index");
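
Note: on simulator builds, external callbacks are invoked through redirect thunks; before snapshotting, the serializer writes the original address back into the js_* slot so the blob never embeds a simulator redirect. A toy illustration (hypothetical names, not V8's object layout):

    struct ToyAccessorInfo {
      void* getter;     // may point at a simulator redirect thunk at runtime
      void* js_getter;  // must hold the real callback address in the snapshot
    };

    void WipeRedirect(ToyAccessorInfo* info) {
      // Mirrors the hunk above: copy the original address over the redirect.
      info->js_getter = info->getter;
    }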
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index d13d5d224e..ba4b44b2ff 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -7,6 +7,7 @@
#include <unordered_set>
+#include "src/handles/global-handles.h"
#include "src/snapshot/roots-serializer.h"
namespace v8 {
@@ -21,6 +22,8 @@ class V8_EXPORT_PRIVATE StartupSerializer : public RootsSerializer {
StartupSerializer(Isolate* isolate, Snapshot::SerializerFlags flags,
ReadOnlySerializer* read_only_serializer);
~StartupSerializer() override;
+ StartupSerializer(const StartupSerializer&) = delete;
+ StartupSerializer& operator=(const StartupSerializer&) = delete;
// Serialize the current state of the heap. The order is:
// 1) Strong roots
@@ -35,25 +38,24 @@ class V8_EXPORT_PRIVATE StartupSerializer : public RootsSerializer {
// ReadOnlyObjectCache bytecode into |sink|. Returns whether this was
// successful.
bool SerializeUsingReadOnlyObjectCache(SnapshotByteSink* sink,
- HeapObject obj);
+ Handle<HeapObject> obj);
// Adds |obj| to the startup object cache if not already present and
// emits a StartupObjectCache bytecode into |sink|.
- void SerializeUsingStartupObjectCache(SnapshotByteSink* sink, HeapObject obj);
+ void SerializeUsingStartupObjectCache(SnapshotByteSink* sink,
+ Handle<HeapObject> obj);
// The per-heap dirty FinalizationRegistry list is weak and not serialized. No
// JSFinalizationRegistries should be used during startup.
void CheckNoDirtyFinalizationRegistries();
private:
- void SerializeObject(HeapObject o) override;
+ void SerializeObjectImpl(Handle<HeapObject> o) override;
void SerializeStringTable(StringTable* string_table);
ReadOnlySerializer* read_only_serializer_;
- std::vector<AccessorInfo> accessor_infos_;
- std::vector<CallHandlerInfo> call_handler_infos_;
-
- DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
+ GlobalHandleVector<AccessorInfo> accessor_infos_;
+ GlobalHandleVector<CallHandlerInfo> call_handler_infos_;
};
class SerializedHandleChecker : public RootVisitor {
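
Note: the header also swaps the DISALLOW_COPY_AND_ASSIGN macro for explicitly deleted copy operations. The same idiom in isolation:

    class NonCopyable {
     public:
      NonCopyable() = default;
      // Deleting the copy operations in the class body replaces the old
      // macro and produces clearer diagnostics at the offending call site.
      NonCopyable(const NonCopyable&) = delete;
      NonCopyable& operator=(const NonCopyable&) = delete;
    };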
diff --git a/deps/v8/src/strings/DIR_METADATA b/deps/v8/src/strings/DIR_METADATA
new file mode 100644
index 0000000000..b183b81885
--- /dev/null
+++ b/deps/v8/src/strings/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Runtime"
+}
\ No newline at end of file
diff --git a/deps/v8/src/strings/OWNERS b/deps/v8/src/strings/OWNERS
index 3c29ae29e2..ac020e24a9 100644
--- a/deps/v8/src/strings/OWNERS
+++ b/deps/v8/src/strings/OWNERS
@@ -2,5 +2,3 @@ bmeurer@chromium.org
jkummerow@chromium.org
leszeks@chromium.org
verwaest@chromium.org
-
-# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/strings/char-predicates-inl.h b/deps/v8/src/strings/char-predicates-inl.h
index 2dc7e50925..4c43172ff4 100644
--- a/deps/v8/src/strings/char-predicates-inl.h
+++ b/deps/v8/src/strings/char-predicates-inl.h
@@ -74,64 +74,102 @@ inline constexpr bool IsRegExpWord(uc32 c) {
}
// Constexpr cache table for character flags.
-enum AsciiCharFlags {
+enum OneByteCharFlags {
kIsIdentifierStart = 1 << 0,
kIsIdentifierPart = 1 << 1,
kIsWhiteSpace = 1 << 2,
- kIsWhiteSpaceOrLineTerminator = 1 << 3
+ kIsWhiteSpaceOrLineTerminator = 1 << 3,
+ kMaybeLineEnd = 1 << 4
};
-constexpr uint8_t BuildAsciiCharFlags(uc32 c) {
- return ((IsAsciiIdentifier(c) || c == '\\')
- ? (kIsIdentifierPart |
- (!IsDecimalDigit(c) ? kIsIdentifierStart : 0))
- : 0) |
- ((c == ' ' || c == '\t' || c == '\v' || c == '\f')
- ? kIsWhiteSpace | kIsWhiteSpaceOrLineTerminator
- : 0) |
- ((c == '\r' || c == '\n') ? kIsWhiteSpaceOrLineTerminator : 0);
-}
-const constexpr uint8_t kAsciiCharFlags[128] = {
-#define BUILD_CHAR_FLAGS(N) BuildAsciiCharFlags(N),
+
+// See http://www.unicode.org/Public/UCD/latest/ucd/DerivedCoreProperties.txt
+// ID_Start. Additionally includes '_' and '$'.
+constexpr bool IsOneByteIDStart(uc32 c) {
+ return c == 0x0024 || (c >= 0x0041 && c <= 0x005A) || c == 0x005F ||
+ (c >= 0x0061 && c <= 0x007A) || c == 0x00AA || c == 0x00B5 ||
+ c == 0x00BA || (c >= 0x00C0 && c <= 0x00D6) ||
+ (c >= 0x00D8 && c <= 0x00F6) || (c >= 0x00F8 && c <= 0x00FF);
+}
+
+// See http://www.unicode.org/Public/UCD/latest/ucd/DerivedCoreProperties.txt
+// ID_Continue. Additionally includes '_' and '$'.
+constexpr bool IsOneByteIDContinue(uc32 c) {
+ return c == 0x0024 || (c >= 0x0030 && c <= 0x0039) || c == 0x005F ||
+ (c >= 0x0041 && c <= 0x005A) || (c >= 0x0061 && c <= 0x007A) ||
+ c == 0x00AA || c == 0x00B5 || c == 0x00B7 || c == 0x00BA ||
+ (c >= 0x00C0 && c <= 0x00D6) || (c >= 0x00D8 && c <= 0x00F6) ||
+ (c >= 0x00F8 && c <= 0x00FF);
+}
+
+constexpr bool IsOneByteWhitespace(uc32 c) {
+ return c == '\t' || c == '\v' || c == '\f' || c == ' ' || c == u'\xa0';
+}
+
+constexpr uint8_t BuildOneByteCharFlags(uc32 c) {
+ uint8_t result = 0;
+ if (IsOneByteIDStart(c) || c == '\\') result |= kIsIdentifierStart;
+ if (IsOneByteIDContinue(c) || c == '\\') result |= kIsIdentifierPart;
+ if (IsOneByteWhitespace(c)) {
+ result |= kIsWhiteSpace | kIsWhiteSpaceOrLineTerminator;
+ }
+ if (c == '\r' || c == '\n') {
+ result |= kIsWhiteSpaceOrLineTerminator | kMaybeLineEnd;
+ }
+ // Add markers to identify 0x2028 and 0x2029.
+ if (c == static_cast<uint8_t>(0x2028) || c == static_cast<uint8_t>(0x2029)) {
+ result |= kMaybeLineEnd;
+ }
+ return result;
+}
+const constexpr uint8_t kOneByteCharFlags[256] = {
+#define BUILD_CHAR_FLAGS(N) BuildOneByteCharFlags(N),
INT_0_TO_127_LIST(BUILD_CHAR_FLAGS)
#undef BUILD_CHAR_FLAGS
+#define BUILD_CHAR_FLAGS(N) BuildOneByteCharFlags(N + 128),
+ INT_0_TO_127_LIST(BUILD_CHAR_FLAGS)
+#undef BUILD_CHAR_FLAGS
};
bool IsIdentifierStart(uc32 c) {
- if (!base::IsInRange(c, 0, 127)) return IsIdentifierStartSlow(c);
+ if (!base::IsInRange(c, 0, 255)) return IsIdentifierStartSlow(c);
DCHECK_EQ(IsIdentifierStartSlow(c),
- static_cast<bool>(kAsciiCharFlags[c] & kIsIdentifierStart));
- return kAsciiCharFlags[c] & kIsIdentifierStart;
+ static_cast<bool>(kOneByteCharFlags[c] & kIsIdentifierStart));
+ return kOneByteCharFlags[c] & kIsIdentifierStart;
}
bool IsIdentifierPart(uc32 c) {
- if (!base::IsInRange(c, 0, 127)) return IsIdentifierPartSlow(c);
+ if (!base::IsInRange(c, 0, 255)) return IsIdentifierPartSlow(c);
DCHECK_EQ(IsIdentifierPartSlow(c),
- static_cast<bool>(kAsciiCharFlags[c] & kIsIdentifierPart));
- return kAsciiCharFlags[c] & kIsIdentifierPart;
+ static_cast<bool>(kOneByteCharFlags[c] & kIsIdentifierPart));
+ return kOneByteCharFlags[c] & kIsIdentifierPart;
}
bool IsWhiteSpace(uc32 c) {
- if (!base::IsInRange(c, 0, 127)) return IsWhiteSpaceSlow(c);
+ if (!base::IsInRange(c, 0, 255)) return IsWhiteSpaceSlow(c);
DCHECK_EQ(IsWhiteSpaceSlow(c),
- static_cast<bool>(kAsciiCharFlags[c] & kIsWhiteSpace));
- return kAsciiCharFlags[c] & kIsWhiteSpace;
+ static_cast<bool>(kOneByteCharFlags[c] & kIsWhiteSpace));
+ return kOneByteCharFlags[c] & kIsWhiteSpace;
}
bool IsWhiteSpaceOrLineTerminator(uc32 c) {
- if (!base::IsInRange(c, 0, 127)) return IsWhiteSpaceOrLineTerminatorSlow(c);
+ if (!base::IsInRange(c, 0, 255)) return IsWhiteSpaceOrLineTerminatorSlow(c);
DCHECK_EQ(
IsWhiteSpaceOrLineTerminatorSlow(c),
- static_cast<bool>(kAsciiCharFlags[c] & kIsWhiteSpaceOrLineTerminator));
- return kAsciiCharFlags[c] & kIsWhiteSpaceOrLineTerminator;
+ static_cast<bool>(kOneByteCharFlags[c] & kIsWhiteSpaceOrLineTerminator));
+ return kOneByteCharFlags[c] & kIsWhiteSpaceOrLineTerminator;
}
bool IsLineTerminatorSequence(uc32 c, uc32 next) {
- if (!unibrow::IsLineTerminator(c)) return false;
- if (c == 0x000d && next == 0x000a) return false; // CR with following LF.
- return true;
+ if (kOneByteCharFlags[static_cast<uint8_t>(c)] & kMaybeLineEnd) {
+ if (c == '\n') return true;
+ if (c == '\r') return next != '\n';
+ return base::IsInRange(static_cast<unsigned int>(c), 0x2028u, 0x2029u);
+ }
+ return false;
}
} // namespace internal
+
} // namespace v8
#endif // V8_STRINGS_CHAR_PREDICATES_INL_H_
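
Note: the change above widens the old 128-entry ASCII cache to all 256 Latin-1 values so the fast path covers one-byte strings. A minimal standalone version of the technique (flags and ranges simplified, not V8's actual predicates):

    #include <array>
    #include <cstdint>
    #include <utility>

    enum CharFlags : uint8_t { kDigit = 1 << 0, kSpace = 1 << 1 };

    constexpr uint8_t BuildFlags(int c) {
      return ((c >= '0' && c <= '9') ? kDigit : 0) |
             ((c == ' ' || c == '\t') ? kSpace : 0);
    }

    template <int... N>
    constexpr std::array<uint8_t, sizeof...(N)> MakeTable(
        std::integer_sequence<int, N...>) {
      return {BuildFlags(N)...};
    }

    // 256 flag bytes computed at compile time; each predicate then reduces
    // to a single table load plus a mask test.
    constexpr auto kFlags = MakeTable(std::make_integer_sequence<int, 256>{});

    inline bool IsDigit(unsigned char c) { return kFlags[c] & kDigit; }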
diff --git a/deps/v8/src/strings/string-stream.cc b/deps/v8/src/strings/string-stream.cc
index 5747f66bba..bcde4d7951 100644
--- a/deps/v8/src/strings/string-stream.cc
+++ b/deps/v8/src/strings/string-stream.cc
@@ -298,7 +298,7 @@ void StringStream::PrintName(Object name) {
void StringStream::PrintUsingMap(JSObject js_object) {
Map map = js_object.map();
- DescriptorArray descs = map.instance_descriptors();
+ DescriptorArray descs = map.instance_descriptors(kRelaxedLoad);
for (InternalIndex i : map.IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
if (details.location() == kField) {
diff --git a/deps/v8/src/strings/unicode-inl.h b/deps/v8/src/strings/unicode-inl.h
index 6f730b26be..0539f76264 100644
--- a/deps/v8/src/strings/unicode-inl.h
+++ b/deps/v8/src/strings/unicode-inl.h
@@ -59,6 +59,25 @@ int Mapping<T, s>::CalculateValue(uchar c, uchar n, uchar* result) {
}
#endif // !V8_INTL_SUPPORT
+bool Utf16::HasUnpairedSurrogate(const uint16_t* code_units, size_t length) {
+ for (size_t i = 0; i < length; ++i) {
+ const int code_unit = code_units[i];
+ if (IsLeadSurrogate(code_unit)) {
+ // The current code unit is a leading surrogate. Check if it is followed
+ // by a trailing surrogate.
+ if (i == length - 1) return true;
+ if (!IsTrailSurrogate(code_units[i + 1])) return true;
+ // Skip the paired trailing surrogate.
+ ++i;
+ } else if (IsTrailSurrogate(code_unit)) {
+ // All paired trailing surrogates are skipped above, so this branch is
+ // only for those that are unpaired.
+ return true;
+ }
+ }
+ return false;
+}
+
// Decodes UTF-8 bytes incrementally, allowing the decoding of bytes as they
// stream in. This **must** be followed by a call to ValueOfIncrementalFinish
// when the stream is complete, to ensure incomplete sequences are handled.
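
Note: example inputs for the new check, using the UTF-16 surrogate ranges (lead surrogates 0xD800-0xDBFF, trail surrogates 0xDC00-0xDFFF):

    const uint16_t paired[] = {0xD83D, 0xDE00};      // U+1F600, well-formed
    const uint16_t lone_lead[] = {0xD83D, 0x0041};   // lead without a trail
    const uint16_t lone_trail[] = {0x0041, 0xDE00};  // trail without a lead
    // Utf16::HasUnpairedSurrogate(paired, 2)     -> false
    // Utf16::HasUnpairedSurrogate(lone_lead, 2)  -> true
    // Utf16::HasUnpairedSurrogate(lone_trail, 2) -> true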
diff --git a/deps/v8/src/strings/unicode.h b/deps/v8/src/strings/unicode.h
index a050a27dc9..616ab1c6a9 100644
--- a/deps/v8/src/strings/unicode.h
+++ b/deps/v8/src/strings/unicode.h
@@ -128,6 +128,8 @@ class Utf16 {
static inline uint16_t TrailSurrogate(uint32_t char_code) {
return 0xdc00 + (char_code & 0x3ff);
}
+ static inline bool HasUnpairedSurrogate(const uint16_t* code_units,
+ size_t length);
};
class Latin1 {
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index e2efc8c9fd..a51535d392 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -47,6 +47,7 @@ namespace torque {
#define AST_TYPE_EXPRESSION_NODE_KIND_LIST(V) \
V(BasicTypeExpression) \
V(FunctionTypeExpression) \
+ V(PrecomputedTypeExpression) \
V(UnionTypeExpression)
#define AST_STATEMENT_NODE_KIND_LIST(V) \
@@ -651,6 +652,17 @@ struct FunctionTypeExpression : TypeExpression {
TypeExpression* return_type;
};
+// A PrecomputedTypeExpression is never created directly by the parser. Later
+// stages can use this to insert AST snippets where the type has already been
+// resolved.
+class Type;
+struct PrecomputedTypeExpression : TypeExpression {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(PrecomputedTypeExpression)
+ PrecomputedTypeExpression(SourcePosition pos, const Type* type)
+ : TypeExpression(kKind, pos), type(type) {}
+ const Type* type;
+};
+
struct UnionTypeExpression : TypeExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(UnionTypeExpression)
UnionTypeExpression(SourcePosition pos, TypeExpression* a, TypeExpression* b)
@@ -843,16 +855,22 @@ struct InstanceTypeConstraints {
struct AbstractTypeDeclaration : TypeDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(AbstractTypeDeclaration)
- AbstractTypeDeclaration(SourcePosition pos, Identifier* name, bool transient,
+ AbstractTypeDeclaration(SourcePosition pos, Identifier* name,
+ AbstractTypeFlags flags,
base::Optional<TypeExpression*> extends,
base::Optional<std::string> generates)
: TypeDeclaration(kKind, pos, name),
- is_constexpr(IsConstexprName(name->value)),
- transient(transient),
+ flags(flags),
extends(extends),
- generates(std::move(generates)) {}
- bool is_constexpr;
- bool transient;
+ generates(std::move(generates)) {
+ CHECK_EQ(IsConstexprName(name->value),
+ !!(flags & AbstractTypeFlag::kConstexpr));
+ }
+
+ bool IsConstexpr() const { return flags & AbstractTypeFlag::kConstexpr; }
+ bool IsTransient() const { return flags & AbstractTypeFlag::kTransient; }
+
+ AbstractTypeFlags flags;
base::Optional<TypeExpression*> extends;
base::Optional<std::string> generates;
};
@@ -1237,6 +1255,58 @@ T* MakeNode(Args... args) {
std::make_unique<T>(CurrentSourcePosition::Get(), std::move(args)...));
}
+inline FieldAccessExpression* MakeFieldAccessExpression(Expression* object,
+ std::string field) {
+ return MakeNode<FieldAccessExpression>(
+ object, MakeNode<Identifier>(std::move(field)));
+}
+
+inline IdentifierExpression* MakeIdentifierExpression(
+ std::vector<std::string> namespace_qualification, std::string name,
+ std::vector<TypeExpression*> args = {}) {
+ return MakeNode<IdentifierExpression>(std::move(namespace_qualification),
+ MakeNode<Identifier>(std::move(name)),
+ std::move(args));
+}
+
+inline IdentifierExpression* MakeIdentifierExpression(std::string name) {
+ return MakeIdentifierExpression({}, std::move(name));
+}
+
+inline CallExpression* MakeCallExpression(
+ IdentifierExpression* callee, std::vector<Expression*> arguments,
+ std::vector<Identifier*> labels = {}) {
+ return MakeNode<CallExpression>(callee, std::move(arguments),
+ std::move(labels));
+}
+
+inline CallExpression* MakeCallExpression(
+ std::string callee, std::vector<Expression*> arguments,
+ std::vector<Identifier*> labels = {}) {
+ return MakeCallExpression(MakeIdentifierExpression(std::move(callee)),
+ std::move(arguments), std::move(labels));
+}
+
+inline VarDeclarationStatement* MakeConstDeclarationStatement(
+ std::string name, Expression* initializer) {
+ return MakeNode<VarDeclarationStatement>(
+ /*const_qualified=*/true, MakeNode<Identifier>(std::move(name)),
+ base::Optional<TypeExpression*>{}, initializer);
+}
+
+inline BasicTypeExpression* MakeBasicTypeExpression(
+ std::vector<std::string> namespace_qualification, std::string name,
+ std::vector<TypeExpression*> generic_arguments = {}) {
+ return MakeNode<BasicTypeExpression>(std::move(namespace_qualification),
+ std::move(name),
+ std::move(generic_arguments));
+}
+
+inline StructExpression* MakeStructExpression(
+ TypeExpression* type, std::vector<NameAndExpression> initializers) {
+ return MakeNode<StructExpression>(type, std::move(initializers));
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
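
Note: a hedged usage sketch of the new builder helpers, composing the AST for something like `const x = Foo(obj.bar);` from a later compiler stage (assumes a CurrentSourcePosition scope is active, as MakeNode requires):

    Expression* field =
        MakeFieldAccessExpression(MakeIdentifierExpression("obj"), "bar");
    Expression* call = MakeCallExpression("Foo", {field});
    Statement* decl = MakeConstDeclarationStatement("x", call);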
diff --git a/deps/v8/src/torque/cc-generator.cc b/deps/v8/src/torque/cc-generator.cc
new file mode 100644
index 0000000000..53170817a1
--- /dev/null
+++ b/deps/v8/src/torque/cc-generator.cc
@@ -0,0 +1,460 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/cc-generator.h"
+
+#include "src/common/globals.h"
+#include "src/torque/global-context.h"
+#include "src/torque/type-oracle.h"
+#include "src/torque/types.h"
+#include "src/torque/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+base::Optional<Stack<std::string>> CCGenerator::EmitGraph(
+ Stack<std::string> parameters) {
+ for (BottomOffset i = {0}; i < parameters.AboveTop(); ++i) {
+ SetDefinitionVariable(DefinitionLocation::Parameter(i.offset),
+ parameters.Peek(i));
+ }
+
+ // C++ doesn't have parameterized labels like CSA, so we must pre-declare all
+ // phi values so they're in scope for both the blocks that define them and the
+ // blocks that read them.
+ for (Block* block : cfg_.blocks()) {
+ if (block->IsDead()) continue;
+
+ DCHECK_EQ(block->InputTypes().Size(), block->InputDefinitions().Size());
+ for (BottomOffset i = {0}; i < block->InputTypes().AboveTop(); ++i) {
+ DefinitionLocation input_def = block->InputDefinitions().Peek(i);
+ if (block->InputDefinitions().Peek(i).IsPhiFromBlock(block)) {
+ out() << " " << block->InputTypes().Peek(i)->GetRuntimeType() << " "
+ << DefinitionToVariable(input_def) << ";\n";
+ }
+ }
+ }
+
+ // Redirect the output of non-declarations into a buffer and only output
+ // declarations right away.
+ std::stringstream out_buffer;
+ std::ostream* old_out = out_;
+ out_ = &out_buffer;
+
+ EmitInstruction(GotoInstruction{cfg_.start()}, &parameters);
+
+ for (Block* block : cfg_.blocks()) {
+ if (cfg_.end() && *cfg_.end() == block) continue;
+ if (block->IsDead()) continue;
+ EmitBlock(block);
+ }
+
+ base::Optional<Stack<std::string>> result;
+ if (cfg_.end()) {
+ result = EmitBlock(*cfg_.end());
+ }
+
+ // All declarations have been printed now, so we can append the buffered
+ // output and redirect back to the original output stream.
+ out_ = old_out;
+ out() << out_buffer.str();
+
+ return result;
+}
+
+Stack<std::string> CCGenerator::EmitBlock(const Block* block) {
+ out() << "\n";
+ out() << " " << BlockName(block) << ":\n";
+
+ Stack<std::string> stack;
+
+ for (BottomOffset i = {0}; i < block->InputTypes().AboveTop(); ++i) {
+ const auto& def = block->InputDefinitions().Peek(i);
+ stack.Push(DefinitionToVariable(def));
+ if (def.IsPhiFromBlock(block)) {
+ decls() << " " << block->InputTypes().Peek(i)->GetRuntimeType() << " "
+ << stack.Top() << "{}; USE(" << stack.Top() << ");\n";
+ }
+ }
+
+ for (const Instruction& instruction : block->instructions()) {
+ TorqueCodeGenerator::EmitInstruction(instruction, &stack);
+ }
+ return stack;
+}
+
+void CCGenerator::EmitSourcePosition(SourcePosition pos, bool always_emit) {
+ const std::string& file = SourceFileMap::AbsolutePath(pos.source);
+ if (always_emit || !previous_position_.CompareStartIgnoreColumn(pos)) {
+ // Lines in Torque SourcePositions are zero-based, while the
+ // CodeStubAssembler and downwind systems are one-based.
+ out() << " // " << file << ":" << (pos.start.line + 1) << "\n";
+ previous_position_ = pos;
+ }
+}
+
+void CCGenerator::EmitInstruction(
+ const PushUninitializedInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: PushUninitialized");
+}
+
+void CCGenerator::EmitInstruction(
+ const PushBuiltinPointerInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: PushBuiltinPointer");
+}
+
+void CCGenerator::EmitInstruction(
+ const NamespaceConstantInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: NamespaceConstantInstruction");
+}
+
+std::vector<std::string> CCGenerator::ProcessArgumentsCommon(
+ const TypeVector& parameter_types,
+ std::vector<std::string> constexpr_arguments, Stack<std::string>* stack) {
+ std::vector<std::string> args;
+ for (auto it = parameter_types.rbegin(); it != parameter_types.rend(); ++it) {
+ const Type* type = *it;
+ VisitResult arg;
+ if (type->IsConstexpr()) {
+ args.push_back(std::move(constexpr_arguments.back()));
+ constexpr_arguments.pop_back();
+ } else {
+ std::stringstream s;
+ size_t slot_count = LoweredSlotCount(type);
+ VisitResult arg = VisitResult(type, stack->TopRange(slot_count));
+ EmitCCValue(arg, *stack, s);
+ args.push_back(s.str());
+ stack->PopMany(slot_count);
+ }
+ }
+ std::reverse(args.begin(), args.end());
+ return args;
+}
+
+void CCGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
+ Stack<std::string>* stack) {
+ TypeVector parameter_types =
+ instruction.intrinsic->signature().parameter_types.types;
+ std::vector<std::string> args = ProcessArgumentsCommon(
+ parameter_types, instruction.constexpr_arguments, stack);
+
+ Stack<std::string> pre_call_stack = *stack;
+ const Type* return_type = instruction.intrinsic->signature().return_type;
+ std::vector<std::string> results;
+
+ const auto lowered = LowerType(return_type);
+ for (std::size_t i = 0; i < lowered.size(); ++i) {
+ results.push_back(DefinitionToVariable(instruction.GetValueDefinition(i)));
+ stack->Push(results.back());
+ decls() << " " << lowered[i]->GetRuntimeType() << " " << stack->Top()
+ << "{}; USE(" << stack->Top() << ");\n";
+ }
+
+ out() << " ";
+ if (return_type->StructSupertype()) {
+ out() << "std::tie(";
+ PrintCommaSeparatedList(out(), results);
+ out() << ") = ";
+ } else {
+ if (results.size() == 1) {
+ out() << results[0] << " = ";
+ }
+ }
+
+ if (instruction.intrinsic->ExternalName() == "%RawDownCast") {
+ if (parameter_types.size() != 1) {
+ ReportError("%RawDownCast must take a single parameter");
+ }
+ const Type* original_type = parameter_types[0];
+ bool is_subtype =
+ return_type->IsSubtypeOf(original_type) ||
+ (original_type == TypeOracle::GetUninitializedHeapObjectType() &&
+ return_type->IsSubtypeOf(TypeOracle::GetHeapObjectType()));
+ if (!is_subtype) {
+ ReportError("%RawDownCast error: ", *return_type, " is not a subtype of ",
+ *original_type);
+ }
+ if (!original_type->StructSupertype() &&
+ return_type->GetRuntimeType() != original_type->GetRuntimeType()) {
+ out() << "static_cast<" << return_type->GetRuntimeType() << ">";
+ }
+ } else if (instruction.intrinsic->ExternalName() == "%GetClassMapConstant") {
+ ReportError("C++ generator doesn't yet support %GetClassMapConstant");
+ } else if (instruction.intrinsic->ExternalName() == "%FromConstexpr") {
+ if (parameter_types.size() != 1 || !parameter_types[0]->IsConstexpr()) {
+ ReportError(
+ "%FromConstexpr must take a single parameter with constexpr "
+ "type");
+ }
+ if (return_type->IsConstexpr()) {
+ ReportError("%FromConstexpr must return a non-constexpr type");
+ }
+ // Nothing to do here; constexpr expressions are already valid C++.
+ } else {
+ ReportError("no built in intrinsic with name " +
+ instruction.intrinsic->ExternalName());
+ }
+
+ out() << "(";
+ PrintCommaSeparatedList(out(), args);
+ out() << ");\n";
+}
+
+void CCGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
+ Stack<std::string>* stack) {
+ TypeVector parameter_types =
+ instruction.macro->signature().parameter_types.types;
+ std::vector<std::string> args = ProcessArgumentsCommon(
+ parameter_types, instruction.constexpr_arguments, stack);
+
+ Stack<std::string> pre_call_stack = *stack;
+ const Type* return_type = instruction.macro->signature().return_type;
+ std::vector<std::string> results;
+
+ const auto lowered = LowerType(return_type);
+ for (std::size_t i = 0; i < lowered.size(); ++i) {
+ results.push_back(DefinitionToVariable(instruction.GetValueDefinition(i)));
+ stack->Push(results.back());
+ decls() << " " << lowered[i]->GetRuntimeType() << " " << stack->Top()
+ << "{}; USE(" << stack->Top() << ");\n";
+ }
+
+ // We should have inlined any calls requiring complex control flow.
+ CHECK(!instruction.catch_block);
+ out() << " ";
+ if (return_type->StructSupertype().has_value()) {
+ out() << "std::tie(";
+ PrintCommaSeparatedList(out(), results);
+ out() << ") = ";
+ } else {
+ if (results.size() == 1) {
+ out() << results[0] << " = ";
+ } else {
+ DCHECK_EQ(0, results.size());
+ }
+ }
+
+ out() << instruction.macro->CCName() << "(isolate";
+ if (!args.empty()) out() << ", ";
+ PrintCommaSeparatedList(out(), args);
+ out() << ");\n";
+}
+
+void CCGenerator::EmitInstruction(
+ const CallCsaMacroAndBranchInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: CallCsaMacroAndBranch");
+}
+
+void CCGenerator::EmitInstruction(const CallBuiltinInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: CallBuiltin");
+}
+
+void CCGenerator::EmitInstruction(
+ const CallBuiltinPointerInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: CallBuiltinPointer");
+}
+
+void CCGenerator::EmitInstruction(const CallRuntimeInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: CallRuntime");
+}
+
+void CCGenerator::EmitInstruction(const BranchInstruction& instruction,
+ Stack<std::string>* stack) {
+ out() << " if (" << stack->Pop() << ") {\n";
+ EmitGoto(instruction.if_true, stack, " ");
+ out() << " } else {\n";
+ EmitGoto(instruction.if_false, stack, " ");
+ out() << " }\n";
+}
+
+void CCGenerator::EmitInstruction(const ConstexprBranchInstruction& instruction,
+ Stack<std::string>* stack) {
+ out() << " if ((" << instruction.condition << ")) {\n";
+ EmitGoto(instruction.if_true, stack, " ");
+ out() << " } else {\n";
+ EmitGoto(instruction.if_false, stack, " ");
+ out() << " }\n";
+}
+
+void CCGenerator::EmitGoto(const Block* destination, Stack<std::string>* stack,
+ std::string indentation) {
+ const auto& destination_definitions = destination->InputDefinitions();
+ DCHECK_EQ(stack->Size(), destination_definitions.Size());
+ for (BottomOffset i = {0}; i < stack->AboveTop(); ++i) {
+ DefinitionLocation def = destination_definitions.Peek(i);
+ if (def.IsPhiFromBlock(destination)) {
+ out() << indentation << DefinitionToVariable(def) << " = "
+ << stack->Peek(i) << ";\n";
+ }
+ }
+ out() << indentation << "goto " << BlockName(destination) << ";\n";
+}
+
+void CCGenerator::EmitInstruction(const GotoInstruction& instruction,
+ Stack<std::string>* stack) {
+ EmitGoto(instruction.destination, stack, " ");
+}
+
+void CCGenerator::EmitInstruction(const GotoExternalInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: GotoExternal");
+}
+
+void CCGenerator::EmitInstruction(const ReturnInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: Return");
+}
+
+void CCGenerator::EmitInstruction(
+ const PrintConstantStringInstruction& instruction,
+ Stack<std::string>* stack) {
+ out() << " std::cout << " << StringLiteralQuote(instruction.message)
+ << ";\n";
+}
+
+void CCGenerator::EmitInstruction(const AbortInstruction& instruction,
+ Stack<std::string>* stack) {
+ switch (instruction.kind) {
+ case AbortInstruction::Kind::kUnreachable:
+ DCHECK(instruction.message.empty());
+ out() << " UNREACHABLE();\n";
+ break;
+ case AbortInstruction::Kind::kDebugBreak:
+ DCHECK(instruction.message.empty());
+ out() << " base::OS::DebugBreak();\n";
+ break;
+ case AbortInstruction::Kind::kAssertionFailure: {
+ std::string file = StringLiteralQuote(
+ SourceFileMap::PathFromV8Root(instruction.pos.source));
+ out() << " CHECK(false, \"Failed Torque assertion: '\""
+ << StringLiteralQuote(instruction.message) << "\"' at \"" << file
+ << "\":\""
+ << StringLiteralQuote(
+ std::to_string(instruction.pos.start.line + 1))
+ << ");\n";
+ break;
+ }
+ }
+}
+
+void CCGenerator::EmitInstruction(const UnsafeCastInstruction& instruction,
+ Stack<std::string>* stack) {
+ const std::string str = "static_cast<" +
+ instruction.destination_type->GetRuntimeType() +
+ ">(" + stack->Top() + ")";
+ stack->Poke(stack->AboveTop() - 1, str);
+ SetDefinitionVariable(instruction.GetValueDefinition(), str);
+}
+
+void CCGenerator::EmitInstruction(const LoadReferenceInstruction& instruction,
+ Stack<std::string>* stack) {
+ std::string result_name =
+ DefinitionToVariable(instruction.GetValueDefinition());
+
+ std::string offset = stack->Pop();
+ std::string object = stack->Pop();
+ stack->Push(result_name);
+
+ std::string result_type = instruction.type->GetRuntimeType();
+ decls() << " " << result_type << " " << result_name << "{}; USE("
+ << result_name << ");\n";
+ out() << " " << result_name << " = ";
+ if (instruction.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ out() << "TaggedField<" << result_type << ">::load(isolate, " << object
+ << ", static_cast<int>(" << offset << "));\n";
+ } else {
+ out() << "(" << object << ").ReadField<" << result_type << ">(" << offset
+ << ");\n";
+ }
+}
+
+void CCGenerator::EmitInstruction(const StoreReferenceInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: StoreReference");
+}
+
+namespace {
+std::string GetBitFieldSpecialization(const Type* container,
+ const BitField& field) {
+ std::stringstream stream;
+ stream << "base::BitField<"
+ << field.name_and_type.type->GetConstexprGeneratedTypeName() << ", "
+ << field.offset << ", " << field.num_bits << ", "
+ << container->GetConstexprGeneratedTypeName() << ">";
+ return stream.str();
+}
+} // namespace
+
+void CCGenerator::EmitInstruction(const LoadBitFieldInstruction& instruction,
+ Stack<std::string>* stack) {
+ std::string result_name =
+ DefinitionToVariable(instruction.GetValueDefinition());
+
+ std::string bit_field_struct = stack->Pop();
+ stack->Push(result_name);
+
+ const Type* struct_type = instruction.bit_field_struct_type;
+
+ decls() << " " << instruction.bit_field.name_and_type.type->GetRuntimeType()
+ << " " << result_name << "{}; USE(" << result_name << ");\n";
+
+ base::Optional<const Type*> smi_tagged_type =
+ Type::MatchUnaryGeneric(struct_type, TypeOracle::GetSmiTaggedGeneric());
+ if (smi_tagged_type) {
+ // Get the untagged value and its type.
+ bit_field_struct = bit_field_struct + ".value()";
+ struct_type = *smi_tagged_type;
+ }
+
+ out() << " " << result_name << " = "
+ << GetBitFieldSpecialization(struct_type, instruction.bit_field)
+ << "::decode(" << bit_field_struct << ");\n";
+}
+
+void CCGenerator::EmitInstruction(const StoreBitFieldInstruction& instruction,
+ Stack<std::string>* stack) {
+ ReportError("Not supported in C++ output: StoreBitField");
+}
+
+// static
+void CCGenerator::EmitCCValue(VisitResult result,
+ const Stack<std::string>& values,
+ std::ostream& out) {
+ if (!result.IsOnStack()) {
+ out << result.constexpr_value();
+ } else if (auto struct_type = result.type()->StructSupertype()) {
+ out << "std::tuple_cat(";
+ bool first = true;
+ for (auto& field : (*struct_type)->fields()) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
+ if (!field.name_and_type.type->IsStructType()) {
+ out << "std::make_tuple(";
+ }
+ EmitCCValue(ProjectStructField(result, field.name_and_type.name), values,
+ out);
+ if (!field.name_and_type.type->IsStructType()) {
+ out << ")";
+ }
+ }
+ out << ")";
+ } else {
+ DCHECK_EQ(1, result.stack_range().Size());
+ out << values.Peek(result.stack_range().begin());
+ }
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
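
Note: a hand-written illustration (not actual generator output) of the shape EmitGraph produces. Phi variables are pre-declared up front, each block becomes a label, and EmitGoto assigns the phis just before each jump, which is why C++'s lack of parameterized labels is not a problem:

    int ToyGeneratedMacro(bool flag, int a, int b) {
      int phi_bb3_0{};  // pre-declared phi, assigned on both incoming edges

      if (flag) {
        phi_bb3_0 = a;  // EmitGoto materializes the phi, then jumps
        goto block3;
      } else {
        phi_bb3_0 = b;
        goto block3;
      }

    block3:
      return phi_bb3_0;
    }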
diff --git a/deps/v8/src/torque/cc-generator.h b/deps/v8/src/torque/cc-generator.h
new file mode 100644
index 0000000000..5626f3f7fa
--- /dev/null
+++ b/deps/v8/src/torque/cc-generator.h
@@ -0,0 +1,46 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_CC_GENERATOR_H_
+#define V8_TORQUE_CC_GENERATOR_H_
+
+#include "src/torque/torque-code-generator.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+class CCGenerator : public TorqueCodeGenerator {
+ public:
+ CCGenerator(const ControlFlowGraph& cfg, std::ostream& out)
+ : TorqueCodeGenerator(cfg, out) {}
+ base::Optional<Stack<std::string>> EmitGraph(Stack<std::string> parameters);
+
+ static void EmitCCValue(VisitResult result, const Stack<std::string>& values,
+ std::ostream& out);
+
+ private:
+ void EmitSourcePosition(SourcePosition pos,
+ bool always_emit = false) override;
+
+ void EmitGoto(const Block* destination, Stack<std::string>* stack,
+ std::string indentation);
+
+ std::vector<std::string> ProcessArgumentsCommon(
+ const TypeVector& parameter_types,
+ std::vector<std::string> constexpr_arguments, Stack<std::string>* stack);
+
+ Stack<std::string> EmitBlock(const Block* block);
+#define EMIT_INSTRUCTION_DECLARATION(T) \
+ void EmitInstruction(const T& instruction, Stack<std::string>* stack) \
+ override;
+ TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST(EMIT_INSTRUCTION_DECLARATION)
+#undef EMIT_INSTRUCTION_DECLARATION
+};
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_CC_GENERATOR_H_
diff --git a/deps/v8/src/torque/constants.h b/deps/v8/src/torque/constants.h
index 8cb3a64c30..54c54c5819 100644
--- a/deps/v8/src/torque/constants.h
+++ b/deps/v8/src/torque/constants.h
@@ -76,6 +76,7 @@ static const char* const UNINITIALIZED_ITERATOR_TYPE_STRING =
static const char* const GENERIC_TYPE_INSTANTIATION_NAMESPACE_STRING =
"_generic_type_instantiation_namespace";
static const char* const FIXED_ARRAY_BASE_TYPE_STRING = "FixedArrayBase";
+static const char* const WEAK_HEAP_OBJECT = "WeakHeapObject";
static const char* const STATIC_ASSERT_MACRO_STRING = "StaticAssert";
static const char* const ANNOTATION_GENERATE_PRINT = "@generatePrint";
@@ -84,6 +85,8 @@ static const char* const ANNOTATION_ABSTRACT = "@abstract";
static const char* const ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT =
"@hasSameInstanceTypeAsParent";
static const char* const ANNOTATION_GENERATE_CPP_CLASS = "@generateCppClass";
+static const char* const ANNOTATION_CUSTOM_MAP = "@customMap";
+static const char* const ANNOTATION_CUSTOM_CPP_CLASS = "@customCppClass";
static const char* const ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT =
"@highestInstanceTypeWithinParentClassRange";
static const char* const ANNOTATION_LOWEST_INSTANCE_TYPE_WITHIN_PARENT =
@@ -96,8 +99,10 @@ static const char* const ANNOTATION_IF = "@if";
static const char* const ANNOTATION_IFNOT = "@ifnot";
static const char* const ANNOTATION_GENERATE_BODY_DESCRIPTOR =
"@generateBodyDescriptor";
-static const char* const ANNOTATION_EXPORT_CPP_CLASS = "@export";
+static const char* const ANNOTATION_EXPORT = "@export";
static const char* const ANNOTATION_DO_NOT_GENERATE_CAST = "@doNotGenerateCast";
+static const char* const ANNOTATION_USE_PARENT_TYPE_CHECKER =
+ "@useParentTypeChecker";
inline bool IsConstexprName(const std::string& name) {
return name.substr(0, std::strlen(CONSTEXPR_TYPE_PREFIX)) ==
@@ -117,7 +122,8 @@ inline std::string GetConstexprName(const std::string& name) {
enum class AbstractTypeFlag {
kNone = 0,
kTransient = 1 << 0,
- kConstexpr = 1 << 1
+ kConstexpr = 1 << 1,
+ kUseParentTypeChecker = 1 << 2,
};
using AbstractTypeFlags = base::Flags<AbstractTypeFlag>;
@@ -131,12 +137,14 @@ enum class ClassFlag {
kIsShape = 1 << 5,
kHasSameInstanceTypeAsParent = 1 << 6,
kGenerateCppClassDefinitions = 1 << 7,
+ kCustomCppClass = 1 << 8,
kHighestInstanceTypeWithinParent = 1 << 9,
kLowestInstanceTypeWithinParent = 1 << 10,
kUndefinedLayout = 1 << 11,
kGenerateBodyDescriptor = 1 << 12,
kExport = 1 << 13,
- kDoNotGenerateCast = 1 << 14
+ kDoNotGenerateCast = 1 << 14,
+ kCustomMap = 1 << 15,
};
using ClassFlags = base::Flags<ClassFlag>;
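
Note: both enums are consumed through base::Flags, which supplies type-safe bitwise operators. A generic sketch of the underlying pattern (hand-rolled here rather than V8's template), using the flag values from the hunk above:

    enum class AbstractTypeFlag : unsigned {
      kNone = 0,
      kTransient = 1 << 0,
      kConstexpr = 1 << 1,
      kUseParentTypeChecker = 1 << 2,
    };

    constexpr unsigned operator|(AbstractTypeFlag a, AbstractTypeFlag b) {
      return static_cast<unsigned>(a) | static_cast<unsigned>(b);
    }

    constexpr bool HasFlag(unsigned flags, AbstractTypeFlag f) {
      return (flags & static_cast<unsigned>(f)) != 0;
    }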
diff --git a/deps/v8/src/torque/csa-generator.cc b/deps/v8/src/torque/csa-generator.cc
index da16a1b3b4..93e8d47df4 100644
--- a/deps/v8/src/torque/csa-generator.cc
+++ b/deps/v8/src/torque/csa-generator.cc
@@ -83,7 +83,7 @@ Stack<std::string> CSAGenerator::EmitBlock(const Block* block) {
out() << " ca_.Bind(&" << BlockName(block) << phi_names.str() << ");\n";
for (const Instruction& instruction : block->instructions()) {
- EmitInstruction(instruction, &stack);
+ TorqueCodeGenerator::EmitInstruction(instruction, &stack);
}
return stack;
}
@@ -99,53 +99,6 @@ void CSAGenerator::EmitSourcePosition(SourcePosition pos, bool always_emit) {
}
}
-bool CSAGenerator::IsEmptyInstruction(const Instruction& instruction) {
- switch (instruction.kind()) {
- case InstructionKind::kPeekInstruction:
- case InstructionKind::kPokeInstruction:
- case InstructionKind::kDeleteRangeInstruction:
- case InstructionKind::kPushUninitializedInstruction:
- case InstructionKind::kPushBuiltinPointerInstruction:
- case InstructionKind::kUnsafeCastInstruction:
- return true;
- default:
- return false;
- }
-}
-
-void CSAGenerator::EmitInstruction(const Instruction& instruction,
- Stack<std::string>* stack) {
-#ifdef DEBUG
- if (!IsEmptyInstruction(instruction)) {
- EmitSourcePosition(instruction->pos);
- }
-#endif
-
- switch (instruction.kind()) {
-#define ENUM_ITEM(T) \
- case InstructionKind::k##T: \
- return EmitInstruction(instruction.Cast<T>(), stack);
- TORQUE_INSTRUCTION_LIST(ENUM_ITEM)
-#undef ENUM_ITEM
- }
-}
-
-void CSAGenerator::EmitInstruction(const PeekInstruction& instruction,
- Stack<std::string>* stack) {
- stack->Push(stack->Peek(instruction.slot));
-}
-
-void CSAGenerator::EmitInstruction(const PokeInstruction& instruction,
- Stack<std::string>* stack) {
- stack->Poke(instruction.slot, stack->Top());
- stack->Pop();
-}
-
-void CSAGenerator::EmitInstruction(const DeleteRangeInstruction& instruction,
- Stack<std::string>* stack) {
- stack->DeleteRange(instruction.range);
-}
-
void CSAGenerator::EmitInstruction(
const PushUninitializedInstruction& instruction,
Stack<std::string>* stack) {
@@ -198,35 +151,35 @@ void CSAGenerator::EmitInstruction(
}
}
-void CSAGenerator::ProcessArgumentsCommon(
- const TypeVector& parameter_types, std::vector<std::string>* args,
- std::vector<std::string>* constexpr_arguments, Stack<std::string>* stack) {
+std::vector<std::string> CSAGenerator::ProcessArgumentsCommon(
+ const TypeVector& parameter_types,
+ std::vector<std::string> constexpr_arguments, Stack<std::string>* stack) {
+ std::vector<std::string> args;
for (auto it = parameter_types.rbegin(); it != parameter_types.rend(); ++it) {
const Type* type = *it;
VisitResult arg;
if (type->IsConstexpr()) {
- args->push_back(std::move(constexpr_arguments->back()));
- constexpr_arguments->pop_back();
+ args.push_back(std::move(constexpr_arguments.back()));
+ constexpr_arguments.pop_back();
} else {
std::stringstream s;
size_t slot_count = LoweredSlotCount(type);
VisitResult arg = VisitResult(type, stack->TopRange(slot_count));
EmitCSAValue(arg, *stack, s);
- args->push_back(s.str());
+ args.push_back(s.str());
stack->PopMany(slot_count);
}
}
- std::reverse(args->begin(), args->end());
+ std::reverse(args.begin(), args.end());
+ return args;
}
void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
Stack<std::string>* stack) {
- std::vector<std::string> constexpr_arguments =
- instruction.constexpr_arguments;
- std::vector<std::string> args;
TypeVector parameter_types =
instruction.intrinsic->signature().parameter_types.types;
- ProcessArgumentsCommon(parameter_types, &args, &constexpr_arguments, stack);
+ std::vector<std::string> args = ProcessArgumentsCommon(
+ parameter_types, instruction.constexpr_arguments, stack);
Stack<std::string> pre_call_stack = *stack;
const Type* return_type = instruction.intrinsic->signature().return_type;
@@ -355,12 +308,10 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
Stack<std::string>* stack) {
- std::vector<std::string> constexpr_arguments =
- instruction.constexpr_arguments;
- std::vector<std::string> args;
TypeVector parameter_types =
instruction.macro->signature().parameter_types.types;
- ProcessArgumentsCommon(parameter_types, &args, &constexpr_arguments, stack);
+ std::vector<std::string> args = ProcessArgumentsCommon(
+ parameter_types, instruction.constexpr_arguments, stack);
Stack<std::string> pre_call_stack = *stack;
const Type* return_type = instruction.macro->signature().return_type;
@@ -409,12 +360,10 @@ void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
void CSAGenerator::EmitInstruction(
const CallCsaMacroAndBranchInstruction& instruction,
Stack<std::string>* stack) {
- std::vector<std::string> constexpr_arguments =
- instruction.constexpr_arguments;
- std::vector<std::string> args;
TypeVector parameter_types =
instruction.macro->signature().parameter_types.types;
- ProcessArgumentsCommon(parameter_types, &args, &constexpr_arguments, stack);
+ std::vector<std::string> args = ProcessArgumentsCommon(
+ parameter_types, instruction.constexpr_arguments, stack);
Stack<std::string> pre_call_stack = *stack;
std::vector<std::string> results;
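
Note: the refactoring pattern above in isolation: the two out-parameters become a returned vector, and the constexpr arguments are taken by value so the callee can consume them with std::move instead of mutating the caller's copy. A minimal sketch:

    #include <algorithm>
    #include <string>
    #include <utility>
    #include <vector>

    std::vector<std::string> ProcessArgs(
        std::vector<std::string> constexpr_args) {
      std::vector<std::string> args;
      // Consume from the back, as the generators do, then restore order.
      while (!constexpr_args.empty()) {
        args.push_back(std::move(constexpr_args.back()));
        constexpr_args.pop_back();
      }
      std::reverse(args.begin(), args.end());
      return args;
    }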
diff --git a/deps/v8/src/torque/csa-generator.h b/deps/v8/src/torque/csa-generator.h
index 83c4ec410a..c2400609d4 100644
--- a/deps/v8/src/torque/csa-generator.h
+++ b/deps/v8/src/torque/csa-generator.h
@@ -5,24 +5,17 @@
#ifndef V8_TORQUE_CSA_GENERATOR_H_
#define V8_TORQUE_CSA_GENERATOR_H_
-#include <iostream>
-
-#include "src/torque/cfg.h"
-#include "src/torque/declarable.h"
+#include "src/torque/torque-code-generator.h"
namespace v8 {
namespace internal {
namespace torque {
-class CSAGenerator {
+class CSAGenerator : public TorqueCodeGenerator {
public:
CSAGenerator(const ControlFlowGraph& cfg, std::ostream& out,
base::Optional<Builtin::Kind> linkage = base::nullopt)
- : cfg_(cfg),
- out_(&out),
- out_decls_(&out),
- linkage_(linkage),
- previous_position_(SourcePosition::Invalid()) {}
+ : TorqueCodeGenerator(cfg, out), linkage_(linkage) {}
base::Optional<Stack<std::string>> EmitGraph(Stack<std::string> parameters);
static constexpr const char* ARGUMENTS_VARIABLE_STRING = "arguments";
@@ -31,46 +24,10 @@ class CSAGenerator {
std::ostream& out);
private:
- const ControlFlowGraph& cfg_;
- std::ostream* out_;
- std::ostream* out_decls_;
- size_t fresh_id_ = 0;
base::Optional<Builtin::Kind> linkage_;
- SourcePosition previous_position_;
- std::map<DefinitionLocation, std::string> location_map_;
-
- std::string DefinitionToVariable(const DefinitionLocation& location) {
- if (location.IsPhi()) {
- std::stringstream stream;
- stream << "phi_bb" << location.GetPhiBlock()->id() << "_"
- << location.GetPhiIndex();
- return stream.str();
- } else if (location.IsParameter()) {
- auto it = location_map_.find(location);
- DCHECK_NE(it, location_map_.end());
- return it->second;
- } else {
- DCHECK(location.IsInstruction());
- auto it = location_map_.find(location);
- if (it == location_map_.end()) {
- it = location_map_.insert(std::make_pair(location, FreshNodeName()))
- .first;
- }
- return it->second;
- }
- }
-
- void SetDefinitionVariable(const DefinitionLocation& definition,
- const std::string& str) {
- DCHECK_EQ(location_map_.find(definition), location_map_.end());
- location_map_.insert(std::make_pair(definition, str));
- }
- std::ostream& out() { return *out_; }
- std::ostream& decls() { return *out_decls_; }
-
- bool IsEmptyInstruction(const Instruction& instruction);
- void EmitSourcePosition(SourcePosition pos, bool always_emit = false);
+ void EmitSourcePosition(SourcePosition pos,
+ bool always_emit = false) override;
std::string PreCallableExceptionPreparation(
base::Optional<Block*> catch_block);
@@ -79,24 +36,15 @@ class CSAGenerator {
base::Optional<Block*> catch_block, Stack<std::string>* stack,
const base::Optional<DefinitionLocation>& exception_object_definition);
- std::string FreshNodeName() { return "tmp" + std::to_string(fresh_id_++); }
- std::string FreshCatchName() { return "catch" + std::to_string(fresh_id_++); }
- std::string FreshLabelName() { return "label" + std::to_string(fresh_id_++); }
- std::string BlockName(const Block* block) {
- return "block" + std::to_string(block->id());
- }
-
- void ProcessArgumentsCommon(const TypeVector& parameter_types,
- std::vector<std::string>* args,
- std::vector<std::string>* constexpr_arguments,
- Stack<std::string>* stack);
+ std::vector<std::string> ProcessArgumentsCommon(
+ const TypeVector& parameter_types,
+ std::vector<std::string> constexpr_arguments, Stack<std::string>* stack);
Stack<std::string> EmitBlock(const Block* block);
- void EmitInstruction(const Instruction& instruction,
- Stack<std::string>* stack);
-#define EMIT_INSTRUCTION_DECLARATION(T) \
- void EmitInstruction(const T& instruction, Stack<std::string>* stack);
- TORQUE_INSTRUCTION_LIST(EMIT_INSTRUCTION_DECLARATION)
+#define EMIT_INSTRUCTION_DECLARATION(T) \
+ void EmitInstruction(const T& instruction, Stack<std::string>* stack) \
+ override;
+ TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST(EMIT_INSTRUCTION_DECLARATION)
#undef EMIT_INSTRUCTION_DECLARATION
};
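The header now keeps only the CSA-specific pieces; everything the new C++ backend also needs (output streams, fresh-name counters, the DefinitionLocation-to-variable map, and the per-instruction EmitInstruction overloads) moves into the TorqueCodeGenerator base named in the new include. That base is defined in src/torque/torque-code-generator.h, which is not part of this excerpt, so the sketch below is an assumed shape reconstructed purely from the members deleted above:

    // Assumed shape of the extracted base class (names taken from the members
    // removed from CSAGenerator; the authoritative definition lives in
    // src/torque/torque-code-generator.h).
    class TorqueCodeGenerator {
     public:
      TorqueCodeGenerator(const ControlFlowGraph& cfg, std::ostream& out)
          : cfg_(cfg),
            out_(&out),
            out_decls_(&out),
            previous_position_(SourcePosition::Invalid()) {}

     protected:
      const ControlFlowGraph& cfg_;
      std::ostream* out_;
      std::ostream* out_decls_;
      size_t fresh_id_ = 0;
      SourcePosition previous_position_;
      std::map<DefinitionLocation, std::string> location_map_;

      std::ostream& out() { return *out_; }
      std::ostream& decls() { return *out_decls_; }
      std::string FreshNodeName() { return "tmp" + std::to_string(fresh_id_++); }

      virtual void EmitSourcePosition(SourcePosition pos,
                                      bool always_emit = false) = 0;
      // Backend-agnostic instructions are presumably handled once here; only
      // the TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST overloads stay virtual,
      // matching the override declarations in CSAGenerator above.
    };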
diff --git a/deps/v8/src/torque/declarable.h b/deps/v8/src/torque/declarable.h
index 3580d9b6dd..27edf79636 100644
--- a/deps/v8/src/torque/declarable.h
+++ b/deps/v8/src/torque/declarable.h
@@ -291,6 +291,11 @@ class ExternConstant : public Value {
}
};
+enum class OutputType {
+ kCSA,
+ kCC,
+};
+
class Callable : public Scope {
public:
DECLARE_DECLARABLE_BOILERPLATE(Callable, callable)
@@ -308,8 +313,26 @@ class Callable : public Scope {
bool HasReturns() const { return returns_; }
base::Optional<Statement*> body() const { return body_; }
bool IsExternal() const { return !body_.has_value(); }
- virtual bool ShouldBeInlined() const { return false; }
- virtual bool ShouldGenerateExternalCode() const { return !ShouldBeInlined(); }
+ virtual bool ShouldBeInlined(OutputType output_type) const {
+ // C++ output doesn't support exiting to labels, so functions with labels in
+ // the signature must be inlined.
+ return output_type == OutputType::kCC && !signature().labels.empty();
+ }
+ bool ShouldGenerateExternalCode(OutputType output_type) const {
+ return !ShouldBeInlined(output_type);
+ }
+
+ static std::string PrefixNameForCCOutput(const std::string& name) {
+ // If a Torque macro requires a C++ runtime function to be generated, then
+ // the generated function begins with this prefix to avoid any naming
+ // collisions with the generated CSA function for the same macro.
+ return "TqRuntime" + name;
+ }
+
+ // Name to use in runtime C++ code.
+ virtual std::string CCName() const {
+ return PrefixNameForCCOutput(ExternalName());
+ }
protected:
Callable(Declarable::Kind kind, std::string external_name,
@@ -336,7 +359,7 @@ class Callable : public Scope {
class Macro : public Callable {
public:
DECLARE_DECLARABLE_BOILERPLATE(Macro, macro)
- bool ShouldBeInlined() const override {
+ bool ShouldBeInlined(OutputType output_type) const override {
for (const LabelDeclaration& label : signature().labels) {
for (const Type* type : label.types) {
if (type->StructSupertype()) return true;
@@ -345,7 +368,7 @@ class Macro : public Callable {
// Intrinsics that are used internally in Torque and implemented as torque
// code should be inlined and not generate C++ definitions.
if (ReadableName()[0] == '%') return true;
- return Callable::ShouldBeInlined();
+ return Callable::ShouldBeInlined(output_type);
}
void SetUsed() { used_ = true; }
@@ -375,6 +398,11 @@ class ExternMacro : public Macro {
return external_assembler_name_;
}
+ std::string CCName() const override {
+ return "TorqueRuntimeMacroShims::" + external_assembler_name() +
+ "::" + ExternalName();
+ }
+
private:
friend class Declarations;
ExternMacro(const std::string& name, std::string external_assembler_name,
@@ -390,6 +418,12 @@ class TorqueMacro : public Macro {
public:
DECLARE_DECLARABLE_BOILERPLATE(TorqueMacro, TorqueMacro)
bool IsExportedToCSA() const { return exported_to_csa_; }
+ std::string CCName() const override {
+ // Exported functions must have unique and C++-friendly readable names, so
+ // prefer those wherever possible.
+ return PrefixNameForCCOutput(IsExportedToCSA() ? ReadableName()
+ : ExternalName());
+ }
protected:
TorqueMacro(Declarable::Kind kind, std::string external_name,
@@ -417,8 +451,8 @@ class TorqueMacro : public Macro {
class Method : public TorqueMacro {
public:
DECLARE_DECLARABLE_BOILERPLATE(Method, Method)
- bool ShouldBeInlined() const override {
- return Macro::ShouldBeInlined() ||
+ bool ShouldBeInlined(OutputType output_type) const override {
+ return Macro::ShouldBeInlined(output_type) ||
signature()
.parameter_types.types[signature().implicit_count]
->IsStructType();
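The thread running through declarable.h is the new OutputType parameter: the same Callable can now be asked whether it must be inlined for a given backend, and the C++ backend forces inlining whenever the signature declares labels, because generated C++ has no equivalent of CSA's exit-to-label control flow. A self-contained sketch of just the base Callable rule (the Macro and Method overrides add further conditions, as the hunks above show):

    #include <cassert>
    #include <cstddef>

    enum class OutputType { kCSA, kCC };

    // Stand-in for Callable::ShouldBeInlined: generated C++ cannot exit to
    // labels, so a callable with labels in its signature must be inlined when
    // targeting OutputType::kCC.
    bool ShouldBeInlined(OutputType output_type, std::size_t label_count) {
      return output_type == OutputType::kCC && label_count > 0;
    }

    int main() {
      assert(!ShouldBeInlined(OutputType::kCSA, 1));  // CSA supports label exits.
      assert(ShouldBeInlined(OutputType::kCC, 1));    // C++ does not: inline.
      assert(!ShouldBeInlined(OutputType::kCC, 0));   // No labels: external OK.
    }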
diff --git a/deps/v8/src/torque/declarations.cc b/deps/v8/src/torque/declarations.cc
index 0a1d45a510..1e1c89da86 100644
--- a/deps/v8/src/torque/declarations.cc
+++ b/deps/v8/src/torque/declarations.cc
@@ -214,6 +214,7 @@ Macro* Declarations::DeclareMacro(
macro = CreateTorqueMacro(name, name, accessible_from_csa, signature, body,
is_user_defined);
}
+
Declare(name, macro);
if (op) {
if (TryLookupMacro(*op, signature.GetExplicitTypes())) {
diff --git a/deps/v8/src/torque/global-context.h b/deps/v8/src/torque/global-context.h
index 6182762a6a..7ccbc851c6 100644
--- a/deps/v8/src/torque/global-context.h
+++ b/deps/v8/src/torque/global-context.h
@@ -62,6 +62,9 @@ class GlobalContext : public ContextualClass<GlobalContext> {
struct PerFileStreams {
std::stringstream csa_headerfile;
std::stringstream csa_ccfile;
+ std::stringstream class_definition_headerfile;
+ std::stringstream class_definition_inline_headerfile;
+ std::stringstream class_definition_ccfile;
};
static PerFileStreams& GeneratedPerFile(SourceId file) {
return Get().generated_per_file_[file];
@@ -74,6 +77,15 @@ class GlobalContext : public ContextualClass<GlobalContext> {
static bool IsInstanceTypesInitialized() {
return Get().instance_types_initialized_;
}
+ static void EnsureInCCOutputList(TorqueMacro* macro) {
+ GlobalContext& c = Get();
+ if (c.macros_for_cc_output_set_.insert(macro).second) {
+ c.macros_for_cc_output_.push_back(macro);
+ }
+ }
+ static const std::vector<TorqueMacro*>& AllMacrosForCCOutput() {
+ return Get().macros_for_cc_output_;
+ }
private:
bool collect_language_server_data_;
@@ -84,6 +96,8 @@ class GlobalContext : public ContextualClass<GlobalContext> {
std::set<std::string> cpp_includes_;
std::map<SourceId, PerFileStreams> generated_per_file_;
std::map<std::string, size_t> fresh_ids_;
+ std::vector<TorqueMacro*> macros_for_cc_output_;
+ std::unordered_set<TorqueMacro*> macros_for_cc_output_set_;
bool instance_types_initialized_ = false;
friend class LanguageServerData;
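EnsureInCCOutputList pairs an unordered_set with a vector: the set answers "already queued?" in O(1), while the vector preserves first-insertion order, which matters because VisitAllDeclarables later walks the list by index while new macros may still be appended to it. The same idiom in isolation:

    #include <unordered_set>
    #include <vector>

    // Insertion-order work list: Ensure() is idempotent, and list() stays in
    // the order items were first added, so an index-based loop over it remains
    // valid even if Ensure() is called again mid-iteration.
    template <typename T>
    class OrderedWorkList {
     public:
      void Ensure(T value) {
        if (seen_.insert(value).second) list_.push_back(value);
      }
      const std::vector<T>& list() const { return list_; }

     private:
      std::vector<T> list_;
      std::unordered_set<T> seen_;
    };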
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index 2f2881fd07..00504b5eff 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -10,6 +10,7 @@
#include "src/base/optional.h"
#include "src/common/globals.h"
+#include "src/torque/cc-generator.h"
#include "src/torque/constants.h"
#include "src/torque/csa-generator.h"
#include "src/torque/declaration-visitor.h"
@@ -56,58 +57,136 @@ const Type* ImplementationVisitor::Visit(Statement* stmt) {
return result;
}
-void ImplementationVisitor::BeginCSAFiles() {
+void ImplementationVisitor::BeginGeneratedFiles() {
+ std::set<SourceId> contains_class_definitions;
+ for (const ClassType* type : TypeOracle::GetClasses()) {
+ if (type->GenerateCppClassDefinitions()) {
+ contains_class_definitions.insert(type->AttributedToFile());
+ }
+ }
+
for (SourceId file : SourceFileMap::AllSources()) {
- std::ostream& source = GlobalContext::GeneratedPerFile(file).csa_ccfile;
- std::ostream& header = GlobalContext::GeneratedPerFile(file).csa_headerfile;
+ // Output beginning of CSA .cc file.
+ {
+ std::ostream& out = GlobalContext::GeneratedPerFile(file).csa_ccfile;
- for (const std::string& include_path : GlobalContext::CppIncludes()) {
- source << "#include " << StringLiteralQuote(include_path) << "\n";
- }
+ for (const std::string& include_path : GlobalContext::CppIncludes()) {
+ out << "#include " << StringLiteralQuote(include_path) << "\n";
+ }
- for (SourceId file : SourceFileMap::AllSources()) {
- source << "#include \"torque-generated/" +
- SourceFileMap::PathFromV8RootWithoutExtension(file) +
- "-tq-csa.h\"\n";
- }
- source << "\n";
-
- source << "namespace v8 {\n"
- << "namespace internal {\n"
- << "\n";
-
- std::string headerDefine =
- "V8_GEN_TORQUE_GENERATED_" +
- UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_H_";
- header << "#ifndef " << headerDefine << "\n";
- header << "#define " << headerDefine << "\n\n";
- header << "#include \"src/builtins/torque-csa-header-includes.h\"\n";
- header << "\n";
+ for (SourceId file : SourceFileMap::AllSources()) {
+ out << "#include \"torque-generated/" +
+ SourceFileMap::PathFromV8RootWithoutExtension(file) +
+ "-tq-csa.h\"\n";
+ }
+ out << "\n";
+
+ out << "namespace v8 {\n"
+ << "namespace internal {\n"
+ << "\n";
+ }
+ // Output beginning of CSA .h file.
+ {
+ std::ostream& out = GlobalContext::GeneratedPerFile(file).csa_headerfile;
+ std::string headerDefine =
+ "V8_GEN_TORQUE_GENERATED_" +
+ UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_H_";
+ out << "#ifndef " << headerDefine << "\n";
+ out << "#define " << headerDefine << "\n\n";
+ out << "#include \"src/builtins/torque-csa-header-includes.h\"\n";
+ out << "\n";
+
+ out << "namespace v8 {\n"
+ << "namespace internal {\n"
+ << "\n";
+ }
+ // Output beginning of class definition .cc file.
+ {
+ auto& streams = GlobalContext::GeneratedPerFile(file);
+ std::ostream& out = streams.class_definition_ccfile;
+ if (contains_class_definitions.count(file) != 0) {
+ out << "#include \""
+ << SourceFileMap::PathFromV8RootWithoutExtension(file)
+ << "-inl.h\"\n\n";
+ out << "#include \"torque-generated/class-verifiers.h\"\n";
+ out << "#include \"src/objects/instance-type-inl.h\"\n\n";
+ }
- header << "namespace v8 {\n"
- << "namespace internal {\n"
- << "\n";
+ out << "namespace v8 {\n";
+ out << "namespace internal {\n";
+ }
}
}
-void ImplementationVisitor::EndCSAFiles() {
+void ImplementationVisitor::EndGeneratedFiles() {
for (SourceId file : SourceFileMap::AllSources()) {
- std::ostream& source = GlobalContext::GeneratedPerFile(file).csa_ccfile;
- std::ostream& header = GlobalContext::GeneratedPerFile(file).csa_headerfile;
+ {
+ std::ostream& out = GlobalContext::GeneratedPerFile(file).csa_ccfile;
+
+ out << "} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "\n";
+ }
+ {
+ std::ostream& out = GlobalContext::GeneratedPerFile(file).csa_headerfile;
- std::string headerDefine =
- "V8_GEN_TORQUE_GENERATED_" +
- UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_H_";
+ std::string headerDefine =
+ "V8_GEN_TORQUE_GENERATED_" +
+ UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_H_";
+
+ out << "} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "\n";
+ out << "#endif // " << headerDefine << "\n";
+ }
+ {
+ std::ostream& out =
+ GlobalContext::GeneratedPerFile(file).class_definition_ccfile;
+
+ out << "} // namespace v8\n";
+ out << "} // namespace internal\n";
+ }
+ }
+}
- source << "} // namespace internal\n"
- << "} // namespace v8\n"
- << "\n";
+void ImplementationVisitor::BeginRuntimeMacrosFile() {
+ std::ostream& source = runtime_macros_cc_;
+ std::ostream& header = runtime_macros_h_;
- header << "} // namespace internal\n"
- << "} // namespace v8\n"
- << "\n";
- header << "#endif // " << headerDefine << "\n";
+ source << "#include \"torque-generated/runtime-macros.h\"\n\n";
+ source << "#include \"src/torque/runtime-macro-shims.h\"\n";
+ for (const std::string& include_path : GlobalContext::CppIncludes()) {
+ source << "#include " << StringLiteralQuote(include_path) << "\n";
}
+ source << "\n";
+
+ source << "namespace v8 {\n"
+ << "namespace internal {\n"
+ << "\n";
+
+ const char* kHeaderDefine = "V8_GEN_TORQUE_GENERATED_RUNTIME_MACROS_H_";
+ header << "#ifndef " << kHeaderDefine << "\n";
+ header << "#define " << kHeaderDefine << "\n\n";
+ header << "#include \"src/builtins/torque-csa-header-includes.h\"\n";
+ header << "\n";
+
+ header << "namespace v8 {\n"
+ << "namespace internal {\n"
+ << "\n";
+}
+
+void ImplementationVisitor::EndRuntimeMacrosFile() {
+ std::ostream& source = runtime_macros_cc_;
+ std::ostream& header = runtime_macros_h_;
+
+ source << "} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "\n";
+
+ header << "\n} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "\n";
+ header << "#endif // V8_GEN_TORQUE_GENERATED_RUNTIME_MACROS_H_\n";
}
void ImplementationVisitor::Visit(NamespaceConstant* decl) {
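Unlike the per-source-file CSA streams, the runtime macros still go to one global pair of files (a TODO in implementation-visitor.h further down records the intent to split them later). Assembled from the stream writes above, the generated torque-generated/runtime-macros.h has this skeleton; the declarations in the middle depend on which macros end up queued for C++ output:

    #ifndef V8_GEN_TORQUE_GENERATED_RUNTIME_MACROS_H_
    #define V8_GEN_TORQUE_GENERATED_RUNTIME_MACROS_H_

    #include "src/builtins/torque-csa-header-includes.h"

    namespace v8 {
    namespace internal {

    // TqRuntime... declarations for macros compiled by the C++ backend.

    }  // namespace internal
    }  // namespace v8

    #endif  // V8_GEN_TORQUE_GENERATED_RUNTIME_MACROS_H_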
@@ -116,15 +195,15 @@ void ImplementationVisitor::Visit(NamespaceConstant* decl) {
BindingsManagersScope bindings_managers_scope;
- header_out() << " ";
- GenerateFunctionDeclaration(header_out(), "", decl->external_name(),
+ csa_headerfile() << " ";
+ GenerateFunctionDeclaration(csa_headerfile(), "", decl->external_name(),
signature, {});
- header_out() << ";\n";
+ csa_headerfile() << ";\n";
- GenerateFunctionDeclaration(source_out(), "", decl->external_name(),
+ GenerateFunctionDeclaration(csa_ccfile(), "", decl->external_name(),
signature, {});
- source_out() << " {\n";
- source_out() << " compiler::CodeAssembler ca_(state_);\n";
+ csa_ccfile() << " {\n";
+ csa_ccfile() << " compiler::CodeAssembler ca_(state_);\n";
DCHECK(!signature.return_type->IsVoidOrNever());
@@ -134,15 +213,15 @@ void ImplementationVisitor::Visit(NamespaceConstant* decl) {
VisitResult return_result =
GenerateImplicitConvert(signature.return_type, expression_result);
- CSAGenerator csa_generator{assembler().Result(), source_out()};
+ CSAGenerator csa_generator{assembler().Result(), csa_ccfile()};
Stack<std::string> values = *csa_generator.EmitGraph(Stack<std::string>{});
assembler_ = base::nullopt;
- source_out() << " return ";
- CSAGenerator::EmitCSAValue(return_result, values, source_out());
- source_out() << ";\n";
- source_out() << "}\n\n";
+ csa_ccfile() << " return ";
+ CSAGenerator::EmitCSAValue(return_result, values, csa_ccfile());
+ csa_ccfile() << ";\n";
+ csa_ccfile() << "}\n\n";
}
void ImplementationVisitor::Visit(TypeAlias* alias) {
@@ -274,14 +353,21 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
bool has_return_value =
can_return && return_type != TypeOracle::GetVoidType();
- GenerateMacroFunctionDeclaration(header_out(), "", macro);
- header_out() << ";\n";
+ GenerateMacroFunctionDeclaration(csa_headerfile(), macro);
+ csa_headerfile() << ";\n";
- GenerateMacroFunctionDeclaration(source_out(), "", macro);
- source_out() << " {\n";
- source_out() << " compiler::CodeAssembler ca_(state_);\n";
- source_out()
- << " compiler::CodeAssembler::SourcePositionScope pos_scope(&ca_);\n";
+ GenerateMacroFunctionDeclaration(csa_ccfile(), macro);
+ csa_ccfile() << " {\n";
+
+ if (output_type_ == OutputType::kCC) {
+ // For now, generated C++ is only for field offset computations. If we ever
+ // generate C++ code that can allocate, then it should be handlified.
+ csa_ccfile() << " DisallowHeapAllocation no_gc;\n";
+ } else {
+ csa_ccfile() << " compiler::CodeAssembler ca_(state_);\n";
+ csa_ccfile()
+ << " compiler::CodeAssembler::SourcePositionScope pos_scope(&ca_);\n";
+ }
Stack<std::string> lowered_parameters;
Stack<const Type*> lowered_parameter_types;
@@ -363,18 +449,27 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
assembler().Bind(end);
}
- CSAGenerator csa_generator{assembler().Result(), source_out()};
- base::Optional<Stack<std::string>> values =
- csa_generator.EmitGraph(lowered_parameters);
+ base::Optional<Stack<std::string>> values;
+ if (output_type_ == OutputType::kCC) {
+ CCGenerator cc_generator{assembler().Result(), csa_ccfile()};
+ values = cc_generator.EmitGraph(lowered_parameters);
+ } else {
+ CSAGenerator csa_generator{assembler().Result(), csa_ccfile()};
+ values = csa_generator.EmitGraph(lowered_parameters);
+ }
assembler_ = base::nullopt;
if (has_return_value) {
- source_out() << " return ";
- CSAGenerator::EmitCSAValue(return_value, *values, source_out());
- source_out() << ";\n";
+ csa_ccfile() << " return ";
+ if (output_type_ == OutputType::kCC) {
+ CCGenerator::EmitCCValue(return_value, *values, csa_ccfile());
+ } else {
+ CSAGenerator::EmitCSAValue(return_value, *values, csa_ccfile());
+ }
+ csa_ccfile() << ";\n";
}
- source_out() << "}\n\n";
+ csa_ccfile() << "}\n\n";
}
void ImplementationVisitor::Visit(TorqueMacro* macro) {
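The kCC branch above changes what a generated macro body begins with. Reconstructed from the writes in VisitMacroCommon (this is generated output, not compiler source; the macro name and return types are hypothetical), the two prologues look like:

    // CSA backend (foo-tq-csa.cc): runs inside a CodeStubAssembler.
    TNode<Smi> FooMacro(compiler::CodeAssemblerState* state_ /*, ... */) {
      compiler::CodeAssembler ca_(state_);
      compiler::CodeAssembler::SourcePositionScope pos_scope(&ca_);
      // ... graph construction emitted by CSAGenerator ...
    }

    // C++ backend (runtime-macros.cc): plain runtime code, currently used only
    // for field offset computations, so it must not trigger GC.
    int32_t TqRuntimeFooMacro(Isolate* isolate /*, ... */) {
      DisallowHeapAllocation no_gc;
      // ... straight-line code emitted by CCGenerator ...
    }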
@@ -416,7 +511,7 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
const std::string& name = builtin->ExternalName();
const Signature& signature = builtin->signature();
- source_out() << "TF_BUILTIN(" << name << ", CodeStubAssembler) {\n"
+ csa_ccfile() << "TF_BUILTIN(" << name << ", CodeStubAssembler) {\n"
<< " compiler::CodeAssemblerState* state_ = state();"
<< " compiler::CodeAssembler ca_(state());\n";
@@ -435,17 +530,17 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
.Position(signature.parameter_names[signature.implicit_count]->pos);
}
- source_out()
- << " Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);\n";
- source_out() << " TNode<IntPtrT> "
+ csa_ccfile() << " TNode<Word32T> argc = UncheckedParameter<Word32T>("
+ << "Descriptor::kJSActualArgumentsCount);\n";
+ csa_ccfile() << " TNode<IntPtrT> "
"arguments_length(ChangeInt32ToIntPtr(UncheckedCast<"
"Int32T>(argc)));\n";
- source_out() << " TNode<RawPtrT> arguments_frame = "
+ csa_ccfile() << " TNode<RawPtrT> arguments_frame = "
"UncheckedCast<RawPtrT>(LoadFramePointer());\n";
- source_out() << " TorqueStructArguments "
+ csa_ccfile() << " TorqueStructArguments "
"torque_arguments(GetFrameArguments(arguments_frame, "
"arguments_length));\n";
- source_out()
+ csa_ccfile()
<< " CodeStubArguments arguments(this, torque_arguments);\n";
parameters.Push("torque_arguments.frame");
@@ -468,32 +563,32 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
const Type* actual_type = signature.parameter_types.types[i];
std::vector<const Type*> expected_types;
if (param_name == "context") {
- source_out() << " TNode<NativeContext> " << generated_name
- << " = UncheckedCast<NativeContext>(Parameter("
- << "Descriptor::kContext));\n";
- source_out() << " USE(" << generated_name << ");\n";
+ csa_ccfile() << " TNode<NativeContext> " << generated_name
+ << " = UncheckedParameter<NativeContext>("
+ << "Descriptor::kContext);\n";
+ csa_ccfile() << " USE(" << generated_name << ");\n";
expected_types = {TypeOracle::GetNativeContextType(),
TypeOracle::GetContextType()};
} else if (param_name == "receiver") {
- source_out()
+ csa_ccfile()
<< " TNode<Object> " << generated_name << " = "
<< (builtin->IsVarArgsJavaScript()
? "arguments.GetReceiver()"
- : "UncheckedCast<Object>(Parameter(Descriptor::kReceiver))")
+ : "UncheckedParameter<Object>(Descriptor::kReceiver)")
<< ";\n";
- source_out() << "USE(" << generated_name << ");\n";
+ csa_ccfile() << "USE(" << generated_name << ");\n";
expected_types = {TypeOracle::GetJSAnyType()};
} else if (param_name == "newTarget") {
- source_out() << " TNode<Object> " << generated_name
- << " = UncheckedCast<Object>(Parameter("
- << "Descriptor::kJSNewTarget));\n";
- source_out() << "USE(" << generated_name << ");\n";
+ csa_ccfile() << " TNode<Object> " << generated_name
+ << " = UncheckedParameter<Object>("
+ << "Descriptor::kJSNewTarget);\n";
+ csa_ccfile() << "USE(" << generated_name << ");\n";
expected_types = {TypeOracle::GetJSAnyType()};
} else if (param_name == "target") {
- source_out() << " TNode<JSFunction> " << generated_name
- << " = UncheckedCast<JSFunction>(Parameter("
- << "Descriptor::kJSTarget));\n";
- source_out() << "USE(" << generated_name << ");\n";
+ csa_ccfile() << " TNode<JSFunction> " << generated_name
+ << " = UncheckedParameter<JSFunction>("
+ << "Descriptor::kJSTarget);\n";
+ csa_ccfile() << "USE(" << generated_name << ");\n";
expected_types = {TypeOracle::GetJSFunctionType()};
} else {
Error(
@@ -519,12 +614,12 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
const bool mark_as_used = signature.implicit_count > i;
std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
&parameter_bindings, mark_as_used);
- source_out() << " " << type->GetGeneratedTypeName() << " " << var
+ csa_ccfile() << " " << type->GetGeneratedTypeName() << " " << var
<< " = "
- << "UncheckedCast<" << type->GetGeneratedTNodeTypeName()
- << ">(Parameter(Descriptor::k"
- << CamelifyString(parameter_name) << "));\n";
- source_out() << " USE(" << var << ");\n";
+ << "UncheckedParameter<" << type->GetGeneratedTNodeTypeName()
+ << ">(Descriptor::k" << CamelifyString(parameter_name)
+ << ");\n";
+ csa_ccfile() << " USE(" << var << ");\n";
}
} else {
@@ -536,18 +631,18 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
const bool mark_as_used = signature.implicit_count > i;
std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
&parameter_bindings, mark_as_used);
- source_out() << " " << type->GetGeneratedTypeName() << " " << var
+ csa_ccfile() << " " << type->GetGeneratedTypeName() << " " << var
<< " = "
- << "UncheckedCast<" << type->GetGeneratedTNodeTypeName()
- << ">(Parameter(";
+ << "UncheckedParameter<" << type->GetGeneratedTNodeTypeName()
+ << ">(";
if (i == 0 && has_context_parameter) {
- source_out() << "Descriptor::kContext";
+ csa_ccfile() << "Descriptor::kContext";
} else {
- source_out() << "Descriptor::ParameterIndex<"
+ csa_ccfile() << "Descriptor::ParameterIndex<"
<< (has_context_parameter ? i - 1 : i) << ">()";
}
- source_out() << "));\n";
- source_out() << " USE(" << var << ");\n";
+ csa_ccfile() << ");\n";
+ csa_ccfile() << " USE(" << var << ");\n";
}
}
assembler_ = CfgAssembler(parameter_types);
@@ -555,11 +650,11 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
if (body_result != TypeOracle::GetNeverType()) {
ReportError("control reaches end of builtin, expected return of a value");
}
- CSAGenerator csa_generator{assembler().Result(), source_out(),
+ CSAGenerator csa_generator{assembler().Result(), csa_ccfile(),
builtin->kind()};
csa_generator.EmitGraph(parameters);
assembler_ = base::nullopt;
- source_out() << "}\n\n";
+ csa_ccfile() << "}\n\n";
}
const Type* ImplementationVisitor::Visit(VarDeclarationStatement* stmt) {
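Throughout Visit(Builtin*), the generated parameter reads switch from UncheckedCast<T>(Parameter(...)) to the typed UncheckedParameter<T>(...), folding the cast into the read. The before/after in the generated builtin prologue, reconstructed from the writes above:

    // Before: untyped read, then an unchecked cast.
    //   Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);
    //   TNode<Object> receiver =
    //       UncheckedCast<Object>(Parameter(Descriptor::kReceiver));

    // After: the expected TNode type is part of the parameter read itself.
    TNode<Word32T> argc =
        UncheckedParameter<Word32T>(Descriptor::kJSActualArgumentsCount);
    TNode<Object> receiver = UncheckedParameter<Object>(Descriptor::kReceiver);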
@@ -1255,53 +1350,20 @@ InitializerResults ImplementationVisitor::VisitInitializerResults(
LocationReference ImplementationVisitor::GenerateFieldReference(
VisitResult object, const Field& field, const ClassType* class_type) {
+ if (field.index.has_value()) {
+ return LocationReference::HeapSlice(
+ GenerateCall(class_type->GetSliceMacroName(field), {{object}, {}}));
+ }
+ DCHECK(field.offset.has_value());
StackRange result_range = assembler().TopRange(0);
result_range.Extend(GenerateCopy(object).stack_range());
- VisitResult offset;
- if (field.offset.has_value()) {
- offset =
- VisitResult(TypeOracle::GetConstInt31Type(), ToString(*field.offset));
- offset = GenerateImplicitConvert(TypeOracle::GetIntPtrType(), offset);
- } else {
- StackScope stack_scope(this);
- for (const Field& f : class_type->ComputeAllFields()) {
- if (f.offset) {
- offset =
- VisitResult(TypeOracle::GetConstInt31Type(), ToString(*f.offset));
- }
- if (f.name_and_type.name == field.name_and_type.name) break;
- if (f.index) {
- if (!offset.IsOnStack()) {
- offset = GenerateImplicitConvert(TypeOracle::GetIntPtrType(), offset);
- }
- VisitResult array_length = GenerateArrayLength(object, f);
- size_t element_size;
- std::string element_size_string;
- std::tie(element_size, element_size_string) =
- *SizeOf(f.name_and_type.type);
- VisitResult array_element_size =
- VisitResult(TypeOracle::GetConstInt31Type(), element_size_string);
- // In contrast to the code used for allocation, we don't need overflow
- // checks here because we already know all the offsets fit into memory.
- VisitResult array_size =
- GenerateCall("*", {{array_length, array_element_size}, {}});
- offset = GenerateCall("+", {{offset, array_size}, {}});
- }
- }
- DCHECK(offset.IsOnStack());
- offset = stack_scope.Yield(offset);
- }
+ VisitResult offset =
+ VisitResult(TypeOracle::GetConstInt31Type(), ToString(*field.offset));
+ offset = GenerateImplicitConvert(TypeOracle::GetIntPtrType(), offset);
result_range.Extend(offset.stack_range());
- if (field.index) {
- VisitResult length = GenerateArrayLength(object, field);
- result_range.Extend(length.stack_range());
- const Type* slice_type = TypeOracle::GetSliceType(field.name_and_type.type);
- return LocationReference::HeapSlice(VisitResult(slice_type, result_range));
- } else {
- const Type* type = TypeOracle::GetReferenceType(field.name_and_type.type,
- field.const_qualified);
- return LocationReference::HeapReference(VisitResult(type, result_range));
- }
+ const Type* type = TypeOracle::GetReferenceType(field.name_and_type.type,
+ field.const_qualified);
+ return LocationReference::HeapReference(VisitResult(type, result_range));
}
// This is used to generate field references during initialization, where we can
@@ -1625,25 +1687,30 @@ VisitResult ImplementationVisitor::Visit(SpreadExpression* expr) {
void ImplementationVisitor::GenerateImplementation(const std::string& dir) {
for (SourceId file : SourceFileMap::AllSources()) {
- std::string path_from_root =
- SourceFileMap::PathFromV8RootWithoutExtension(file);
-
- std::string new_source(
- GlobalContext::GeneratedPerFile(file).csa_ccfile.str());
+ std::string base_filename =
+ dir + "/" + SourceFileMap::PathFromV8RootWithoutExtension(file);
+ GlobalContext::PerFileStreams& streams =
+ GlobalContext::GeneratedPerFile(file);
- std::string source_file_name = dir + "/" + path_from_root + "-tq-csa.cc";
- WriteFile(source_file_name, new_source);
- std::string new_header(
- GlobalContext::GeneratedPerFile(file).csa_headerfile.str());
- std::string header_file_name = dir + "/" + path_from_root + "-tq-csa.h";
- WriteFile(header_file_name, new_header);
+ WriteFile(base_filename + "-tq-csa.cc", streams.csa_ccfile.str());
+ WriteFile(base_filename + "-tq-csa.h", streams.csa_headerfile.str());
+ WriteFile(base_filename + "-tq.inc",
+ streams.class_definition_headerfile.str());
+ WriteFile(base_filename + "-tq-inl.inc",
+ streams.class_definition_inline_headerfile.str());
+ WriteFile(base_filename + "-tq.cc", streams.class_definition_ccfile.str());
}
+
+ WriteFile(dir + "/runtime-macros.h", runtime_macros_h_.str());
+ WriteFile(dir + "/runtime-macros.cc", runtime_macros_cc_.str());
}
-void ImplementationVisitor::GenerateMacroFunctionDeclaration(
- std::ostream& o, const std::string& macro_prefix, Macro* macro) {
- GenerateFunctionDeclaration(o, macro_prefix, macro->ExternalName(),
- macro->signature(), macro->parameter_names());
+void ImplementationVisitor::GenerateMacroFunctionDeclaration(std::ostream& o,
+ Macro* macro) {
+ GenerateFunctionDeclaration(
+ o, "",
+ output_type_ == OutputType::kCC ? macro->CCName() : macro->ExternalName(),
+ macro->signature(), macro->parameter_names());
}
std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
@@ -1654,12 +1721,17 @@ std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
if (signature.return_type->IsVoidOrNever()) {
o << "void";
} else {
- o << signature.return_type->GetGeneratedTypeName();
+ o << (output_type_ == OutputType::kCC
+ ? signature.return_type->GetRuntimeType()
+ : signature.return_type->GetGeneratedTypeName());
}
o << " " << macro_prefix << name << "(";
bool first = true;
- if (pass_code_assembler_state) {
+ if (output_type_ == OutputType::kCC) {
+ first = false;
+ o << "Isolate* isolate";
+ } else if (pass_code_assembler_state) {
first = false;
o << "compiler::CodeAssemblerState* state_";
}
@@ -1670,7 +1742,9 @@ std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
first = false;
const Type* parameter_type = signature.types()[i];
const std::string& generated_type_name =
- parameter_type->GetGeneratedTypeName();
+ output_type_ == OutputType::kCC
+ ? parameter_type->GetRuntimeType()
+ : parameter_type->GetGeneratedTypeName();
generated_parameter_names.push_back(ExternalParameterName(
i < parameter_names.size() ? parameter_names[i]->value
@@ -1679,6 +1753,9 @@ std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
}
for (const LabelDeclaration& label_info : signature.labels) {
+ if (output_type_ == OutputType::kCC) {
+ ReportError("Macros that generate runtime code can't have label exits");
+ }
if (!first) o << ", ";
first = false;
generated_parameter_names.push_back(
@@ -2487,7 +2564,7 @@ VisitResult ImplementationVisitor::GenerateCall(
}
}
- bool inline_macro = callable->ShouldBeInlined();
+ bool inline_macro = callable->ShouldBeInlined(output_type_);
std::vector<VisitResult> implicit_arguments;
for (size_t i = 0; i < callable->signature().implicit_count; ++i) {
std::string implicit_name = callable->signature().parameter_names[i]->value;
@@ -2594,7 +2671,18 @@ VisitResult ImplementationVisitor::GenerateCall(
if (is_tailcall) {
ReportError("can't tail call a macro");
}
+
macro->SetUsed();
+
+ // If we're currently generating a C++ macro and it's calling another macro,
+ // then we need to make sure that we also generate C++ code for the called
+ // macro.
+ if (output_type_ == OutputType::kCC && !inline_macro) {
+ if (auto* torque_macro = TorqueMacro::DynamicCast(macro)) {
+ GlobalContext::EnsureInCCOutputList(torque_macro);
+ }
+ }
+
if (return_type->IsConstexpr()) {
DCHECK_EQ(0, arguments.labels.size());
std::stringstream result;
@@ -2774,6 +2862,15 @@ VisitResult ImplementationVisitor::GenerateCall(
result << constexpr_arguments[0];
result << ")";
return VisitResult(return_type, result.str());
+ } else if (intrinsic->ExternalName() == "%IndexedFieldLength") {
+ const Type* type = specialization_types[0];
+ const ClassType* class_type = ClassType::DynamicCast(type);
+ if (!class_type) {
+ ReportError("%IndexedFieldLength must take a class type parameter");
+ }
+ const Field& field =
+ class_type->LookupField(StringLiteralUnquote(constexpr_arguments[0]));
+ return GenerateArrayLength(VisitResult(type, argument_range), field);
} else {
assembler().Emit(CallIntrinsicInstruction{intrinsic, specialization_types,
constexpr_arguments});
@@ -3065,6 +3162,7 @@ void ImplementationVisitor::VisitAllDeclarables() {
CurrentCallable::Scope current_callable(nullptr);
const std::vector<std::unique_ptr<Declarable>>& all_declarables =
GlobalContext::AllDeclarables();
+
// This has to be an index-based loop because all_declarables can be extended
// during the loop.
for (size_t i = 0; i < all_declarables.size(); ++i) {
@@ -3074,6 +3172,19 @@ void ImplementationVisitor::VisitAllDeclarables() {
// Recover from compile errors here. The error is recorded already.
}
}
+
+ // Do the same for macros which generate C++ code.
+ output_type_ = OutputType::kCC;
+ const std::vector<TorqueMacro*>& cc_macros =
+ GlobalContext::AllMacrosForCCOutput();
+ for (size_t i = 0; i < cc_macros.size(); ++i) {
+ try {
+ Visit(static_cast<Declarable*>(cc_macros[i]));
+ } catch (TorqueAbortCompilation&) {
+ // Recover from compile errors here. The error is recorded already.
+ }
+ }
+ output_type_ = OutputType::kCSA;
}
void ImplementationVisitor::Visit(Declarable* declarable) {
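VisitAllDeclarables now makes a second pass: after the normal CSA pass, output_type_ flips to kCC and the visitor processes every macro that GenerateCall queued via EnsureInCCOutputList. Both loops are deliberately index-based because visiting can append to the list being walked; a range-for would be invalidated by the push_back. The pattern in isolation:

    #include <cstddef>
    #include <vector>

    // Index-based work-list loop: safe against push_back during iteration,
    // where iterator-based loops would be invalidated by reallocation.
    void DrainWorkList(std::vector<int>* work) {
      for (std::size_t i = 0; i < work->size(); ++i) {
        int item = (*work)[i];
        if (item > 0) work->push_back(item - 1);  // may grow the list
      }
    }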
@@ -3082,7 +3193,7 @@ void ImplementationVisitor::Visit(Declarable* declarable) {
CurrentFileStreams::Scope current_file_streams(
&GlobalContext::GeneratedPerFile(declarable->Position().source));
if (Callable* callable = Callable::DynamicCast(declarable)) {
- if (!callable->ShouldGenerateExternalCode())
+ if (!callable->ShouldGenerateExternalCode(output_type_))
CurrentFileStreams::Get() = nullptr;
}
switch (declarable->kind()) {
@@ -3605,6 +3716,17 @@ base::Optional<std::vector<Field>> GetOrderedUniqueIndexFields(
}
void CppClassGenerator::GenerateClass() {
+ hdr_ << "\n";
+ hdr_ << "// Alias for HeapObject::Is" << name_
+ << "() that avoids inlining.\n";
+ hdr_ << "V8_EXPORT_PRIVATE bool Is" << name_ << "_NonInline(HeapObject o);\n";
+ hdr_ << "\n";
+
+ impl_ << "\n";
+ impl_ << "bool Is" << name_ << "_NonInline(HeapObject o) {\n";
+ impl_ << " return o.Is" << name_ << "();\n";
+ impl_ << "}\n\n";
+
hdr_ << template_decl() << "\n";
hdr_ << "class " << gen_name_ << " : public P {\n";
hdr_ << " static_assert(std::is_same<" << name_ << ", D>::value,\n"
@@ -3707,7 +3829,7 @@ void CppClassGenerator::GenerateClass() {
hdr_ << "};\n\n";
- if (!type_->IsExtern()) {
+ if (type_->ShouldGenerateFullClassDefinition()) {
GenerateClassExport(type_, hdr_, inl_);
}
}
@@ -3732,7 +3854,7 @@ void CppClassGenerator::GenerateClassConstructors() {
<< name_ << ".\");\n";
hdr_ << " }\n";
- hdr_ << "protected:\n";
+ hdr_ << " protected:\n";
hdr_ << " inline explicit " << gen_name_ << "(Address ptr);\n";
hdr_ << " // Special-purpose constructor for subclasses that have fast "
"paths where\n";
@@ -3743,16 +3865,17 @@ void CppClassGenerator::GenerateClassConstructors() {
inl_ << "template<class D, class P>\n";
inl_ << "inline " << gen_name_T_ << "::" << gen_name_ << "(Address ptr)\n";
inl_ << " : P(ptr) {\n";
- inl_ << " SLOW_DCHECK(this->Is" << name_ << "());\n";
+ inl_ << " SLOW_DCHECK(Is" << name_ << "_NonInline(*this));\n";
inl_ << "}\n";
inl_ << "template<class D, class P>\n";
inl_ << "inline " << gen_name_T_ << "::" << gen_name_
<< "(Address ptr, HeapObject::AllowInlineSmiStorage allow_smi)\n";
inl_ << " : P(ptr, allow_smi) {\n";
- inl_ << " SLOW_DCHECK((allow_smi == "
- "HeapObject::AllowInlineSmiStorage::kAllowBeingASmi && "
- << "this->IsSmi()) || this->Is" << name_ << "());\n";
+ inl_ << " SLOW_DCHECK("
+ << "(allow_smi == HeapObject::AllowInlineSmiStorage::kAllowBeingASmi"
+ " && this->IsSmi()) || Is"
+ << name_ << "_NonInline(*this));\n";
inl_ << "}\n";
}
@@ -3767,15 +3890,15 @@ std::string GenerateRuntimeTypeCheck(const Type* type,
type_check << value << ".IsCleared()";
at_start = false;
}
- for (const RuntimeType& runtime_type : type->GetRuntimeTypes()) {
+ for (const TypeChecker& runtime_type : type->GetTypeCheckers()) {
if (!at_start) type_check << " || ";
at_start = false;
if (maybe_object) {
bool strong = runtime_type.weak_ref_to.empty();
- if (strong && runtime_type.type == "MaybeObject") {
- // Rather than a generic Weak<T>, this is a basic type Tagged or
- // WeakHeapObject. We can't validate anything more about the type of
- // the object pointed to, so just check that it's weak.
+ if (strong && runtime_type.type == WEAK_HEAP_OBJECT) {
+ // Rather than a generic Weak<T>, this is the basic type WeakHeapObject.
+ // We can't validate anything more about the type of the object pointed
+ // to, so just check that it's weak.
type_check << value << ".IsWeak()";
} else {
type_check << "(" << (strong ? "!" : "") << value << ".IsWeak() && "
@@ -3954,7 +4077,7 @@ void CppClassGenerator::GenerateFieldAccessorForTagged(const Field& f) {
std::string offset = "k" + CamelifyString(name) + "Offset";
bool strong_pointer = field_type->IsSubtypeOf(TypeOracle::GetObjectType());
- std::string type = field_type->GetRuntimeType();
+ std::string type = field_type->UnhandlifiedCppTypeName();
// Generate declarations in header.
if (!field_type->IsClassType() && field_type != TypeOracle::GetObjectType()) {
hdr_ << " // Torque type: " << field_type->ToString() << "\n";
@@ -3962,7 +4085,7 @@ void CppClassGenerator::GenerateFieldAccessorForTagged(const Field& f) {
hdr_ << " inline " << type << " " << name << "(" << (f.index ? "int i" : "")
<< ") const;\n";
- hdr_ << " inline " << type << " " << name << "(const Isolate* isolates"
+ hdr_ << " inline " << type << " " << name << "(IsolateRoot isolates"
<< (f.index ? ", int i" : "") << ") const;\n";
hdr_ << " inline void set_" << name << "(" << (f.index ? "int i, " : "")
<< type << " value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);\n\n";
@@ -3973,15 +4096,14 @@ void CppClassGenerator::GenerateFieldAccessorForTagged(const Field& f) {
inl_ << "template <class D, class P>\n";
inl_ << type << " " << gen_name_ << "<D, P>::" << name << "("
<< (f.index ? "int i" : "") << ") const {\n";
- inl_ << " const Isolate* isolate = GetIsolateForPtrCompr(*this);\n";
+ inl_ << " IsolateRoot isolate = GetIsolateForPtrCompr(*this);\n";
inl_ << " return " << gen_name_ << "::" << name << "(isolate"
<< (f.index ? ", i" : "") << ");\n";
inl_ << "}\n";
inl_ << "template <class D, class P>\n";
inl_ << type << " " << gen_name_ << "<D, P>::" << name
- << "(const Isolate* isolate" << (f.index ? ", int i" : "")
- << ") const {\n";
+ << "(IsolateRoot isolate" << (f.index ? ", int i" : "") << ") const {\n";
// TODO(tebbi): The distinction between relaxed and non-relaxed accesses here
// is pretty arbitrary and just tries to preserve what was there before.
@@ -4031,35 +4153,6 @@ void CppClassGenerator::GenerateFieldAccessorForTagged(const Field& f) {
inl_ << "}\n\n";
}
-void EmitClassDefinitionHeadersIncludes(const std::string& basename,
- std::stringstream& header,
- std::stringstream& inline_header) {
- header << "#include \"src/objects/objects.h\"\n";
- header << "#include \"src/objects/heap-object.h\"\n";
- header << "#include \"src/objects/smi.h\"\n";
- header << "#include \"torque-generated/field-offsets.h\"\n";
- header << "#include <type_traits>\n\n";
-
- inline_header << "#include \"torque-generated/class-definitions.h\"\n";
- inline_header << "#include \"src/objects/js-function.h\"\n";
- inline_header << "#include \"src/objects/js-objects.h\"\n";
- inline_header << "#include \"src/objects/js-promise.h\"\n";
- inline_header << "#include \"src/objects/js-weak-refs.h\"\n";
- inline_header << "#include \"src/objects/module.h\"\n";
- inline_header << "#include \"src/objects/objects-inl.h\"\n";
- inline_header << "#include \"src/objects/script.h\"\n";
- inline_header << "#include \"src/objects/shared-function-info.h\"\n";
- inline_header << "#include \"src/objects/tagged-field.h\"\n\n";
-}
-
-void EmitClassDefinitionHeadersForwardDeclarations(std::stringstream& header) {
- // Generate forward declarations for every class.
- for (const ClassType* type : TypeOracle::GetClasses()) {
- header << "class " << type->GetGeneratedTNodeTypeName() << ";\n";
- }
- header << "using BuiltinPtr = Smi;\n\n";
-}
-
void GenerateStructLayoutDescription(std::ostream& header,
const StructType* type) {
header << "struct TorqueGenerated" << CamelifyString(type->name())
@@ -4077,125 +4170,45 @@ void GenerateStructLayoutDescription(std::ostream& header,
void ImplementationVisitor::GenerateClassDefinitions(
const std::string& output_directory) {
- std::stringstream external_header;
- std::stringstream inline_external_header;
- std::stringstream internal_header;
- std::stringstream inline_internal_header;
- std::stringstream exported_header;
- std::stringstream inline_exported_header;
- std::stringstream implementation;
std::stringstream factory_header;
std::stringstream factory_impl;
- std::string basename = "class-definitions";
- std::string internal_basename = "internal-" + basename;
- std::string exported_basename = "exported-" + basename;
- std::string file_basename = output_directory + "/" + basename;
- std::string internal_file_basename =
- output_directory + "/" + internal_basename;
- std::string exported_file_basename =
- output_directory + "/" + exported_basename;
std::string factory_basename = "factory";
- std::string factory_file_basename = output_directory + "/" + factory_basename;
-
- {
- IncludeGuardScope header_guard(external_header, basename + ".h");
-
- IncludeGuardScope inline_header_guard(inline_external_header,
- basename + "-inl.h");
-
- IncludeGuardScope internal_header_guard(internal_header,
- internal_basename + ".h");
-
- IncludeGuardScope internal_inline_header_guard(
- inline_internal_header, internal_basename + "-inl.h");
-
- IncludeGuardScope exported_header_guard(exported_header,
- exported_basename + ".h");
-
- IncludeGuardScope exported_inline_header_guard(
- inline_exported_header, exported_basename + "-inl.h");
-
- internal_header << "#include \"torque-generated/class-definitions.h\"\n";
- internal_header << "#include \"src/objects/fixed-array.h\"\n";
- inline_internal_header
- << "#include \"torque-generated/internal-class-definitions.h\"\n";
- inline_internal_header
- << "#include \"torque-generated/class-definitions-inl.h\"\n";
-
- exported_header << "#include \"src/objects/fixed-array.h\"\n";
- exported_header << "#include \"torque-generated/class-definitions.h\"\n";
- inline_exported_header
- << "#include \"torque-generated/exported-class-definitions.h\"\n";
- inline_exported_header << "#include \"src/objects/fixed-array-inl.h\"\n";
-
- EmitClassDefinitionHeadersIncludes(basename, external_header,
- inline_external_header);
-
- EmitClassDefinitionHeadersIncludes(internal_basename, internal_header,
- inline_internal_header);
-
- IncludeObjectMacrosScope header_macros(external_header);
- IncludeObjectMacrosScope inline_header_macros(inline_external_header);
-
- IncludeObjectMacrosScope internal_header_macros(internal_header);
- IncludeObjectMacrosScope internal_inline_header_macros(
- inline_internal_header);
- IncludeObjectMacrosScope exported_header_macros(exported_header);
- IncludeObjectMacrosScope exported_inline_header_macros(
- inline_exported_header);
-
- NamespaceScope header_namespaces(external_header, {"v8", "internal"});
- NamespaceScope inline_header_namespaces(inline_external_header,
- {"v8", "internal"});
- NamespaceScope internal_header_namespaces(internal_header,
- {"v8", "internal"});
- NamespaceScope internal_inline_header_namespaces(inline_internal_header,
- {"v8", "internal"});
- NamespaceScope exported_header_namespaces(exported_header,
- {"v8", "internal"});
- NamespaceScope exported_inline_header_namespaces(inline_exported_header,
- {"v8", "internal"});
-
- EmitClassDefinitionHeadersForwardDeclarations(external_header);
- EmitClassDefinitionHeadersForwardDeclarations(internal_header);
+ std::stringstream forward_declarations;
+ std::string forward_declarations_filename = "class-forward-declarations.h";
+ {
factory_impl << "#include \"src/heap/factory.h\"\n";
factory_impl << "#include \"src/heap/factory-inl.h\"\n";
factory_impl << "#include \"src/heap/heap.h\"\n";
factory_impl << "#include \"src/heap/heap-inl.h\"\n";
- factory_impl << "#include \"src/execution/isolate.h\"\n\n";
- factory_impl << "#include "
- "\"torque-generated/internal-class-definitions-inl.h\"\n\n";
+ factory_impl << "#include \"src/execution/isolate.h\"\n";
factory_impl << "#include "
- "\"torque-generated/exported-class-definitions-inl.h\"\n\n";
+ "\"src/objects/all-objects-inl.h\"\n\n";
NamespaceScope factory_impl_namespaces(factory_impl, {"v8", "internal"});
factory_impl << "\n";
- implementation << "#include \"torque-generated/class-definitions.h\"\n\n";
- implementation << "#include \"torque-generated/class-verifiers.h\"\n\n";
- implementation
- << "#include \"src/objects/class-definitions-tq-deps-inl.h\"\n\n";
- implementation
- << "#include "
- "\"torque-generated/internal-class-definitions-inl.h\"\n\n";
- implementation
- << "#include "
- "\"torque-generated/exported-class-definitions-inl.h\"\n\n";
- NamespaceScope implementation_namespaces(implementation,
- {"v8", "internal"});
+ IncludeGuardScope include_guard(forward_declarations,
+ forward_declarations_filename);
+ NamespaceScope forward_declarations_namespaces(forward_declarations,
+ {"v8", "internal"});
std::set<const StructType*, TypeLess> structs_used_in_classes;
+ // Emit forward declarations.
+ for (const ClassType* type : TypeOracle::GetClasses()) {
+ auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
+ std::ostream& header = streams.class_definition_headerfile;
+ header << "class " << type->GetGeneratedTNodeTypeName() << ";\n";
+ forward_declarations << "class " << type->GetGeneratedTNodeTypeName()
+ << ";\n";
+ }
+
for (const ClassType* type : TypeOracle::GetClasses()) {
- std::stringstream& header =
- type->IsExtern()
- ? external_header
- : type->ShouldExport() ? exported_header : internal_header;
- std::stringstream& inline_header =
- type->IsExtern() ? inline_external_header
- : type->ShouldExport() ? inline_exported_header
- : inline_internal_header;
+ auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
+ std::ostream& header = streams.class_definition_headerfile;
+ std::ostream& inline_header = streams.class_definition_inline_headerfile;
+ std::ostream& implementation = streams.class_definition_ccfile;
if (type->GenerateCppClassDefinitions()) {
CppClassGenerator g(type, header, inline_header, implementation);
@@ -4207,7 +4220,8 @@ void ImplementationVisitor::GenerateClassDefinitions(
structs_used_in_classes.insert(*field_as_struct);
}
}
- if (type->ShouldExport() && !type->IsAbstract()) {
+ if (type->ShouldExport() && !type->IsAbstract() &&
+ !type->HasCustomMap()) {
factory_header << type->HandlifiedCppTypeName() << " New"
<< type->name() << "(";
factory_impl << type->HandlifiedCppTypeName() << " Factory::New"
@@ -4251,9 +4265,12 @@ void ImplementationVisitor::GenerateClassDefinitions(
factory_impl << " "
"isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>"
"(size, allocation_type);\n";
+ factory_impl << " WriteBarrierMode write_barrier_mode =\n"
+ << " allocation_type == AllocationType::kYoung\n"
+ << " ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;\n";
factory_impl << " result.set_map_after_allocation(roots."
<< SnakeifyString(type->name())
- << "_map(), SKIP_WRITE_BARRIER);\n";
+ << "_map(), write_barrier_mode);\n";
factory_impl << " " << type->HandlifiedCppTypeName()
<< " result_handle(" << type->name()
<< "::cast(result), isolate());\n";
@@ -4267,7 +4284,7 @@ void ImplementationVisitor::GenerateClassDefinitions(
TypeOracle::GetTaggedType()) &&
!f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType())) {
factory_impl << "*" << f.name_and_type.name
- << ", SKIP_WRITE_BARRIER";
+ << ", write_barrier_mode";
} else {
factory_impl << f.name_and_type.name;
}
@@ -4281,20 +4298,20 @@ void ImplementationVisitor::GenerateClassDefinitions(
}
for (const StructType* type : structs_used_in_classes) {
+ std::ostream& header =
+ GlobalContext::GeneratedPerFile(type->GetPosition().source)
+ .class_definition_headerfile;
if (type != TypeOracle::GetFloat64OrHoleType()) {
- GenerateStructLayoutDescription(external_header, type);
+ GenerateStructLayoutDescription(header, type);
}
}
}
- WriteFile(file_basename + ".h", external_header.str());
- WriteFile(file_basename + "-inl.h", inline_external_header.str());
- WriteFile(file_basename + ".cc", implementation.str());
- WriteFile(internal_file_basename + ".h", internal_header.str());
- WriteFile(internal_file_basename + "-inl.h", inline_internal_header.str());
- WriteFile(exported_file_basename + ".h", exported_header.str());
- WriteFile(exported_file_basename + "-inl.h", inline_exported_header.str());
- WriteFile(factory_file_basename + ".inc", factory_header.str());
- WriteFile(factory_file_basename + ".cc", factory_impl.str());
+ WriteFile(output_directory + "/" + factory_basename + ".inc",
+ factory_header.str());
+ WriteFile(output_directory + "/" + factory_basename + ".cc",
+ factory_impl.str());
+ WriteFile(output_directory + "/" + forward_declarations_filename,
+ forward_declarations.str());
}
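Buried in the factory rework above is a small allocation-path change: objects allocated in the young generation can have their initializing stores (including the map) skip the write barrier, since stores into a new-space object need no old-to-new remembered-set entries; old-space allocations keep UPDATE_WRITE_BARRIER. The generated Factory::New body comes out roughly as follows (reconstructed from the stream writes above; "Foo" and its field are hypothetical):

    WriteBarrierMode write_barrier_mode =
        allocation_type == AllocationType::kYoung
            ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
    result.set_map_after_allocation(roots.foo_map(), write_barrier_mode);
    Handle<Foo> result_handle(Foo::cast(result), isolate());
    // Tagged, non-Smi fields are initialized with the same mode:
    //   result_handle->set_bar(*bar, write_barrier_mode);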
namespace {
@@ -4305,7 +4322,7 @@ void GeneratePrintDefinitionsForClass(std::ostream& impl, const ClassType* type,
impl << template_params << "\n";
impl << "void " << gen_name_T << "::" << type->name()
<< "Print(std::ostream& os) {\n";
- impl << " this->PrintHeader(os, \"" << gen_name << "\");\n";
+ impl << " this->PrintHeader(os, \"" << type->name() << "\");\n";
auto hierarchy = type->GetHierarchy();
std::map<std::string, const AggregateType*> field_names;
for (const AggregateType* aggregate_type : hierarchy) {
@@ -4340,14 +4357,8 @@ void ImplementationVisitor::GeneratePrintDefinitions(
{
IfDefScope object_print(impl, "OBJECT_PRINT");
- impl << "#include \"src/objects/objects.h\"\n\n";
impl << "#include <iosfwd>\n\n";
- impl << "#include "
- "\"torque-generated/internal-class-definitions-inl.h\"\n";
- impl << "#include "
- "\"torque-generated/exported-class-definitions-inl.h\"\n";
- impl << "#include \"src/objects/struct-inl.h\"\n\n";
- impl << "#include \"src/objects/template-objects-inl.h\"\n\n";
+ impl << "#include \"src/objects/all-objects-inl.h\"\n\n";
NamespaceScope impl_namespaces(impl, {"v8", "internal"});
@@ -4532,8 +4543,10 @@ void ImplementationVisitor::GenerateBodyDescriptors(
if (type->size().SingleValue()) {
h_contents << " return " << *type->size().SingleValue() << ";\n";
} else {
+ // We use an unchecked_cast here because this is used for concurrent
+ // marking, where we shouldn't re-read the map.
h_contents << " return " << name
- << "::cast(raw_object).AllocatedSize();\n";
+ << "::unchecked_cast(raw_object).AllocatedSize();\n";
}
h_contents << " }\n\n";
@@ -4548,10 +4561,9 @@ namespace {
// Generate verification code for a single piece of class data, which might be
// nested within a struct or might be a single element in an indexed field (or
// both).
-void GenerateFieldValueVerifier(const std::string& class_name,
- const Field& class_field,
- const Field& leaf_field, size_t struct_offset,
- std::string field_size,
+void GenerateFieldValueVerifier(const std::string& class_name, bool indexed,
+ std::string offset, const Field& leaf_field,
+ std::string indexed_field_size,
std::ostream& cc_contents) {
const Type* field_type = leaf_field.name_and_type.type;
@@ -4560,17 +4572,15 @@ void GenerateFieldValueVerifier(const std::string& class_name,
const char* object_type = maybe_object ? "MaybeObject" : "Object";
const char* verify_fn =
maybe_object ? "VerifyMaybeObjectPointer" : "VerifyPointer";
- std::string index_offset = std::to_string(struct_offset);
- if (class_field.index) {
- index_offset += " + i * " + field_size;
+ if (indexed) {
+ offset += " + i * " + indexed_field_size;
}
// Name the local var based on the field name for nicer CHECK output.
const std::string value = leaf_field.name_and_type.name + "__value";
// Read the field.
cc_contents << " " << object_type << " " << value << " = TaggedField<"
- << object_type << ", " << *class_field.offset << ">::load(o, "
- << index_offset << ");\n";
+ << object_type << ">::load(o, " << offset << ");\n";
// Call VerifyPointer or VerifyMaybeObjectPointer on it.
cc_contents << " " << object_type << "::" << verify_fn << "(isolate, "
@@ -4601,49 +4611,49 @@ void GenerateClassFieldVerifier(const std::string& class_name,
// Do not verify if the field may be uninitialized.
if (TypeOracle::GetUninitializedType()->IsSubtypeOf(field_type)) return;
+ std::string field_start_offset;
if (f.index) {
- base::Optional<NameAndType> array_length =
- ExtractSimpleFieldArraySize(class_type, *f.index);
- if (!array_length) {
- Error("Cannot generate verifier for array field with complex length.")
- .Position((*f.index)->pos)
- .Throw();
- }
-
- std::string length_field_offset =
- class_name + "::k" + CamelifyString(array_length->name) + "Offset";
- cc_contents << " for (int i = 0; i < ";
- if (array_length->type == TypeOracle::GetSmiType()) {
- // We already verified the index field because it was listed earlier, so
- // we can assume it's safe to read here.
- cc_contents << "TaggedField<Smi, " << length_field_offset
- << ">::load(o).value()";
- } else {
- const Type* constexpr_version = array_length->type->ConstexprVersion();
- if (constexpr_version == nullptr) {
- Error("constexpr representation for type ",
- array_length->type->ToString(),
- " is required due to usage as index")
- .Position(f.pos);
- }
- cc_contents << "o.ReadField<" << constexpr_version->GetGeneratedTypeName()
- << ">(" << length_field_offset << ")";
- }
- cc_contents << "; ++i) {\n";
+ field_start_offset = f.name_and_type.name + "__offset";
+ std::string length = f.name_and_type.name + "__length";
+ cc_contents << " intptr_t " << field_start_offset << ", " << length
+ << ";\n";
+ cc_contents << " std::tie(std::ignore, " << field_start_offset << ", "
+ << length << ") = "
+ << Callable::PrefixNameForCCOutput(
+ class_type.GetSliceMacroName(f))
+ << "(isolate, o);\n";
+
+ // Slices use intptr, but TaggedField<T>.load() uses int, so verify that
+ // such a cast is valid.
+ cc_contents << " CHECK_EQ(" << field_start_offset << ", static_cast<int>("
+ << field_start_offset << "));\n";
+ cc_contents << " CHECK_EQ(" << length << ", static_cast<int>(" << length
+ << "));\n";
+ field_start_offset = "static_cast<int>(" + field_start_offset + ")";
+ length = "static_cast<int>(" + length + ")";
+
+ cc_contents << " for (int i = 0; i < " << length << "; ++i) {\n";
} else {
+ // Non-indexed fields have known offsets.
+ field_start_offset = std::to_string(*f.offset);
cc_contents << " {\n";
}
if (auto struct_type = field_type->StructSupertype()) {
- for (const Field& field : (*struct_type)->fields()) {
- if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
- GenerateFieldValueVerifier(class_name, f, field, *field.offset,
- std::to_string((*struct_type)->PackedSize()),
- cc_contents);
+ for (const Field& struct_field : (*struct_type)->fields()) {
+ if (struct_field.name_and_type.type->IsSubtypeOf(
+ TypeOracle::GetTaggedType())) {
+ GenerateFieldValueVerifier(
+ class_name, f.index.has_value(),
+ field_start_offset + " + " + std::to_string(*struct_field.offset),
+ struct_field, std::to_string((*struct_type)->PackedSize()),
+ cc_contents);
}
}
} else {
- GenerateFieldValueVerifier(class_name, f, f, 0, "kTaggedSize", cc_contents);
+ GenerateFieldValueVerifier(class_name, f.index.has_value(),
+ field_start_offset, f, "kTaggedSize",
+ cc_contents);
}
cc_contents << " }\n";
@@ -4668,9 +4678,8 @@ void ImplementationVisitor::GenerateClassVerifiers(
}
cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";
cc_contents << "#include "
- "\"torque-generated/internal-class-definitions-inl.h\"\n";
- cc_contents << "#include "
- "\"torque-generated/exported-class-definitions-inl.h\"\n";
+ "\"src/objects/all-objects-inl.h\"\n";
+ cc_contents << "#include \"torque-generated/runtime-macros.h\"\n";
IncludeObjectMacrosScope object_macros(cc_contents);
@@ -4781,10 +4790,6 @@ void ImplementationVisitor::GenerateExportedMacrosAssembler(
h_contents << "#include \"src/compiler/code-assembler.h\"\n";
h_contents << "#include \"src/execution/frames.h\"\n";
h_contents << "#include \"torque-generated/csa-types.h\"\n";
- h_contents
- << "#include \"torque-generated/internal-class-definitions.h\"\n";
- h_contents
- << "#include \"torque-generated/exported-class-definitions.h\"\n";
cc_contents << "#include \"src/objects/fixed-array-inl.h\"\n";
cc_contents << "#include \"src/objects/free-space.h\"\n";
cc_contents << "#include \"src/objects/js-regexp-string-iterator.h\"\n";
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index 960f931435..8846b43502 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -552,8 +552,12 @@ class ImplementationVisitor {
const Type* Visit(DebugStatement* stmt);
const Type* Visit(AssertStatement* stmt);
- void BeginCSAFiles();
- void EndCSAFiles();
+ void BeginGeneratedFiles();
+ void EndGeneratedFiles();
+ // TODO(tebbi): Switch to per-file generation for runtime macros and merge
+ // these functions into {Begin,End}GeneratedFiles().
+ void BeginRuntimeMacrosFile();
+ void EndRuntimeMacrosFile();
void GenerateImplementation(const std::string& dir);
@@ -727,7 +731,6 @@ class ImplementationVisitor {
Block* false_block);
void GenerateMacroFunctionDeclaration(std::ostream& o,
- const std::string& macro_prefix,
Macro* macro);
std::vector<std::string> GenerateFunctionDeclaration(
std::ostream& o, const std::string& macro_prefix, const std::string& name,
@@ -760,18 +763,39 @@ class ImplementationVisitor {
size_t i);
std::string ExternalParameterName(const std::string& name);
- std::ostream& source_out() {
+ std::ostream& csa_ccfile() {
if (auto* streams = CurrentFileStreams::Get()) {
- return streams->csa_ccfile;
+ return output_type_ == OutputType::kCSA ? streams->csa_ccfile
+ : runtime_macros_cc_;
}
return null_stream_;
}
- std::ostream& header_out() {
+ std::ostream& csa_headerfile() {
if (auto* streams = CurrentFileStreams::Get()) {
- return streams->csa_headerfile;
+ return output_type_ == OutputType::kCSA ? streams->csa_headerfile
+ : runtime_macros_h_;
}
return null_stream_;
}
+ std::ostream& class_definition_headerfile() {
+ if (auto* streams = CurrentFileStreams::Get()) {
+ return streams->class_definition_headerfile;
+ }
+ return null_stream_;
+ }
+ std::ostream& class_definition_inline_headerfile() {
+ if (auto* streams = CurrentFileStreams::Get()) {
+ return streams->class_definition_inline_headerfile;
+ }
+ return null_stream_;
+ }
+ std::ostream& class_definition_ccfile() {
+ if (auto* streams = CurrentFileStreams::Get()) {
+ return streams->class_definition_ccfile;
+ }
+ return null_stream_;
+ }
+
CfgAssembler& assembler() { return *assembler_; }
void SetReturnValue(VisitResult return_value) {
@@ -818,6 +842,16 @@ class ImplementationVisitor {
// the value to load.
std::unordered_map<const Expression*, const Identifier*>
bitfield_expressions_;
+
+ // The contents of the runtime macros output files. These contain all Torque
+ // macros that have been generated using the C++ backend. They're not yet
+ // split per source file like CSA macros, but eventually we should change them
+ // to generate -inl.inc files so that callers can easily inline their
+ // contents.
+ std::stringstream runtime_macros_cc_;
+ std::stringstream runtime_macros_h_;
+
+ OutputType output_type_ = OutputType::kCSA;
};
void ReportAllUnusedMacros();
diff --git a/deps/v8/src/torque/instance-type-generator.cc b/deps/v8/src/torque/instance-type-generator.cc
index cb45a7d801..1e2423deba 100644
--- a/deps/v8/src/torque/instance-type-generator.cc
+++ b/deps/v8/src/torque/instance-type-generator.cc
@@ -451,7 +451,7 @@ void ImplementationVisitor::GenerateInstanceTypes(
if (type->IsExtern()) continue;
torque_defined_class_list << " V(" << upper_case_name << ") \\\n";
- if (type->IsAbstract()) continue;
+ if (type->IsAbstract() || type->HasCustomMap()) continue;
torque_defined_map_csa_list << " V(_, " << upper_case_name << "Map, "
<< lower_case_name << "_map, "
<< upper_case_name << ") \\\n";
diff --git a/deps/v8/src/torque/instructions.h b/deps/v8/src/torque/instructions.h
index 528d5c742e..69dfbd8fc3 100644
--- a/deps/v8/src/torque/instructions.h
+++ b/deps/v8/src/torque/instructions.h
@@ -24,32 +24,40 @@ class Macro;
class NamespaceConstant;
class RuntimeFunction;
-#define TORQUE_INSTRUCTION_LIST(V) \
- V(PeekInstruction) \
- V(PokeInstruction) \
- V(DeleteRangeInstruction) \
- V(PushUninitializedInstruction) \
- V(PushBuiltinPointerInstruction) \
- V(LoadReferenceInstruction) \
- V(StoreReferenceInstruction) \
- V(LoadBitFieldInstruction) \
- V(StoreBitFieldInstruction) \
- V(CallCsaMacroInstruction) \
- V(CallIntrinsicInstruction) \
- V(NamespaceConstantInstruction) \
- V(CallCsaMacroAndBranchInstruction) \
- V(CallBuiltinInstruction) \
- V(CallRuntimeInstruction) \
- V(CallBuiltinPointerInstruction) \
- V(BranchInstruction) \
- V(ConstexprBranchInstruction) \
- V(GotoInstruction) \
- V(GotoExternalInstruction) \
- V(ReturnInstruction) \
- V(PrintConstantStringInstruction) \
- V(AbortInstruction) \
+// Instructions where all backends generate code the same way.
+#define TORQUE_BACKEND_AGNOSTIC_INSTRUCTION_LIST(V) \
+ V(PeekInstruction) \
+ V(PokeInstruction) \
+ V(DeleteRangeInstruction)
+
+// Instructions where different backends may generate different code.
+#define TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST(V) \
+ V(PushUninitializedInstruction) \
+ V(PushBuiltinPointerInstruction) \
+ V(LoadReferenceInstruction) \
+ V(StoreReferenceInstruction) \
+ V(LoadBitFieldInstruction) \
+ V(StoreBitFieldInstruction) \
+ V(CallCsaMacroInstruction) \
+ V(CallIntrinsicInstruction) \
+ V(NamespaceConstantInstruction) \
+ V(CallCsaMacroAndBranchInstruction) \
+ V(CallBuiltinInstruction) \
+ V(CallRuntimeInstruction) \
+ V(CallBuiltinPointerInstruction) \
+ V(BranchInstruction) \
+ V(ConstexprBranchInstruction) \
+ V(GotoInstruction) \
+ V(GotoExternalInstruction) \
+ V(ReturnInstruction) \
+ V(PrintConstantStringInstruction) \
+ V(AbortInstruction) \
V(UnsafeCastInstruction)
+#define TORQUE_INSTRUCTION_LIST(V) \
+ TORQUE_BACKEND_AGNOSTIC_INSTRUCTION_LIST(V) \
+ TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST(V)
+
#define TORQUE_INSTRUCTION_BOILERPLATE() \
static const InstructionKind kKind; \
std::unique_ptr<InstructionBase> Clone() const override; \
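The split above is consumed through the usual X-macro pattern. As a minimal sketch (DECLARE_EXAMPLE_KIND and ExampleKind are hypothetical names, not part of this change), this is how a call site expands one of the lists:

// Each call site supplies its own V(T); the list stamps it out per type.
#define DECLARE_EXAMPLE_KIND(T) k##T,
enum class ExampleKind {
  TORQUE_BACKEND_AGNOSTIC_INSTRUCTION_LIST(DECLARE_EXAMPLE_KIND)
};
#undef DECLARE_EXAMPLE_KIND
// Expands to: kPeekInstruction, kPokeInstruction, kDeleteRangeInstruction,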
diff --git a/deps/v8/src/torque/runtime-macro-shims.h b/deps/v8/src/torque/runtime-macro-shims.h
new file mode 100644
index 0000000000..89e566bc62
--- /dev/null
+++ b/deps/v8/src/torque/runtime-macro-shims.h
@@ -0,0 +1,36 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains runtime implementations of a few macros that are defined
+// as external in Torque, so that generated runtime code can work.
+
+#ifndef V8_TORQUE_RUNTIME_MACRO_SHIMS_H_
+#define V8_TORQUE_RUNTIME_MACRO_SHIMS_H_
+
+#include "src/objects/smi.h"
+
+namespace v8 {
+namespace internal {
+namespace TorqueRuntimeMacroShims {
+namespace CodeStubAssembler {
+
+inline intptr_t ChangeInt32ToIntPtr(Isolate* isolate, int32_t i) { return i; }
+inline uintptr_t ChangeUint32ToWord(Isolate* isolate, uint32_t u) { return u; }
+inline intptr_t IntPtrAdd(Isolate* isolate, intptr_t a, intptr_t b) {
+ return a + b;
+}
+inline intptr_t IntPtrMul(Isolate* isolate, intptr_t a, intptr_t b) {
+ return a * b;
+}
+inline intptr_t Signed(Isolate* isolate, uintptr_t u) {
+ return static_cast<intptr_t>(u);
+}
+inline int32_t SmiUntag(Isolate* isolate, Smi s) { return s.value(); }
+
+} // namespace CodeStubAssembler
+} // namespace TorqueRuntimeMacroShims
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_RUNTIME_MACRO_SHIMS_H_
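As a rough sketch of what a caller of these shims looks like, the hypothetical function below (not generated by this commit, assumed to live in v8::internal) mirrors the calling convention: every shim takes an Isolate* purely so that generated call sites stay uniform, and ignores it.

// Hypothetical generated-style caller of the runtime macro shims.
intptr_t ExampleByteOffset(Isolate* isolate, Smi index, int32_t element_size) {
  namespace shims = TorqueRuntimeMacroShims::CodeStubAssembler;
  intptr_t i = shims::ChangeInt32ToIntPtr(isolate,
                                          shims::SmiUntag(isolate, index));
  return shims::IntPtrMul(isolate, i, static_cast<intptr_t>(element_size));
}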
diff --git a/deps/v8/src/torque/torque-code-generator.cc b/deps/v8/src/torque/torque-code-generator.cc
new file mode 100644
index 0000000000..46763be468
--- /dev/null
+++ b/deps/v8/src/torque/torque-code-generator.cc
@@ -0,0 +1,60 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/torque-code-generator.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+bool TorqueCodeGenerator::IsEmptyInstruction(const Instruction& instruction) {
+ switch (instruction.kind()) {
+ case InstructionKind::kPeekInstruction:
+ case InstructionKind::kPokeInstruction:
+ case InstructionKind::kDeleteRangeInstruction:
+ case InstructionKind::kPushUninitializedInstruction:
+ case InstructionKind::kPushBuiltinPointerInstruction:
+ case InstructionKind::kUnsafeCastInstruction:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void TorqueCodeGenerator::EmitInstruction(const Instruction& instruction,
+ Stack<std::string>* stack) {
+#ifdef DEBUG
+ if (!IsEmptyInstruction(instruction)) {
+ EmitSourcePosition(instruction->pos);
+ }
+#endif
+
+ switch (instruction.kind()) {
+#define ENUM_ITEM(T) \
+ case InstructionKind::k##T: \
+ return EmitInstruction(instruction.Cast<T>(), stack);
+ TORQUE_INSTRUCTION_LIST(ENUM_ITEM)
+#undef ENUM_ITEM
+ }
+}
+
+void TorqueCodeGenerator::EmitInstruction(const PeekInstruction& instruction,
+ Stack<std::string>* stack) {
+ stack->Push(stack->Peek(instruction.slot));
+}
+
+void TorqueCodeGenerator::EmitInstruction(const PokeInstruction& instruction,
+ Stack<std::string>* stack) {
+ stack->Poke(instruction.slot, stack->Top());
+ stack->Pop();
+}
+
+void TorqueCodeGenerator::EmitInstruction(
+ const DeleteRangeInstruction& instruction, Stack<std::string>* stack) {
+ stack->DeleteRange(instruction.range);
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/torque/torque-code-generator.h b/deps/v8/src/torque/torque-code-generator.h
new file mode 100644
index 0000000000..ddbd5309c9
--- /dev/null
+++ b/deps/v8/src/torque/torque-code-generator.h
@@ -0,0 +1,93 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_TORQUE_CODE_GENERATOR_H_
+#define V8_TORQUE_TORQUE_CODE_GENERATOR_H_
+
+#include <iostream>
+
+#include "src/torque/cfg.h"
+#include "src/torque/declarable.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+class TorqueCodeGenerator {
+ public:
+ TorqueCodeGenerator(const ControlFlowGraph& cfg, std::ostream& out)
+ : cfg_(cfg),
+ out_(&out),
+ out_decls_(&out),
+ previous_position_(SourcePosition::Invalid()) {}
+
+ protected:
+ const ControlFlowGraph& cfg_;
+ std::ostream* out_;
+ std::ostream* out_decls_;
+ size_t fresh_id_ = 0;
+ SourcePosition previous_position_;
+ std::map<DefinitionLocation, std::string> location_map_;
+
+ std::string DefinitionToVariable(const DefinitionLocation& location) {
+ if (location.IsPhi()) {
+ std::stringstream stream;
+ stream << "phi_bb" << location.GetPhiBlock()->id() << "_"
+ << location.GetPhiIndex();
+ return stream.str();
+ } else if (location.IsParameter()) {
+ auto it = location_map_.find(location);
+ DCHECK_NE(it, location_map_.end());
+ return it->second;
+ } else {
+ DCHECK(location.IsInstruction());
+ auto it = location_map_.find(location);
+ if (it == location_map_.end()) {
+ it = location_map_.insert(std::make_pair(location, FreshNodeName()))
+ .first;
+ }
+ return it->second;
+ }
+ }
+
+ void SetDefinitionVariable(const DefinitionLocation& definition,
+ const std::string& str) {
+ DCHECK_EQ(location_map_.find(definition), location_map_.end());
+ location_map_.insert(std::make_pair(definition, str));
+ }
+
+ std::ostream& out() { return *out_; }
+ std::ostream& decls() { return *out_decls_; }
+
+ static bool IsEmptyInstruction(const Instruction& instruction);
+ virtual void EmitSourcePosition(SourcePosition pos,
+ bool always_emit = false) = 0;
+
+ std::string FreshNodeName() { return "tmp" + std::to_string(fresh_id_++); }
+ std::string FreshCatchName() { return "catch" + std::to_string(fresh_id_++); }
+ std::string FreshLabelName() { return "label" + std::to_string(fresh_id_++); }
+ std::string BlockName(const Block* block) {
+ return "block" + std::to_string(block->id());
+ }
+
+ void EmitInstruction(const Instruction& instruction,
+ Stack<std::string>* stack);
+
+#define EMIT_INSTRUCTION_DECLARATION(T) \
+ void EmitInstruction(const T& instruction, Stack<std::string>* stack);
+ TORQUE_BACKEND_AGNOSTIC_INSTRUCTION_LIST(EMIT_INSTRUCTION_DECLARATION)
+#undef EMIT_INSTRUCTION_DECLARATION
+
+#define EMIT_INSTRUCTION_DECLARATION(T) \
+ virtual void EmitInstruction(const T& instruction, \
+ Stack<std::string>* stack) = 0;
+ TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST(EMIT_INSTRUCTION_DECLARATION)
+#undef EMIT_INSTRUCTION_DECLARATION
+};
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_TORQUE_CODE_GENERATOR_H_
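A minimal sketch of how a backend plugs into this base class; ExampleCodeGenerator is hypothetical (the real CSA and C++ generators are separate classes in this commit), and its stubs emit nothing:

// The backend-agnostic emitters (Peek/Poke/DeleteRange) are inherited;
// only EmitSourcePosition and the backend-dependent emitters must be
// provided, which the X-macro list stamps out below.
class ExampleCodeGenerator : public TorqueCodeGenerator {
 public:
  using TorqueCodeGenerator::TorqueCodeGenerator;

 protected:
  void EmitSourcePosition(SourcePosition pos, bool always_emit) override {}
#define EMIT_STUB(T) \
  void EmitInstruction(const T& instruction, Stack<std::string>* stack) \
      override {}
  TORQUE_BACKEND_DEPENDENT_INSTRUCTION_LIST(EMIT_STUB)
#undef EMIT_STUB
};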
diff --git a/deps/v8/src/torque/torque-compiler.cc b/deps/v8/src/torque/torque-compiler.cc
index 20bc297354..9e00412ca1 100644
--- a/deps/v8/src/torque/torque-compiler.cc
+++ b/deps/v8/src/torque/torque-compiler.cc
@@ -75,7 +75,8 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
implementation_visitor.SetDryRun(output_directory.length() == 0);
implementation_visitor.GenerateInstanceTypes(output_directory);
- implementation_visitor.BeginCSAFiles();
+ implementation_visitor.BeginGeneratedFiles();
+ implementation_visitor.BeginRuntimeMacrosFile();
implementation_visitor.VisitAllDeclarables();
@@ -94,7 +95,8 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
implementation_visitor.GenerateExportedMacrosAssembler(output_directory);
implementation_visitor.GenerateCSATypes(output_directory);
- implementation_visitor.EndCSAFiles();
+ implementation_visitor.EndGeneratedFiles();
+ implementation_visitor.EndRuntimeMacrosFile();
implementation_visitor.GenerateImplementation(output_directory);
if (GlobalContext::collect_language_server_data()) {
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index 51778161d9..b3ff1538b2 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -580,18 +580,23 @@ base::Optional<ParseResult> MakeIntrinsicDeclaration(
}
namespace {
-bool HasExportAnnotation(ParseResultIterator* child_results,
- const char* declaration) {
+bool HasAnnotation(ParseResultIterator* child_results, const char* annotation,
+ const char* declaration) {
auto annotations = child_results->NextAs<std::vector<Annotation>>();
if (annotations.size()) {
- if (annotations.size() > 1 || annotations[0].name->value != "@export") {
- Error(declaration,
- " declarations only support a single @export annotation");
+ if (annotations.size() > 1 || annotations[0].name->value != annotation) {
+ Error(declaration, " declarations only support a single ", annotation,
+ " annotation");
}
return true;
}
return false;
}
+
+bool HasExportAnnotation(ParseResultIterator* child_results,
+ const char* declaration) {
+ return HasAnnotation(child_results, ANNOTATION_EXPORT, declaration);
+}
} // namespace
base::Optional<ParseResult> MakeTorqueMacroDeclaration(
@@ -685,6 +690,8 @@ base::Optional<ParseResult> MakeTypeAliasDeclaration(
base::Optional<ParseResult> MakeAbstractTypeDeclaration(
ParseResultIterator* child_results) {
+ bool use_parent_type_checker = HasAnnotation(
+ child_results, ANNOTATION_USE_PARENT_TYPE_CHECKER, "abstract type");
auto transient = child_results->NextAs<bool>();
auto name = child_results->NextAs<Identifier*>();
if (!IsValidTypeName(name->value)) {
@@ -693,8 +700,11 @@ base::Optional<ParseResult> MakeAbstractTypeDeclaration(
auto generic_parameters = child_results->NextAs<GenericParameters>();
auto extends = child_results->NextAs<base::Optional<TypeExpression*>>();
auto generates = child_results->NextAs<base::Optional<std::string>>();
+ AbstractTypeFlags flags(AbstractTypeFlag::kNone);
+ if (transient) flags |= AbstractTypeFlag::kTransient;
+ if (use_parent_type_checker) flags |= AbstractTypeFlag::kUseParentTypeChecker;
TypeDeclaration* type_decl = MakeNode<AbstractTypeDeclaration>(
- name, transient, extends, std::move(generates));
+ name, flags, extends, std::move(generates));
Declaration* decl = type_decl;
if (!generic_parameters.empty()) {
decl = MakeNode<GenericTypeDeclaration>(generic_parameters, type_decl);
@@ -715,7 +725,8 @@ base::Optional<ParseResult> MakeAbstractTypeDeclaration(
constexpr_extends = AddConstexpr(*extends);
}
TypeDeclaration* constexpr_decl = MakeNode<AbstractTypeDeclaration>(
- constexpr_name, transient, constexpr_extends, constexpr_generates);
+ constexpr_name, flags | AbstractTypeFlag::kConstexpr, constexpr_extends,
+ constexpr_generates);
constexpr_decl->pos = name->pos;
Declaration* decl = constexpr_decl;
if (!generic_parameters.empty()) {
@@ -878,8 +889,9 @@ base::Optional<ParseResult> MakeClassDeclaration(
child_results,
{ANNOTATION_GENERATE_PRINT, ANNOTATION_NO_VERIFIER, ANNOTATION_ABSTRACT,
ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT,
- ANNOTATION_GENERATE_CPP_CLASS, ANNOTATION_GENERATE_BODY_DESCRIPTOR,
- ANNOTATION_EXPORT_CPP_CLASS, ANNOTATION_DO_NOT_GENERATE_CAST,
+ ANNOTATION_GENERATE_CPP_CLASS, ANNOTATION_CUSTOM_CPP_CLASS,
+ ANNOTATION_CUSTOM_MAP, ANNOTATION_GENERATE_BODY_DESCRIPTOR,
+ ANNOTATION_EXPORT, ANNOTATION_DO_NOT_GENERATE_CAST,
ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT,
ANNOTATION_LOWEST_INSTANCE_TYPE_WITHIN_PARENT},
{ANNOTATION_RESERVE_BITS_IN_INSTANCE_TYPE,
@@ -898,13 +910,19 @@ base::Optional<ParseResult> MakeClassDeclaration(
if (annotations.Contains(ANNOTATION_GENERATE_CPP_CLASS)) {
flags |= ClassFlag::kGenerateCppClassDefinitions;
}
+ if (annotations.Contains(ANNOTATION_CUSTOM_CPP_CLASS)) {
+ flags |= ClassFlag::kCustomCppClass;
+ }
+ if (annotations.Contains(ANNOTATION_CUSTOM_MAP)) {
+ flags |= ClassFlag::kCustomMap;
+ }
if (annotations.Contains(ANNOTATION_DO_NOT_GENERATE_CAST)) {
flags |= ClassFlag::kDoNotGenerateCast;
}
if (annotations.Contains(ANNOTATION_GENERATE_BODY_DESCRIPTOR)) {
flags |= ClassFlag::kGenerateBodyDescriptor;
}
- if (annotations.Contains(ANNOTATION_EXPORT_CPP_CLASS)) {
+ if (annotations.Contains(ANNOTATION_EXPORT)) {
flags |= ClassFlag::kExport;
}
if (annotations.Contains(ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT)) {
@@ -972,8 +990,10 @@ base::Optional<ParseResult> MakeClassDeclaration(
MakeNode<Identifier>(CONSTEXPR_TYPE_PREFIX + name->value);
constexpr_name->pos = name->pos;
TypeExpression* constexpr_extends = AddConstexpr(extends);
+ AbstractTypeFlags abstract_type_flags(AbstractTypeFlag::kConstexpr);
+ if (transient) abstract_type_flags |= AbstractTypeFlag::kTransient;
TypeDeclaration* constexpr_decl = MakeNode<AbstractTypeDeclaration>(
- constexpr_name, transient, constexpr_extends, name->value);
+ constexpr_name, abstract_type_flags, constexpr_extends, name->value);
constexpr_decl->pos = name->pos;
result.push_back(constexpr_decl);
@@ -1280,7 +1300,8 @@ base::Optional<ParseResult> MakeEnumDeclaration(
// type kEntryN extends Enum;
// }
auto type_decl = MakeNode<AbstractTypeDeclaration>(
- name_identifier, false, base_type_expression, base::nullopt);
+ name_identifier, AbstractTypeFlag::kNone, base_type_expression,
+ base::nullopt);
TypeExpression* name_type_expression =
MakeNode<BasicTypeExpression>(name_identifier->value);
@@ -1289,8 +1310,8 @@ base::Optional<ParseResult> MakeEnumDeclaration(
std::vector<Declaration*> entry_decls;
for (const auto& entry : entries) {
entry_decls.push_back(MakeNode<AbstractTypeDeclaration>(
- entry.name, false, entry.type.value_or(name_type_expression),
- base::nullopt));
+ entry.name, AbstractTypeFlag::kNone,
+ entry.type.value_or(name_type_expression), base::nullopt));
}
result.push_back(type_decl);
@@ -1309,8 +1330,8 @@ base::Optional<ParseResult> MakeEnumDeclaration(
std::vector<Declaration*> entry_decls;
for (const auto& entry : entries) {
entry_decls.push_back(MakeNode<AbstractTypeDeclaration>(
- entry.name, false, entry.type.value_or(*base_type_expression),
- base::nullopt));
+ entry.name, AbstractTypeFlag::kNone,
+ entry.type.value_or(*base_type_expression), base::nullopt));
auto entry_type = MakeNode<BasicTypeExpression>(
std::vector<std::string>{name}, entry.name->value,
@@ -1348,8 +1369,8 @@ base::Optional<ParseResult> MakeEnumDeclaration(
base_constexpr_type_expression = AddConstexpr(*base_type_expression);
}
result.push_back(MakeNode<AbstractTypeDeclaration>(
- constexpr_type_identifier, false, base_constexpr_type_expression,
- constexpr_generates));
+ constexpr_type_identifier, AbstractTypeFlag::kConstexpr,
+ base_constexpr_type_expression, constexpr_generates));
TypeExpression* type_expr = nullptr;
Identifier* fromconstexpr_identifier = nullptr;
@@ -1386,8 +1407,9 @@ base::Optional<ParseResult> MakeEnumDeclaration(
"::" + entry_name);
entry_decls.push_back(MakeNode<AbstractTypeDeclaration>(
- MakeNode<Identifier>(entry_constexpr_type), false,
- constexpr_type_expression, constexpr_generates));
+ MakeNode<Identifier>(entry_constexpr_type),
+ AbstractTypeFlag::kConstexpr, constexpr_type_expression,
+ constexpr_generates));
bool generate_typed_constant = entry.type.has_value();
if (generate_typed_constant) {
@@ -2535,7 +2557,7 @@ struct TorqueGrammar : Grammar {
Token("{"), List<BitFieldDeclaration>(&bitFieldDeclaration),
Token("}")},
AsSingletonVector<Declaration*, MakeBitFieldStructDeclaration>()),
- Rule({CheckIf(Token("transient")), Token("type"), &name,
+ Rule({annotations, CheckIf(Token("transient")), Token("type"), &name,
TryOrDefault<GenericParameters>(&genericParameters),
Optional<TypeExpression*>(Sequence({Token("extends"), &type})),
Optional<std::string>(
diff --git a/deps/v8/src/torque/type-visitor.cc b/deps/v8/src/torque/type-visitor.cc
index 3b37593fc6..a706fc561d 100644
--- a/deps/v8/src/torque/type-visitor.cc
+++ b/deps/v8/src/torque/type-visitor.cc
@@ -77,7 +77,7 @@ std::string ComputeGeneratesType(base::Optional<std::string> opt_gen,
const AbstractType* TypeVisitor::ComputeType(
AbstractTypeDeclaration* decl, MaybeSpecializationKey specialized_from) {
std::string generates =
- ComputeGeneratesType(decl->generates, !decl->is_constexpr);
+ ComputeGeneratesType(decl->generates, !decl->IsConstexpr());
const Type* parent_type = nullptr;
if (decl->extends) {
@@ -90,25 +90,21 @@ const AbstractType* TypeVisitor::ComputeType(
}
}
- if (decl->is_constexpr && decl->transient) {
+ if (decl->IsConstexpr() && decl->IsTransient()) {
ReportError("cannot declare a transient type that is also constexpr");
}
const Type* non_constexpr_version = nullptr;
- if (decl->is_constexpr) {
+ if (decl->IsConstexpr()) {
QualifiedName non_constexpr_name{GetNonConstexprName(decl->name->value)};
if (auto type = Declarations::TryLookupType(non_constexpr_name)) {
non_constexpr_version = *type;
}
}
- AbstractTypeFlags flags = AbstractTypeFlag::kNone;
- if (decl->transient) flags |= AbstractTypeFlag::kTransient;
- if (decl->is_constexpr) flags |= AbstractTypeFlag::kConstexpr;
-
- return TypeOracle::GetAbstractType(parent_type, decl->name->value, flags,
- generates, non_constexpr_version,
- specialized_from);
+ return TypeOracle::GetAbstractType(parent_type, decl->name->value,
+ decl->flags, generates,
+ non_constexpr_version, specialized_from);
}
void DeclareMethods(AggregateType* container_type,
@@ -291,6 +287,15 @@ const ClassType* TypeVisitor::ComputeType(
Error("Class \"", decl->name->value,
"\" requires a layout but doesn't have one");
}
+ if (flags & ClassFlag::kCustomCppClass) {
+ if (!(flags & ClassFlag::kExport)) {
+ Error("Only exported classes can have a custom C++ class.");
+ }
+ if (flags & ClassFlag::kExtern) {
+ Error("No need to specify ", ANNOTATION_CUSTOM_CPP_CLASS,
+ ", extern classes always have a custom C++ class.");
+ }
+ }
if (flags & ClassFlag::kExtern) {
if (decl->generates) {
bool enforce_tnode_type = true;
@@ -354,14 +359,17 @@ const Type* TypeVisitor::ComputeType(TypeExpression* type_expression) {
UnionTypeExpression::DynamicCast(type_expression)) {
return TypeOracle::GetUnionType(ComputeType(union_type->a),
ComputeType(union_type->b));
- } else {
- auto* function_type_exp = FunctionTypeExpression::cast(type_expression);
+ } else if (auto* function_type_exp =
+ FunctionTypeExpression::DynamicCast(type_expression)) {
TypeVector argument_types;
for (TypeExpression* type_exp : function_type_exp->parameters) {
argument_types.push_back(ComputeType(type_exp));
}
return TypeOracle::GetBuiltinPointerType(
argument_types, ComputeType(function_type_exp->return_type));
+ } else {
+ auto* precomputed = PrecomputedTypeExpression::cast(type_expression);
+ return precomputed->type;
}
}
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index df35c46300..70dc0fb9fe 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -12,6 +12,7 @@
#include "src/torque/ast.h"
#include "src/torque/declarable.h"
#include "src/torque/global-context.h"
+#include "src/torque/source-positions.h"
#include "src/torque/type-oracle.h"
#include "src/torque/type-visitor.h"
@@ -74,12 +75,18 @@ std::string Type::SimpleName() const {
std::string Type::HandlifiedCppTypeName() const {
if (IsSubtypeOf(TypeOracle::GetSmiType())) return "int";
if (IsSubtypeOf(TypeOracle::GetTaggedType())) {
- return "Handle<" + ConstexprVersion()->GetGeneratedTypeName() + ">";
+ return "Handle<" + UnhandlifiedCppTypeName() + ">";
} else {
- return ConstexprVersion()->GetGeneratedTypeName();
+ return UnhandlifiedCppTypeName();
}
}
+std::string Type::UnhandlifiedCppTypeName() const {
+ if (IsSubtypeOf(TypeOracle::GetSmiType())) return "int";
+ if (this == TypeOracle::GetObjectType()) return "Object";
+ return GetConstexprGeneratedTypeName();
+}
+
bool Type::IsSubtypeOf(const Type* supertype) const {
if (supertype->IsTopType()) return true;
if (IsNever()) return true;
@@ -173,13 +180,14 @@ std::string AbstractType::GetGeneratedTNodeTypeNameImpl() const {
return generated_type_;
}
-std::vector<RuntimeType> AbstractType::GetRuntimeTypes() const {
- std::string type_name = GetGeneratedTNodeTypeName();
+std::vector<TypeChecker> AbstractType::GetTypeCheckers() const {
+ if (UseParentTypeChecker()) return parent()->GetTypeCheckers();
+ std::string type_name = name();
if (auto strong_type =
Type::MatchUnaryGeneric(this, TypeOracle::GetWeakGeneric())) {
- auto strong_runtime_types = (*strong_type)->GetRuntimeTypes();
- std::vector<RuntimeType> result;
- for (const RuntimeType& type : strong_runtime_types) {
+ auto strong_runtime_types = (*strong_type)->GetTypeCheckers();
+ std::vector<TypeChecker> result;
+ for (const TypeChecker& type : strong_runtime_types) {
// Generic parameter in Weak<T> should have already been checked to
// extend HeapObject, so it couldn't itself be another weak type.
DCHECK(type.weak_ref_to.empty());
@@ -643,29 +651,79 @@ bool ClassType::HasNoPointerSlots() const {
return true;
}
+bool ClassType::HasIndexedFieldsIncludingInParents() const {
+ for (const auto& field : fields_) {
+ if (field.index.has_value()) return true;
+ }
+ if (const ClassType* parent = GetSuperClass()) {
+ return parent->HasIndexedFieldsIncludingInParents();
+ }
+ return false;
+}
+
+const Field* ClassType::GetFieldPreceding(size_t field_index) const {
+ if (field_index > 0) {
+ return &fields_[field_index - 1];
+ }
+ if (const ClassType* parent = GetSuperClass()) {
+ return parent->GetFieldPreceding(parent->fields_.size());
+ }
+ return nullptr;
+}
+
+const ClassType* ClassType::GetClassDeclaringField(const Field& f) const {
+ for (const Field& field : fields_) {
+ if (f.name_and_type.name == field.name_and_type.name) return this;
+ }
+ return GetSuperClass()->GetClassDeclaringField(f);
+}
+
+std::string ClassType::GetSliceMacroName(const Field& field) const {
+ const ClassType* declarer = GetClassDeclaringField(field);
+ return "FieldSlice" + declarer->name() +
+ CamelifyString(field.name_and_type.name);
+}
+
void ClassType::GenerateAccessors() {
+ bool at_or_after_indexed_field = false;
+ if (const ClassType* parent = GetSuperClass()) {
+ at_or_after_indexed_field = parent->HasIndexedFieldsIncludingInParents();
+ }
// For each field, construct AST snippets that implement a CSA accessor
// function. The implementation iterator will turn the snippets into code.
- for (auto& field : fields_) {
+ for (size_t field_index = 0; field_index < fields_.size(); ++field_index) {
+ Field& field = fields_[field_index];
if (field.name_and_type.type == TypeOracle::GetVoidType()) {
continue;
}
+ at_or_after_indexed_field =
+ at_or_after_indexed_field || field.index.has_value();
CurrentSourcePosition::Scope position_activator(field.pos);
- IdentifierExpression* parameter =
- MakeNode<IdentifierExpression>(MakeNode<Identifier>(std::string{"o"}));
- IdentifierExpression* index =
- MakeNode<IdentifierExpression>(MakeNode<Identifier>(std::string{"i"}));
+ IdentifierExpression* parameter = MakeIdentifierExpression("o");
+ IdentifierExpression* index = MakeIdentifierExpression("i");
- // Load accessor
std::string camel_field_name = CamelifyString(field.name_and_type.name);
- std::string load_macro_name = "Load" + this->name() + camel_field_name;
+
+ if (at_or_after_indexed_field) {
+ if (!field.index.has_value()) {
+ // There's no fundamental reason we couldn't generate functions to get
+ // references instead of slices, but it's not yet implemented.
+ ReportError(
+ "Torque doesn't yet support non-indexed fields after indexed "
+ "fields");
+ }
+
+ GenerateSliceAccessor(field_index);
+ }
// For now, only generate indexed accessors for simple types
if (field.index.has_value() && field.name_and_type.type->IsStructType()) {
continue;
}
+ // Load accessor
+ std::string load_macro_name = "Load" + this->name() + camel_field_name;
Signature load_signature;
load_signature.parameter_names.push_back(MakeNode<Identifier>("o"));
load_signature.parameter_types.types.push_back(this);
@@ -677,8 +735,8 @@ void ClassType::GenerateAccessors() {
load_signature.parameter_types.var_args = false;
load_signature.return_type = field.name_and_type.type;
- Expression* load_expression = MakeNode<FieldAccessExpression>(
- parameter, MakeNode<Identifier>(field.name_and_type.name));
+ Expression* load_expression =
+ MakeFieldAccessExpression(parameter, field.name_and_type.name);
if (field.index) {
load_expression =
MakeNode<ElementAccessExpression>(load_expression, index);
@@ -689,8 +747,7 @@ void ClassType::GenerateAccessors() {
// Store accessor
if (!field.const_qualified) {
- IdentifierExpression* value = MakeNode<IdentifierExpression>(
- std::vector<std::string>{}, MakeNode<Identifier>(std::string{"v"}));
+ IdentifierExpression* value = MakeIdentifierExpression("v");
std::string store_macro_name = "Store" + this->name() + camel_field_name;
Signature store_signature;
store_signature.parameter_names.push_back(MakeNode<Identifier>("o"));
@@ -705,8 +762,8 @@ void ClassType::GenerateAccessors() {
store_signature.parameter_types.var_args = false;
// TODO(danno): Store macros probably should return their value argument
store_signature.return_type = TypeOracle::GetVoidType();
- Expression* store_expression = MakeNode<FieldAccessExpression>(
- parameter, MakeNode<Identifier>(field.name_and_type.name));
+ Expression* store_expression =
+ MakeFieldAccessExpression(parameter, field.name_and_type.name);
if (field.index) {
store_expression =
MakeNode<ElementAccessExpression>(store_expression, index);
@@ -720,6 +777,131 @@ void ClassType::GenerateAccessors() {
}
}
+void ClassType::GenerateSliceAccessor(size_t field_index) {
+ // Generate a Torque macro for getting a Slice to this field. This macro can
+ // be called by the dot operator for this field. In Torque, this function for
+ // class "ClassName" and field "field_name" and field type "FieldType" would
+ // be written as one of the following:
+ //
+ // If the field has a known offset (in this example, 16):
+ // FieldSliceClassNameFieldName(o: ClassName) {
+ // return torque_internal::Slice<FieldType> {
+ // object: o,
+ // offset: 16,
+ // length: torque_internal::%IndexedFieldLength<ClassName>(
+ // o, "field_name"),
+ // unsafeMarker: torque_internal::Unsafe {}
+ // };
+ // }
+ //
+ // If the field has an unknown offset, and the previous field is named p, and
+ // an item in the previous field has size 4:
+ // FieldSliceClassNameFieldName(o: ClassName) {
+ // const previous = &o.p;
+ // return torque_internal::Slice<FieldType> {
+ // object: o,
+ // offset: previous.offset + 4 * previous.length,
+ // length: torque_internal::%IndexedFieldLength<ClassName>(
+ // o, "field_name"),
+ // unsafeMarker: torque_internal::Unsafe {}
+ // };
+ // }
+ const Field& field = fields_[field_index];
+ std::string macro_name = GetSliceMacroName(field);
+ Signature signature;
+ Identifier* parameter_identifier = MakeNode<Identifier>("o");
+ signature.parameter_names.push_back(parameter_identifier);
+ signature.parameter_types.types.push_back(this);
+ signature.parameter_types.var_args = false;
+ signature.return_type = TypeOracle::GetSliceType(field.name_and_type.type);
+
+ std::vector<Statement*> statements;
+ Expression* offset_expression = nullptr;
+ IdentifierExpression* parameter =
+ MakeNode<IdentifierExpression>(parameter_identifier);
+
+ if (field.offset.has_value()) {
+ offset_expression =
+ MakeNode<NumberLiteralExpression>(static_cast<double>(*field.offset));
+ } else {
+ const Field* previous = GetFieldPreceding(field_index);
+ DCHECK_NOT_NULL(previous);
+
+ // o.p
+ Expression* previous_expression =
+ MakeFieldAccessExpression(parameter, previous->name_and_type.name);
+
+ // &o.p
+ previous_expression = MakeCallExpression("&", {previous_expression});
+
+ // const previous = &o.p;
+ Statement* define_previous =
+ MakeConstDeclarationStatement("previous", previous_expression);
+ statements.push_back(define_previous);
+
+ // 4
+ size_t previous_element_size;
+ std::tie(previous_element_size, std::ignore) =
+ *SizeOf(previous->name_and_type.type);
+ Expression* previous_element_size_expression =
+ MakeNode<NumberLiteralExpression>(
+ static_cast<double>(previous_element_size));
+
+ // previous.length
+ Expression* previous_length_expression = MakeFieldAccessExpression(
+ MakeIdentifierExpression("previous"), "length");
+
+ // previous.offset
+ Expression* previous_offset_expression = MakeFieldAccessExpression(
+ MakeIdentifierExpression("previous"), "offset");
+
+ // 4 * previous.length
+ // In contrast to the code used for allocation, we don't need overflow
+ // checks here because we already know all the offsets fit into memory.
+ offset_expression = MakeCallExpression(
+ "*", {previous_element_size_expression, previous_length_expression});
+
+ // previous.offset + 4 * previous.length
+ offset_expression = MakeCallExpression(
+ "+", {previous_offset_expression, offset_expression});
+ }
+
+ // torque_internal::%IndexedFieldLength<ClassName>(o, "field_name")
+ Expression* length_expression = MakeCallExpression(
+ MakeIdentifierExpression({"torque_internal"}, "%IndexedFieldLength",
+ {MakeNode<PrecomputedTypeExpression>(this)}),
+ {parameter, MakeNode<StringLiteralExpression>(
+ StringLiteralQuote(field.name_and_type.name))});
+
+ // torque_internal::Unsafe {}
+ Expression* unsafe_expression = MakeStructExpression(
+ MakeBasicTypeExpression({"torque_internal"}, "Unsafe"), {});
+
+ // torque_internal::Slice<FieldType> {
+ // object: o,
+ // offset: <<offset_expression>>,
+ // length: torque_internal::%IndexedFieldLength<ClassName>(
+ // o, "field_name"),
+ // unsafeMarker: torque_internal::Unsafe {}
+ // }
+ Expression* slice_expression = MakeStructExpression(
+ MakeBasicTypeExpression(
+ {"torque_internal"}, "Slice",
+ {MakeNode<PrecomputedTypeExpression>(field.name_and_type.type)}),
+ {{MakeNode<Identifier>("object"), parameter},
+ {MakeNode<Identifier>("offset"), offset_expression},
+ {MakeNode<Identifier>("length"), length_expression},
+ {MakeNode<Identifier>("unsafeMarker"), unsafe_expression}});
+
+ statements.push_back(MakeNode<ReturnStatement>(slice_expression));
+ Statement* block =
+ MakeNode<BlockStatement>(/*deferred=*/false, std::move(statements));
+
+ Macro* macro = Declarations::DeclareMacro(macro_name, true, base::nullopt,
+ signature, block, base::nullopt);
+ GlobalContext::EnsureInCCOutputList(TorqueMacro::cast(macro));
+}
+
bool ClassType::HasStaticSize() const {
// Abstract classes don't have instances directly, so asking this question
// doesn't make sense.
@@ -728,6 +910,15 @@ bool ClassType::HasStaticSize() const {
return size().SingleValue().has_value();
}
+SourceId ClassType::AttributedToFile() const {
+ bool in_test_directory = StringStartsWith(
+ SourceFileMap::PathFromV8Root(GetPosition().source).substr(), "test/");
+ if (!in_test_directory && (IsExtern() || ShouldExport())) {
+ return GetPosition().source;
+ }
+ return SourceFileMap::GetSourceId("src/objects/torque-defined-classes.tq");
+}
+
void PrintSignature(std::ostream& os, const Signature& sig, bool with_names) {
os << "(";
for (size_t i = 0; i < sig.parameter_types.types.size(); ++i) {
@@ -1096,10 +1287,23 @@ base::Optional<NameAndType> ExtractSimpleFieldArraySize(
}
std::string Type::GetRuntimeType() const {
- // TODO(tebbi): Other types are currently unsupported, since there the TNode
- // types and the C++ runtime types disagree.
- DCHECK(this->IsSubtypeOf(TypeOracle::GetTaggedType()));
- return GetGeneratedTNodeTypeName();
+ if (IsSubtypeOf(TypeOracle::GetSmiType())) return "Smi";
+ if (IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ return GetGeneratedTNodeTypeName();
+ }
+ if (base::Optional<const StructType*> struct_type = StructSupertype()) {
+ std::stringstream result;
+ result << "std::tuple<";
+ bool first = true;
+ for (const Type* field_type : LowerType(*struct_type)) {
+ if (!first) result << ", ";
+ first = false;
+ result << field_type->GetRuntimeType();
+ }
+ result << ">";
+ return result.str();
+ }
+ return ConstexprVersion()->GetGeneratedTypeName();
}
} // namespace torque
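To make the widened GetRuntimeType() mapping concrete, a hedged illustration of the strings it would now produce; the Torque struct S is hypothetical and the exact names depend on the type declarations in base.tq:

// Assuming a Torque struct S { a: Smi; b: intptr; }:
//   Smi     -> "Smi"                       (Smi short-circuit)
//   JSArray -> "JSArray"                   (tagged: generated TNode type name)
//   S       -> "std::tuple<Smi, intptr_t>" (struct: tuple of lowered fields)
//   intptr  -> "intptr_t"                  (fallback: constexpr version name)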
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index d2e857a261..25c849597d 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -95,7 +95,10 @@ struct SpecializationKey {
using MaybeSpecializationKey = base::Optional<SpecializationKey<GenericType>>;
-struct RuntimeType {
+struct TypeChecker {
+ // The type of the object. This string is not guaranteed to correspond to a
+ // C++ class, but just to a type checker function: for any type "Foo" here,
+ // the function Object::IsFoo must exist.
std::string type;
// If {type} is "MaybeObject", then {weak_ref_to} indicates the corresponding
// strong object type. Otherwise, {weak_ref_to} is empty.
@@ -114,6 +117,7 @@ class V8_EXPORT_PRIVATE Type : public TypeBase {
// Used for naming generated code.
virtual std::string SimpleName() const;
+ std::string UnhandlifiedCppTypeName() const;
std::string HandlifiedCppTypeName() const;
const Type* parent() const { return parent_; }
@@ -135,7 +139,7 @@ class V8_EXPORT_PRIVATE Type : public TypeBase {
std::string GetConstexprGeneratedTypeName() const;
base::Optional<const ClassType*> ClassSupertype() const;
base::Optional<const StructType*> StructSupertype() const;
- virtual std::vector<RuntimeType> GetRuntimeTypes() const { return {}; }
+ virtual std::vector<TypeChecker> GetTypeCheckers() const { return {}; }
virtual std::string GetRuntimeType() const;
static const Type* CommonSupertype(const Type* a, const Type* b);
void AddAlias(std::string alias) const { aliases_.insert(std::move(alias)); }
@@ -156,6 +160,7 @@ class V8_EXPORT_PRIVATE Type : public TypeBase {
virtual const Type* ConstexprVersion() const {
if (constexpr_version_) return constexpr_version_;
if (IsConstexpr()) return this;
+ if (parent()) return parent()->ConstexprVersion();
return nullptr;
}
@@ -279,7 +284,7 @@ class AbstractType final : public Type {
return nullptr;
}
- std::vector<RuntimeType> GetRuntimeTypes() const override;
+ std::vector<TypeChecker> GetTypeCheckers() const override;
size_t AlignmentLog2() const override;
@@ -315,6 +320,10 @@ class AbstractType final : public Type {
return flags_ & AbstractTypeFlag::kTransient;
}
+ bool UseParentTypeChecker() const {
+ return flags_ & AbstractTypeFlag::kUseParentTypeChecker;
+ }
+
AbstractTypeFlags flags_;
const std::string name_;
const std::string generated_type_;
@@ -349,7 +358,7 @@ class V8_EXPORT_PRIVATE BuiltinPointerType final : public Type {
}
size_t function_pointer_type_id() const { return function_pointer_type_id_; }
- std::vector<RuntimeType> GetRuntimeTypes() const override {
+ std::vector<TypeChecker> GetTypeCheckers() const override {
return {{"Smi", ""}};
}
@@ -461,10 +470,10 @@ class V8_EXPORT_PRIVATE UnionType final : public Type {
return union_type ? UnionType(*union_type) : UnionType(t);
}
- std::vector<RuntimeType> GetRuntimeTypes() const override {
- std::vector<RuntimeType> result;
+ std::vector<TypeChecker> GetTypeCheckers() const override {
+ std::vector<TypeChecker> result;
for (const Type* member : types_) {
- std::vector<RuntimeType> sub_result = member->GetRuntimeTypes();
+ std::vector<TypeChecker> sub_result = member->GetTypeCheckers();
result.insert(result.end(), sub_result.begin(), sub_result.end());
}
return result;
@@ -498,8 +507,8 @@ class V8_EXPORT_PRIVATE BitFieldStructType final : public Type {
return parent()->GetGeneratedTNodeTypeName();
}
- std::vector<RuntimeType> GetRuntimeTypes() const override {
- return {{parent()->GetGeneratedTNodeTypeName(), ""}};
+ std::vector<TypeChecker> GetTypeCheckers() const override {
+ return parent()->GetTypeCheckers();
}
void SetConstexprVersion(const Type*) const override { UNREACHABLE(); }
@@ -559,7 +568,7 @@ class AggregateType : public Type {
std::vector<Method*> Methods(const std::string& name) const;
std::vector<const AggregateType*> GetHierarchy() const;
- std::vector<RuntimeType> GetRuntimeTypes() const override {
+ std::vector<TypeChecker> GetTypeCheckers() const override {
return {{name_, ""}};
}
@@ -609,6 +618,8 @@ class StructType final : public AggregateType {
// Classifies a struct as containing tagged data, untagged data, or both.
Classification ClassifyContents() const;
+ SourcePosition GetPosition() const { return decl_->pos; }
+
private:
friend class TypeOracle;
StructType(Namespace* nspace, const StructDeclaration* decl,
@@ -672,6 +683,11 @@ class ClassType final : public AggregateType {
return flags_ & ClassFlag::kGenerateCppClassDefinitions || !IsExtern() ||
ShouldGenerateBodyDescriptor();
}
+ bool ShouldGenerateFullClassDefinition() const {
+ return !IsExtern() && !(flags_ & ClassFlag::kCustomCppClass);
+ }
+ // Classes with multiple or non-standard maps do not get an auto-generated map.
+ bool HasCustomMap() const { return flags_ & ClassFlag::kCustomMap; }
bool ShouldExport() const { return flags_ & ClassFlag::kExport; }
bool IsShape() const { return flags_ & ClassFlag::kIsShape; }
bool HasStaticSize() const;
@@ -703,6 +719,14 @@ class ClassType final : public AggregateType {
std::vector<ObjectSlotKind> ComputeHeaderSlotKinds() const;
base::Optional<ObjectSlotKind> ComputeArraySlotKind() const;
bool HasNoPointerSlots() const;
+ bool HasIndexedFieldsIncludingInParents() const;
+ const Field* GetFieldPreceding(size_t field_index) const;
+
+ // Given that the field exists in this class or a superclass, returns the
+ // specific class that declared the field.
+ const ClassType* GetClassDeclaringField(const Field& f) const;
+
+ std::string GetSliceMacroName(const Field& field) const;
const InstanceTypeConstraints& GetInstanceTypeConstraints() const {
return decl_->instance_type_constraints;
@@ -717,6 +741,7 @@ class ClassType final : public AggregateType {
return flags_ & ClassFlag::kUndefinedLayout;
}
SourcePosition GetPosition() const { return decl_->pos; }
+ SourceId AttributedToFile() const;
// TODO(tebbi): We should no longer pass around types as const pointers, so
// that we can avoid mutable fields and const initializers for
@@ -733,6 +758,8 @@ class ClassType final : public AggregateType {
ClassFlags flags, const std::string& generates,
const ClassDeclaration* decl, const TypeAlias* alias);
+ void GenerateSliceAccessor(size_t field_index);
+
size_t header_size_;
ResidueClass size_;
mutable ClassFlags flags_;
diff --git a/deps/v8/src/tracing/DIR_METADATA b/deps/v8/src/tracing/DIR_METADATA
new file mode 100644
index 0000000000..3ba1106a5f
--- /dev/null
+++ b/deps/v8/src/tracing/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Platform>DevTools>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/tracing/OWNERS b/deps/v8/src/tracing/OWNERS
index 7ab7c063da..6afd4d0fee 100644
--- a/deps/v8/src/tracing/OWNERS
+++ b/deps/v8/src/tracing/OWNERS
@@ -1,4 +1,2 @@
alph@chromium.org
petermarshall@chromium.org
-
-# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/tracing/trace-categories.h b/deps/v8/src/tracing/trace-categories.h
index 2f9d672801..28c66a3101 100644
--- a/deps/v8/src/tracing/trace-categories.h
+++ b/deps/v8/src/tracing/trace-categories.h
@@ -46,6 +46,7 @@ PERFETTO_DEFINE_CATEGORIES(
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.runtime")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats_sampling")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.turbofan")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.zone_stats")),
diff --git a/deps/v8/src/trap-handler/DIR_METADATA b/deps/v8/src/trap-handler/DIR_METADATA
new file mode 100644
index 0000000000..3b428d9660
--- /dev/null
+++ b/deps/v8/src/trap-handler/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>WebAssembly"
+}
\ No newline at end of file
diff --git a/deps/v8/src/trap-handler/OWNERS b/deps/v8/src/trap-handler/OWNERS
index f6f3bc07ec..7035a46ab6 100644
--- a/deps/v8/src/trap-handler/OWNERS
+++ b/deps/v8/src/trap-handler/OWNERS
@@ -6,5 +6,3 @@ ahaas@chromium.org
# ahaas@chromium.org
# mseaborn@chromium.org
# mark@chromium.org
-
-# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/src/trap-handler/handler-outside.cc b/deps/v8/src/trap-handler/handler-outside.cc
index c6ee7b2376..62355a5b60 100644
--- a/deps/v8/src/trap-handler/handler-outside.cc
+++ b/deps/v8/src/trap-handler/handler-outside.cc
@@ -249,9 +249,19 @@ bool RegisterDefaultTrapHandler() { return false; }
void RemoveTrapHandler() {}
#endif
-bool g_is_trap_handler_enabled;
+bool g_is_trap_handler_enabled{false};
+std::atomic<bool> g_can_enable_trap_handler{true};
bool EnableTrapHandler(bool use_v8_handler) {
+ // We should only enable the trap handler once, and before any call to
+ // {IsTrapHandlerEnabled}. Enabling the trap handler late can lead to problems
+ // because code or objects might have been generated under the assumption that
+ // trap handlers are disabled.
+ bool can_enable =
+ g_can_enable_trap_handler.exchange(false, std::memory_order_relaxed);
+ if (!can_enable) {
+ FATAL("EnableTrapHandler called twice, or after IsTrapHandlerEnabled");
+ }
if (!V8_TRAP_HANDLER_SUPPORTED) {
return false;
}
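A hedged sketch of the call-order contract this now enforces on embedders; the namespace follows src/trap-handler, but the init function itself is hypothetical:

// Decide about trap handlers exactly once, before anything queries
// IsTrapHandlerEnabled(); a repeated or late EnableTrapHandler call now
// dies with FATAL(...) instead of silently invalidating generated code.
void InitWasmTrapHandling() {
  bool ok = v8::internal::trap_handler::EnableTrapHandler(
      /*use_v8_handler=*/true);
  (void)ok;  // False when the platform lacks trap-handler support.
}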
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index f6fdca553e..e75355decd 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -8,6 +8,8 @@
#include <stdint.h>
#include <stdlib.h>
+#include <atomic>
+
#include "src/base/build_config.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
@@ -64,15 +66,32 @@ void V8_EXPORT_PRIVATE ReleaseHandlerData(int index);
#define THREAD_LOCAL __thread
#endif
+// Initially false, set to true when trap handlers are enabled. Never goes
+// back to false after that.
extern bool g_is_trap_handler_enabled;
+
+// Initially true, set to false the first time either {IsTrapHandlerEnabled}
+// or {EnableTrapHandler} is called. This prevents {EnableTrapHandler} from
+// being called repeatedly, or after {IsTrapHandlerEnabled}. Needs to be
+// atomic because {IsTrapHandlerEnabled} can be called from any thread.
+// Updated using relaxed semantics, since it's not used for synchronization.
+extern std::atomic<bool> g_can_enable_trap_handler;
+
// Enables trap handling for WebAssembly bounds checks.
//
// use_v8_handler indicates that V8 should install its own handler
// rather than relying on the embedder to do it.
-bool EnableTrapHandler(bool use_v8_handler);
+V8_EXPORT_PRIVATE bool EnableTrapHandler(bool use_v8_handler);
inline bool IsTrapHandlerEnabled() {
DCHECK_IMPLIES(g_is_trap_handler_enabled, V8_TRAP_HANDLER_SUPPORTED);
+ // Disallow enabling the trap handler after retrieving the current value.
+ // Enabling the trap handler late can produce issues because code or objects
+ // might have been generated under the assumption that trap handlers are
+ // disabled.
+ // Note: We test before setting to avoid the contention an unconditional
+ // write would cause.
+ if (g_can_enable_trap_handler.load(std::memory_order_relaxed)) {
+ g_can_enable_trap_handler.store(false, std::memory_order_relaxed);
+ }
return g_is_trap_handler_enabled;
}
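The test-before-store idiom used above generalizes beyond V8; a minimal standalone sketch:

#include <atomic>

std::atomic<bool> g_flag{true};

// Write only when the value actually changes, so hot readers do not keep
// invalidating each other's cache line; relaxed ordering suffices because
// the flag does not guard any other memory.
inline void ClearFlagOnce() {
  if (g_flag.load(std::memory_order_relaxed)) {
    g_flag.store(false, std::memory_order_relaxed);
  }
}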
diff --git a/deps/v8/src/utils/DIR_METADATA b/deps/v8/src/utils/DIR_METADATA
new file mode 100644
index 0000000000..2f8dbbcf45
--- /dev/null
+++ b/deps/v8/src/utils/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/src/utils/OWNERS b/deps/v8/src/utils/OWNERS
index 4750620072..48d72aea5e 100644
--- a/deps/v8/src/utils/OWNERS
+++ b/deps/v8/src/utils/OWNERS
@@ -1,3 +1 @@
file:../../COMMON_OWNERS
-
-# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/utils/bit-vector.cc b/deps/v8/src/utils/bit-vector.cc
index 20e645f24c..f90175189b 100644
--- a/deps/v8/src/utils/bit-vector.cc
+++ b/deps/v8/src/utils/bit-vector.cc
@@ -11,7 +11,7 @@ namespace v8 {
namespace internal {
#ifdef DEBUG
-void BitVector::Print() {
+void BitVector::Print() const {
bool first = true;
PrintF("{");
for (int i = 0; i < length(); i++) {
diff --git a/deps/v8/src/utils/bit-vector.h b/deps/v8/src/utils/bit-vector.h
index d68009d723..c171f51160 100644
--- a/deps/v8/src/utils/bit-vector.h
+++ b/deps/v8/src/utils/bit-vector.h
@@ -277,7 +277,7 @@ class V8_EXPORT_PRIVATE BitVector : public ZoneObject {
int length() const { return length_; }
#ifdef DEBUG
- void Print();
+ void Print() const;
#endif
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(BitVector);
diff --git a/deps/v8/src/utils/identity-map.cc b/deps/v8/src/utils/identity-map.cc
index 909c175007..6e22cc783a 100644
--- a/deps/v8/src/utils/identity-map.cc
+++ b/deps/v8/src/utils/identity-map.cc
@@ -26,7 +26,7 @@ void IdentityMapBase::Clear() {
DCHECK(!is_iterable());
DCHECK_NOT_NULL(strong_roots_entry_);
heap_->UnregisterStrongRoots(strong_roots_entry_);
- DeletePointerArray(reinterpret_cast<void**>(keys_), capacity_);
+ DeletePointerArray(reinterpret_cast<uintptr_t*>(keys_), capacity_);
DeletePointerArray(values_, capacity_);
keys_ = nullptr;
strong_roots_entry_ = nullptr;
@@ -47,8 +47,8 @@ void IdentityMapBase::DisableIteration() {
is_iterable_ = false;
}
-int IdentityMapBase::ScanKeysFor(Address address) const {
- int start = Hash(address) & mask_;
+int IdentityMapBase::ScanKeysFor(Address address, uint32_t hash) const {
+ int start = hash & mask_;
Address not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol().ptr();
for (int index = start; index < capacity_; index++) {
if (keys_[index] == address) return index; // Found.
@@ -61,33 +61,41 @@ int IdentityMapBase::ScanKeysFor(Address address) const {
return -1;
}
-int IdentityMapBase::InsertKey(Address address) {
+std::pair<int, bool> IdentityMapBase::InsertKey(Address address,
+ uint32_t hash) {
+ DCHECK_EQ(gc_counter_, heap_->gc_count());
+
+ // Grow the map if we reached >= 80% occupancy.
+ if (size_ + size_ / 4 >= capacity_) {
+ Resize(capacity_ * kResizeFactor);
+ }
+
Address not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol().ptr();
+
+ int start = hash & mask_;
+ // Guaranteed to terminate: since size_ < capacity_, there must be at least
+ // one empty slot.
+ int index = start;
while (true) {
- int start = Hash(address) & mask_;
- int limit = capacity_ / 2;
- // Search up to {limit} entries.
- for (int index = start; --limit > 0; index = (index + 1) & mask_) {
- if (keys_[index] == address) return index; // Found.
- if (keys_[index] == not_mapped) { // Free entry.
- size_++;
- DCHECK_LE(size_, capacity_);
- keys_[index] = address;
- return index;
- }
+ if (keys_[index] == address) return {index, true}; // Found.
+ if (keys_[index] == not_mapped) { // Free entry.
+ size_++;
+ DCHECK_LE(size_, capacity_);
+ keys_[index] = address;
+ return {index, false};
}
- // Should only have to resize once, since we grow 4x.
- Resize(capacity_ * kResizeFactor);
+ index = (index + 1) & mask_;
+ // We should never loop back to the start.
+ DCHECK_NE(index, start);
}
- UNREACHABLE();
}
-bool IdentityMapBase::DeleteIndex(int index, void** deleted_value) {
+bool IdentityMapBase::DeleteIndex(int index, uintptr_t* deleted_value) {
if (deleted_value != nullptr) *deleted_value = values_[index];
Address not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol().ptr();
DCHECK_NE(keys_[index], not_mapped);
keys_[index] = not_mapped;
- values_[index] = nullptr;
+ values_[index] = 0;
size_--;
DCHECK_GE(size_, 0);
@@ -113,7 +121,7 @@ bool IdentityMapBase::DeleteIndex(int index, void** deleted_value) {
}
DCHECK_EQ(not_mapped, keys_[index]);
- DCHECK_NULL(values_[index]);
+ DCHECK_EQ(values_[index], 0);
std::swap(keys_[index], keys_[next_index]);
std::swap(values_[index], values_[next_index]);
index = next_index;
@@ -123,39 +131,69 @@ bool IdentityMapBase::DeleteIndex(int index, void** deleted_value) {
}
int IdentityMapBase::Lookup(Address key) const {
- int index = ScanKeysFor(key);
+ uint32_t hash = Hash(key);
+ int index = ScanKeysFor(key, hash);
if (index < 0 && gc_counter_ != heap_->gc_count()) {
// Miss; rehash if there was a GC, then lookup again.
const_cast<IdentityMapBase*>(this)->Rehash();
- index = ScanKeysFor(key);
+ index = ScanKeysFor(key, hash);
}
return index;
}
-int IdentityMapBase::LookupOrInsert(Address key) {
+std::pair<int, bool> IdentityMapBase::LookupOrInsert(Address key) {
+ uint32_t hash = Hash(key);
// Perform an optimistic lookup.
- int index = ScanKeysFor(key);
+ int index = ScanKeysFor(key, hash);
+ bool already_exists;
if (index < 0) {
// Miss; rehash if there was a GC, then insert.
if (gc_counter_ != heap_->gc_count()) Rehash();
- index = InsertKey(key);
+ std::tie(index, already_exists) = InsertKey(key, hash);
+ } else {
+ already_exists = true;
}
DCHECK_GE(index, 0);
- return index;
+ return {index, already_exists};
}
-int IdentityMapBase::Hash(Address address) const {
+uint32_t IdentityMapBase::Hash(Address address) const {
CHECK_NE(address, ReadOnlyRoots(heap_).not_mapped_symbol().ptr());
- return static_cast<int>(hasher_(address));
+ return static_cast<uint32_t>(hasher_(address));
}
// Searches this map for the given key using the object's address
// as the identity, returning:
-// found => a pointer to the storage location for the value
-// not found => a pointer to a new storage location for the value
-IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Address key) {
+// found => a pointer to the storage location for the value, true
+// not found => a pointer to a new storage location for the value, false
+IdentityMapFindResult<uintptr_t> IdentityMapBase::FindOrInsertEntry(
+ Address key) {
CHECK(!is_iterable()); // Don't allow insertion while iterable.
if (capacity_ == 0) {
+ return {InsertEntry(key), false};
+ }
+ auto lookup_result = LookupOrInsert(key);
+ return {&values_[lookup_result.first], lookup_result.second};
+}
+
+// Searches this map for the given key using the object's address
+// as the identity, returning:
+// found => a pointer to the storage location for the value
+// not found => {nullptr}
+IdentityMapBase::RawEntry IdentityMapBase::FindEntry(Address key) const {
+ // Don't allow find by key while iterable (might rehash).
+ CHECK(!is_iterable());
+ if (size_ == 0) return nullptr;
+ int index = Lookup(key);
+ return index >= 0 ? &values_[index] : nullptr;
+}
+
+// Inserts the given key using the object's address as the identity, returning
+// a pointer to the new storage location for the value.
+IdentityMapBase::RawEntry IdentityMapBase::InsertEntry(Address key) {
+ // Don't allow find by key while iterable (might rehash).
+ CHECK(!is_iterable());
+ if (capacity_ == 0) {
// Allocate the initial storage for keys and values.
capacity_ = kInitialIdentityMapSize;
mask_ = kInitialIdentityMapSize - 1;
@@ -165,32 +203,26 @@ IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Address key) {
Address not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol().ptr();
for (int i = 0; i < capacity_; i++) keys_[i] = not_mapped;
values_ = NewPointerArray(capacity_);
- memset(values_, 0, sizeof(void*) * capacity_);
+ memset(values_, 0, sizeof(uintptr_t) * capacity_);
strong_roots_entry_ = heap_->RegisterStrongRoots(
FullObjectSlot(keys_), FullObjectSlot(keys_ + capacity_));
+ } else {
+ // Rehash if there was a GC, then insert.
+ if (gc_counter_ != heap_->gc_count()) Rehash();
}
- int index = LookupOrInsert(key);
- return &values_[index];
-}
-// Searches this map for the given key using the object's address
-// as the identity, returning:
-// found => a pointer to the storage location for the value
-// not found => {nullptr}
-IdentityMapBase::RawEntry IdentityMapBase::FindEntry(Address key) const {
- // Don't allow find by key while iterable (might rehash).
- CHECK(!is_iterable());
- if (size_ == 0) return nullptr;
- // Remove constness since lookup might have to rehash.
- int index = Lookup(key);
- return index >= 0 ? &values_[index] : nullptr;
+ int index;
+ bool already_exists;
+ std::tie(index, already_exists) = InsertKey(key, Hash(key));
+ DCHECK(!already_exists);
+ return &values_[index];
}
// Deletes the given key from the map using the object's address as the
// identity, returning true iff the key was found (in which case, the value
// argument will be set to the deleted entry's value).
-bool IdentityMapBase::DeleteEntry(Address key, void** deleted_value) {
+bool IdentityMapBase::DeleteEntry(Address key, uintptr_t* deleted_value) {
CHECK(!is_iterable()); // Don't allow deletion by key while iterable.
if (size_ == 0) return false;
int index = Lookup(key);
@@ -232,7 +264,7 @@ void IdentityMapBase::Rehash() {
// Record the current GC counter.
gc_counter_ = heap_->gc_count();
// Assume that most objects won't be moved.
- std::vector<std::pair<Address, void*>> reinsert;
+ std::vector<std::pair<Address, uintptr_t>> reinsert;
// Search the table looking for keys that wouldn't be found with their
// current hashcode and evacuate them.
int last_empty = -1;
@@ -244,9 +276,9 @@ void IdentityMapBase::Rehash() {
int pos = Hash(keys_[i]) & mask_;
if (pos <= last_empty || pos > i) {
// Evacuate an entry that is in the wrong place.
- reinsert.push_back(std::pair<Address, void*>(keys_[i], values_[i]));
+ reinsert.push_back(std::pair<Address, uintptr_t>(keys_[i], values_[i]));
keys_[i] = not_mapped;
- values_[i] = nullptr;
+ values_[i] = 0;
last_empty = i;
size_--;
}
@@ -254,7 +286,7 @@ void IdentityMapBase::Rehash() {
}
// Reinsert all the key/value pairs that were in the wrong place.
for (auto pair : reinsert) {
- int index = InsertKey(pair.first);
+ int index = InsertKey(pair.first, Hash(pair.first)).first;
DCHECK_GE(index, 0);
values_[index] = pair.second;
}
@@ -266,7 +298,7 @@ void IdentityMapBase::Resize(int new_capacity) {
DCHECK_GT(new_capacity, size_);
int old_capacity = capacity_;
Address* old_keys = keys_;
- void** old_values = values_;
+ uintptr_t* old_values = values_;
capacity_ = new_capacity;
mask_ = capacity_ - 1;
@@ -277,11 +309,11 @@ void IdentityMapBase::Resize(int new_capacity) {
Address not_mapped = ReadOnlyRoots(heap_).not_mapped_symbol().ptr();
for (int i = 0; i < capacity_; i++) keys_[i] = not_mapped;
values_ = NewPointerArray(capacity_);
- memset(values_, 0, sizeof(void*) * capacity_);
+ memset(values_, 0, sizeof(uintptr_t) * capacity_);
for (int i = 0; i < old_capacity; i++) {
if (old_keys[i] == not_mapped) continue;
- int index = InsertKey(old_keys[i]);
+ int index = InsertKey(old_keys[i], Hash(old_keys[i])).first;
DCHECK_GE(index, 0);
values_[index] = old_values[i];
}
@@ -292,7 +324,7 @@ void IdentityMapBase::Resize(int new_capacity) {
FullObjectSlot(keys_ + capacity_));
// Delete old storage.
- DeletePointerArray(reinterpret_cast<void**>(old_keys), old_capacity);
+ DeletePointerArray(reinterpret_cast<uintptr_t*>(old_keys), old_capacity);
DeletePointerArray(old_values, old_capacity);
}
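The hash is now computed once per operation and threaded through ScanKeysFor/InsertKey, and both InsertKey and LookupOrInsert report whether the key was already present, so callers get find-or-insert semantics from a single probe sequence. A minimal, hypothetical sketch of that pattern (illustrative linear probing only, not V8's implementation; kEmpty stands in for the not_mapped sentinel):

#include <cstdint>
#include <utility>
#include <vector>

// Find-or-insert with one probe sequence: returns {slot, already_exists}.
// Assumes keys.size() is a power of two and the table is never full
// (the real map resizes before that point).
std::pair<int, bool> FindOrInsertSlot(std::vector<uintptr_t>& keys,
                                      uintptr_t key, uint32_t hash) {
  const uintptr_t kEmpty = 0;  // sentinel for an unused slot
  const uint32_t mask = static_cast<uint32_t>(keys.size()) - 1;
  for (uint32_t i = hash & mask;; i = (i + 1) & mask) {
    if (keys[i] == key) return {static_cast<int>(i), true};  // found
    if (keys[i] == kEmpty) {  // miss: claim this slot immediately
      keys[i] = key;
      return {static_cast<int>(i), false};
    }
  }
}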
diff --git a/deps/v8/src/utils/identity-map.h b/deps/v8/src/utils/identity-map.h
index 362a3decfa..20b5f100bf 100644
--- a/deps/v8/src/utils/identity-map.h
+++ b/deps/v8/src/utils/identity-map.h
@@ -5,6 +5,8 @@
#ifndef V8_UTILS_IDENTITY_MAP_H_
#define V8_UTILS_IDENTITY_MAP_H_
+#include <type_traits>
+
#include "src/base/functional.h"
#include "src/handles/handles.h"
#include "src/objects/heap-object.h"
@@ -16,6 +18,12 @@ namespace internal {
class Heap;
class StrongRootsEntry;
+template <typename T>
+struct IdentityMapFindResult {
+ T* entry;
+ bool already_exists;
+};
+
// Base class of identity maps; contains shared code for all template
// instantiations.
class V8_EXPORT_PRIVATE IdentityMapBase {
@@ -30,7 +38,7 @@ class V8_EXPORT_PRIVATE IdentityMapBase {
// within the {keys_} array in order to simulate a moving GC.
friend class IdentityMapTester;
- using RawEntry = void**;
+ using RawEntry = uintptr_t*;
explicit IdentityMapBase(Heap* heap)
: heap_(heap),
@@ -44,9 +52,10 @@ class V8_EXPORT_PRIVATE IdentityMapBase {
is_iterable_(false) {}
virtual ~IdentityMapBase();
- RawEntry GetEntry(Address key);
+ IdentityMapFindResult<uintptr_t> FindOrInsertEntry(Address key);
RawEntry FindEntry(Address key) const;
- bool DeleteEntry(Address key, void** deleted_value);
+ RawEntry InsertEntry(Address key);
+ bool DeleteEntry(Address key, uintptr_t* deleted_value);
void Clear();
Address KeyAtIndex(int index) const;
@@ -57,19 +66,19 @@ class V8_EXPORT_PRIVATE IdentityMapBase {
void EnableIteration();
void DisableIteration();
- virtual void** NewPointerArray(size_t length) = 0;
- virtual void DeletePointerArray(void** array, size_t length) = 0;
+ virtual uintptr_t* NewPointerArray(size_t length) = 0;
+ virtual void DeletePointerArray(uintptr_t* array, size_t length) = 0;
private:
// Internal implementation should not be called directly by subclasses.
- int ScanKeysFor(Address address) const;
- int InsertKey(Address address);
+ int ScanKeysFor(Address address, uint32_t hash) const;
+ std::pair<int, bool> InsertKey(Address address, uint32_t hash);
int Lookup(Address key) const;
- int LookupOrInsert(Address key);
- bool DeleteIndex(int index, void** deleted_value);
+ std::pair<int, bool> LookupOrInsert(Address key);
+ bool DeleteIndex(int index, uintptr_t* deleted_value);
void Rehash();
void Resize(int new_capacity);
- int Hash(Address address) const;
+ uint32_t Hash(Address address) const;
base::hash<uintptr_t> hasher_;
Heap* heap_;
@@ -79,7 +88,7 @@ class V8_EXPORT_PRIVATE IdentityMapBase {
int mask_;
Address* keys_;
StrongRootsEntry* strong_roots_entry_;
- void** values_;
+ uintptr_t* values_;
bool is_iterable_;
DISALLOW_COPY_AND_ASSIGN(IdentityMapBase);
@@ -89,11 +98,15 @@ class V8_EXPORT_PRIVATE IdentityMapBase {
// The map is robust w.r.t. garbage collection by synchronization with the
// supplied {heap}.
// * Keys are treated as strong roots.
-// * The value type {V} must be reinterpret_cast'able to {void*}
+// * The value type {V} must be reinterpret_cast'able to {uintptr_t}
// * The value type {V} must not be a heap type.
template <typename V, class AllocationPolicy>
class IdentityMap : public IdentityMapBase {
public:
+ STATIC_ASSERT(sizeof(V) <= sizeof(uintptr_t));
+ STATIC_ASSERT(std::is_trivially_copyable<V>::value);
+ STATIC_ASSERT(std::is_trivially_destructible<V>::value);
+
explicit IdentityMap(Heap* heap,
AllocationPolicy allocator = AllocationPolicy())
: IdentityMapBase(heap), allocator_(allocator) {}
@@ -101,10 +114,15 @@ class IdentityMap : public IdentityMapBase {
// Searches this map for the given key using the object's address
// as the identity, returning:
- // found => a pointer to the storage location for the value
- // not found => a pointer to a new storage location for the value
- V* Get(Handle<Object> key) { return Get(*key); }
- V* Get(Object key) { return reinterpret_cast<V*>(GetEntry(key.ptr())); }
+ // found => a pointer to the storage location for the value, true
+ // not found => a pointer to a new storage location for the value, false
+ IdentityMapFindResult<V> FindOrInsert(Handle<Object> key) {
+ return FindOrInsert(*key);
+ }
+ IdentityMapFindResult<V> FindOrInsert(Object key) {
+ auto raw = FindOrInsertEntry(key.ptr());
+ return {reinterpret_cast<V*>(raw.entry), raw.already_exists};
+ }
// Searches this map for the given key using the object's address
// as the identity, returning:
@@ -115,17 +133,18 @@ class IdentityMap : public IdentityMapBase {
return reinterpret_cast<V*>(FindEntry(key.ptr()));
}
- // Set the value for the given key.
- void Set(Handle<Object> key, V v) { Set(*key, v); }
- void Set(Object key, V v) {
- *(reinterpret_cast<V*>(GetEntry(key.ptr()))) = v;
+ // Insert the value for the given key. The key must not have previously
+ // existed.
+ void Insert(Handle<Object> key, V v) { Insert(*key, v); }
+ void Insert(Object key, V v) {
+ *reinterpret_cast<V*>(InsertEntry(key.ptr())) = v;
}
bool Delete(Handle<Object> key, V* deleted_value) {
return Delete(*key, deleted_value);
}
bool Delete(Object key, V* deleted_value) {
- void* v = nullptr;
+ uintptr_t v;
bool deleted_something = DeleteEntry(key.ptr(), &v);
if (deleted_value != nullptr && deleted_something) {
*deleted_value = *reinterpret_cast<V*>(&v);
@@ -188,12 +207,12 @@ class IdentityMap : public IdentityMapBase {
// TODO(ishell): consider removing virtual methods in favor of combining
// IdentityMapBase and IdentityMap into one class. This would also save
- // space when sizeof(V) is less than sizeof(void*).
- void** NewPointerArray(size_t length) override {
- return allocator_.template NewArray<void*, Buffer>(length);
+ // space when sizeof(V) is less than sizeof(uintptr_t).
+ uintptr_t* NewPointerArray(size_t length) override {
+ return allocator_.template NewArray<uintptr_t, Buffer>(length);
}
- void DeletePointerArray(void** array, size_t length) override {
- allocator_.template DeleteArray<void*, Buffer>(array, length);
+ void DeletePointerArray(uintptr_t* array, size_t length) override {
+ allocator_.template DeleteArray<uintptr_t, Buffer>(array, length);
}
private:
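In caller terms, the old Get/Set pair is replaced by FindOrInsert (returning the slot plus an already_exists flag) and Insert (for keys known to be absent; it DCHECKs non-existence in debug builds). A hedged usage sketch, assuming an int-valued map over V8's FreeStoreAllocationPolicy; heap, obj, and other are illustrative placeholders:

// Hypothetical caller of the new API; int satisfies the static asserts
// above (trivially copyable/destructible, no larger than uintptr_t).
IdentityMap<int, FreeStoreAllocationPolicy> counts(heap);

auto result = counts.FindOrInsert(obj);          // one lookup, not two
if (!result.already_exists) *result.entry = 0;   // fresh slot: initialize
*result.entry += 1;                              // update in place

counts.Insert(other, 1);  // fast path; DCHECKs the key was absent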
diff --git a/deps/v8/src/utils/locked-queue-inl.h b/deps/v8/src/utils/locked-queue-inl.h
index 9416dd7d37..edcdf03a5d 100644
--- a/deps/v8/src/utils/locked-queue-inl.h
+++ b/deps/v8/src/utils/locked-queue-inl.h
@@ -38,10 +38,10 @@ inline LockedQueue<Record>::~LockedQueue() {
}
template <typename Record>
-inline void LockedQueue<Record>::Enqueue(const Record& record) {
+inline void LockedQueue<Record>::Enqueue(Record record) {
Node* n = new Node();
CHECK_NOT_NULL(n);
- n->value = record;
+ n->value = std::move(record);
{
base::MutexGuard guard(&tail_mutex_);
tail_->next.SetValue(n);
@@ -57,7 +57,7 @@ inline bool LockedQueue<Record>::Dequeue(Record* record) {
old_head = head_;
Node* const next_node = head_->next.Value();
if (next_node == nullptr) return false;
- *record = next_node->value;
+ *record = std::move(next_node->value);
head_ = next_node;
}
delete old_head;
diff --git a/deps/v8/src/utils/locked-queue.h b/deps/v8/src/utils/locked-queue.h
index 4dd6488184..7594cc93c3 100644
--- a/deps/v8/src/utils/locked-queue.h
+++ b/deps/v8/src/utils/locked-queue.h
@@ -21,7 +21,7 @@ class LockedQueue final {
public:
inline LockedQueue();
inline ~LockedQueue();
- inline void Enqueue(const Record& record);
+ inline void Enqueue(Record record);
inline bool Dequeue(Record* record);
inline bool IsEmpty() const;
inline bool Peek(Record* record) const;
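Taking the Record by value and moving it into the node lets Enqueue accept rvalues without a copy while still supporting lvalues (copied once at the call boundary), and Dequeue now moves the value out instead of copying it. A sketch of the benefit for a move-only record type (Task is a placeholder; note that Peek still copies, so it must not be instantiated for move-only types):

#include <memory>

struct Task { int id = 0; };

LockedQueue<std::unique_ptr<Task>> queue;
queue.Enqueue(std::make_unique<Task>());  // rvalue: moved into the node

std::unique_ptr<Task> task;
if (queue.Dequeue(&task)) {
  // The node's value was moved out, not copied.
}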
diff --git a/deps/v8/src/utils/utils.h b/deps/v8/src/utils/utils.h
index 7ec0dd2c00..af8f34030f 100644
--- a/deps/v8/src/utils/utils.h
+++ b/deps/v8/src/utils/utils.h
@@ -69,13 +69,13 @@ static T ArithmeticShiftRight(T x, int shift) {
// Returns the maximum of the two parameters.
template <typename T>
constexpr T Max(T a, T b) {
- return a < b ? b : a;
+ return std::max(a, b);
}
// Returns the minimum of the two parameters.
template <typename T>
constexpr T Min(T a, T b) {
- return a < b ? a : b;
+ return std::min(a, b);
}
// Returns the maximum of the two parameters according to JavaScript semantics.
@@ -135,6 +135,15 @@ inline double Modulo(double x, double y) {
}
template <typename T>
+T Saturate(int64_t value) {
+ static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
+ int64_t min = static_cast<int64_t>(std::numeric_limits<T>::min());
+ int64_t max = static_cast<int64_t>(std::numeric_limits<T>::max());
+ int64_t clamped = std::max(min, std::min(max, value));
+ return static_cast<T>(clamped);
+}
+
+template <typename T>
T SaturateAdd(T a, T b) {
if (std::is_signed<T>::value) {
if (a > 0 && b > 0) {
@@ -176,6 +185,53 @@ T SaturateSub(T a, T b) {
return a - b;
}
+template <typename T>
+T SaturateRoundingQMul(T a, T b) {
+ // Saturating rounding multiplication for Q-format numbers. See
+ // https://en.wikipedia.org/wiki/Q_(number_format) for a description.
+ // Specifically this supports Q7, Q15, and Q31. This follows the
+ // implementation in simulator-logic-arm64.cc (sqrdmulh) to avoid overflow
+ // when a == b == int32 min.
+ static_assert(std::is_integral<T>::value, "only integral types");
+
+ constexpr int size_in_bits = sizeof(T) * 8;
+ int round_const = 1 << (size_in_bits - 2);
+ int64_t product = static_cast<int64_t>(a) * b;
+ product += round_const;
+ product >>= (size_in_bits - 1);
+ return Saturate<T>(product);
+}
+
+// Multiply two numbers, returning a result that is twice as wide, no overflow.
+// Put Wide first so we can use function template argument deduction for Narrow,
+// and callers can provide only Wide.
+template <typename Wide, typename Narrow>
+Wide MultiplyLong(Narrow a, Narrow b) {
+ static_assert(
+ std::is_integral<Narrow>::value && std::is_integral<Wide>::value,
+ "only integral types");
+ static_assert(std::is_signed<Narrow>::value == std::is_signed<Wide>::value,
+ "both must have same signedness");
+ static_assert(sizeof(Narrow) * 2 == sizeof(Wide), "only twice as long");
+
+ return static_cast<Wide>(a) * static_cast<Wide>(b);
+}
+
+// Add two numbers, returning a result that is twice as wide, no overflow.
+// Put Wide first so we can use function template argument deduction for Narrow,
+// and callers can provide only Wide.
+template <typename Wide, typename Narrow>
+Wide AddLong(Narrow a, Narrow b) {
+ static_assert(
+ std::is_integral<Narrow>::value && std::is_integral<Wide>::value,
+ "only integral types");
+ static_assert(std::is_signed<Narrow>::value == std::is_signed<Wide>::value,
+ "both must have same signedness");
+ static_assert(sizeof(Narrow) * 2 == sizeof(Wide), "only twice as long");
+
+ return static_cast<Wide>(a) + static_cast<Wide>(b);
+}
+
// Helper macros for defining a contiguous sequence of field offset constants.
// Example: (backslashes at the ends of respective lines of this multi-line
// macro definition are omitted here to please the compiler)
@@ -682,6 +738,19 @@ static inline V ByteReverse(V value) {
}
}
+#if V8_OS_AIX
+// glibc on AIX has a bug when using ceil, trunc or nearbyint:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=97086
+template <typename T>
+T FpOpWorkaround(T input, T value) {
+ if (/*if -*/ std::signbit(input) && value == 0.0 &&
+ /*if +*/ !std::signbit(value)) {
+ return -0.0;
+ }
+ return value;
+}
+#endif
+
V8_EXPORT_PRIVATE bool PassesFilter(Vector<const char> name,
Vector<const char> filter);
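The new helpers back the saturating and widening SIMD operations added later in this diff. A few worked values, written as unit-test-style checks against the definitions above (illustrative only; assumes utils.h is included). FpOpWorkaround follows the same spirit: on AIX it restores the sign of a zero result, e.g. FpOpWorkaround(-0.5, std::ceil(-0.5)) yields -0.0.

#include <cassert>
#include <cstdint>

void CheckSaturationHelpers() {
  // Saturate clamps a wide intermediate into T's range.
  assert(Saturate<int8_t>(int64_t{300}) == 127);
  assert(Saturate<int8_t>(int64_t{-300}) == -128);

  // Q15: 0.5 * 0.5 = 0.25, i.e. 0x4000 * 0x4000 -> 0x2000 after rounding.
  assert(SaturateRoundingQMul<int16_t>(0x4000, 0x4000) == 0x2000);
  // INT16_MIN * INT16_MIN would be +1.0, which Q15 cannot represent, so
  // the result saturates to the largest representable value.
  assert(SaturateRoundingQMul<int16_t>(INT16_MIN, INT16_MIN) == INT16_MAX);

  // Widening multiply/add cannot overflow the Wide result type.
  assert(MultiplyLong<int32_t>(int16_t{INT16_MIN}, int16_t{INT16_MIN}) ==
         1073741824);
  assert(AddLong<int32_t>(int16_t{INT16_MAX}, int16_t{INT16_MAX}) == 65534);
}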
diff --git a/deps/v8/src/wasm/DIR_METADATA b/deps/v8/src/wasm/DIR_METADATA
new file mode 100644
index 0000000000..3b428d9660
--- /dev/null
+++ b/deps/v8/src/wasm/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>WebAssembly"
+}
\ No newline at end of file
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index 801795058d..38224181e9 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -8,5 +8,3 @@ thibaudm@chromium.org
zhin@chromium.org
per-file wasm-js.*=adamk@chromium.org
-
-# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index b4966c012b..af969f387e 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -139,6 +139,8 @@ template <void (Assembler::*op)(Register, Register, const Operand&, SBit,
SBit, Condition)>
inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister lhs, int32_t imm) {
+ // The compiler allocated registers such that either {dst == lhs} or there is
+ // no overlap between the two.
DCHECK_NE(dst.low_gp(), lhs.high_gp());
(assm->*op)(dst.low_gp(), lhs.low_gp(), Operand(imm), SetCC, al);
// Top half of the immediate sign extended, either 0 or -1.
@@ -532,16 +534,14 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
- DCHECK_LE(offset, kMaxInt);
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
+ DCHECK_LE(0, offset);
DCHECK_EQ(4, size);
ldr(dst, liftoff::GetInstanceOperand());
ldr(dst, MemOperand(dst, offset));
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
LoadFromInstance(dst, offset, kTaggedSize);
}
@@ -1005,11 +1005,13 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
if (cache_state()->is_used(LiftoffRegister(dst_high))) {
SpillRegister(LiftoffRegister(dst_high));
}
- UseScratchRegisterScope temps(this);
- Register actual_addr = liftoff::CalculateActualAddress(
- this, &temps, src_addr, offset_reg, offset_imm);
- ldrexd(dst_low, dst_high, actual_addr);
- dmb(ISH);
+ {
+ UseScratchRegisterScope temps(this);
+ Register actual_addr = liftoff::CalculateActualAddress(
+ this, &temps, src_addr, offset_reg, offset_imm);
+ ldrexd(dst_low, dst_high, actual_addr);
+ dmb(ISH);
+ }
ParallelRegisterMove(
{{dst, LiftoffRegister::ForPair(dst_low, dst_high), kWasmI64}});
@@ -1323,12 +1325,10 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
}
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
-#ifdef DEBUG
// The {str} instruction needs a temp register when the immediate in the
// provided MemOperand does not fit into 12 bits. This happens for large stack
// frames. This DCHECK checks that the temp register is available when needed.
DCHECK(UseScratchRegisterScope{this}.CanAcquire());
-#endif
DCHECK_LT(0, offset);
RecordUsedSpillOffset(offset);
MemOperand dst(fp, -offset);
@@ -2259,6 +2259,18 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
NeonMemOperand(actual_src_addr));
vmovl(NeonU32, liftoff::GetSimd128Register(dst), dst.low_fp());
}
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ Simd128Register dest = liftoff::GetSimd128Register(dst);
+ if (memtype == MachineType::Int32()) {
+ vmov(dest, 0);
+ vld1s(Neon32, NeonListOperand(dst.low_fp()), 0,
+ NeonMemOperand(actual_src_addr));
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ vmov(dest.high(), 0);
+ vld1(Neon64, NeonListOperand(dest.low()),
+ NeonMemOperand(actual_src_addr));
+ }
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
@@ -2921,6 +2933,23 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ QwNeonRegister dest = liftoff::GetSimd128Register(dst);
+ QwNeonRegister left = liftoff::GetSimd128Register(lhs);
+ QwNeonRegister right = liftoff::GetSimd128Register(rhs);
+
+ UseScratchRegisterScope temps(this);
+ Simd128Register scratch = temps.AcquireQ();
+
+ vmull(NeonS16, scratch, left.low(), right.low());
+ vpadd(Neon32, dest.low(), scratch.low(), scratch.high());
+
+ vmull(NeonS16, scratch, left.high(), right.high());
+ vpadd(Neon32, dest.high(), scratch.low(), scratch.high());
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
vdup(Neon16, liftoff::GetSimd128Register(dst), src.gp());
@@ -3015,9 +3044,9 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqadd(NeonS16, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
@@ -3028,16 +3057,16 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqsub(NeonS16, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqsub(NeonU16, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
@@ -3048,9 +3077,9 @@ void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqadd(NeonU16, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
@@ -3133,7 +3162,6 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
}
int table_size = src1 == src2 ? 2 : 4;
- uint32_t mask = table_size == 2 ? 0x0F0F0F0F : 0x1F1F1F1F;
int scratch_s_base = scratch.code() * 4;
for (int j = 0; j < 4; j++) {
@@ -3141,11 +3169,9 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
for (int i = 3; i >= 0; i--) {
imm = (imm << 8) | shuffle[j * 4 + i];
}
- uint32_t four_lanes = imm;
+ DCHECK_EQ(0, imm & (table_size == 2 ? 0xF0F0F0F0 : 0xE0E0E0E0));
// Ensure indices are in [0,15] if table_size is 2, or [0,31] if 4.
- four_lanes &= mask;
- vmov(SwVfpRegister::from_code(scratch_s_base + j),
- Float32::FromBits(four_lanes));
+ vmov(SwVfpRegister::from_code(scratch_s_base + j), Float32::FromBits(imm));
}
DwVfpRegister table_base = src1.low();
@@ -3277,9 +3303,9 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqadd(NeonS8, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
@@ -3290,16 +3316,16 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqsub(NeonS8, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqsub(NeonU8, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
@@ -3310,9 +3336,9 @@ void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
vqadd(NeonU8, liftoff::GetSimd128Register(dst),
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
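emit_i32x4_dot_i16x8_s above implements Wasm's i32x4.dot_i16x8_s: each 32-bit output lane is the sum of two adjacent widened 16x16->32 products, which the ARM code obtains with vmull (widening multiply) followed by vpadd (pairwise add). A scalar reference for the lane semantics (illustrative; the single overflow case, both inputs INT16_MIN, wraps):

#include <cstdint>

void I32x4DotI16x8S(const int16_t a[8], const int16_t b[8], int32_t out[4]) {
  for (int i = 0; i < 4; ++i) {
    int64_t sum = int64_t{a[2 * i]} * b[2 * i] +         // even lane product
                  int64_t{a[2 * i + 1]} * b[2 * i + 1];  // odd lane product
    out[i] = static_cast<int32_t>(sum);  // wraps only for INT16_MIN twice
  }
}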
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 4fe3abc544..402f0d2e84 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -186,25 +186,36 @@ int LiftoffAssembler::PrepareStackFrame() {
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
int stack_param_delta) {
UseScratchRegisterScope temps(this);
- Register scratch = temps.AcquireX();
+ temps.Exclude(x16, x17);
+
+ // This is the previous stack pointer value (before we push the lr and the
+ // fp). We need to keep it to authenticate the lr and adjust the new stack
+ // pointer afterwards.
+ Add(x16, fp, 16);
+
+ // Load the fp and lr of the old frame, they will be pushed in the new frame
+ // during the actual call.
+#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
+ Ldp(fp, x17, MemOperand(fp));
+ Autib1716();
+ Mov(lr, x17);
+#else
+ Ldp(fp, lr, MemOperand(fp));
+#endif
- // Push the return address and frame pointer to complete the stack frame.
- sub(sp, sp, 16);
- ldr(scratch, MemOperand(fp, 8));
- Poke(scratch, 8);
- ldr(scratch, MemOperand(fp, 0));
- Poke(scratch, 0);
+ temps.Include(x17);
+
+ Register scratch = temps.AcquireX();
- // Shift the whole frame upwards.
- int slot_count = num_callee_stack_params + 2;
+ // Shift the whole frame upwards, except for fp and lr.
+ int slot_count = num_callee_stack_params;
for (int i = slot_count - 1; i >= 0; --i) {
ldr(scratch, MemOperand(sp, i * 8));
- str(scratch, MemOperand(fp, (i - stack_param_delta) * 8));
+ str(scratch, MemOperand(x16, (i - stack_param_delta) * 8));
}
- // Set the new stack and frame pointer.
- Sub(sp, fp, stack_param_delta * 8);
- Pop<kAuthLR>(fp, lr);
+ // Set the new stack pointer.
+ Sub(sp, x16, stack_param_delta * 8);
}
void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
@@ -302,9 +313,8 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
- DCHECK_LE(offset, kMaxInt);
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
+ DCHECK_LE(0, offset);
Ldr(dst, liftoff::GetInstanceOperand());
DCHECK(size == 4 || size == 8);
if (size == 4) {
@@ -314,9 +324,8 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
}
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
- DCHECK_LE(offset, kMaxInt);
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
+ DCHECK_LE(0, offset);
Ldr(dst, liftoff::GetInstanceOperand());
LoadTaggedPointerField(dst, MemOperand(dst, offset));
}
@@ -676,11 +685,12 @@ void LiftoffAssembler::AtomicCompareExchange(
}
UseScratchRegisterScope temps(this);
- Register store_result = temps.AcquireW();
Register actual_addr = liftoff::CalculateActualAddress(
this, dst_addr, offset_reg, offset_imm, temps.AcquireX());
+ Register store_result = temps.AcquireW();
+
Label retry;
Label done;
Bind(&retry);
@@ -1495,6 +1505,13 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
Ldr(dst.fp().D(), src_op);
Uxtl(dst.fp().V2D(), dst.fp().V2S());
}
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ if (memtype == MachineType::Int32()) {
+ Ldr(dst.fp().S(), src_op);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ Ldr(dst.fp().D(), src_op);
+ }
} else {
// ld1r only allows no offset or post-index, so emit an add.
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
@@ -2003,6 +2020,17 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
Umax(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ UseScratchRegisterScope scope(this);
+ VRegister tmp1 = scope.AcquireV(kFormat4S);
+ VRegister tmp2 = scope.AcquireV(kFormat4S);
+ Smull(tmp1, lhs.fp().V4H(), rhs.fp().V4H());
+ Smull2(tmp2, lhs.fp().V8H(), rhs.fp().V8H());
+ Addp(dst.fp().V4S(), tmp1, tmp2);
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
Dup(dst.fp().V8H(), src.gp().W());
@@ -2105,9 +2133,9 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
Add(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Sqadd(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
@@ -2116,15 +2144,15 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
Sub(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Sqsub(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Uqsub(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
@@ -2133,9 +2161,9 @@ void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
Mul(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Uqadd(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
@@ -2187,12 +2215,13 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
Mov(src2.Q(), rhs.fp().Q());
}
- uint8_t mask = lhs == rhs ? 0x0F : 0x1F;
int64_t imms[2] = {0, 0};
for (int i = 7; i >= 0; i--) {
- imms[0] = (imms[0] << 8) | (shuffle[i] & mask);
- imms[1] = (imms[1] << 8) | (shuffle[i + 8] & mask);
+ imms[0] = (imms[0] << 8) | (shuffle[i]);
+ imms[1] = (imms[1] << 8) | (shuffle[i + 8]);
}
+ DCHECK_EQ(0, (imms[0] | imms[1]) &
+ (lhs == rhs ? 0xF0F0F0F0F0F0F0F0 : 0xE0E0E0E0E0E0E0E0));
Movi(temp.V16B(), imms[1], imms[0]);
@@ -2307,9 +2336,9 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
Add(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Sqadd(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
@@ -2318,15 +2347,15 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
Sub(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Sqsub(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Uqsub(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
@@ -2335,9 +2364,9 @@ void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
Mul(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
Uqadd(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
}
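The kZeroExtend branches (here and in the other Liftoff backends) implement v128.load32_zero and v128.load64_zero: load 4 or 8 bytes into the low lanes and clear the rest, which an Ldr into an S or D register does for free on arm64. Scalar reference semantics (illustrative):

#include <cstdint>
#include <cstring>

void Load32Zero(const void* src, uint8_t out[16]) {
  std::memset(out, 0, 16);   // upper 96 bits become zero
  std::memcpy(out, src, 4);  // low 32 bits come from memory
}

void Load64Zero(const void* src, uint8_t out[16]) {
  std::memset(out, 0, 16);   // upper 64 bits become zero
  std::memcpy(out, src, 8);  // low 64 bits come from memory
}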
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 3c2fccc997..5e640093c4 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -261,16 +261,14 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
- DCHECK_LE(offset, kMaxInt);
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
+ DCHECK_LE(0, offset);
mov(dst, liftoff::GetInstanceOperand());
DCHECK_EQ(4, size);
mov(dst, Operand(dst, offset));
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
LoadFromInstance(dst, offset, kTaggedSize);
}
@@ -1456,31 +1454,19 @@ template <void (Assembler::*op)(Register, const Immediate&),
void (Assembler::*op_with_carry)(Register, int32_t)>
inline void OpWithCarryI(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister lhs, int32_t imm) {
- // First, compute the low half of the result, potentially into a temporary dst
- // register if {dst.low_gp()} equals any register we need to
- // keep alive for computing the upper half.
- LiftoffRegList keep_alive = LiftoffRegList::ForRegs(lhs.high_gp());
- Register dst_low = keep_alive.has(dst.low_gp())
- ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
- : dst.low_gp();
-
- if (dst_low != lhs.low_gp()) assm->mov(dst_low, lhs.low_gp());
- (assm->*op)(dst_low, Immediate(imm));
+ // The compiler allocated registers such that either {dst == lhs} or there is
+ // no overlap between the two.
+ DCHECK_NE(dst.low_gp(), lhs.high_gp());
- // Now compute the upper half, while keeping alive the previous result.
- keep_alive = LiftoffRegList::ForRegs(dst_low);
- Register dst_high = keep_alive.has(dst.high_gp())
- ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
- : dst.high_gp();
+ // First, compute the low half of the result.
+ if (dst.low_gp() != lhs.low_gp()) assm->mov(dst.low_gp(), lhs.low_gp());
+ (assm->*op)(dst.low_gp(), Immediate(imm));
- if (dst_high != lhs.high_gp()) assm->mov(dst_high, lhs.high_gp());
+ // Now compute the upper half.
+ if (dst.high_gp() != lhs.high_gp()) assm->mov(dst.high_gp(), lhs.high_gp());
// Top half of the immediate sign extended, either 0 or -1.
int32_t sign_extend = imm < 0 ? -1 : 0;
- (assm->*op_with_carry)(dst_high, sign_extend);
-
- // If necessary, move result into the right registers.
- LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
- if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
+ (assm->*op_with_carry)(dst.high_gp(), sign_extend);
}
} // namespace liftoff
@@ -2665,6 +2651,13 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
} else if (memtype == MachineType::Uint32()) {
Pmovzxdq(dst.fp(), src_op);
}
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ if (memtype == MachineType::Int32()) {
+ movss(dst.fp(), src_op);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ movsd(dst.fp(), src_op);
+ }
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
@@ -2700,15 +2693,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
for (int i = 3; i >= 0; i--) {
push_imm32(imms[i]);
}
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpshufb(dst.fp(), lhs.fp(), Operand(esp, 0));
- } else {
- if (dst != lhs) {
- movups(dst.fp(), lhs.fp());
- }
- pshufb(dst.fp(), Operand(esp, 0));
- }
+ Pshufb(dst.fp(), lhs.fp(), Operand(esp, 0));
mov(esp, tmp.gp());
return;
}
@@ -2723,7 +2708,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
}
push(Immediate(mask));
}
- Pshufb(liftoff::kScratchDoubleReg, Operand(esp, 0));
+ Pshufb(liftoff::kScratchDoubleReg, lhs.fp(), Operand(esp, 0));
for (int i = 3; i >= 0; i--) {
uint32_t mask = 0;
@@ -2734,10 +2719,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
}
push(Immediate(mask));
}
- if (dst.fp() != rhs.fp()) {
- movups(dst.fp(), rhs.fp());
- }
- Pshufb(dst.fp(), Operand(esp, 0));
+ Pshufb(dst.fp(), rhs.fp(), Operand(esp, 0));
Por(dst.fp(), liftoff::kScratchDoubleReg);
mov(esp, tmp.gp());
}
@@ -2751,10 +2733,7 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
TurboAssembler::Move(mask, uint32_t{0x70707070});
Pshufd(mask, mask, uint8_t{0x0});
Paddusb(mask, rhs.fp());
- if (lhs != dst) {
- Movaps(dst.fp(), lhs.fp());
- }
- Pshufb(dst.fp(), mask);
+ Pshufb(dst.fp(), lhs.fp(), mask);
}
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
@@ -3211,16 +3190,16 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddsb, &Assembler::paddsb>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddusb, &Assembler::paddusb>(
this, dst, lhs, rhs);
}
@@ -3231,16 +3210,16 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubsb, &Assembler::psubsb>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubusb,
&Assembler::psubusb>(this, dst, lhs,
rhs);
@@ -3409,16 +3388,16 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddsw, &Assembler::paddsw>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddusw, &Assembler::paddusw>(
this, dst, lhs, rhs);
}
@@ -3429,16 +3408,16 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubsw, &Assembler::psubsw>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubusw,
&Assembler::psubusw>(this, dst, lhs,
rhs);
@@ -3588,6 +3567,13 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaddwd, &Assembler::pmaddwd>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
DoubleRegister reg =
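The OpWithCarryI rewrite above leans on a new register-allocator guarantee ({dst == lhs} or no overlap), so the low and high halves can be computed in place without temporaries. The emitted add/adc pair corresponds to this scalar sketch (illustrative):

#include <cstdint>

// 64-bit add-immediate on a 32-bit register pair: "add lo, imm" then
// "adc hi, sign_extend", where sign_extend is the immediate's high half.
void I64AddImm(uint32_t* lo, uint32_t* hi, int32_t imm) {
  uint32_t old_lo = *lo;
  *lo = old_lo + static_cast<uint32_t>(imm);
  uint32_t carry = *lo < old_lo ? 1u : 0u;            // carry out of the add
  uint32_t sign_extend = imm < 0 ? 0xFFFFFFFFu : 0u;  // 0 or -1
  *hi = *hi + sign_extend + carry;
}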
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index e219025e53..dea5221ac6 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -37,6 +37,7 @@ class StackTransferRecipe {
struct RegisterLoad {
enum LoadKind : uint8_t {
+ kNop, // no-op, used for high fp of a fp pair.
kConstant, // load a constant value into a register.
kStack, // fill a register from a stack slot.
kLowHalfStack, // fill a register from the low half of a stack slot.
@@ -63,6 +64,10 @@ class StackTransferRecipe {
return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kWasmI32,
offset};
}
+ static RegisterLoad Nop() {
+ // ValueType does not matter.
+ return {kNop, kWasmI32, 0};
+ }
private:
RegisterLoad(LoadKind kind, ValueType type, int32_t value)
@@ -71,6 +76,8 @@ class StackTransferRecipe {
public:
explicit StackTransferRecipe(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}
+ StackTransferRecipe(const StackTransferRecipe&) = delete;
+ StackTransferRecipe& operator=(const StackTransferRecipe&) = delete;
~StackTransferRecipe() { Execute(); }
void Execute() {
@@ -217,11 +224,11 @@ class StackTransferRecipe {
RegisterLoad::HalfStack(stack_offset, kHighWord);
} else if (dst.is_fp_pair()) {
DCHECK_EQ(kWasmS128, type);
- // load_dst_regs_.set above will set both low and high fp regs.
- // But unlike gp_pair, we load a kWasm128 in one go in ExecuteLoads.
- // So unset the top fp register to skip loading it.
- load_dst_regs_.clear(dst.high());
+ // Only need a register_load for the low register, since we load all 128
+ // bits in one go. Both low and high need to be set in load_dst_regs_, but
+ // when iterating over it both halves will be cleared, so we won't load
+ // twice.
*register_load(dst.low()) = RegisterLoad::Stack(stack_offset, type);
+ *register_load(dst.high()) = RegisterLoad::Nop();
} else {
*register_load(dst) = RegisterLoad::Stack(stack_offset, type);
}
@@ -318,6 +325,8 @@ class StackTransferRecipe {
for (LiftoffRegister dst : load_dst_regs_) {
RegisterLoad* load = register_load(dst);
switch (load->kind) {
+ case RegisterLoad::kNop:
+ break;
case RegisterLoad::kConstant:
asm_->LoadConstant(dst, load->type == kWasmI64
? WasmValue(int64_t{load->value})
@@ -343,8 +352,6 @@ class StackTransferRecipe {
}
load_dst_regs_ = {};
}
-
- DISALLOW_COPY_AND_ASSIGN(StackTransferRecipe);
};
class RegisterReuseMap {
@@ -519,9 +526,7 @@ int LiftoffAssembler::GetTotalFrameSlotCountForGC() const {
namespace {
-constexpr AssemblerOptions DefaultLiftoffOptions() {
- return AssemblerOptions{};
-}
+AssemblerOptions DefaultLiftoffOptions() { return AssemblerOptions{}; }
} // namespace
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index e2bd99841f..895abbbbb4 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -478,8 +478,8 @@ class LiftoffAssembler : public TurboAssembler {
inline void LoadConstant(LiftoffRegister, WasmValue,
RelocInfo::Mode rmode = RelocInfo::NONE);
- inline void LoadFromInstance(Register dst, uint32_t offset, int size);
- inline void LoadTaggedPointerFromInstance(Register dst, uint32_t offset);
+ inline void LoadFromInstance(Register dst, int offset, int size);
+ inline void LoadTaggedPointerFromInstance(Register dst, int offset);
inline void SpillInstance(Register instance);
inline void FillInstanceInto(Register dst);
inline void LoadTaggedPointer(Register dst, Register src_addr,
@@ -675,6 +675,15 @@ class LiftoffAssembler : public TurboAssembler {
}
}
+ inline void emit_ptrsize_zeroextend_i32(Register dst, Register src) {
+ if (kSystemPointerSize == 8) {
+ emit_type_conversion(kExprI64UConvertI32, LiftoffRegister(dst),
+ LiftoffRegister(src));
+ } else if (dst != src) {
+ Move(dst, src, kWasmI32);
+ }
+ }
+
// f32 binops.
inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
@@ -852,20 +861,16 @@ class LiftoffAssembler : public TurboAssembler {
int32_t rhs);
inline void emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
- inline void emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
+ inline void emit_i8x16_add_sat_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_add_sat_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
- inline void emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
+ inline void emit_i8x16_sub_sat_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_sub_sat_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i8x16_min_s(LiftoffRegister dst, LiftoffRegister lhs,
@@ -894,20 +899,16 @@ class LiftoffAssembler : public TurboAssembler {
int32_t rhs);
inline void emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
- inline void emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
+ inline void emit_i16x8_add_sat_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_add_sat_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
- inline void emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
- inline void emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs);
+ inline void emit_i16x8_sub_sat_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_sub_sat_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i16x8_min_s(LiftoffRegister dst, LiftoffRegister lhs,
@@ -948,6 +949,8 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_i32x4_max_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i32x4_dot_i16x8_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_i64x2_neg(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
@@ -1302,6 +1305,8 @@ void LiftoffAssembler::emit_i64_xori(LiftoffRegister dst, LiftoffRegister lhs,
class LiftoffStackSlots {
public:
explicit LiftoffStackSlots(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}
+ LiftoffStackSlots(const LiftoffStackSlots&) = delete;
+ LiftoffStackSlots& operator=(const LiftoffStackSlots&) = delete;
void Add(const LiftoffAssembler::VarState& src, uint32_t src_offset,
RegPairHalf half) {
@@ -1328,8 +1333,6 @@ class LiftoffStackSlots {
base::SmallVector<Slot, 8> slots_;
LiftoffAssembler* const asm_;
-
- DISALLOW_COPY_AND_ASSIGN(LiftoffStackSlots);
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 447be8cdae..1ead202ea0 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -258,9 +258,9 @@ class DebugSideTableBuilder {
class LiftoffCompiler {
public:
// TODO(clemensb): Make this a template parameter.
- static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
+ static constexpr Decoder::ValidateFlag validate = Decoder::kBooleanValidation;
- using Value = ValueBase;
+ using Value = ValueBase<validate>;
static constexpr auto kI32 = ValueType::kI32;
static constexpr auto kI64 = ValueType::kI64;
@@ -273,7 +273,7 @@ class LiftoffCompiler {
LiftoffAssembler::CacheState state;
};
- struct Control : public ControlBase<Value> {
+ struct Control : public ControlBase<Value, validate> {
std::unique_ptr<ElseState> else_state;
LiftoffAssembler::CacheState label_state;
MovableLabel label;
@@ -557,7 +557,7 @@ class LiftoffCompiler {
void StartFunctionBody(FullDecoder* decoder, Control* block) {
for (uint32_t i = 0; i < __ num_locals(); ++i) {
if (!CheckSupportedType(decoder,
- FLAG_liftoff_extern_ref
+ FLAG_experimental_liftoff_extern_ref
? kSupportedTypes
: kSupportedTypesWithoutRefs,
__ local_type(i), "param"))
@@ -621,7 +621,7 @@ class LiftoffCompiler {
}
}
- if (FLAG_liftoff_extern_ref) {
+ if (FLAG_experimental_liftoff_extern_ref) {
// Initialize all reference type locals with ref.null.
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
@@ -690,30 +690,6 @@ class LiftoffCompiler {
}
if (FLAG_trace_wasm) TraceFunctionEntry(decoder);
-
- // If we are generating debug code, do check the "hook on function call"
- // flag. If set, trigger a break.
- if (V8_UNLIKELY(for_debugging_)) {
- // If there is a breakpoint set on the first instruction (== start of the
- // function), then skip the check for "hook on function call", since we
- // will unconditionally break there anyway.
- bool has_breakpoint = next_breakpoint_ptr_ != nullptr &&
- (*next_breakpoint_ptr_ == 0 ||
- *next_breakpoint_ptr_ == decoder->position());
- if (!has_breakpoint) {
- DEBUG_CODE_COMMENT("check hook on function call");
- Register flag = __ GetUnusedRegister(kGpReg, {}).gp();
- LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress,
- kSystemPointerSize);
- Label no_break;
- __ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U,
- {});
- // Unary "equal" means "equals zero".
- __ emit_cond_jump(kEqual, &no_break, kWasmI32, flag);
- EmitBreakpoint(decoder);
- __ bind(&no_break);
- }
- }
}
void GenerateOutOfLineCode(OutOfLineCode* ool) {
@@ -799,14 +775,14 @@ class LiftoffCompiler {
}
V8_NOINLINE void EmitDebuggingInfo(FullDecoder* decoder, WasmOpcode opcode) {
- DCHECK(V8_UNLIKELY(for_debugging_));
+ DCHECK(for_debugging_);
+ if (!WasmOpcodes::IsBreakable(opcode)) return;
+ bool has_breakpoint = false;
if (next_breakpoint_ptr_) {
if (*next_breakpoint_ptr_ == 0) {
// A single breakpoint at offset 0 indicates stepping.
DCHECK_EQ(next_breakpoint_ptr_ + 1, next_breakpoint_end_);
- if (WasmOpcodes::IsBreakable(opcode)) {
- EmitBreakpoint(decoder);
- }
+ has_breakpoint = true;
} else {
while (next_breakpoint_ptr_ != next_breakpoint_end_ &&
*next_breakpoint_ptr_ < decoder->position()) {
@@ -816,18 +792,34 @@ class LiftoffCompiler {
if (next_breakpoint_ptr_ == next_breakpoint_end_) {
next_breakpoint_ptr_ = next_breakpoint_end_ = nullptr;
} else if (*next_breakpoint_ptr_ == decoder->position()) {
- DCHECK(WasmOpcodes::IsBreakable(opcode));
- EmitBreakpoint(decoder);
+ has_breakpoint = true;
}
}
}
- if (dead_breakpoint_ == decoder->position()) {
+ if (has_breakpoint) {
+ EmitBreakpoint(decoder);
+ // Once we've emitted a breakpoint, we don't need to check the "hook on
+ // function call" any more.
+ checked_hook_on_function_call_ = true;
+ } else if (!checked_hook_on_function_call_) {
+ checked_hook_on_function_call_ = true;
+ // Check the "hook on function call" flag. If set, trigger a break.
+ DEBUG_CODE_COMMENT("check hook on function call");
+ Register flag = __ GetUnusedRegister(kGpReg, {}).gp();
+ LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress, kSystemPointerSize);
+ Label no_break;
+ __ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U, {});
+ // Unary "equal" means "equals zero".
+ __ emit_cond_jump(kEqual, &no_break, kWasmI32, flag);
+ EmitBreakpoint(decoder);
+ __ bind(&no_break);
+ } else if (dead_breakpoint_ == decoder->position()) {
DCHECK(!next_breakpoint_ptr_ ||
*next_breakpoint_ptr_ != dead_breakpoint_);
// The top frame is paused at this position, but the breakpoint was
- // removed. Adding a dead breakpoint here ensures that the source position
- // exists, and that the offset to the return address is the same as in the
- // old code.
+ // removed. Adding a dead breakpoint here ensures that the source
+ // position exists, and that the offset to the return address is the
+ // same as in the old code.
Label cont;
__ emit_jump(&cont);
EmitBreakpoint(decoder);
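Condensed, the debug path now decides per breakable opcode, checking the "hook on function call" flag lazily and only once instead of unconditionally in the prologue (EmitHookOnFunctionCallCheck and EmitDeadBreakpoint are hypothetical names for the inline sequences above):

if (has_breakpoint) {
  EmitBreakpoint(decoder);
  checked_hook_on_function_call_ = true;  // a breakpoint subsumes the hook
} else if (!checked_hook_on_function_call_) {
  checked_hook_on_function_call_ = true;
  EmitHookOnFunctionCallCheck();          // load the flag, break if set
} else if (dead_breakpoint_ == decoder->position()) {
  EmitDeadBreakpoint();                   // keep source position and offsets
}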
@@ -843,7 +835,8 @@ class LiftoffCompiler {
#ifdef DEBUG
SLOW_DCHECK(__ ValidateCacheState());
if (WasmOpcodes::IsPrefixOpcode(opcode)) {
- opcode = decoder->read_prefixed_opcode<Decoder::kValidate>(decoder->pc());
+ opcode = decoder->read_prefixed_opcode<Decoder::kFullValidation>(
+ decoder->pc());
}
DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode));
#endif
@@ -1251,9 +1244,12 @@ class LiftoffCompiler {
int32_t imm = rhs_slot.i32_const();
LiftoffRegister lhs = __ PopToRegister();
+ // Either reuse {lhs} for {dst}, or choose a register (pair) which does
+ // not overlap, for easier code generation.
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(lhs);
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs}, {})
- : __ GetUnusedRegister(result_rc, {});
+ ? __ GetUnusedRegister(result_rc, {lhs}, pinned)
+ : __ GetUnusedRegister(result_rc, pinned);
CallEmitFn(fnImm, dst, lhs, imm);
__ PushRegister(ValueType::Primitive(result_type), dst);
@@ -1632,7 +1628,7 @@ class LiftoffCompiler {
}
void RefNull(FullDecoder* decoder, ValueType type, Value*) {
- if (!FLAG_liftoff_extern_ref) {
+ if (!FLAG_experimental_liftoff_extern_ref) {
unsupported(decoder, kRefTypes, "ref_null");
return;
}
@@ -1815,7 +1811,7 @@ class LiftoffCompiler {
const GlobalIndexImmediate<validate>& imm) {
const auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder,
- FLAG_liftoff_extern_ref
+ FLAG_experimental_liftoff_extern_ref
? kSupportedTypes
: kSupportedTypesWithoutRefs,
global->type, "global")) {
@@ -1854,7 +1850,7 @@ class LiftoffCompiler {
const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder,
- FLAG_liftoff_extern_ref
+ FLAG_experimental_liftoff_extern_ref
? kSupportedTypes
: kSupportedTypesWithoutRefs,
global->type, "global")) {
@@ -2184,25 +2180,36 @@ class LiftoffCompiler {
__ SpillAllRegisters();
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
- // Get one register for computing the address (offset + index).
- LiftoffRegister address = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- // Compute offset+index in address.
- __ LoadConstant(address, WasmValue(offset));
- __ emit_i32_add(address.gp(), address.gp(), index);
+ // Get one register for computing the effective offset (offset + index).
+ LiftoffRegister effective_offset =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(effective_offset, WasmValue(offset));
+ __ emit_i32_add(effective_offset.gp(), effective_offset.gp(), index);
// Get a register to hold the stack slot for MemoryTracingInfo.
LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
// Allocate stack slot for MemoryTracingInfo.
__ AllocateStackSlot(info.gp(), sizeof(MemoryTracingInfo));
+ // Reuse the {effective_offset} register for all information to be stored in
+ // the MemoryTracingInfo struct.
+ LiftoffRegister data = effective_offset;
+
// Now store all information into the MemoryTracingInfo struct.
- __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, address), address,
- StoreType::kI32Store, pinned);
- __ LoadConstant(address, WasmValue(is_store ? 1 : 0));
- __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, is_store), address,
+ if (kSystemPointerSize == 8) {
+ // Zero-extend the effective offset to u64.
+ CHECK(__ emit_type_conversion(kExprI64UConvertI32, data, effective_offset,
+ nullptr));
+ }
+ __ Store(
+ info.gp(), no_reg, offsetof(MemoryTracingInfo, offset), data,
+ kSystemPointerSize == 8 ? StoreType::kI64Store : StoreType::kI32Store,
+ pinned);
+ __ LoadConstant(data, WasmValue(is_store ? 1 : 0));
+ __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, is_store), data,
StoreType::kI32Store8, pinned);
- __ LoadConstant(address, WasmValue(static_cast<int>(rep)));
- __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, mem_rep), address,
+ __ LoadConstant(data, WasmValue(static_cast<int>(rep)));
+ __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, mem_rep), data,
StoreType::kI32Store8, pinned);
WasmTraceMemoryDescriptor descriptor;
@@ -2287,15 +2294,11 @@ class LiftoffCompiler {
return;
}
- if (transform == LoadTransformationKind::kZeroExtend) {
- unsupported(decoder, kSimd, "prototyping s128 load zero extend");
- return;
- }
-
LiftoffRegList pinned;
Register index = pinned.set(__ PopToRegister()).gp();
- // For load splats, LoadType is the size of the load, and for load
- // extends, LoadType is the size of the lane, and it always loads 8 bytes.
+ // For load splats and load zero, LoadType is the size of the load, and for
+ // load extends, LoadType is the size of the lane, and it always loads 8
+ // bytes.
uint32_t access_size =
transform == LoadTransformationKind::kExtend ? 8 : type.size();
if (BoundsCheckMem(decoder, access_size, imm.offset, index, pinned,
@@ -2330,6 +2333,12 @@ class LiftoffCompiler {
}
}
+ void LoadLane(FullDecoder* decoder, LoadType type, const Value& value,
+ const Value& index, const MemoryAccessImmediate<validate>& imm,
+ const uint8_t laneidx, Value* result) {
+ unsupported(decoder, kSimd, "simd load lane");
+ }
+
void StoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, const Value& value_val) {
@@ -2364,6 +2373,12 @@ class LiftoffCompiler {
}
}
+ void StoreLane(FullDecoder* decoder, StoreType type,
+ const MemoryAccessImmediate<validate>& imm, const Value& index,
+ const Value& value, const uint8_t laneidx) {
+    unsupported(decoder, kSimd, "simd store lane");
+ }
+
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
Register mem_size = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
@@ -2658,20 +2673,16 @@ class LiftoffCompiler {
&LiftoffAssembler::emit_i8x16_shri_u);
case wasm::kExprI8x16Add:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add);
- case wasm::kExprI8x16AddSaturateS:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i8x16_add_saturate_s);
- case wasm::kExprI8x16AddSaturateU:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i8x16_add_saturate_u);
+ case wasm::kExprI8x16AddSatS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add_sat_s);
+ case wasm::kExprI8x16AddSatU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add_sat_u);
case wasm::kExprI8x16Sub:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub);
- case wasm::kExprI8x16SubSaturateS:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i8x16_sub_saturate_s);
- case wasm::kExprI8x16SubSaturateU:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i8x16_sub_saturate_u);
+ case wasm::kExprI8x16SubSatS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub_sat_s);
+ case wasm::kExprI8x16SubSatU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub_sat_u);
case wasm::kExprI8x16Mul:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_mul);
case wasm::kExprI8x16MinS:
@@ -2701,20 +2712,16 @@ class LiftoffCompiler {
&LiftoffAssembler::emit_i16x8_shri_u);
case wasm::kExprI16x8Add:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add);
- case wasm::kExprI16x8AddSaturateS:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i16x8_add_saturate_s);
- case wasm::kExprI16x8AddSaturateU:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i16x8_add_saturate_u);
+ case wasm::kExprI16x8AddSatS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add_sat_s);
+ case wasm::kExprI16x8AddSatU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add_sat_u);
case wasm::kExprI16x8Sub:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub);
- case wasm::kExprI16x8SubSaturateS:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i16x8_sub_saturate_s);
- case wasm::kExprI16x8SubSaturateU:
- return EmitBinOp<kS128, kS128>(
- &LiftoffAssembler::emit_i16x8_sub_saturate_u);
+ case wasm::kExprI16x8SubSatS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub_sat_s);
+ case wasm::kExprI16x8SubSatU:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub_sat_u);
case wasm::kExprI16x8Mul:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_mul);
case wasm::kExprI16x8MinS:
@@ -2756,6 +2763,9 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_max_s);
case wasm::kExprI32x4MaxU:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_max_u);
+ case wasm::kExprI32x4DotI16x8S:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_dot_i16x8_s);
case wasm::kExprI64x2Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_neg);
case wasm::kExprI64x2Shl:
@@ -3238,13 +3248,15 @@ class LiftoffCompiler {
uint32_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
- Register index_plus_offset = index_reg;
+ Register index_plus_offset =
+ __ cache_state()->is_used(LiftoffRegister(index_reg))
+ ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
+ : index_reg;
if (offset) {
- if (__ cache_state()->is_used(LiftoffRegister(index_reg))) {
- index_plus_offset =
- pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- }
__ emit_i32_addi(index_plus_offset, index_reg, offset);
+ __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_plus_offset);
+ } else {
+ __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
}
LiftoffAssembler::VarState timeout =
@@ -3285,7 +3297,7 @@ class LiftoffCompiler {
}
}
- ValueType sig_reps[] = {kWasmI32, type, kWasmI64};
+ ValueType sig_reps[] = {kPointerValueType, type, kWasmI64};
FunctionSig sig(0, 3, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor,
@@ -3313,16 +3325,18 @@ class LiftoffCompiler {
uint32_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
- Register index_plus_offset = index_reg;
+ Register index_plus_offset =
+ __ cache_state()->is_used(LiftoffRegister(index_reg))
+ ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
+ : index_reg;
if (offset) {
- if (__ cache_state()->is_used(LiftoffRegister(index_reg))) {
- index_plus_offset =
- pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- }
__ emit_i32_addi(index_plus_offset, index_reg, offset);
+ __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_plus_offset);
+ } else {
+ __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
}
- ValueType sig_reps[] = {kWasmI32, kWasmI32, kWasmI32};
+ ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32};
FunctionSig sig(1, 2, sig_reps);
auto call_descriptor =
GetBuiltinCallDescriptor<WasmAtomicNotifyDescriptor>(compilation_zone_);
@@ -3806,7 +3820,7 @@ class LiftoffCompiler {
const Value args[], Value returns[], CallKind call_kind) {
for (ValueType ret : imm.sig->returns()) {
if (!CheckSupportedType(decoder,
- FLAG_liftoff_extern_ref
+ FLAG_experimental_liftoff_extern_ref
? kSupportedTypes
: kSupportedTypesWithoutRefs,
ret, "return")) {
@@ -3888,7 +3902,7 @@ class LiftoffCompiler {
}
for (ValueType ret : imm.sig->returns()) {
if (!CheckSupportedType(decoder,
- FLAG_liftoff_extern_ref
+ FLAG_experimental_liftoff_extern_ref
? kSupportedTypes
: kSupportedTypesWithoutRefs,
ret, "return")) {
@@ -3915,9 +3929,10 @@ class LiftoffCompiler {
// Bounds check against the table size.
Label* invalid_func_label = AddOutOfLineTrap(
- decoder->position(), WasmCode::kThrowWasmTrapFuncInvalid);
+ decoder->position(), WasmCode::kThrowWasmTrapTableOutOfBounds);
- uint32_t canonical_sig_num = env_->module->signature_ids[imm.sig_index];
+ uint32_t canonical_sig_num =
+ env_->module->canonicalized_type_ids[imm.sig_index];
DCHECK_GE(canonical_sig_num, 0);
DCHECK_GE(kMaxInt, canonical_sig_num);
@@ -4057,6 +4072,11 @@ class LiftoffCompiler {
// address in OSR is correct.
int dead_breakpoint_ = 0;
+ // Remember whether the "hook on function call" has already been checked.
+ // This happens at the first breakable opcode in the function (if compiling
+ // for debugging).
+ bool checked_hook_on_function_call_ = false;
+
bool has_outstanding_op() const {
return outstanding_op_ != kNoOutstandingOp;
}
@@ -4094,15 +4114,11 @@ WasmCompilationResult ExecuteLiftoffCompilation(
std::unique_ptr<DebugSideTable>* debug_sidetable, int dead_breakpoint) {
int func_body_size = static_cast<int>(func_body.end - func_body.start);
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
- "wasm.CompileBaseline", "func_index", func_index, "body_size",
+ "wasm.CompileBaseline", "funcIndex", func_index, "bodySize",
func_body_size);
Zone zone(allocator, "LiftoffCompilationZone");
auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
- base::Optional<TimedHistogramScope> liftoff_compile_time_scope;
- if (counters) {
- liftoff_compile_time_scope.emplace(counters->liftoff_compile_time());
- }
size_t code_size_estimate =
WasmCodeManager::EstimateLiftoffCodeSize(func_body_size);
// Allocate the initial buffer a bit bigger to avoid reallocation during code
@@ -4115,18 +4131,14 @@ WasmCompilationResult ExecuteLiftoffCompilation(
if (debug_sidetable) {
debug_sidetable_builder = std::make_unique<DebugSideTableBuilder>();
}
- WasmFullDecoder<Decoder::kValidate, LiftoffCompiler> decoder(
+ WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder(
&zone, env->module, env->enabled_features, detected, func_body,
call_descriptor, env, &zone, instruction_buffer->CreateView(),
debug_sidetable_builder.get(), for_debugging, func_index, breakpoints,
dead_breakpoint);
decoder.Decode();
- liftoff_compile_time_scope.reset();
LiftoffCompiler* compiler = &decoder.interface();
- if (decoder.failed()) {
- compiler->OnFirstError(&decoder);
- return WasmCompilationResult{};
- }
+ if (decoder.failed()) compiler->OnFirstError(&decoder);
if (counters) {
// Check that the histogram for the bailout reasons has the correct size.
@@ -4172,7 +4184,7 @@ std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
DebugSideTableBuilder debug_sidetable_builder;
WasmFeatures detected;
- WasmFullDecoder<Decoder::kValidate, LiftoffCompiler> decoder(
+ WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder(
&zone, env->module, env->enabled_features, &detected, func_body,
call_descriptor, env, &zone,
NewAssemblerBuffer(AssemblerBase::kDefaultBufferSize),
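A note on the Saturate→Sat renames throughout this file: they track the updated SIMD proposal opcode names (i8x16.add_sat_s and friends). As a reference for what each lane computes, here is a minimal scalar sketch; this is illustrative C++, not V8 code, and the helper names are invented:

#include <algorithm>
#include <cstdint>

// Saturating signed byte add: clamps to [-128, 127] instead of wrapping.
int8_t add_sat_s(int8_t a, int8_t b) {
  int32_t sum = int32_t{a} + int32_t{b};
  return static_cast<int8_t>(std::min<int32_t>(127, std::max<int32_t>(-128, sum)));
}

// Saturating unsigned byte add: clamps to [0, 255].
uint8_t add_sat_u(uint8_t a, uint8_t b) {
  uint32_t sum = uint32_t{a} + uint32_t{b};
  return static_cast<uint8_t>(std::min<uint32_t>(255u, sum));
}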
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index 49aac008f0..285af7dac0 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -137,8 +137,8 @@ static_assert(2 * kBitsPerGpRegCode >= kBitsPerFpRegCode,
class LiftoffRegister {
static constexpr int needed_bits =
- Max(kNeedI64RegPair || kNeedS128RegPair ? kBitsPerRegPair : 0,
- kBitsPerLiftoffRegCode);
+ std::max(kNeedI64RegPair || kNeedS128RegPair ? kBitsPerRegPair : 0,
+ kBitsPerLiftoffRegCode);
using storage_t = std::conditional<
needed_bits <= 8, uint8_t,
std::conditional<needed_bits <= 16, uint16_t, uint32_t>::type>::type;
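The Max→std::max change above sits next to the nested std::conditional that picks the smallest unsigned type able to hold needed_bits. The same pattern as a standalone sketch (illustrative; the alias name is invented):

#include <cstdint>
#include <type_traits>

template <int kBits>
using SmallestStorage = typename std::conditional<
    kBits <= 8, uint8_t,
    typename std::conditional<kBits <= 16, uint16_t, uint32_t>::type>::type;

static_assert(std::is_same<SmallestStorage<6>, uint8_t>::value, "6 bits fit in a byte");
static_assert(std::is_same<SmallestStorage<12>, uint16_t>::value, "12 bits need 16");
static_assert(std::is_same<SmallestStorage<20>, uint32_t>::value, "20 bits need 32");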
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 97b8487848..5c78eca319 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -360,16 +360,16 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
int size) {
- DCHECK_LE(offset, kMaxInt);
+ DCHECK_LE(0, offset);
lw(dst, liftoff::GetInstanceOperand());
DCHECK_EQ(4, size);
lw(dst, MemOperand(dst, offset));
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+ int32_t offset) {
LoadFromInstance(dst, offset, kTaggedSize);
}
@@ -1883,16 +1883,16 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i8x16_add");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add_saturate_s");
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_s");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add_saturate_u");
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_u");
}
void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1900,16 +1900,16 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i8x16_sub");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub_saturate_s");
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_s");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub_saturate_u");
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_u");
}
void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1998,16 +1998,16 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i16x8_add");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_add_saturate_s");
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_s");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_add_saturate_u");
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_u");
}
void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2015,16 +2015,16 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i16x8_sub");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_sub_saturate_s");
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_s");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_sub_saturate_u");
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_u");
}
void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2147,6 +2147,12 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
bailout(kSimd, "emit_i32x4_max_u");
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_dot_i16x8_s");
+}
+
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i64x2_neg");
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 4c6c1fe1ce..b97c49437f 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -5,6 +5,7 @@
#ifndef V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
#define V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
+#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
namespace v8 {
@@ -339,9 +340,9 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
int size) {
- DCHECK_LE(offset, kMaxInt);
+ DCHECK_LE(0, offset);
Ld(dst, liftoff::GetInstanceOperand());
DCHECK(size == 4 || size == 8);
if (size == 4) {
@@ -352,7 +353,7 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+ int32_t offset) {
LoadFromInstance(dst, offset, kTaggedSize);
}
@@ -378,7 +379,27 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
int32_t offset_imm,
LiftoffRegister src,
LiftoffRegList pinned) {
- bailout(kRefTypes, "GlobalSet");
+ DCHECK_GE(offset_imm, 0);
+ DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
+ STATIC_ASSERT(kTaggedSize == kInt64Size);
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Sd(src.gp(), MemOperand(dst_addr, offset_imm));
+
+ Label write_barrier;
+ Label exit;
+ CheckPageFlag(dst_addr, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ &write_barrier);
+ b(&exit);
+ bind(&write_barrier);
+ JumpIfSmi(src.gp(), &exit);
+ CheckPageFlag(src.gp(), scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ &exit);
+ Daddu(scratch, dst_addr, offset_imm);
+ CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
+ wasm::WasmCode::kRecordWrite);
+ bind(&exit);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
@@ -1487,6 +1508,16 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
fill_d(dst_msa, scratch);
ilvr_w(dst_msa, kSimd128RegZero, dst_msa);
}
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ xor_v(dst_msa, dst_msa, dst_msa);
+ if (memtype == MachineType::Int32()) {
+ Lwu(scratch, src_op);
+ insert_w(dst_msa, 0, scratch);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ Ld(scratch, src_op);
+ insert_d(dst_msa, 0, scratch);
+ }
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
@@ -1841,15 +1872,15 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
addv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
adds_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
adds_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
@@ -1858,15 +1889,15 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
subv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
subs_s_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
subs_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
@@ -1970,15 +2001,15 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
addv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
adds_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
adds_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
@@ -1987,15 +2018,15 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
subv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
subs_s_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
subs_u_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
@@ -2131,6 +2162,12 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
max_u_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ dotp_s_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
@@ -2264,6 +2301,8 @@ void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
// dst = (scratch1 <= scratch0) ? scratch1 : scratch0.
fsle_w(dst_msa, scratch1, scratch0);
bsel_v(dst_msa, scratch0, scratch1);
+ // Canonicalize the result.
+ fmin_w(dst_msa, dst_msa, dst_msa);
}
void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2284,6 +2323,8 @@ void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
// dst = (scratch0 <= scratch1) ? scratch1 : scratch0.
fsle_w(dst_msa, scratch0, scratch1);
bsel_v(dst_msa, scratch0, scratch1);
+ // Canonicalize the result.
+ fmax_w(dst_msa, dst_msa, dst_msa);
}
void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2383,6 +2424,8 @@ void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
// dst = (scratch1 <= scratch0) ? scratch1 : scratch0.
fsle_d(dst_msa, scratch1, scratch0);
bsel_v(dst_msa, scratch0, scratch1);
+ // Canonicalize the result.
+ fmin_d(dst_msa, dst_msa, dst_msa);
}
void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2403,6 +2446,8 @@ void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
// dst = (scratch0 <= scratch1) ? scratch1 : scratch0.
fsle_d(dst_msa, scratch0, scratch1);
bsel_v(dst_msa, scratch0, scratch1);
+ // Canonicalize the result.
+ fmax_d(dst_msa, dst_msa, dst_msa);
}
void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
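The fmin/fmax self-operations added after the bsel-based selects above canonicalize NaN payloads: min(x, x) is the identity for every ordinary number, but the FPU returns the canonical quiet NaN when x is NaN. A scalar model of the effect (illustrative only; the function name is invented):

#include <cmath>
#include <limits>

float canonicalize(float x) {
  // Identity for ordinary values, canonical quiet NaN for any NaN input;
  // the same effect fmin_w(dst, dst, dst) has on each lane.
  return std::isnan(x) ? std::numeric_limits<float>::quiet_NaN() : x;
}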
diff --git a/deps/v8/src/wasm/baseline/ppc/OWNERS b/deps/v8/src/wasm/baseline/ppc/OWNERS
index 6edd45a6ef..02c2cd757c 100644
--- a/deps/v8/src/wasm/baseline/ppc/OWNERS
+++ b/deps/v8/src/wasm/baseline/ppc/OWNERS
@@ -2,3 +2,4 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
+vasili.skurydzin@ibm.com
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index ef7b720ea9..f75e9db459 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -88,13 +88,11 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
bailout(kUnsupportedArchitecture, "LoadConstant");
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
bailout(kUnsupportedArchitecture, "LoadFromInstance");
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
bailout(kUnsupportedArchitecture, "LoadTaggedPointerFromInstance");
}
@@ -944,6 +942,12 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4_max_u");
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_dot_i16x8_s");
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i16x8splat");
@@ -1006,9 +1010,9 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8add");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_s");
}
@@ -1017,15 +1021,15 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8sub");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_s");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_u");
}
@@ -1034,9 +1038,9 @@ void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8mul");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_u");
}
@@ -1172,9 +1176,9 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16add");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_s");
}
@@ -1485,15 +1489,15 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16sub");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_s");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_u");
}
@@ -1502,9 +1506,9 @@ void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16mul");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_u");
}
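The new i32x4.dot_i16x8_s hook bails out here (as on mips and s390), while mips64 implements it with dotp_s_w above and x64 with pmaddwd below. Its per-lane semantics, as a minimal scalar sketch (not V8 code):

#include <cstdint>

// Each i32 output lane is the sum of two adjacent signed i16 products.
void i32x4_dot_i16x8_s(const int16_t a[8], const int16_t b[8], int32_t out[4]) {
  for (int i = 0; i < 4; ++i) {
    out[i] = int32_t{a[2 * i]} * b[2 * i] + int32_t{a[2 * i + 1]} * b[2 * i + 1];
  }
}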
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index dc6ce2f0b3..a88baa1146 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -87,13 +87,11 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
bailout(kUnsupportedArchitecture, "LoadConstant");
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
bailout(kUnsupportedArchitecture, "LoadFromInstance");
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
bailout(kUnsupportedArchitecture, "LoadTaggedPointerFromInstance");
}
@@ -948,6 +946,12 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4_max_u");
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_dot_i16x8_s");
+}
+
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i16x8splat");
@@ -1010,9 +1014,9 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8add");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_s");
}
@@ -1021,15 +1025,15 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8sub");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_s");
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8subsaturate_u");
}
@@ -1038,9 +1042,9 @@ void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i16x8mul");
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_u");
}
@@ -1176,9 +1180,9 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16add");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_s");
}
@@ -1187,15 +1191,15 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16sub");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_s");
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16subsaturate_u");
}
@@ -1204,9 +1208,9 @@ void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i8x16mul");
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_u");
}
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 713a1ce72a..a64b0e2e37 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -236,11 +236,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
- int size) {
- DCHECK_LE(offset, kMaxInt);
- movq(dst, liftoff::GetInstanceOperand());
+void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
+ DCHECK_LE(0, offset);
DCHECK(size == 4 || size == 8);
+ movq(dst, liftoff::GetInstanceOperand());
if (size == 4) {
movl(dst, Operand(dst, offset));
} else {
@@ -248,9 +247,8 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
}
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
- uint32_t offset) {
- DCHECK_LE(offset, kMaxInt);
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
+ DCHECK_LE(0, offset);
movq(dst, liftoff::GetInstanceOperand());
LoadTaggedPointerField(dst, Operand(dst, offset));
}
@@ -2232,11 +2230,11 @@ void EmitI64x2ShrS(LiftoffAssembler* assm, LiftoffRegister dst,
assm->Pextrq(tmp, lhs.fp(), int8_t{0x0});
assm->sarq_cl(tmp);
- assm->Pinsrq(dst.fp(), tmp, int8_t{0x0});
+ assm->Pinsrq(dst.fp(), tmp, uint8_t{0x0});
assm->Pextrq(tmp, lhs.fp(), int8_t{0x1});
assm->sarq_cl(tmp);
- assm->Pinsrq(dst.fp(), tmp, int8_t{0x1});
+ assm->Pinsrq(dst.fp(), tmp, uint8_t{0x1});
// restore rcx.
if (restore_rcx) {
@@ -2289,14 +2287,21 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
} else if (memtype == MachineType::Uint32()) {
Pmovzxdq(dst.fp(), src_op);
}
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ if (memtype == MachineType::Int32()) {
+ Movss(dst.fp(), src_op);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ Movsd(dst.fp(), src_op);
+ }
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
- Pinsrb(dst.fp(), src_op, 0);
+ Pinsrb(dst.fp(), dst.fp(), src_op, 0);
Pxor(kScratchDoubleReg, kScratchDoubleReg);
Pshufb(dst.fp(), kScratchDoubleReg);
} else if (memtype == MachineType::Int16()) {
- Pinsrw(dst.fp(), src_op, 0);
+ Pinsrw(dst.fp(), dst.fp(), src_op, 0);
Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
Punpcklqdq(dst.fp(), dst.fp());
} else if (memtype == MachineType::Int32()) {
@@ -2304,8 +2309,8 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
CpuFeatureScope avx_scope(this, AVX);
vbroadcastss(dst.fp(), src_op);
} else {
- Movss(dst.fp(), src_op);
- Shufps(dst.fp(), dst.fp(), byte{0});
+ movss(dst.fp(), src_op);
+ shufps(dst.fp(), dst.fp(), byte{0});
}
} else if (memtype == MachineType::Int64()) {
Movddup(dst.fp(), src_op);
@@ -2324,22 +2329,10 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
wasm::SimdShuffle::Pack16Lanes(imms, shuffle);
TurboAssembler::Move(kScratchDoubleReg, make_uint64(imms[3], imms[2]),
make_uint64(imms[1], imms[0]));
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpshufb(dst.fp(), lhs.fp(), kScratchDoubleReg);
- } else {
- if (dst != lhs) {
- movups(dst.fp(), lhs.fp());
- }
- pshufb(dst.fp(), kScratchDoubleReg);
- }
+ Pshufb(dst.fp(), lhs.fp(), kScratchDoubleReg);
return;
}
- LiftoffRegister tmp_simd =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs, rhs));
- Movups(kScratchDoubleReg, lhs.fp());
-
uint64_t mask1[2] = {};
for (int i = 15; i >= 0; i--) {
uint8_t lane = shuffle[i];
@@ -2347,10 +2340,8 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
mask1[j] <<= 8;
mask1[j] |= lane < kSimd128Size ? lane : 0x80;
}
- TurboAssembler::Move(tmp_simd.fp(), mask1[0]);
- movq(kScratchRegister, mask1[1]);
- Pinsrq(tmp_simd.fp(), kScratchRegister, int8_t{1});
- Pshufb(kScratchDoubleReg, tmp_simd.fp());
+ TurboAssembler::Move(liftoff::kScratchDoubleReg2, mask1[1], mask1[0]);
+ Pshufb(kScratchDoubleReg, lhs.fp(), liftoff::kScratchDoubleReg2);
uint64_t mask2[2] = {};
for (int i = 15; i >= 0; i--) {
@@ -2359,14 +2350,9 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
mask2[j] <<= 8;
mask2[j] |= lane >= kSimd128Size ? (lane & 0x0F) : 0x80;
}
- TurboAssembler::Move(tmp_simd.fp(), mask2[0]);
- movq(kScratchRegister, mask2[1]);
- Pinsrq(tmp_simd.fp(), kScratchRegister, int8_t{1});
+ TurboAssembler::Move(liftoff::kScratchDoubleReg2, mask2[1], mask2[0]);
- if (dst.fp() != rhs.fp()) {
- Movups(dst.fp(), rhs.fp());
- }
- Pshufb(dst.fp(), tmp_simd.fp());
+ Pshufb(dst.fp(), rhs.fp(), liftoff::kScratchDoubleReg2);
Por(dst.fp(), kScratchDoubleReg);
}
@@ -2379,10 +2365,7 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
TurboAssembler::Move(mask, uint32_t{0x70707070});
Pshufd(mask, mask, uint8_t{0x0});
Paddusb(mask, rhs.fp());
- if (lhs != dst) {
- Movaps(dst.fp(), lhs.fp());
- }
- Pshufb(dst.fp(), mask);
+ Pshufb(dst.fp(), lhs.fp(), mask);
}
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
@@ -2413,10 +2396,7 @@ void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() != src.fp()) {
- Movss(dst.fp(), src.fp());
- }
- Shufps(dst.fp(), src.fp(), static_cast<byte>(0));
+ Shufps(dst.fp(), src.fp(), 0);
}
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
@@ -2659,7 +2639,7 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
memcpy(vals, imms, sizeof(vals));
TurboAssembler::Move(dst.fp(), vals[0]);
movq(kScratchRegister, vals[1]);
- Pinsrq(dst.fp(), kScratchRegister, int8_t{1});
+ Pinsrq(dst.fp(), kScratchRegister, uint8_t{1});
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
@@ -2827,16 +2807,16 @@ void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddsb, &Assembler::paddsb>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddusb, &Assembler::paddusb>(
this, dst, lhs, rhs);
}
@@ -2847,16 +2827,16 @@ void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubsb, &Assembler::psubsb>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubusb,
&Assembler::psubusb>(this, dst, lhs,
rhs);
@@ -3025,16 +3005,16 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddsw, &Assembler::paddsw>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddusw, &Assembler::paddusw>(
this, dst, lhs, rhs);
}
@@ -3045,16 +3025,16 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubsw, &Assembler::psubsw>(
this, dst, lhs, rhs);
}
-void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
liftoff::EmitSimdNonCommutativeBinOp<&Assembler::vpsubusw,
&Assembler::psubusw>(this, dst, lhs,
rhs);
@@ -3204,6 +3184,13 @@ void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
}
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmaddwd, &Assembler::pmaddwd>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
DoubleRegister reg = dst.fp() == src.fp() ? kScratchDoubleReg : dst.fp();
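The rewritten emit_i8x16_shuffle above builds two pshufb masks: mask1 selects bytes from {lhs} (a mask byte of 0x80 zeroes the lane), mask2 selects from {rhs}, and Por merges the halves. A scalar model of the algorithm (illustrative only; the function name is invented):

#include <cstdint>

void shuffle16(const uint8_t lhs[16], const uint8_t rhs[16],
               const uint8_t shuffle[16], uint8_t dst[16]) {
  for (int i = 0; i < 16; ++i) {
    uint8_t lane = shuffle[i];
    uint8_t from_lhs = lane < 16 ? lhs[lane] : 0;          // mask1 path
    uint8_t from_rhs = lane >= 16 ? rhs[lane & 0x0F] : 0;  // mask2 path
    dst[i] = from_lhs | from_rhs;                          // Por
  }
}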
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index 0bb6552943..f79833464d 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -209,8 +209,7 @@ auto seal(const typename implement<C>::type* x) -> const C* {
// Configuration
-struct ConfigImpl {
-};
+struct ConfigImpl {};
template <>
struct implement<Config> {
@@ -888,8 +887,8 @@ own<Instance> GetInstance(StoreImpl* store,
own<Frame> CreateFrameFromInternal(i::Handle<i::FixedArray> frames, int index,
i::Isolate* isolate, StoreImpl* store) {
- i::Handle<i::StackTraceFrame> frame(i::StackTraceFrame::cast(frames->get(0)),
- isolate);
+ i::Handle<i::StackTraceFrame> frame(
+ i::StackTraceFrame::cast(frames->get(index)), isolate);
i::Handle<i::WasmInstanceObject> instance =
i::StackTraceFrame::GetWasmInstance(frame);
uint32_t func_index = i::StackTraceFrame::GetWasmFunctionIndex(frame);
@@ -1511,7 +1510,8 @@ auto Func::call(const Val args[], Val results[]) const -> own<Trap> {
auto store = func->store();
auto isolate = store->i_isolate();
i::HandleScope handle_scope(isolate);
- i::Object raw_function_data = func->v8_object()->shared().function_data();
+ i::Object raw_function_data =
+ func->v8_object()->shared().function_data(v8::kAcquireLoad);
// WasmCapiFunctions can be called directly.
if (raw_function_data.IsWasmCapiFunctionData()) {
@@ -1544,7 +1544,7 @@ auto Func::call(const Val args[], Val results[]) const -> own<Trap> {
if (object_ref->IsTuple2()) {
i::JSFunction jsfunc =
i::JSFunction::cast(i::Tuple2::cast(*object_ref).value2());
- i::Object data = jsfunc.shared().function_data();
+ i::Object data = jsfunc.shared().function_data(v8::kAcquireLoad);
if (data.IsWasmCapiFunctionData()) {
return CallWasmCapiFunction(i::WasmCapiFunctionData::cast(data), args,
results);
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 86cec955b9..458b564313 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -13,7 +13,6 @@
#include "src/base/memory.h"
#include "src/codegen/signature.h"
#include "src/flags/flags.h"
-#include "src/utils/utils.h"
#include "src/utils/vector.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
@@ -39,9 +38,12 @@ using DecodeResult = VoidResult;
// a buffer of bytes.
class Decoder {
public:
- enum ValidateFlag : bool { kValidate = true, kNoValidate = false };
-
- enum AdvancePCFlag : bool { kAdvancePc = true, kNoAdvancePc = false };
+ // {ValidateFlag} can be used in a boolean manner ({if (!validate) ...}).
+ enum ValidateFlag : int8_t {
+ kNoValidation = 0, // Don't run validation, assume valid input.
+ kBooleanValidation, // Run validation but only store a generic error.
+ kFullValidation // Run full validation with error message and location.
+ };
enum TraceFlag : bool { kTrace = true, kNoTrace = false };
@@ -59,7 +61,7 @@ class Decoder {
virtual ~Decoder() = default;
- inline bool validate_size(const byte* pc, uint32_t length, const char* msg) {
+ bool validate_size(const byte* pc, uint32_t length, const char* msg) {
DCHECK_LE(start_, pc);
if (V8_UNLIKELY(pc > end_ || length > static_cast<uint32_t>(end_ - pc))) {
error(pc, msg);
@@ -70,28 +72,25 @@ class Decoder {
// Reads an 8-bit unsigned integer.
template <ValidateFlag validate>
- inline uint8_t read_u8(const byte* pc, const char* msg = "expected 1 byte") {
+ uint8_t read_u8(const byte* pc, const char* msg = "expected 1 byte") {
return read_little_endian<uint8_t, validate>(pc, msg);
}
// Reads a 16-bit unsigned integer (little endian).
template <ValidateFlag validate>
- inline uint16_t read_u16(const byte* pc,
- const char* msg = "expected 2 bytes") {
+ uint16_t read_u16(const byte* pc, const char* msg = "expected 2 bytes") {
return read_little_endian<uint16_t, validate>(pc, msg);
}
// Reads a 32-bit unsigned integer (little endian).
template <ValidateFlag validate>
- inline uint32_t read_u32(const byte* pc,
- const char* msg = "expected 4 bytes") {
+ uint32_t read_u32(const byte* pc, const char* msg = "expected 4 bytes") {
return read_little_endian<uint32_t, validate>(pc, msg);
}
// Reads a 64-bit unsigned integer (little endian).
template <ValidateFlag validate>
- inline uint64_t read_u64(const byte* pc,
- const char* msg = "expected 8 bytes") {
+ uint64_t read_u64(const byte* pc, const char* msg = "expected 8 bytes") {
return read_little_endian<uint64_t, validate>(pc, msg);
}
@@ -99,72 +98,64 @@ class Decoder {
template <ValidateFlag validate>
uint32_t read_u32v(const byte* pc, uint32_t* length,
const char* name = "LEB32") {
- return read_leb<uint32_t, validate, kNoAdvancePc, kNoTrace>(pc, length,
- name);
+ return read_leb<uint32_t, validate, kNoTrace>(pc, length, name);
}
// Reads a variable-length signed integer (little endian).
template <ValidateFlag validate>
int32_t read_i32v(const byte* pc, uint32_t* length,
const char* name = "signed LEB32") {
- return read_leb<int32_t, validate, kNoAdvancePc, kNoTrace>(pc, length,
- name);
+ return read_leb<int32_t, validate, kNoTrace>(pc, length, name);
}
// Reads a variable-length unsigned integer (little endian).
template <ValidateFlag validate>
uint64_t read_u64v(const byte* pc, uint32_t* length,
const char* name = "LEB64") {
- return read_leb<uint64_t, validate, kNoAdvancePc, kNoTrace>(pc, length,
- name);
+ return read_leb<uint64_t, validate, kNoTrace>(pc, length, name);
}
// Reads a variable-length signed integer (little endian).
template <ValidateFlag validate>
int64_t read_i64v(const byte* pc, uint32_t* length,
const char* name = "signed LEB64") {
- return read_leb<int64_t, validate, kNoAdvancePc, kNoTrace>(pc, length,
- name);
+ return read_leb<int64_t, validate, kNoTrace>(pc, length, name);
}
// Reads a variable-length 33-bit signed integer (little endian).
template <ValidateFlag validate>
int64_t read_i33v(const byte* pc, uint32_t* length,
const char* name = "signed LEB33") {
- return read_leb<int64_t, validate, kNoAdvancePc, kNoTrace, 33>(pc, length,
- name);
+ return read_leb<int64_t, validate, kNoTrace, 33>(pc, length, name);
+ }
+
+ // Convenient overload for callers who don't care about length.
+ template <ValidateFlag validate>
+ WasmOpcode read_prefixed_opcode(const byte* pc) {
+ uint32_t len;
+ return read_prefixed_opcode<validate>(pc, &len);
}
// Reads a prefixed-opcode, possibly with variable-length index.
- // The length param is set to the number of bytes this index is encoded with.
- // For most cases (non variable-length), it will be 1.
+ // `length` is set to the number of bytes that make up this opcode,
+ // *including* the prefix byte. For most opcodes, it will be 2.
template <ValidateFlag validate>
- WasmOpcode read_prefixed_opcode(const byte* pc, uint32_t* length = nullptr,
+ WasmOpcode read_prefixed_opcode(const byte* pc, uint32_t* length,
const char* name = "prefixed opcode") {
- uint32_t unused_length;
- if (length == nullptr) {
- length = &unused_length;
- }
uint32_t index;
- if (*pc == WasmOpcode::kSimdPrefix) {
- // SIMD opcodes can be multiple bytes (when LEB128 encoded).
- index = read_u32v<validate>(pc + 1, length, "prefixed opcode index");
- // Only support SIMD opcodes that go up to 0xFF (when decoded). Anything
- // bigger will need 1 more byte, and the '<< 8' below will be wrong.
- if (validate && V8_UNLIKELY(index > 0xff)) {
- errorf(pc, "Invalid SIMD opcode %d", index);
- }
- } else {
- if (!validate || validate_size(pc, 2, "expected 2 bytes")) {
- DCHECK(validate_size(pc, 2, "expected 2 bytes"));
- index = *(pc + 1);
- *length = 1;
- } else {
- // If kValidate and size validation fails.
- index = 0;
- *length = 0;
- }
+
+ // Prefixed opcodes all use LEB128 encoding.
+ index = read_u32v<validate>(pc + 1, length, "prefixed opcode index");
+ *length += 1; // Prefix byte.
+ // Only support opcodes that go up to 0xFF (when decoded). Anything
+ // bigger will need 1 more byte, and the '<< 8' below will be wrong.
+ if (validate && V8_UNLIKELY(index > 0xff)) {
+ errorf(pc, "Invalid prefixed opcode %d", index);
+    // On validation failure, reset {index} and {length} to benign values.
+ index = 0;
+ *length = 0;
}
+
return static_cast<WasmOpcode>((*pc) << 8 | index);
}
@@ -186,21 +177,28 @@ class Decoder {
// Reads a LEB128 variable-length unsigned 32-bit integer and advances {pc_}.
uint32_t consume_u32v(const char* name = nullptr) {
uint32_t length = 0;
- return read_leb<uint32_t, kValidate, kAdvancePc, kTrace>(pc_, &length,
- name);
+ uint32_t result =
+ read_leb<uint32_t, kFullValidation, kTrace>(pc_, &length, name);
+ pc_ += length;
+ return result;
}
// Reads a LEB128 variable-length signed 32-bit integer and advances {pc_}.
int32_t consume_i32v(const char* name = nullptr) {
uint32_t length = 0;
- return read_leb<int32_t, kValidate, kAdvancePc, kTrace>(pc_, &length, name);
+ int32_t result =
+ read_leb<int32_t, kFullValidation, kTrace>(pc_, &length, name);
+ pc_ += length;
+ return result;
}
// Reads a LEB128 variable-length unsigned 64-bit integer and advances {pc_}.
uint64_t consume_u64v(const char* name = nullptr) {
uint32_t length = 0;
- return read_leb<uint64_t, kValidate, kAdvancePc, kTrace>(pc_, &length,
- name);
+ uint64_t result =
+ read_leb<uint64_t, kFullValidation, kTrace>(pc_, &length, name);
+ pc_ += length;
+ return result;
}
// Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
@@ -224,6 +222,14 @@ class Decoder {
return true;
}
+ // Use this for "boolean validation", i.e. if the error message is not used
+ // anyway.
+ void V8_NOINLINE MarkError() {
+ if (!ok()) return;
+ error_ = {0, "validation failed"};
+ onFirstError();
+ }
+
// Do not inline error methods. This has measurable impact on validation time,
// see https://crbug.com/910432.
void V8_NOINLINE error(const char* msg) { errorf(pc_offset(), "%s", msg); }
@@ -234,6 +240,13 @@ class Decoder {
errorf(offset, "%s", msg);
}
+ void V8_NOINLINE PRINTF_FORMAT(2, 3) errorf(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ verrorf(pc_offset(), format, args);
+ va_end(args);
+ }
+
void V8_NOINLINE PRINTF_FORMAT(3, 4)
errorf(uint32_t offset, const char* format, ...) {
va_list args;
@@ -343,8 +356,8 @@ class Decoder {
onFirstError();
}
- template <typename IntType, bool validate>
- inline IntType read_little_endian(const byte* pc, const char* msg) {
+ template <typename IntType, ValidateFlag validate>
+ IntType read_little_endian(const byte* pc, const char* msg) {
if (!validate) {
DCHECK(validate_size(pc, sizeof(IntType), msg));
} else if (!validate_size(pc, sizeof(IntType), msg)) {
@@ -354,36 +367,59 @@ class Decoder {
}
template <typename IntType>
- inline IntType consume_little_endian(const char* name) {
+ IntType consume_little_endian(const char* name) {
TRACE(" +%u %-20s: ", pc_offset(), name);
if (!checkAvailable(sizeof(IntType))) {
traceOffEnd();
pc_ = end_;
return IntType{0};
}
- IntType val = read_little_endian<IntType, false>(pc_, name);
+ IntType val = read_little_endian<IntType, kNoValidation>(pc_, name);
traceByteRange(pc_, pc_ + sizeof(IntType));
TRACE("= %d\n", val);
pc_ += sizeof(IntType);
return val;
}
- template <typename IntType, ValidateFlag validate, AdvancePCFlag advance_pc,
- TraceFlag trace, size_t size_in_bits = 8 * sizeof(IntType)>
- inline IntType read_leb(const byte* pc, uint32_t* length,
- const char* name = "varint") {
- DCHECK_IMPLIES(advance_pc, pc == pc_);
+ template <typename IntType, ValidateFlag validate, TraceFlag trace,
+ size_t size_in_bits = 8 * sizeof(IntType)>
+ V8_INLINE IntType read_leb(const byte* pc, uint32_t* length,
+ const char* name = "varint") {
static_assert(size_in_bits <= 8 * sizeof(IntType),
"leb does not fit in type");
TRACE_IF(trace, " +%u %-20s: ", pc_offset(), name);
- return read_leb_tail<IntType, validate, advance_pc, trace, size_in_bits, 0>(
- pc, length, name, 0);
+ // Fast path for single-byte integers.
+ if ((!validate || V8_LIKELY(pc < end_)) && !(*pc & 0x80)) {
+ TRACE_IF(trace, "%02x ", *pc);
+ *length = 1;
+ IntType result = *pc;
+ if (std::is_signed<IntType>::value) {
+ // Perform sign extension.
+ constexpr int sign_ext_shift = int{8 * sizeof(IntType)} - 7;
+ result = (result << sign_ext_shift) >> sign_ext_shift;
+ TRACE_IF(trace, "= %" PRIi64 "\n", static_cast<int64_t>(result));
+ } else {
+ TRACE_IF(trace, "= %" PRIu64 "\n", static_cast<uint64_t>(result));
+ }
+ return result;
+ }
+ return read_leb_slowpath<IntType, validate, trace, size_in_bits>(pc, length,
+ name);
+ }
+
+ template <typename IntType, ValidateFlag validate, TraceFlag trace,
+ size_t size_in_bits = 8 * sizeof(IntType)>
+ V8_NOINLINE IntType read_leb_slowpath(const byte* pc, uint32_t* length,
+ const char* name) {
+ // Create an unrolled LEB decoding function per integer type.
+ return read_leb_tail<IntType, validate, trace, size_in_bits, 0>(pc, length,
+ name, 0);
}
- template <typename IntType, ValidateFlag validate, AdvancePCFlag advance_pc,
- TraceFlag trace, size_t size_in_bits, int byte_index>
- IntType read_leb_tail(const byte* pc, uint32_t* length, const char* name,
- IntType result) {
+ template <typename IntType, ValidateFlag validate, TraceFlag trace,
+ size_t size_in_bits, int byte_index>
+ V8_INLINE IntType read_leb_tail(const byte* pc, uint32_t* length,
+ const char* name, IntType result) {
constexpr bool is_signed = std::is_signed<IntType>::value;
constexpr int kMaxLength = (size_in_bits + 6) / 7;
static_assert(byte_index < kMaxLength, "invalid template instantiation");
@@ -404,15 +440,19 @@ class Decoder {
// Compilers are not smart enough to figure out statically that the
// following call is unreachable if is_last_byte is false.
constexpr int next_byte_index = byte_index + (is_last_byte ? 0 : 1);
- return read_leb_tail<IntType, validate, advance_pc, trace, size_in_bits,
+ return read_leb_tail<IntType, validate, trace, size_in_bits,
next_byte_index>(pc + 1, length, name, result);
}
- if (advance_pc) pc_ = pc + (at_end ? 0 : 1);
*length = byte_index + (at_end ? 0 : 1);
if (validate && V8_UNLIKELY(at_end || (b & 0x80))) {
TRACE_IF(trace, at_end ? "<end> " : "<length overflow> ");
- errorf(pc, "expected %s", name);
+ if (validate == kFullValidation) {
+ errorf(pc, "expected %s", name);
+ } else {
+ MarkError();
+ }
result = 0;
+ *length = 0;
}
if (is_last_byte) {
// A signed-LEB128 must sign-extend the final byte, excluding its
@@ -431,12 +471,17 @@ class Decoder {
if (!validate) {
DCHECK(valid_extra_bits);
} else if (V8_UNLIKELY(!valid_extra_bits)) {
- error(pc, "extra bits in varint");
+ if (validate == kFullValidation) {
+ error(pc, "extra bits in varint");
+ } else {
+ MarkError();
+ }
result = 0;
+ *length = 0;
}
}
constexpr int sign_ext_shift =
- is_signed ? Max(0, int{8 * sizeof(IntType)} - shift - 7) : 0;
+ is_signed ? std::max(0, int{8 * sizeof(IntType)} - shift - 7) : 0;
// Perform sign extension.
result = (result << sign_ext_shift) >> sign_ext_shift;
if (trace && is_signed) {
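
The "extra bits in varint" check above enforces that the final byte of a maximal-length encoding uses only the payload bits that still fit in the target type. A worked example for u32, which spans at most five LEB128 bytes:

// ceil(32 / 7) = 5 bytes max; bytes 1-4 carry 4 * 7 = 28 bits, so the
// fifth byte may only use its low 32 - 28 = 4 payload bits (mask 0x0F).
static_assert((32 + 6) / 7 == 5, "max length of a u32 LEB128 varint");
// 0xFF 0xFF 0xFF 0xFF 0x0F decodes to 0xFFFFFFFF and is valid;
// 0xFF 0xFF 0xFF 0xFF 0x1F sets an extra bit and must be rejected.
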
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 42b36f359b..3e07806d89 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -44,15 +44,14 @@ struct WasmException;
return true; \
}())
-#define CHECK_PROTOTYPE_OPCODE(feat) \
- DCHECK(this->module_->origin == kWasmOrigin); \
- if (!VALIDATE(this->enabled_.has_##feat())) { \
- this->errorf(this->pc(), \
- "Invalid opcode 0x%x (enable with --experimental-wasm-" #feat \
- ")", \
- opcode); \
- return 0; \
- } \
+#define CHECK_PROTOTYPE_OPCODE(feat) \
+ DCHECK(this->module_->origin == kWasmOrigin); \
+ if (!VALIDATE(this->enabled_.has_##feat())) { \
+ this->DecodeError( \
+ "Invalid opcode 0x%x (enable with --experimental-wasm-" #feat ")", \
+ opcode); \
+ return 0; \
+ } \
this->detected_->Add(kFeature_##feat);
#define ATOMIC_OP_LIST(V) \
@@ -125,6 +124,57 @@ struct WasmException;
V(I64AtomicStore16U, Uint16) \
V(I64AtomicStore32U, Uint32)
+// Decoder error with explicit PC and format arguments.
+template <Decoder::ValidateFlag validate, typename... Args>
+void DecodeError(Decoder* decoder, const byte* pc, const char* str,
+ Args&&... args) {
+ CHECK(validate == Decoder::kFullValidation ||
+ validate == Decoder::kBooleanValidation);
+ STATIC_ASSERT(sizeof...(Args) > 0);
+ if (validate == Decoder::kBooleanValidation) {
+ decoder->MarkError();
+ } else {
+ decoder->errorf(pc, str, std::forward<Args>(args)...);
+ }
+}
+
+// Decoder error with explicit PC and no format arguments.
+template <Decoder::ValidateFlag validate>
+void DecodeError(Decoder* decoder, const byte* pc, const char* str) {
+ CHECK(validate == Decoder::kFullValidation ||
+ validate == Decoder::kBooleanValidation);
+ if (validate == Decoder::kBooleanValidation) {
+ decoder->MarkError();
+ } else {
+ decoder->error(pc, str);
+ }
+}
+
+// Decoder error without explicit PC, but with format arguments.
+template <Decoder::ValidateFlag validate, typename... Args>
+void DecodeError(Decoder* decoder, const char* str, Args&&... args) {
+ CHECK(validate == Decoder::kFullValidation ||
+ validate == Decoder::kBooleanValidation);
+ STATIC_ASSERT(sizeof...(Args) > 0);
+ if (validate == Decoder::kBooleanValidation) {
+ decoder->MarkError();
+ } else {
+ decoder->errorf(str, std::forward<Args>(args)...);
+ }
+}
+
+// Decoder error without explicit PC and without format arguments.
+template <Decoder::ValidateFlag validate>
+void DecodeError(Decoder* decoder, const char* str) {
+ CHECK(validate == Decoder::kFullValidation ||
+ validate == Decoder::kBooleanValidation);
+ if (validate == Decoder::kBooleanValidation) {
+ decoder->MarkError();
+ } else {
+ decoder->error(str);
+ }
+}
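
The four DecodeError overloads above let call sites stay agnostic of the validation mode: under kBooleanValidation only an error flag is set and no message is formatted, while kFullValidation produces the full printf-style message. A hedged sketch of the same compile-time dispatch, with stand-in types in place of the real Decoder:

#include <utility>

enum ValidateFlag { kNoValidation, kBooleanValidation, kFullValidation };

struct StubDecoder {
  bool failed = false;
  void MarkError() { failed = true; }                        // cheap path
  void errorf(const char* /*fmt*/, ...) { failed = true; }   // would format
};

template <ValidateFlag validate, typename... Args>
void ReportError(StubDecoder* decoder, const char* fmt, Args&&... args) {
  static_assert(validate != kNoValidation, "errors imply some validation");
  if (validate == kBooleanValidation) {
    decoder->MarkError();  // no formatting cost on the boolean path
  } else {
    decoder->errorf(fmt, std::forward<Args>(args)...);
  }
}
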
+
namespace value_type_reader {
V8_INLINE WasmFeature feature_for_heap_type(HeapType heap_type) {
@@ -147,6 +197,12 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
uint32_t* const length, const WasmFeatures& enabled) {
int64_t heap_index = decoder->read_i33v<validate>(pc, length, "heap type");
if (heap_index < 0) {
+ int64_t min_1_byte_leb128 = -64;
+ if (heap_index < min_1_byte_leb128) {
+ DecodeError<validate>(decoder, pc, "Unknown heap type %" PRId64,
+ heap_index);
+ return HeapType(HeapType::kBottom);
+ }
uint8_t uint_7_mask = 0x7F;
uint8_t code = static_cast<ValueTypeCode>(heap_index) & uint_7_mask;
switch (code) {
@@ -157,8 +213,9 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
case kI31RefCode: {
HeapType result = HeapType::from_code(code);
if (!VALIDATE(enabled.contains(feature_for_heap_type(result)))) {
- decoder->errorf(
- pc, "invalid heap type '%s', enable with --experimental-wasm-%s",
+ DecodeError<validate>(
+ decoder, pc,
+ "invalid heap type '%s', enable with --experimental-wasm-%s",
result.name().c_str(),
WasmFeatures::name_for_feature(feature_for_heap_type(result)));
return HeapType(HeapType::kBottom);
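
The early bailout above relies on the signed-LEB128 one-byte range: seven payload bits, sign-extended, cover exactly [-64, 63], so any heap index below -64 cannot be one of the short single-byte type codes. A worked check (the funcref example uses the binary-format code 0x70):

// One-byte signed LEB128 range: [-2^6, 2^6 - 1] = [-64, 63].
static_assert(-(int64_t{1} << 6) == -64, "one-byte signed LEB128 minimum");
static_assert((int64_t{1} << 6) - 1 == 63, "one-byte signed LEB128 maximum");
// E.g. the funcref code byte 0x70 sign-extends to 0x70 - 0x80 = -16,
// inside [-64, 63]; heap_index = -65 can never be a short code.
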
@@ -166,25 +223,25 @@ HeapType read_heap_type(Decoder* decoder, const byte* pc,
return result;
}
default:
- if (validate) {
- decoder->errorf(pc, "Unknown heap type %" PRId64, heap_index);
- }
+ DecodeError<validate>(decoder, pc, "Unknown heap type %" PRId64,
+ heap_index);
return HeapType(HeapType::kBottom);
}
UNREACHABLE();
} else {
if (!VALIDATE(enabled.has_typed_funcref())) {
- decoder->error(pc,
- "Invalid indexed heap type, enable with "
- "--experimental-wasm-typed-funcref");
+ DecodeError<validate>(decoder, pc,
+ "Invalid indexed heap type, enable with "
+ "--experimental-wasm-typed-funcref");
return HeapType(HeapType::kBottom);
}
uint32_t type_index = static_cast<uint32_t>(heap_index);
if (!VALIDATE(type_index < kV8MaxWasmTypes)) {
- decoder->errorf(pc,
- "Type index %u is greater than the maximum number %zu "
- "of type definitions supported by V8",
- type_index, kV8MaxWasmTypes);
+ DecodeError<validate>(
+ decoder, pc,
+ "Type index %u is greater than the maximum number %zu "
+ "of type definitions supported by V8",
+ type_index, kV8MaxWasmTypes);
return HeapType(HeapType::kBottom);
}
return HeapType(type_index);
@@ -214,8 +271,9 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
ValueType result = ValueType::Ref(
heap_type, code == kI31RefCode ? kNonNullable : kNullable);
if (!VALIDATE(enabled.contains(feature_for_heap_type(heap_type)))) {
- decoder->errorf(
- pc, "invalid value type '%s', enable with --experimental-wasm-%s",
+ DecodeError<validate>(
+ decoder, pc,
+ "invalid value type '%s', enable with --experimental-wasm-%s",
result.name().c_str(),
WasmFeatures::name_for_feature(feature_for_heap_type(heap_type)));
return kWasmBottom;
@@ -234,10 +292,10 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
case kOptRefCode: {
Nullability nullability = code == kOptRefCode ? kNullable : kNonNullable;
if (!VALIDATE(enabled.has_typed_funcref())) {
- decoder->errorf(pc,
- "Invalid type '(ref%s <heaptype>)', enable with "
- "--experimental-wasm-typed-funcref",
- nullability == kNullable ? " null" : "");
+ DecodeError<validate>(decoder, pc,
+ "Invalid type '(ref%s <heaptype>)', enable with "
+ "--experimental-wasm-typed-funcref",
+ nullability == kNullable ? " null" : "");
return kWasmBottom;
}
HeapType heap_type =
@@ -248,18 +306,20 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
}
case kRttCode: {
if (!VALIDATE(enabled.has_gc())) {
- decoder->error(
- pc, "invalid value type 'rtt', enable with --experimental-wasm-gc");
+ DecodeError<validate>(
+ decoder, pc,
+ "invalid value type 'rtt', enable with --experimental-wasm-gc");
return kWasmBottom;
}
uint32_t depth_length;
uint32_t depth =
decoder->read_u32v<validate>(pc + 1, &depth_length, "depth");
if (!VALIDATE(depth <= kV8MaxRttSubtypingDepth)) {
- decoder->errorf(pc,
- "subtyping depth %u is greater than the maximum depth "
- "%u supported by V8",
- depth, kV8MaxRttSubtypingDepth);
+ DecodeError<validate>(
+ decoder, pc,
+ "subtyping depth %u is greater than the maximum depth "
+ "%u supported by V8",
+ depth, kV8MaxRttSubtypingDepth);
return kWasmBottom;
}
HeapType heap_type = read_heap_type<validate>(
@@ -270,9 +330,9 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
}
case kS128Code: {
if (!VALIDATE(enabled.has_simd())) {
- decoder->error(pc,
- "invalid value type 's128', enable with "
- "--experimental-wasm-simd");
+ DecodeError<validate>(
+ decoder, pc,
+ "invalid value type 's128', enable with --experimental-wasm-simd");
return kWasmBottom;
}
return kWasmS128;
@@ -376,8 +436,9 @@ struct SelectTypeImmediate {
uint8_t num_types =
decoder->read_u32v<validate>(pc, &length, "number of select types");
if (!VALIDATE(num_types == 1)) {
- decoder->error(
- pc + 1, "Invalid number of types. Select accepts exactly one type");
+ DecodeError<validate>(
+ decoder, pc + 1,
+ "Invalid number of types. Select accepts exactly one type");
return;
}
uint32_t type_length;
@@ -385,7 +446,7 @@ struct SelectTypeImmediate {
&type_length, enabled);
length += type_length;
if (!VALIDATE(type != kWasmBottom)) {
- decoder->error(pc + 1, "invalid select type");
+ DecodeError<validate>(decoder, pc + 1, "invalid select type");
}
}
};
@@ -402,18 +463,20 @@ struct BlockTypeImmediate {
int64_t block_type =
decoder->read_i33v<validate>(pc, &length, "block type");
if (block_type < 0) {
- if ((static_cast<uint8_t>(block_type) & byte{0x7f}) == kVoidCode) return;
+ constexpr int64_t kVoidCode_i64_extended = (~int64_t{0x7F}) | kVoidCode;
+ if (block_type == kVoidCode_i64_extended) return;
type = value_type_reader::read_value_type<validate>(decoder, pc, &length,
enabled);
if (!VALIDATE(type != kWasmBottom)) {
- decoder->errorf(pc, "Invalid block type %" PRId64, block_type);
+ DecodeError<validate>(decoder, pc, "Invalid block type %" PRId64,
+ block_type);
}
} else {
if (!VALIDATE(enabled.has_mv())) {
- decoder->errorf(pc,
- "invalid block type %" PRId64
- ", enable with --experimental-wasm-mv",
- block_type);
+ DecodeError<validate>(decoder, pc,
+ "invalid block type %" PRId64
+ ", enable with --experimental-wasm-mv",
+ block_type);
return;
}
type = kWasmBottom;
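
The kVoidCode_i64_extended constant above is the full sign-extended i33 value of the single byte kVoidCode, so the comparison now rejects multi-byte encodings whose low byte merely happens to match, which the old byte-mask check accepted. A sketch of the arithmetic, using the binary-format value 0x40 for the void (empty) block type:

// 0x40 read as a signed LEB128 / i33 value sign-extends to -64:
constexpr int64_t kVoid = 0x40;
constexpr int64_t kVoidExtended = (~int64_t{0x7F}) | kVoid;
static_assert(kVoidExtended == -64, "0x40 sign-extends to -64 in 7 bits");
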
@@ -480,7 +543,8 @@ struct MemoryIndexImmediate {
inline MemoryIndexImmediate(Decoder* decoder, const byte* pc) {
index = decoder->read_u8<validate>(pc, "memory index");
if (!VALIDATE(index == 0)) {
- decoder->errorf(pc, "expected memory index 0, found %u", index);
+ DecodeError<validate>(decoder, pc, "expected memory index 0, found %u",
+ index);
}
}
};
@@ -543,8 +607,8 @@ struct CallIndirectImmediate {
TableIndexImmediate<validate> table(decoder, pc + len);
if (!VALIDATE((table.index == 0 && table.length == 1) ||
enabled.has_reftypes())) {
- decoder->errorf(pc + len, "expected table index 0, found %u",
- table.index);
+ DecodeError<validate>(decoder, pc + len,
+ "expected table index 0, found %u", table.index);
}
table_index = table.index;
length = len + table.length;
@@ -623,10 +687,11 @@ struct MemoryAccessImmediate {
alignment =
decoder->read_u32v<validate>(pc, &alignment_length, "alignment");
if (!VALIDATE(alignment <= max_alignment)) {
- decoder->errorf(pc,
- "invalid alignment; expected maximum alignment is %u, "
- "actual alignment is %u",
- max_alignment, alignment);
+ DecodeError<validate>(
+ decoder, pc,
+ "invalid alignment; expected maximum alignment is %u, "
+ "actual alignment is %u",
+ max_alignment, alignment);
}
uint32_t offset_length;
offset = decoder->read_u32v<validate>(pc + alignment_length, &offset_length,
@@ -746,12 +811,29 @@ struct HeapTypeImmediate {
}
};
+template <Decoder::ValidateFlag validate>
+struct PcForErrors {
+ PcForErrors(const byte* /* pc */) {}
+
+ const byte* pc() const { return nullptr; }
+};
+
+template <>
+struct PcForErrors<Decoder::kFullValidation> {
+ const byte* pc_for_errors = nullptr;
+
+ PcForErrors(const byte* pc) : pc_for_errors(pc) {}
+
+ const byte* pc() const { return pc_for_errors; }
+};
+
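
PcForErrors above stores the error PC only under full validation; the primary template is empty, so via the empty-base optimization a Value or Control carries no pc field at all in the other modes. A small sketch of that size effect (stand-in types, common 64-bit ABI assumed):

#include <cstdint>

struct NoPc {};                          // like PcForErrors<kBooleanValidation>
struct WithPc { const uint8_t* pc; };    // like PcForErrors<kFullValidation>

template <typename PcBase>
struct SlimValue : PcBase {
  int32_t type_tag;  // stands in for the ValueType member
};

static_assert(sizeof(SlimValue<NoPc>) == sizeof(int32_t),
              "empty base adds no storage");
static_assert(sizeof(SlimValue<WithPc>) > sizeof(SlimValue<NoPc>),
              "full validation pays for the pc field");
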
// An entry on the value stack.
-struct ValueBase {
- const byte* pc = nullptr;
+template <Decoder::ValidateFlag validate>
+struct ValueBase : public PcForErrors<validate> {
ValueType type = kWasmStmt;
- ValueBase(const byte* pc, ValueType type) : pc(pc), type(type) {}
+ ValueBase(const byte* pc, ValueType type)
+ : PcForErrors<validate>(pc), type(type) {}
};
template <typename Value>
@@ -794,12 +876,11 @@ enum Reachability : uint8_t {
};
// An entry on the control stack (i.e. if, block, loop, or try).
-template <typename Value>
-struct ControlBase {
+template <typename Value, Decoder::ValidateFlag validate>
+struct ControlBase : public PcForErrors<validate> {
ControlKind kind = kControlBlock;
uint32_t locals_count = 0;
uint32_t stack_depth = 0; // stack height at the beginning of the construct.
- const uint8_t* pc = nullptr;
Reachability reachability = kReachable;
// Values merged into the start or end of this control construct.
@@ -810,10 +891,10 @@ struct ControlBase {
ControlBase(ControlKind kind, uint32_t locals_count, uint32_t stack_depth,
const uint8_t* pc, Reachability reachability)
- : kind(kind),
+ : PcForErrors<validate>(pc),
+ kind(kind),
locals_count(locals_count),
stack_depth(stack_depth),
- pc(pc),
reachability(reachability),
start_merge(reachability == kReachable) {
DCHECK(kind == kControlLet || locals_count == 0);
@@ -904,8 +985,13 @@ struct ControlBase {
F(LoadTransform, LoadType type, LoadTransformationKind transform, \
const MemoryAccessImmediate<validate>& imm, const Value& index, \
Value* result) \
+ F(LoadLane, LoadType type, const Value& value, const Value& index, \
+ const MemoryAccessImmediate<validate>& imm, const uint8_t laneidx, \
+ Value* result) \
F(StoreMem, StoreType type, const MemoryAccessImmediate<validate>& imm, \
const Value& index, const Value& value) \
+ F(StoreLane, StoreType type, const MemoryAccessImmediate<validate>& imm, \
+ const Value& index, const Value& value, const uint8_t laneidx) \
F(CurrentMemoryPages, Value* result) \
F(MemoryGrow, const Value& value, Value* result) \
F(CallDirect, const CallFunctionImmediate<validate>& imm, \
@@ -1035,9 +1121,10 @@ class WasmDecoder : public Decoder {
: local_types_.begin();
// Decode local declarations, if any.
- uint32_t entries = read_u32v<kValidate>(pc, &length, "local decls count");
+ uint32_t entries =
+ read_u32v<kFullValidation>(pc, &length, "local decls count");
if (!VALIDATE(ok())) {
- error(pc + *total_length, "invalid local decls count");
+ DecodeError(pc + *total_length, "invalid local decls count");
return false;
}
@@ -1046,26 +1133,27 @@ class WasmDecoder : public Decoder {
while (entries-- > 0) {
if (!VALIDATE(more())) {
- error(end(), "expected more local decls but reached end of input");
+ DecodeError(end(),
+ "expected more local decls but reached end of input");
return false;
}
- uint32_t count =
- read_u32v<kValidate>(pc + *total_length, &length, "local count");
+ uint32_t count = read_u32v<kFullValidation>(pc + *total_length, &length,
+ "local count");
if (!VALIDATE(ok())) {
- error(pc + *total_length, "invalid local count");
+ DecodeError(pc + *total_length, "invalid local count");
return false;
}
DCHECK_LE(local_types_.size(), kV8MaxWasmFunctionLocals);
if (!VALIDATE(count <= kV8MaxWasmFunctionLocals - local_types_.size())) {
- error(pc + *total_length, "local count too large");
+ DecodeError(pc + *total_length, "local count too large");
return false;
}
*total_length += length;
- ValueType type = value_type_reader::read_value_type<kValidate>(
+ ValueType type = value_type_reader::read_value_type<kFullValidation>(
this, pc + *total_length, &length, enabled_);
if (!VALIDATE(type != kWasmBottom)) {
- error(pc + *total_length, "invalid local type");
+ DecodeError(pc + *total_length, "invalid local type");
return false;
}
*total_length += length;
@@ -1081,6 +1169,13 @@ class WasmDecoder : public Decoder {
return true;
}
+ // Shorthand that forwards to the {DecodeError} functions above, passing our
+ // {validate} flag.
+ template <typename... Args>
+ void DecodeError(Args... args) {
+ wasm::DecodeError<validate>(this, std::forward<Args>(args)...);
+ }
+
static BitVector* AnalyzeLoopAssignment(WasmDecoder* decoder, const byte* pc,
uint32_t locals_count, Zone* zone) {
if (pc >= decoder->end()) return nullptr;
@@ -1138,7 +1233,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, LocalIndexImmediate<validate>& imm) {
if (!VALIDATE(imm.index < num_locals())) {
- errorf(pc, "invalid local index: %u", imm.index);
+ DecodeError(pc, "invalid local index: %u", imm.index);
return false;
}
return true;
@@ -1152,7 +1247,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, ExceptionIndexImmediate<validate>& imm) {
if (!Complete(imm)) {
- errorf(pc, "Invalid exception index: %u", imm.index);
+ DecodeError(pc, "Invalid exception index: %u", imm.index);
return false;
}
return true;
@@ -1160,7 +1255,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, GlobalIndexImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->globals.size())) {
- errorf(pc, "invalid global index: %u", imm.index);
+ DecodeError(pc, "invalid global index: %u", imm.index);
return false;
}
imm.global = &module_->globals[imm.index];
@@ -1176,15 +1271,15 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, StructIndexImmediate<validate>& imm) {
if (Complete(imm)) return true;
- errorf(pc, "invalid struct index: %u", imm.index);
+ DecodeError(pc, "invalid struct index: %u", imm.index);
return false;
}
inline bool Validate(const byte* pc, FieldIndexImmediate<validate>& imm) {
if (!Validate(pc, imm.struct_index)) return false;
if (!VALIDATE(imm.index < imm.struct_index.struct_type->field_count())) {
- errorf(pc + imm.struct_index.length, "invalid field index: %u",
- imm.index);
+ DecodeError(pc + imm.struct_index.length, "invalid field index: %u",
+ imm.index);
return false;
}
return true;
@@ -1198,7 +1293,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, ArrayIndexImmediate<validate>& imm) {
if (!Complete(imm)) {
- errorf(pc, "invalid array index: %u", imm.index);
+ DecodeError(pc, "invalid array index: %u", imm.index);
return false;
}
return true;
@@ -1225,7 +1320,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, CallFunctionImmediate<validate>& imm) {
if (!Complete(imm)) {
- errorf(pc, "invalid function index: %u", imm.index);
+ DecodeError(pc, "invalid function index: %u", imm.index);
return false;
}
return true;
@@ -1242,27 +1337,28 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, CallIndirectImmediate<validate>& imm) {
if (!VALIDATE(imm.table_index < module_->tables.size())) {
- error("call_indirect: table index immediate out of bounds");
+ DecodeError(pc, "call_indirect: table index immediate out of bounds");
return false;
}
ValueType table_type = module_->tables[imm.table_index].type;
if (!VALIDATE(IsSubtypeOf(table_type, kWasmFuncRef, module_))) {
- errorf(pc, "call_indirect: immediate table #%u is not of a function type",
- imm.table_index);
+ DecodeError(
+ pc, "call_indirect: immediate table #%u is not of a function type",
+ imm.table_index);
return false;
}
if (!Complete(imm)) {
- errorf(pc, "invalid signature index: #%u", imm.sig_index);
+ DecodeError(pc, "invalid signature index: #%u", imm.sig_index);
return false;
}
// Check that the dynamic signature for this call is a subtype of the static
// type of the table the function is defined in.
ValueType immediate_type = ValueType::Ref(imm.sig_index, kNonNullable);
if (!VALIDATE(IsSubtypeOf(immediate_type, table_type, module_))) {
- errorf(pc,
- "call_indirect: Immediate signature #%u is not a subtype of "
- "immediate table #%u",
- imm.sig_index, imm.table_index);
+ DecodeError(pc,
+ "call_indirect: Immediate signature #%u is not a subtype of "
+ "immediate table #%u",
+ imm.sig_index, imm.table_index);
}
return true;
}
@@ -1270,7 +1366,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, BranchDepthImmediate<validate>& imm,
size_t control_depth) {
if (!VALIDATE(imm.depth < control_depth)) {
- errorf(pc, "invalid branch depth: %u", imm.depth);
+ DecodeError(pc, "invalid branch depth: %u", imm.depth);
return false;
}
return true;
@@ -1279,8 +1375,8 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, BranchTableImmediate<validate>& imm,
size_t block_depth) {
if (!VALIDATE(imm.table_count <= kV8MaxWasmFunctionBrTableSize)) {
- errorf(pc, "invalid table count (> max br_table size): %u",
- imm.table_count);
+ DecodeError(pc, "invalid table count (> max br_table size): %u",
+ imm.table_count);
return false;
}
return checkAvailable(imm.table_count);
@@ -1324,7 +1420,7 @@ class WasmDecoder : public Decoder {
break;
}
if (!VALIDATE(imm.lane >= 0 && imm.lane < num_lanes)) {
- error(pc, "invalid lane index");
+ DecodeError(pc, "invalid lane index");
return false;
} else {
return true;
@@ -1338,7 +1434,7 @@ class WasmDecoder : public Decoder {
}
// Shuffle indices must be in [0..31] for a 16 lane shuffle.
if (!VALIDATE(max_lane < 2 * kSimd128Size)) {
- error(pc, "invalid shuffle mask");
+ DecodeError(pc, "invalid shuffle mask");
return false;
}
return true;
@@ -1356,8 +1452,8 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, BlockTypeImmediate<validate>& imm) {
if (!Complete(imm)) {
- errorf(pc, "block type index %u out of bounds (%zu types)", imm.sig_index,
- module_->types.size());
+ DecodeError(pc, "block type index %u out of bounds (%zu types)",
+ imm.sig_index, module_->types.size());
return false;
}
return true;
@@ -1365,11 +1461,11 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, FunctionIndexImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->functions.size())) {
- errorf(pc, "invalid function index: %u", imm.index);
+ DecodeError(pc, "invalid function index: %u", imm.index);
return false;
}
if (!VALIDATE(module_->functions[imm.index].declared)) {
- this->errorf(pc, "undeclared reference to function #%u", imm.index);
+ DecodeError(pc, "undeclared reference to function #%u", imm.index);
return false;
}
return true;
@@ -1377,7 +1473,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, MemoryIndexImmediate<validate>& imm) {
if (!VALIDATE(module_->has_memory)) {
- errorf(pc, "memory instruction with no memory");
+ DecodeError(pc, "memory instruction with no memory");
return false;
}
return true;
@@ -1386,7 +1482,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, MemoryInitImmediate<validate>& imm) {
if (!VALIDATE(imm.data_segment_index <
module_->num_declared_data_segments)) {
- errorf(pc, "invalid data segment index: %u", imm.data_segment_index);
+ DecodeError(pc, "invalid data segment index: %u", imm.data_segment_index);
return false;
}
if (!Validate(pc + imm.length - imm.memory.length, imm.memory))
@@ -1396,7 +1492,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, DataDropImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->num_declared_data_segments)) {
- errorf(pc, "invalid data segment index: %u", imm.index);
+ DecodeError(pc, "invalid data segment index: %u", imm.index);
return false;
}
return true;
@@ -1409,7 +1505,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, TableIndexImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->tables.size())) {
- errorf(pc, "invalid table index: %u", imm.index);
+ DecodeError(pc, "invalid table index: %u", imm.index);
return false;
}
return true;
@@ -1417,7 +1513,8 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, TableInitImmediate<validate>& imm) {
if (!VALIDATE(imm.elem_segment_index < module_->elem_segments.size())) {
- errorf(pc, "invalid element segment index: %u", imm.elem_segment_index);
+ DecodeError(pc, "invalid element segment index: %u",
+ imm.elem_segment_index);
return false;
}
if (!Validate(pc + imm.length - imm.table.length, imm.table)) {
@@ -1426,8 +1523,8 @@ class WasmDecoder : public Decoder {
ValueType elem_type = module_->elem_segments[imm.elem_segment_index].type;
if (!VALIDATE(IsSubtypeOf(elem_type, module_->tables[imm.table.index].type,
module_))) {
- errorf(pc, "table %u is not a super-type of %s", imm.table.index,
- elem_type.name().c_str());
+ DecodeError(pc, "table %u is not a super-type of %s", imm.table.index,
+ elem_type.name().c_str());
return false;
}
return true;
@@ -1435,7 +1532,7 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, ElemDropImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->elem_segments.size())) {
- errorf(pc, "invalid element segment index: %u", imm.index);
+ DecodeError(pc, "invalid element segment index: %u", imm.index);
return false;
}
return true;
@@ -1447,8 +1544,8 @@ class WasmDecoder : public Decoder {
ValueType src_type = module_->tables[imm.table_src.index].type;
if (!VALIDATE(IsSubtypeOf(
src_type, module_->tables[imm.table_dst.index].type, module_))) {
- errorf(pc, "table %u is not a super-type of %s", imm.table_dst.index,
- src_type.name().c_str());
+ DecodeError(pc, "table %u is not a super-type of %s", imm.table_dst.index,
+ src_type.name().c_str());
return false;
}
return true;
@@ -1456,12 +1553,12 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, HeapTypeImmediate<validate>& imm) {
if (!VALIDATE(!imm.type.is_bottom())) {
- error(pc, "invalid heap type");
+ DecodeError(pc, "invalid heap type");
return false;
}
if (!VALIDATE(imm.type.is_generic() ||
module_->has_type(imm.type.ref_index()))) {
- errorf(pc, "Type index %u is out of bounds", imm.type.ref_index());
+ DecodeError(pc, "Type index %u is out of bounds", imm.type.ref_index());
return false;
}
return true;
@@ -1581,10 +1678,8 @@ class WasmDecoder : public Decoder {
case kExprF64Const:
return 9;
case kNumericPrefix: {
- byte numeric_index =
- decoder->read_u8<validate>(pc + 1, "numeric_index");
- WasmOpcode opcode =
- static_cast<WasmOpcode>(kNumericPrefix << 8 | numeric_index);
+ uint32_t length = 0;
+ opcode = decoder->read_prefixed_opcode<validate>(pc, &length);
switch (opcode) {
case kExprI32SConvertSatF32:
case kExprI32UConvertSatF32:
@@ -1594,44 +1689,44 @@ class WasmDecoder : public Decoder {
case kExprI64UConvertSatF32:
case kExprI64SConvertSatF64:
case kExprI64UConvertSatF64:
- return 2;
+ return length;
case kExprMemoryInit: {
- MemoryInitImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ MemoryInitImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprDataDrop: {
- DataDropImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ DataDropImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprMemoryCopy: {
- MemoryCopyImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ MemoryCopyImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprMemoryFill: {
- MemoryIndexImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ MemoryIndexImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprTableInit: {
- TableInitImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ TableInitImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprElemDrop: {
- ElemDropImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ ElemDropImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprTableCopy: {
- TableCopyImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ TableCopyImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprTableGrow:
case kExprTableSize:
case kExprTableFill: {
- TableIndexImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ TableIndexImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
default:
- decoder->error(pc, "invalid numeric opcode");
- return 2;
+ decoder->DecodeError(pc, "invalid numeric opcode");
+ return length;
}
}
case kSimdPrefix: {
@@ -1641,67 +1736,81 @@ class WasmDecoder : public Decoder {
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_SIMD_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
- return 1 + length;
+ return length;
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_SIMD_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
- return 2 + length;
+ return length + 1;
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_SIMD_MEM_OPCODE(DECLARE_OPCODE_CASE)
- FOREACH_SIMD_POST_MVP_MEM_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
{
- MemoryAccessImmediate<validate> imm(decoder, pc + length + 1,
+ MemoryAccessImmediate<validate> imm(decoder, pc + length,
+ UINT32_MAX);
+ return length + imm.length;
+ }
+ case kExprS128Load8Lane:
+ case kExprS128Load16Lane:
+ case kExprS128Load32Lane:
+ case kExprS128Load64Lane:
+ case kExprS128Store8Lane:
+ case kExprS128Store16Lane:
+ case kExprS128Store32Lane:
+ case kExprS128Store64Lane: {
+ MemoryAccessImmediate<validate> imm(decoder, pc + length,
UINT32_MAX);
- return 1 + length + imm.length;
+ // 1 more byte for lane index immediate.
+ return length + imm.length + 1;
}
// Shuffles require a byte per lane, or 16 immediate bytes.
case kExprS128Const:
case kExprI8x16Shuffle:
- return 1 + length + kSimd128Size;
+ return length + kSimd128Size;
default:
- decoder->error(pc, "invalid SIMD opcode");
- return 1 + length;
+ decoder->DecodeError(pc, "invalid SIMD opcode");
+ return length;
}
}
case kAtomicPrefix: {
- byte atomic_index = decoder->read_u8<validate>(pc + 1, "atomic_index");
- WasmOpcode opcode =
- static_cast<WasmOpcode>(kAtomicPrefix << 8 | atomic_index);
+ uint32_t length = 0;
+ opcode = decoder->read_prefixed_opcode<validate>(pc, &length,
+ "atomic_index");
switch (opcode) {
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_ATOMIC_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
{
- MemoryAccessImmediate<validate> imm(decoder, pc + 2, UINT32_MAX);
- return 2 + imm.length;
+ MemoryAccessImmediate<validate> imm(decoder, pc + length,
+ UINT32_MAX);
+ return length + imm.length;
}
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_ATOMIC_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
{
- return 2 + 1;
+ return length + 1;
}
default:
- decoder->error(pc, "invalid Atomics opcode");
- return 2;
+ decoder->DecodeError(pc, "invalid Atomics opcode");
+ return length;
}
}
case kGCPrefix: {
- byte gc_index = decoder->read_u8<validate>(pc + 1, "gc_index");
- WasmOpcode opcode = static_cast<WasmOpcode>(kGCPrefix << 8 | gc_index);
+ uint32_t length = 0;
+ opcode =
+ decoder->read_prefixed_opcode<validate>(pc, &length, "gc_index");
switch (opcode) {
case kExprStructNewWithRtt:
case kExprStructNewDefault: {
- StructIndexImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ StructIndexImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprStructGet:
case kExprStructGetS:
case kExprStructGetU:
case kExprStructSet: {
- FieldIndexImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ FieldIndexImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprArrayNewWithRtt:
case kExprArrayNewDefault:
@@ -1710,39 +1819,39 @@ class WasmDecoder : public Decoder {
case kExprArrayGetU:
case kExprArraySet:
case kExprArrayLen: {
- ArrayIndexImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ ArrayIndexImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprBrOnCast: {
- BranchDepthImmediate<validate> imm(decoder, pc + 2);
- return 2 + imm.length;
+ BranchDepthImmediate<validate> imm(decoder, pc + length);
+ return length + imm.length;
}
case kExprRttCanon:
case kExprRttSub: {
// TODO(7748): Account for rtt.sub's additional immediates if
// they stick.
HeapTypeImmediate<validate> imm(WasmFeatures::All(), decoder,
- pc + 2);
- return 2 + imm.length;
+ pc + length);
+ return length + imm.length;
}
case kExprI31New:
case kExprI31GetS:
case kExprI31GetU:
- return 2;
+ return length;
case kExprRefTest:
case kExprRefCast: {
HeapTypeImmediate<validate> ht1(WasmFeatures::All(), decoder,
- pc + 2);
+ pc + length);
HeapTypeImmediate<validate> ht2(WasmFeatures::All(), decoder,
- pc + 2 + ht1.length);
- return 2 + ht1.length + ht2.length;
+ pc + length + ht1.length);
+ return length + ht1.length + ht2.length;
}
default:
// This is unreachable except for malformed modules.
- decoder->error(pc, "invalid gc opcode");
- return 2;
+ decoder->DecodeError(pc, "invalid gc opcode");
+ return length;
}
}
default:
@@ -1966,8 +2075,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->consume_bytes(locals_length);
for (uint32_t index = params_count; index < this->num_locals(); index++) {
if (!VALIDATE(this->local_type(index).is_defaultable())) {
- this->errorf(
- this->pc(),
+ this->DecodeError(
"Cannot define function-level local of non-defaultable type %s",
this->local_type(index).name().c_str());
return this->TraceFailed();
@@ -1980,9 +2088,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!VALIDATE(control_.empty())) {
if (control_.size() > 1) {
- this->error(control_.back().pc, "unterminated control structure");
+ this->DecodeError(control_.back().pc(),
+ "unterminated control structure");
} else {
- this->error("function body must end with \"end\" opcode");
+ this->DecodeError("function body must end with \"end\" opcode");
}
return TraceFailed();
}
@@ -1994,19 +2103,24 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
bool TraceFailed() {
- TRACE("wasm-error module+%-6d func+%d: %s\n\n", this->error_.offset(),
- this->GetBufferRelativeOffset(this->error_.offset()),
- this->error_.message().c_str());
+ if (this->error_.offset()) {
+ TRACE("wasm-error module+%-6d func+%d: %s\n\n", this->error_.offset(),
+ this->GetBufferRelativeOffset(this->error_.offset()),
+ this->error_.message().c_str());
+ } else {
+ TRACE("wasm-error: %s\n\n", this->error_.message().c_str());
+ }
return false;
}
const char* SafeOpcodeNameAt(const byte* pc) {
+ if (!pc) return "<null>";
if (pc >= this->end_) return "<end>";
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(opcode));
}
- opcode = this->template read_prefixed_opcode<Decoder::kValidate>(pc);
+ opcode = this->template read_prefixed_opcode<Decoder::kFullValidation>(pc);
return WasmOpcodes::OpcodeName(opcode);
}
@@ -2067,16 +2181,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
bool CheckHasMemory() {
if (!VALIDATE(this->module_->has_memory)) {
- this->error(this->pc_ - 1, "memory instruction with no memory");
- return false;
- }
- return true;
- }
-
- bool CheckHasMemoryForAtomics() {
- if (FLAG_wasm_atomics_on_non_shared_memory && CheckHasMemory()) return true;
- if (!VALIDATE(this->module_->has_shared_memory)) {
- this->error(this->pc_ - 1, "Atomic opcodes used without shared memory");
+ this->DecodeError(this->pc_ - 1, "memory instruction with no memory");
return false;
}
return true;
@@ -2084,7 +2189,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
bool CheckSimdPostMvp(WasmOpcode opcode) {
if (!FLAG_wasm_simd_post_mvp && WasmOpcodes::IsSimdPostMvpOpcode(opcode)) {
- this->error(
+ this->DecodeError(
"simd opcode not available, enable with --wasm-simd-post-mvp");
return false;
}
@@ -2154,41 +2259,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Append(" | ");
for (size_t i = 0; i < decoder_->stack_size(); ++i) {
Value& val = decoder_->stack_[i];
- WasmOpcode val_opcode = static_cast<WasmOpcode>(*val.pc);
- if (WasmOpcodes::IsPrefixOpcode(val_opcode)) {
- val_opcode =
- decoder_->template read_prefixed_opcode<Decoder::kNoValidate>(
- val.pc);
- }
- Append(" %c@%d:%s", val.type.short_name(),
- static_cast<int>(val.pc - decoder_->start_),
- WasmOpcodes::OpcodeName(val_opcode));
- // If the decoder failed, don't try to decode the immediates, as this
- // can trigger a DCHECK failure.
- if (decoder_->failed()) continue;
- switch (val_opcode) {
- case kExprI32Const: {
- ImmI32Immediate<Decoder::kNoValidate> imm(decoder_, val.pc + 1);
- Append("[%d]", imm.value);
- break;
- }
- case kExprLocalGet:
- case kExprLocalSet:
- case kExprLocalTee: {
- LocalIndexImmediate<Decoder::kNoValidate> imm(decoder_, val.pc + 1);
- Append("[%u]", imm.index);
- break;
- }
- case kExprGlobalGet:
- case kExprGlobalSet: {
- GlobalIndexImmediate<Decoder::kNoValidate> imm(decoder_,
- val.pc + 1);
- Append("[%u]", imm.index);
- break;
- }
- default:
- break;
- }
+ Append(" %c", val.type.short_name());
}
}
@@ -2268,16 +2339,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(Catch) {
CHECK_PROTOTYPE_OPCODE(eh);
if (!VALIDATE(!control_.empty())) {
- this->error("catch does not match any try");
+ this->DecodeError("catch does not match any try");
return 0;
}
Control* c = &control_.back();
if (!VALIDATE(c->is_try())) {
- this->error("catch does not match any try");
+ this->DecodeError("catch does not match any try");
return 0;
}
if (!VALIDATE(c->is_incomplete_try())) {
- this->error("catch already present for try");
+ this->DecodeError("catch already present for try");
return 0;
}
c->kind = kControlTryCatch;
@@ -2298,12 +2369,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Control* c = control_at(imm.depth.depth);
Value exception = Pop(0, kWasmExnRef);
const WasmExceptionSig* sig = imm.index.exception->sig;
- size_t value_count = sig->parameter_count();
+ int value_count = static_cast<int>(sig->parameter_count());
// TODO(wasm): This operand stack mutation is an ugly hack to make
// both type checking here as well as environment merging in the
// graph builder interface work out of the box. We should introduce
// special handling for both and do minimal/no stack mutation here.
- for (size_t i = 0; i < value_count; ++i) Push(sig->GetParam(i));
+ EnsureStackSpace(value_count);
+ for (int i = 0; i < value_count; ++i) Push(sig->GetParam(i));
Vector<Value> values(stack_ + c->stack_depth, value_count);
TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
if (this->failed()) return 0;
@@ -2314,7 +2386,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
} else if (check_result == kInvalidStack) {
return 0;
}
- for (int i = static_cast<int>(value_count) - 1; i >= 0; i--) Pop(i);
+ for (int i = value_count - 1; i >= 0; i--) Pop(i);
Value* pexception = Push(kWasmExnRef);
*pexception = exception;
return 1 + imm.length;
@@ -2330,6 +2402,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
if (V8_LIKELY(check_result == kReachableBranch)) {
switch (ref_object.type.kind()) {
+ case ValueType::kBottom:
+ // We are in unreachable code, just forward the bottom value.
case ValueType::kRef: {
Value* result = Push(ref_object.type);
CALL_INTERFACE(PassThrough, ref_object, result);
@@ -2347,7 +2421,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
default:
- this->error(this->pc_, "invalid argument type to br_on_null");
+ this->DecodeError("invalid argument type to br_on_null");
return 0;
}
} else if (check_result == kInvalidStack) {
@@ -2361,8 +2435,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
uint32_t old_local_count = this->num_locals();
- // Temporarily add the let-defined values
- // to the beginning of the function locals.
+ // Temporarily add the let-defined values to the beginning of the function
+ // locals.
uint32_t locals_length;
if (!this->DecodeLocals(this->pc() + 1 + imm.length, &locals_length, 0)) {
return 0;
@@ -2406,16 +2480,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(Else) {
if (!VALIDATE(!control_.empty())) {
- this->error("else does not match any if");
+ this->DecodeError("else does not match any if");
return 0;
}
Control* c = &control_.back();
if (!VALIDATE(c->is_if())) {
- this->error(this->pc_, "else does not match an if");
+ this->DecodeError("else does not match an if");
return 0;
}
if (!VALIDATE(c->is_onearmed_if())) {
- this->error(this->pc_, "else already present for if");
+ this->DecodeError("else already present for if");
return 0;
}
if (!TypeCheckFallThru()) return 0;
@@ -2430,18 +2504,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(End) {
if (!VALIDATE(!control_.empty())) {
- this->error("end does not match any if, try, or block");
+ this->DecodeError("end does not match any if, try, or block");
return 0;
}
Control* c = &control_.back();
if (!VALIDATE(!c->is_incomplete_try())) {
- this->error(this->pc_, "missing catch or catch-all in try");
+ this->DecodeError("missing catch or catch-all in try");
return 0;
}
if (c->is_onearmed_if()) {
if (!VALIDATE(c->end_merge.arity == c->start_merge.arity)) {
- this->error(c->pc,
- "start-arity and end-arity of one-armed if must match");
+ this->DecodeError(
+ c->pc(), "start-arity and end-arity of one-armed if must match");
return 0;
}
if (!TypeCheckOneArmedIf(c)) return 0;
@@ -2457,7 +2531,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (control_.size() == 1) {
// If at the last (implicit) control, check we are at end.
if (!VALIDATE(this->pc_ + 1 == this->end_)) {
- this->error(this->pc_ + 1, "trailing code after function end");
+ this->DecodeError(this->pc_ + 1, "trailing code after function end");
return 0;
}
// The result of the block is the return value.
@@ -2477,7 +2551,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value tval = Pop(0, fval.type);
ValueType type = tval.type == kWasmBottom ? fval.type : tval.type;
if (!VALIDATE(!type.is_reference_type())) {
- this->error("select without type is only valid for value type inputs");
+ this->DecodeError(
+ "select without type is only valid for value type inputs");
return 0;
}
Value* result = Push(type);
@@ -2654,16 +2729,18 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case ValueType::kOptRef:
CALL_INTERFACE_IF_REACHABLE(UnOp, kExprRefIsNull, value, result);
return 1;
+ case ValueType::kBottom:
+ // We are in unreachable code, the return value does not matter.
case ValueType::kRef:
// For non-nullable references, the result is always false.
CALL_INTERFACE_IF_REACHABLE(I32Const, result, 0);
return 1;
default:
if (validate) {
- this->errorf(this->pc_,
- "invalid argument type to ref.is_null. Expected "
- "reference type, got %s",
- value.type.name().c_str());
+ this->DecodeError(
+ "invalid argument type to ref.is_null. Expected reference type, "
+ "got %s",
+ value.type.name().c_str());
return 0;
}
UNREACHABLE();
@@ -2686,6 +2763,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
Value value = Pop(0);
switch (value.type.kind()) {
+ case ValueType::kBottom:
+ // We are in unreachable code. Forward the bottom value.
case ValueType::kRef: {
Value* result = Push(value.type);
CALL_INTERFACE_IF_REACHABLE(PassThrough, value, result);
@@ -2699,10 +2778,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
default:
if (validate) {
- this->errorf(this->pc_,
- "invalid agrument type to ref.as_non_null: Expected "
- "reference type, got %s",
- value.type.name().c_str());
+ this->DecodeError(
+            "invalid argument type to ref.as_non_null: Expected reference "
+ "type, got %s",
+ value.type.name().c_str());
}
return 0;
}
@@ -2751,8 +2830,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
GlobalIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
if (!VALIDATE(imm.global->mutability)) {
- this->errorf(this->pc_, "immutable global #%u cannot be assigned",
- imm.index);
+ this->DecodeError("immutable global #%u cannot be assigned", imm.index);
return 0;
}
Value value = Pop(0, imm.type);
@@ -2818,7 +2896,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!CheckHasMemory()) return 0;
MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!VALIDATE(this->module_->origin == kWasmOrigin)) {
- this->error("grow_memory is not supported for asmjs modules");
+ this->DecodeError("grow_memory is not supported for asmjs modules");
return 0;
}
Value value = Pop(0, kWasmI32);
@@ -2860,9 +2938,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CallFunctionImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
if (!VALIDATE(this->CanReturnCall(imm.sig))) {
- this->errorf(this->pc_, "%s: %s",
- WasmOpcodes::OpcodeName(kExprReturnCall),
- "tail call return types mismatch");
+ this->DecodeError("%s: %s", WasmOpcodes::OpcodeName(kExprReturnCall),
+ "tail call return types mismatch");
return 0;
}
ArgVector args = PopArgs(imm.sig);
@@ -2876,9 +2953,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CallIndirectImmediate<validate> imm(this->enabled_, this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) return 0;
if (!VALIDATE(this->CanReturnCall(imm.sig))) {
- this->errorf(this->pc_, "%s: %s",
- WasmOpcodes::OpcodeName(kExprReturnCallIndirect),
- "tail call return types mismatch");
+ this->DecodeError("%s: %s",
+ WasmOpcodes::OpcodeName(kExprReturnCallIndirect),
+ "tail call return types mismatch");
return 0;
}
Value index = Pop(0, kWasmI32);
@@ -2892,12 +2969,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(typed_funcref);
Value func_ref = Pop(0);
ValueType func_type = func_ref.type;
- if (!func_type.is_object_reference_type() || !func_type.has_index() ||
- !this->module_->has_signature(func_type.ref_index())) {
- this->errorf(this->pc_,
- "call_ref: Expected function reference on top of stack, "
- "found %s of type %s instead",
- SafeOpcodeNameAt(func_ref.pc), func_type.name().c_str());
+ if (func_type == kWasmBottom) {
+ // We are in unreachable code, maintain the polymorphic stack.
+ return 1;
+ }
+ if (!VALIDATE(func_type.is_object_reference_type() &&
+ func_type.has_index() &&
+ this->module_->has_signature(func_type.ref_index()))) {
+ this->DecodeError(
+ "call_ref: Expected function reference on top of stack, found %s of "
+ "type %s instead",
+ SafeOpcodeNameAt(func_ref.pc()), func_type.name().c_str());
return 0;
}
const FunctionSig* sig = this->module_->signature(func_type.ref_index());
@@ -2913,12 +2995,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(return_call);
Value func_ref = Pop(0);
ValueType func_type = func_ref.type;
- if (!func_type.is_object_reference_type() || !func_type.has_index() ||
- !this->module_->has_signature(func_type.ref_index())) {
- this->errorf(this->pc_,
- "return_call_ref: Expected function reference on top of "
- "found %s of type %s instead",
- SafeOpcodeNameAt(func_ref.pc), func_type.name().c_str());
+ if (func_type == kWasmBottom) {
+ // We are in unreachable code, maintain the polymorphic stack.
+ return 1;
+ }
+ if (!VALIDATE(func_type.is_object_reference_type() &&
+ func_type.has_index() &&
+ this->module_->has_signature(func_type.ref_index()))) {
+ this->DecodeError(
+ "return_call_ref: Expected function reference on top of stack, found "
+ "%s of type %s instead",
+ SafeOpcodeNameAt(func_ref.pc()), func_type.name().c_str());
return 0;
}
const FunctionSig* sig = this->module_->signature(func_type.ref_index());
@@ -2930,10 +3017,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
DECODE(Numeric) {
- byte numeric_index =
- this->template read_u8<validate>(this->pc_ + 1, "numeric index");
- WasmOpcode full_opcode =
- static_cast<WasmOpcode>(kNumericPrefix << 8 | numeric_index);
+ uint32_t opcode_length = 0;
+ WasmOpcode full_opcode = this->template read_prefixed_opcode<validate>(
+ this->pc_, &opcode_length, "numeric index");
if (full_opcode == kExprTableGrow || full_opcode == kExprTableSize ||
full_opcode == kExprTableFill) {
CHECK_PROTOTYPE_OPCODE(reftypes);
@@ -2941,7 +3027,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(bulk_memory);
}
trace_msg->AppendOpcode(full_opcode);
- return DecodeNumericOpcode(full_opcode);
+ return DecodeNumericOpcode(full_opcode, opcode_length);
}
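
The numeric, atomic, and GC prefixes above now join SIMD in reading their sub-opcode through read_prefixed_opcode, which decodes it as a LEB128-encoded u32 and reports the total opcode length instead of assuming one fixed index byte. A hedged sketch of the decoding shape (the real function also bounds-checks and caps the length):

#include <cstdint>

// Decode the LEB128 u32 sub-opcode that follows a one-byte prefix;
// *length receives the full opcode size (prefix byte + index bytes).
uint32_t ReadPrefixedIndex(const uint8_t* pc, uint32_t* length) {
  uint32_t index = 0, shift = 0, consumed = 1;  // 1 for the prefix byte
  uint8_t b;
  do {
    b = pc[consumed++];
    index |= uint32_t{b & 0x7F} << shift;
    shift += 7;
  } while (b & 0x80);
  *length = consumed;
  return index;
}

// 0xFD 0x0C and 0xFD 0x8C 0x00 both denote SIMD sub-opcode 0x0C, so the
// decoder must track the actual encoded length rather than hard-coding 2.
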
DECODE(Simd) {
@@ -2951,25 +3037,25 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->pc_, &opcode_length);
if (!VALIDATE(this->ok())) return 0;
trace_msg->AppendOpcode(full_opcode);
- return DecodeSimdOpcode(full_opcode, 1 + opcode_length);
+ return DecodeSimdOpcode(full_opcode, opcode_length);
}
DECODE(Atomic) {
CHECK_PROTOTYPE_OPCODE(threads);
- byte atomic_index =
- this->template read_u8<validate>(this->pc_ + 1, "atomic index");
- WasmOpcode full_opcode =
- static_cast<WasmOpcode>(kAtomicPrefix << 8 | atomic_index);
+ uint32_t opcode_length = 0;
+ WasmOpcode full_opcode = this->template read_prefixed_opcode<validate>(
+ this->pc_, &opcode_length, "atomic index");
trace_msg->AppendOpcode(full_opcode);
- return DecodeAtomicOpcode(full_opcode);
+ return DecodeAtomicOpcode(full_opcode, opcode_length);
}
DECODE(GC) {
CHECK_PROTOTYPE_OPCODE(gc);
- byte gc_index = this->template read_u8<validate>(this->pc_ + 1, "gc index");
- WasmOpcode full_opcode = static_cast<WasmOpcode>(kGCPrefix << 8 | gc_index);
+ uint32_t opcode_length = 0;
+ WasmOpcode full_opcode = this->template read_prefixed_opcode<validate>(
+ this->pc_, &opcode_length, "gc index");
trace_msg->AppendOpcode(full_opcode);
- return DecodeGCOpcode(full_opcode);
+ return DecodeGCOpcode(full_opcode, opcode_length);
}
#define SIMPLE_PROTOTYPE_CASE(name, opc, sig) \
@@ -2980,7 +3066,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(UnknownOrAsmJs) {
// Deal with special asmjs opcodes.
if (!VALIDATE(is_asmjs_module(this->module_))) {
- this->errorf(this->pc(), "Invalid opcode 0x%x", opcode);
+ this->DecodeError("Invalid opcode 0x%x", opcode);
return 0;
}
const FunctionSig* sig = WasmOpcodes::AsmjsSignature(opcode);
@@ -3108,7 +3194,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
if (!VALIDATE(this->pc_ == this->end_)) {
- this->error("Beyond end of code");
+ this->DecodeError("Beyond end of code");
}
}
@@ -3207,7 +3293,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!CheckHasMemory()) return 0;
MemoryAccessImmediate<validate> imm(this, this->pc_ + prefix_len,
type.size_log_2());
- Value index = Pop(0, kWasmI32);
+ ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+ Value index = Pop(0, index_type);
Value* result = Push(type.value_type());
CALL_INTERFACE_IF_REACHABLE(LoadMem, type, imm, index, result);
return prefix_len + imm.length;
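
With memory64 (module_->is_memory64), the address operand of loads and stores is an i64 rather than an i32, so the Pop above selects the index type from the module. A trivial sketch of the selection (illustrative enum):

enum class WasmIndexType { kI32, kI64 };

// memory64 addresses are 64-bit; MVP memories use 32-bit addresses.
inline WasmIndexType MemoryIndexType(bool is_memory64) {
  return is_memory64 ? WasmIndexType::kI64 : WasmIndexType::kI32;
}
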
@@ -3221,27 +3308,58 @@ class WasmFullDecoder : public WasmDecoder<validate> {
transform == LoadTransformationKind::kExtend ? 3 : type.size_log_2();
MemoryAccessImmediate<validate> imm(this, this->pc_ + opcode_length,
max_alignment);
- Value index = Pop(0, kWasmI32);
+ ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+ Value index = Pop(0, index_type);
Value* result = Push(kWasmS128);
CALL_INTERFACE_IF_REACHABLE(LoadTransform, type, transform, imm, index,
result);
return opcode_length + imm.length;
}
+ int DecodeLoadLane(LoadType type, uint32_t opcode_length) {
+ if (!CheckHasMemory()) return 0;
+ MemoryAccessImmediate<validate> mem_imm(this, this->pc_ + opcode_length,
+ type.size_log_2());
+ SimdLaneImmediate<validate> lane_imm(
+ this, this->pc_ + opcode_length + mem_imm.length);
+ Value v128 = Pop(1, kWasmS128);
+ Value index = Pop(0, kWasmI32);
+
+ Value* result = Push(kWasmS128);
+ CALL_INTERFACE_IF_REACHABLE(LoadLane, type, v128, index, mem_imm,
+ lane_imm.lane, result);
+ return opcode_length + mem_imm.length + lane_imm.length;
+ }
+
+ int DecodeStoreLane(StoreType type, uint32_t opcode_length) {
+ if (!CheckHasMemory()) return 0;
+ MemoryAccessImmediate<validate> mem_imm(this, this->pc_ + opcode_length,
+ type.size_log_2());
+ SimdLaneImmediate<validate> lane_imm(
+ this, this->pc_ + opcode_length + mem_imm.length);
+ Value v128 = Pop(1, kWasmS128);
+ Value index = Pop(0, kWasmI32);
+
+ CALL_INTERFACE_IF_REACHABLE(StoreLane, type, mem_imm, index, v128,
+ lane_imm.lane);
+ return opcode_length + mem_imm.length + lane_imm.length;
+ }
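
DecodeLoadLane and DecodeStoreLane above pop a v128 and an address and read two immediates in sequence: a memory-access immediate followed by a one-byte lane index. A scalar model of v128.load16_lane semantics, under the usual little-endian lane layout (names illustrative):

#include <cstdint>
#include <cstring>

// Replace lane `lane` of an 8x16-bit vector with a 16-bit value loaded
// from memory; all other lanes are left unchanged.
void Load16LaneScalar(uint8_t vec[16], const uint8_t* mem, int lane) {
  uint16_t scalar;
  std::memcpy(&scalar, mem, sizeof(scalar));             // unaligned-safe load
  std::memcpy(vec + 2 * lane, &scalar, sizeof(scalar));  // overwrite one lane
}
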
+
int DecodeStoreMem(StoreType store, int prefix_len = 1) {
if (!CheckHasMemory()) return 0;
MemoryAccessImmediate<validate> imm(this, this->pc_ + prefix_len,
store.size_log_2());
Value value = Pop(1, store.value_type());
- Value index = Pop(0, kWasmI32);
+ ValueType index_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32;
+ Value index = Pop(0, index_type);
CALL_INTERFACE_IF_REACHABLE(StoreMem, store, imm, index, value);
return prefix_len + imm.length;
}
bool ValidateBrTableTarget(uint32_t target, const byte* pos, int index) {
if (!VALIDATE(target < this->control_.size())) {
- this->errorf(pos, "improper branch in br_table target %u (depth %u)",
- index, target);
+ this->DecodeError(pos, "improper branch in br_table target %u (depth %u)",
+ index, target);
return false;
}
return true;
@@ -3263,10 +3381,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int br_arity = merge->arity;
// First we check if the arities match.
if (!VALIDATE(br_arity == static_cast<int>(result_types->size()))) {
- this->errorf(pos,
- "inconsistent arity in br_table target %u (previous was "
- "%zu, this one is %u)",
- index, result_types->size(), br_arity);
+ this->DecodeError(pos,
+ "inconsistent arity in br_table target %u (previous "
+ "was %zu, this one is %u)",
+ index, result_types->size(), br_arity);
return false;
}
@@ -3277,21 +3395,21 @@ class WasmFullDecoder : public WasmDecoder<validate> {
(*result_types)[i] =
CommonSubtype((*result_types)[i], (*merge)[i].type, this->module_);
if (!VALIDATE((*result_types)[i] != kWasmBottom)) {
- this->errorf(pos,
- "inconsistent type in br_table target %u (previous "
- "was %s, this one is %s)",
- index, type.name().c_str(),
- (*merge)[i].type.name().c_str());
+ this->DecodeError(pos,
+ "inconsistent type in br_table target %u (previous "
+ "was %s, this one is %s)",
+ index, type.name().c_str(),
+ (*merge)[i].type.name().c_str());
return false;
}
} else {
        // All targets must have the same signature.
if (!VALIDATE((*result_types)[i] == (*merge)[i].type)) {
- this->errorf(pos,
- "inconsistent type in br_table target %u (previous "
- "was %s, this one is %s)",
- index, (*result_types)[i].name().c_str(),
- (*merge)[i].type.name().c_str());
+ this->DecodeError(pos,
+ "inconsistent type in br_table target %u (previous "
+ "was %s, this one is %s)",
+ index, (*result_types)[i].name().c_str(),
+ (*merge)[i].type.name().c_str());
return false;
}
}
@@ -3306,10 +3424,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
static_cast<int>(stack_size()) - control_.back().stack_depth;
// There have to be enough values on the stack.
if (!VALIDATE(available >= br_arity)) {
- this->errorf(this->pc_,
- "expected %u elements on the stack for branch to "
- "@%d, found %u",
- br_arity, startrel(control_.back().pc), available);
+ this->DecodeError(
+ "expected %u elements on the stack for branch to @%d, found %u",
+ br_arity, startrel(control_.back().pc()), available);
return false;
}
Value* stack_values = stack_end_ - br_arity;
@@ -3317,9 +3434,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (int i = 0; i < br_arity; ++i) {
Value& val = stack_values[i];
if (!VALIDATE(IsSubtypeOf(val.type, result_types[i], this->module_))) {
- this->errorf(this->pc_,
- "type error in merge[%u] (expected %s, got %s)", i,
- result_types[i].name().c_str(), val.type.name().c_str());
+ this->DecodeError("type error in merge[%u] (expected %s, got %s)", i,
+ result_types[i].name().c_str(),
+ val.type.name().c_str());
return false;
}
}
@@ -3408,17 +3525,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return DecodeLoadMem(LoadType::kS128Load, opcode_length);
case kExprS128StoreMem:
return DecodeStoreMem(StoreType::kS128Store, opcode_length);
- case kExprS128LoadMem32Zero:
- if (!CheckSimdPostMvp(opcode)) {
- return 0;
- }
+ case kExprS128Load32Zero:
return DecodeLoadTransformMem(LoadType::kI32Load,
LoadTransformationKind::kZeroExtend,
opcode_length);
- case kExprS128LoadMem64Zero:
- if (!CheckSimdPostMvp(opcode)) {
- return 0;
- }
+ case kExprS128Load64Zero:
return DecodeLoadTransformMem(LoadType::kI64Load,
LoadTransformationKind::kZeroExtend,
opcode_length);
@@ -3460,6 +3571,30 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return DecodeLoadTransformMem(LoadType::kI64Load32U,
LoadTransformationKind::kExtend,
opcode_length);
+ case kExprS128Load8Lane: {
+ return DecodeLoadLane(LoadType::kI32Load8S, opcode_length);
+ }
+ case kExprS128Load16Lane: {
+ return DecodeLoadLane(LoadType::kI32Load16S, opcode_length);
+ }
+ case kExprS128Load32Lane: {
+ return DecodeLoadLane(LoadType::kI32Load, opcode_length);
+ }
+ case kExprS128Load64Lane: {
+ return DecodeLoadLane(LoadType::kI64Load, opcode_length);
+ }
+ case kExprS128Store8Lane: {
+ return DecodeStoreLane(StoreType::kI32Store8, opcode_length);
+ }
+ case kExprS128Store16Lane: {
+ return DecodeStoreLane(StoreType::kI32Store16, opcode_length);
+ }
+ case kExprS128Store32Lane: {
+ return DecodeStoreLane(StoreType::kI32Store, opcode_length);
+ }
+ case kExprS128Store64Lane: {
+ return DecodeStoreLane(StoreType::kI64Store, opcode_length);
+ }
case kExprS128Const:
return SimdConstOp(opcode_length);
default: {
@@ -3468,7 +3603,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!VALIDATE(sig != nullptr)) {
- this->error("invalid simd opcode");
+ this->DecodeError("invalid simd opcode");
return 0;
}
ArgVector args = PopArgs(sig);
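
The new lane cases route to DecodeLoadLane and DecodeStoreLane, which, per the SIMD proposal, consume a memarg immediate (alignment and offset, both LEB128-encoded) followed by a single lane-index byte. A sketch of that immediate layout; ReadLeb here is a simplified hypothetical helper, not V8's decoder:

    #include <cstdint>
    #include <cstdio>

    // Minimal unsigned LEB128 reader; returns the value and its byte length.
    uint32_t ReadLeb(const uint8_t* pc, unsigned* length) {
      uint32_t result = 0;
      unsigned shift = 0, i = 0;
      uint8_t b;
      do {
        b = pc[i++];
        result |= static_cast<uint32_t>(b & 0x7f) << shift;
        shift += 7;
      } while (b & 0x80);
      *length = i;
      return result;
    }

    int main() {
      // v128.load32_lane immediates: align=2 (log2), offset=16, lane=3.
      const uint8_t imm[] = {0x02, 0x10, 0x03};
      unsigned len1, len2;
      uint32_t align = ReadLeb(imm, &len1);
      uint32_t offset = ReadLeb(imm + len1, &len2);
      uint8_t lane = imm[len1 + len2];
      std::printf("align=%u offset=%u lane=%u\n", align, offset, lane);
    }
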
@@ -3480,98 +3615,98 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
- int DecodeGCOpcode(WasmOpcode opcode) {
+ int DecodeGCOpcode(WasmOpcode opcode, uint32_t opcode_length) {
switch (opcode) {
case kExprStructNewWithRtt: {
- StructIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ StructIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value rtt = Pop(imm.struct_type->field_count());
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt)) {
- this->errorf(this->pc_,
- "struct.new_with_rtt expected rtt, found %s of type %s",
- SafeOpcodeNameAt(rtt.pc), rtt.type.name().c_str());
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ this->DecodeError(
+ "struct.new_with_rtt expected rtt, found %s of type %s",
+ SafeOpcodeNameAt(rtt.pc()), rtt.type.name().c_str());
return 0;
}
// TODO(7748): Drop this check if {imm} is dropped from the proposal
// à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.heap_representation() == imm.index)) {
- this->errorf(this->pc_,
- "struct.new_with_rtt expected rtt for type %d, found "
- "rtt for type %s",
- imm.index, rtt.type.heap_type().name().c_str());
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ rtt.type.heap_representation() == imm.index)) {
+ this->DecodeError(
+ "struct.new_with_rtt expected rtt for type %d, found rtt for "
+ "type %s",
+ imm.index, rtt.type.heap_type().name().c_str());
return 0;
}
ArgVector args = PopArgs(imm.struct_type);
Value* value = Push(ValueType::Ref(imm.index, kNonNullable));
CALL_INTERFACE_IF_REACHABLE(StructNewWithRtt, imm, rtt, args.begin(),
value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprStructNewDefault: {
- StructIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ StructIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (validate) {
for (uint32_t i = 0; i < imm.struct_type->field_count(); i++) {
ValueType ftype = imm.struct_type->field(i);
if (!VALIDATE(ftype.is_defaultable())) {
- this->errorf(this->pc_,
- "struct.new_default_with_rtt: struct type %d has "
- "non-defaultable type %s for field %d",
- imm.index, ftype.name().c_str(), i);
+ this->DecodeError(
+ "struct.new_default_with_rtt: struct type %d has "
+ "non-defaultable type %s for field %d",
+ imm.index, ftype.name().c_str(), i);
return 0;
}
}
}
Value rtt = Pop(0);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt)) {
- this->errorf(
- this->pc_,
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ this->DecodeError(
"struct.new_default_with_rtt expected rtt, found %s of type %s",
- SafeOpcodeNameAt(rtt.pc), rtt.type.name().c_str());
+ SafeOpcodeNameAt(rtt.pc()), rtt.type.name().c_str());
return 0;
}
// TODO(7748): Drop this check if {imm} is dropped from the proposal
// à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.heap_representation() == imm.index)) {
- this->errorf(
- this->pc_,
- "struct.new_default_with_rtt expected rtt for type %d, found "
- "rtt for type %s",
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ rtt.type.heap_representation() == imm.index)) {
+ this->DecodeError(
+ "struct.new_default_with_rtt expected rtt for type %d, found rtt "
+ "for type %s",
imm.index, rtt.type.heap_type().name().c_str());
return 0;
}
Value* value = Push(ValueType::Ref(imm.index, kNonNullable));
CALL_INTERFACE_IF_REACHABLE(StructNewDefault, imm, rtt, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprStructGet: {
- FieldIndexImmediate<validate> field(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, field)) return 0;
+ FieldIndexImmediate<validate> field(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, field)) return 0;
ValueType field_type =
field.struct_index.struct_type->field(field.index);
if (!VALIDATE(!field_type.is_packed())) {
- this->error(this->pc_,
- "struct.get used with a field of packed type. "
- "Use struct.get_s or struct.get_u instead.");
+ this->DecodeError(
+ "struct.get used with a field of packed type. Use struct.get_s "
+ "or struct.get_u instead.");
return 0;
}
Value struct_obj =
Pop(0, ValueType::Ref(field.struct_index.index, kNullable));
Value* value = Push(field_type);
CALL_INTERFACE_IF_REACHABLE(StructGet, struct_obj, field, true, value);
- return 2 + field.length;
+ return opcode_length + field.length;
}
case kExprStructGetU:
case kExprStructGetS: {
- FieldIndexImmediate<validate> field(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, field)) return 0;
+ FieldIndexImmediate<validate> field(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, field)) return 0;
ValueType field_type =
field.struct_index.struct_type->field(field.index);
if (!VALIDATE(field_type.is_packed())) {
- this->errorf(this->pc_,
- "%s is only valid for packed struct fields. "
- "Use struct.get instead.",
- WasmOpcodes::OpcodeName(opcode));
+ this->DecodeError(
+ "%s is only valid for packed struct fields. Use struct.get "
+ "instead.",
+ WasmOpcodes::OpcodeName(opcode));
return 0;
}
Value struct_obj =
@@ -3579,39 +3714,42 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value* value = Push(field_type.Unpacked());
CALL_INTERFACE_IF_REACHABLE(StructGet, struct_obj, field,
opcode == kExprStructGetS, value);
- return 2 + field.length;
+ return opcode_length + field.length;
}
case kExprStructSet: {
- FieldIndexImmediate<validate> field(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, field)) return 0;
+ FieldIndexImmediate<validate> field(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, field)) return 0;
const StructType* struct_type = field.struct_index.struct_type;
if (!VALIDATE(struct_type->mutability(field.index))) {
- this->error(this->pc_, "setting immutable struct field");
+ this->DecodeError("setting immutable struct field");
return 0;
}
Value field_value = Pop(1, struct_type->field(field.index).Unpacked());
Value struct_obj =
Pop(0, ValueType::Ref(field.struct_index.index, kNullable));
CALL_INTERFACE_IF_REACHABLE(StructSet, struct_obj, field, field_value);
- return 2 + field.length;
+ return opcode_length + field.length;
}
case kExprArrayNewWithRtt: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value rtt = Pop(2);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt)) {
- this->errorf(this->pc_ + 2,
- "array.new_with_rtt expected rtt, found %s of type %s",
- SafeOpcodeNameAt(rtt.pc), rtt.type.name().c_str());
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ this->DecodeError(
+ this->pc_ + opcode_length,
+ "array.new_with_rtt expected rtt, found %s of type %s",
+ SafeOpcodeNameAt(rtt.pc()), rtt.type.name().c_str());
return 0;
}
// TODO(7748): Drop this check if {imm} is dropped from the proposal
// à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.heap_representation() == imm.index)) {
- this->errorf(this->pc_ + 2,
- "array.new_with_rtt expected rtt for type %d, found "
- "rtt for type %s",
- imm.index, rtt.type.heap_type().name().c_str());
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ rtt.type.heap_representation() == imm.index)) {
+ this->DecodeError(
+ this->pc_ + opcode_length,
+ "array.new_with_rtt expected rtt for type %d, found "
+ "rtt for type %s",
+ imm.index, rtt.type.heap_type().name().c_str());
return 0;
}
Value length = Pop(1, kWasmI32);
@@ -3619,48 +3757,47 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value* value = Push(ValueType::Ref(imm.index, kNonNullable));
CALL_INTERFACE_IF_REACHABLE(ArrayNewWithRtt, imm, length, initial_value,
rtt, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprArrayNewDefault: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(imm.array_type->element_type().is_defaultable())) {
- this->errorf(this->pc_,
- "array.new_default_with_rtt: array type %d has "
- "non-defaultable element type %s",
- imm.index,
- imm.array_type->element_type().name().c_str());
+ this->DecodeError(
+ "array.new_default_with_rtt: array type %d has "
+ "non-defaultable element type %s",
+ imm.index, imm.array_type->element_type().name().c_str());
return 0;
}
Value rtt = Pop(1);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt)) {
- this->errorf(
- this->pc_ + 2,
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ this->DecodeError(
+ this->pc_ + opcode_length,
"array.new_default_with_rtt expected rtt, found %s of type %s",
- SafeOpcodeNameAt(rtt.pc), rtt.type.name().c_str());
+ SafeOpcodeNameAt(rtt.pc()), rtt.type.name().c_str());
return 0;
}
// TODO(7748): Drop this check if {imm} is dropped from the proposal
// à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(rtt.type.heap_representation() == imm.index)) {
- this->errorf(this->pc_ + 2,
- "array.new_default_with_rtt expected rtt for type %d, "
- "found rtt for type %s",
- imm.index, rtt.type.heap_type().name().c_str());
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ rtt.type.heap_representation() == imm.index)) {
+ this->DecodeError(this->pc_ + opcode_length,
+ "array.new_default_with_rtt expected rtt for type "
+ "%d, found rtt for type %s",
+ imm.index, rtt.type.heap_type().name().c_str());
return 0;
}
Value length = Pop(0, kWasmI32);
Value* value = Push(ValueType::Ref(imm.index, kNonNullable));
CALL_INTERFACE_IF_REACHABLE(ArrayNewDefault, imm, length, rtt, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprArrayGetS:
case kExprArrayGetU: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(imm.array_type->element_type().is_packed())) {
- this->errorf(
- this->pc_,
+ this->DecodeError(
"%s is only valid for packed arrays. Use array.get instead.",
WasmOpcodes::OpcodeName(opcode));
return 0;
@@ -3670,15 +3807,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value* value = Push(imm.array_type->element_type().Unpacked());
CALL_INTERFACE_IF_REACHABLE(ArrayGet, array_obj, imm, index,
opcode == kExprArrayGetS, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprArrayGet: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(!imm.array_type->element_type().is_packed())) {
- this->error(this->pc_,
- "array.get used with a field of packed type. "
- "Use array.get_s or array.get_u instead.");
+ this->DecodeError(
+ "array.get used with a field of packed type. Use array.get_s or "
+ "array.get_u instead.");
return 0;
}
Value index = Pop(1, kWasmI32);
@@ -3686,53 +3823,54 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value* value = Push(imm.array_type->element_type());
CALL_INTERFACE_IF_REACHABLE(ArrayGet, array_obj, imm, index, true,
value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprArraySet: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(imm.array_type->mutability())) {
- this->error(this->pc_, "setting element of immutable array");
+ this->DecodeError("setting element of immutable array");
return 0;
}
Value value = Pop(2, imm.array_type->element_type().Unpacked());
Value index = Pop(1, kWasmI32);
Value array_obj = Pop(0, ValueType::Ref(imm.index, kNullable));
CALL_INTERFACE_IF_REACHABLE(ArraySet, array_obj, imm, index, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprArrayLen: {
- ArrayIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value array_obj = Pop(0, ValueType::Ref(imm.index, kNullable));
Value* value = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(ArrayLen, array_obj, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprI31New: {
Value input = Pop(0, kWasmI32);
Value* value = Push(kWasmI31Ref);
CALL_INTERFACE_IF_REACHABLE(I31New, input, value);
- return 2;
+ return opcode_length;
}
case kExprI31GetS: {
Value i31 = Pop(0, kWasmI31Ref);
Value* value = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(I31GetS, i31, value);
- return 2;
+ return opcode_length;
}
case kExprI31GetU: {
Value i31 = Pop(0, kWasmI31Ref);
Value* value = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(I31GetU, i31, value);
- return 2;
+ return opcode_length;
}
case kExprRttCanon: {
- HeapTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ HeapTypeImmediate<validate> imm(this->enabled_, this,
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value* value = Push(ValueType::Rtt(imm.type, 1));
CALL_INTERFACE_IF_REACHABLE(RttCanon, imm, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprRttSub: {
// TODO(7748): The proposal currently includes additional immediates
@@ -3741,29 +3879,35 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// If these immediates don't get dropped (in the spirit of
// https://github.com/WebAssembly/function-references/pull/31 ),
// implement them here.
- HeapTypeImmediate<validate> imm(this->enabled_, this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ HeapTypeImmediate<validate> imm(this->enabled_, this,
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value parent = Pop(0);
- // TODO(7748): Consider exposing "IsSubtypeOfHeap(HeapType t1, t2)" so
- // we can avoid creating (ref heaptype) wrappers here.
- if (!VALIDATE(parent.type.kind() == ValueType::kRtt &&
- IsSubtypeOf(
- ValueType::Ref(imm.type, kNonNullable),
- ValueType::Ref(parent.type.heap_type(), kNonNullable),
- this->module_))) {
- this->error(this->pc_, "rtt.sub requires a supertype rtt on stack");
- return 0;
+ if (parent.type.is_bottom()) {
+ Push(kWasmBottom);
+ } else {
+ // TODO(7748): Consider exposing "IsSubtypeOfHeap(HeapType t1, t2)" so
+ // we can avoid creating (ref heaptype) wrappers here.
+ if (!VALIDATE(parent.type.is_rtt() &&
+ IsSubtypeOf(ValueType::Ref(imm.type, kNonNullable),
+ ValueType::Ref(parent.type.heap_type(),
+ kNonNullable),
+ this->module_))) {
+ this->DecodeError("rtt.sub requires a supertype rtt on stack");
+ return 0;
+ }
+ Value* value =
+ Push(ValueType::Rtt(imm.type, parent.type.depth() + 1));
+ CALL_INTERFACE_IF_REACHABLE(RttSub, imm, parent, value);
}
- Value* value = Push(ValueType::Rtt(imm.type, parent.type.depth() + 1));
- CALL_INTERFACE_IF_REACHABLE(RttSub, imm, parent, value);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprRefTest: {
// "Tests whether {obj}'s runtime type is a runtime subtype of {rtt}."
HeapTypeImmediate<validate> obj_type(this->enabled_, this,
- this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, obj_type)) return 0;
- int len = 2 + obj_type.length;
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, obj_type)) return 0;
+ int len = opcode_length + obj_type.length;
HeapTypeImmediate<validate> rtt_type(this->enabled_, this,
this->pc_ + len);
if (!this->Validate(this->pc_ + len, rtt_type)) return 0;
@@ -3772,16 +3916,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!VALIDATE(IsSubtypeOf(ValueType::Ref(rtt_type.type, kNonNullable),
ValueType::Ref(obj_type.type, kNonNullable),
this->module_))) {
- this->errorf(this->pc_,
- "ref.test: rtt type must be subtype of object type");
+ this->DecodeError(
+ "ref.test: rtt type must be subtype of object type");
return 0;
}
Value rtt = Pop(1);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt &&
- rtt.type.heap_type() == rtt_type.type)) {
- this->errorf(this->pc_,
- "ref.test: expected rtt for type %s but got %s",
- rtt_type.type.name().c_str(), rtt.type.name().c_str());
+ if (!VALIDATE(
+ (rtt.type.is_rtt() && rtt.type.heap_type() == rtt_type.type) ||
+ rtt.type == kWasmBottom)) {
+ this->DecodeError("ref.test: expected rtt for type %s but got %s",
+ rtt_type.type.name().c_str(),
+ rtt.type.name().c_str());
return 0;
}
Value obj = Pop(0, ValueType::Ref(obj_type.type, kNullable));
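
The rtt checks in ref.test (and the GC cases above) now accept kWasmBottom in addition to a matching rtt: bottom is the type given to values synthesized in unreachable code, and since it is a subtype of every type, rejecting it would spuriously fail valid modules. A sketch of the relaxed predicate with toy types in place of V8's ValueType:

    #include <cassert>

    struct Type {
      bool is_rtt;
      bool is_bottom;
      int heap_type;  // only meaningful when is_rtt is true
    };

    // Pass if the value is a real rtt for the expected type, or bottom
    // (i.e. it came from unreachable code).
    bool RttCheckPasses(Type t, int expected_heap_type) {
      return (t.is_rtt && t.heap_type == expected_heap_type) || t.is_bottom;
    }

    int main() {
      assert(RttCheckPasses({/*is_rtt=*/true, /*is_bottom=*/false, 7}, 7));
      assert(RttCheckPasses({false, /*is_bottom=*/true, 0}, 7));  // unreachable
      assert(!RttCheckPasses({true, false, 3}, 7));               // wrong rtt
    }
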
@@ -3791,9 +3936,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprRefCast: {
HeapTypeImmediate<validate> obj_type(this->enabled_, this,
- this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, obj_type)) return 0;
- int len = 2 + obj_type.length;
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, obj_type)) return 0;
+ int len = opcode_length + obj_type.length;
HeapTypeImmediate<validate> rtt_type(this->enabled_, this,
this->pc_ + len);
if (!this->Validate(this->pc_ + len, rtt_type)) return 0;
@@ -3801,16 +3946,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!VALIDATE(IsSubtypeOf(ValueType::Ref(rtt_type.type, kNonNullable),
ValueType::Ref(obj_type.type, kNonNullable),
this->module_))) {
- this->errorf(this->pc_,
- "ref.cast: rtt type must be subtype of object type");
+ this->DecodeError(
+ "ref.cast: rtt type must be subtype of object type");
return 0;
}
Value rtt = Pop(1);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt &&
- rtt.type.heap_type() == rtt_type.type)) {
- this->errorf(this->pc_,
- "ref.cast: expected rtt for type %s but got %s",
- rtt_type.type.name().c_str(), rtt.type.name().c_str());
+ if (!VALIDATE(
+ (rtt.type.is_rtt() && rtt.type.heap_type() == rtt_type.type) ||
+ rtt.type == kWasmBottom)) {
+ this->DecodeError("ref.cast: expected rtt for type %s but got %s",
+ rtt_type.type.name().c_str(),
+ rtt.type.name().c_str());
return 0;
}
Value obj = Pop(0, ValueType::Ref(obj_type.type, kNullable));
@@ -3819,34 +3965,40 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return len;
}
case kExprBrOnCast: {
- BranchDepthImmediate<validate> branch_depth(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, branch_depth, control_.size())) {
+ BranchDepthImmediate<validate> branch_depth(this,
+ this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, branch_depth,
+ control_.size())) {
return 0;
}
// TODO(7748): If the heap type immediates remain in the spec, read
// them here.
Value rtt = Pop(1);
- if (!VALIDATE(rtt.type.kind() == ValueType::kRtt)) {
- this->error(this->pc_, "br_on_cast[1]: expected rtt on stack");
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ this->DecodeError("br_on_cast[1]: expected rtt on stack");
return 0;
}
Value obj = Pop(0);
- if (!VALIDATE(obj.type.is_object_reference_type())) {
- this->error(this->pc_, "br_on_cast[0]: expected reference on stack");
+ if (!VALIDATE(obj.type.is_object_reference_type() ||
+ rtt.type.is_bottom())) {
+ this->DecodeError("br_on_cast[0]: expected reference on stack");
return 0;
}
// The static type of {obj} must be a supertype of {rtt}'s type.
if (!VALIDATE(
+ rtt.type.is_bottom() || obj.type.is_bottom() ||
IsSubtypeOf(ValueType::Ref(rtt.type.heap_type(), kNonNullable),
ValueType::Ref(obj.type.heap_type(), kNonNullable),
this->module_))) {
- this->error(this->pc_,
- "br_on_cast: rtt type must be a subtype of object type");
+ this->DecodeError(
+ "br_on_cast: rtt type must be a subtype of object type");
return 0;
}
Control* c = control_at(branch_depth.depth);
Value* result_on_branch =
- Push(ValueType::Ref(rtt.type.heap_type(), kNonNullable));
+ Push(rtt.type.is_bottom()
+ ? kWasmBottom
+ : ValueType::Ref(rtt.type.heap_type(), kNonNullable));
TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
if (V8_LIKELY(check_result == kReachableBranch)) {
CALL_INTERFACE(BrOnCast, obj, rtt, result_on_branch,
@@ -3858,19 +4010,19 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Pop(0); // Drop {result_on_branch}, restore original value.
Value* result_on_fallthrough = Push(obj.type);
*result_on_fallthrough = obj;
- return 2 + branch_depth.length;
+ return opcode_length + branch_depth.length;
}
default:
- this->error("invalid gc opcode");
+ this->DecodeError("invalid gc opcode");
return 0;
}
}
- uint32_t DecodeAtomicOpcode(WasmOpcode opcode) {
+ uint32_t DecodeAtomicOpcode(WasmOpcode opcode, uint32_t opcode_length) {
ValueType ret_type;
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!VALIDATE(sig != nullptr)) {
- this->error("invalid atomic opcode");
+ this->DecodeError("invalid atomic opcode");
return 0;
}
MachineType memtype;
@@ -3892,31 +4044,37 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ATOMIC_OP_LIST(CASE_ATOMIC_OP)
#undef CASE_ATOMIC_OP
case kExprAtomicFence: {
- byte zero = this->template read_u8<validate>(this->pc_ + 2, "zero");
+ byte zero =
+ this->template read_u8<validate>(this->pc_ + opcode_length, "zero");
if (!VALIDATE(zero == 0)) {
- this->error(this->pc_ + 2, "invalid atomic operand");
+ this->DecodeError(this->pc_ + opcode_length,
+ "invalid atomic operand");
return 0;
}
CALL_INTERFACE_IF_REACHABLE(AtomicFence);
- return 3;
+ return 1 + opcode_length;
}
default:
- this->error("invalid atomic opcode");
+ this->DecodeError("invalid atomic opcode");
return 0;
}
- if (!CheckHasMemoryForAtomics()) return 0;
+ if (!CheckHasMemory()) return 0;
MemoryAccessImmediate<validate> imm(
- this, this->pc_ + 2, ElementSizeLog2Of(memtype.representation()));
+ this, this->pc_ + opcode_length,
+ ElementSizeLog2Of(memtype.representation()));
+ // TODO(10949): Fix this for memory64 (index type should be kWasmI64
+ // then).
+ CHECK(!this->module_->is_memory64);
ArgVector args = PopArgs(sig);
Value* result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, VectorOf(args), imm, result);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
- unsigned DecodeNumericOpcode(WasmOpcode opcode) {
+ unsigned DecodeNumericOpcode(WasmOpcode opcode, uint32_t opcode_length) {
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (!VALIDATE(sig != nullptr)) {
- this->error("invalid numeric opcode");
+ this->DecodeError("invalid numeric opcode");
return 0;
}
switch (opcode) {
@@ -3927,88 +4085,90 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprI64SConvertSatF32:
case kExprI64UConvertSatF32:
case kExprI64SConvertSatF64:
- case kExprI64UConvertSatF64:
- return 1 + BuildSimpleOperator(opcode, sig);
+ case kExprI64UConvertSatF64: {
+ BuildSimpleOperator(opcode, sig);
+ return opcode_length;
+ }
case kExprMemoryInit: {
- MemoryInitImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ MemoryInitImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value size = Pop(2, sig->GetParam(2));
Value src = Pop(1, sig->GetParam(1));
Value dst = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(MemoryInit, imm, dst, src, size);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprDataDrop: {
- DataDropImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ DataDropImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
CALL_INTERFACE_IF_REACHABLE(DataDrop, imm);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprMemoryCopy: {
- MemoryCopyImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ MemoryCopyImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value size = Pop(2, sig->GetParam(2));
Value src = Pop(1, sig->GetParam(1));
Value dst = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(MemoryCopy, imm, dst, src, size);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprMemoryFill: {
- MemoryIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ MemoryIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value size = Pop(2, sig->GetParam(2));
Value value = Pop(1, sig->GetParam(1));
Value dst = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(MemoryFill, imm, dst, value, size);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprTableInit: {
- TableInitImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ TableInitImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ArgVector args = PopArgs(sig);
CALL_INTERFACE_IF_REACHABLE(TableInit, imm, VectorOf(args));
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprElemDrop: {
- ElemDropImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ ElemDropImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
CALL_INTERFACE_IF_REACHABLE(ElemDrop, imm);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprTableCopy: {
- TableCopyImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ TableCopyImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
ArgVector args = PopArgs(sig);
CALL_INTERFACE_IF_REACHABLE(TableCopy, imm, VectorOf(args));
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprTableGrow: {
- TableIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ TableIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value delta = Pop(1, sig->GetParam(1));
Value value = Pop(0, this->module_->tables[imm.index].type);
Value* result = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(TableGrow, imm, value, delta, result);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprTableSize: {
- TableIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ TableIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value* result = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(TableSize, imm, result);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
case kExprTableFill: {
- TableIndexImmediate<validate> imm(this, this->pc_ + 2);
- if (!this->Validate(this->pc_ + 2, imm)) return 0;
+ TableIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
+ if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
Value count = Pop(2, sig->GetParam(2));
Value value = Pop(1, this->module_->tables[imm.index].type);
Value start = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(TableFill, imm, start, value, count);
- return 2 + imm.length;
+ return opcode_length + imm.length;
}
default:
- this->error("invalid numeric opcode");
+ this->DecodeError("invalid numeric opcode");
return 0;
}
}
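
Throughout these handlers the hard-coded `2 + imm.length` return values become `opcode_length + imm.length`: prefixed opcodes (SIMD, atomics, numeric, GC) encode the sub-opcode as a LEB128 value after the one-byte prefix, so it can occupy more than one byte and the immediates no longer start at a fixed pc_ + 2. A standalone sketch of the length computation:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // 0xfd is the SIMD prefix; a sub-opcode of 300 needs two LEB128 bytes
      // (0xac 0x02), so immediates start at pc + 3, not pc + 2.
      const uint8_t code[] = {0xfd, 0xac, 0x02 /* immediates follow here */};
      uint32_t sub_opcode = 0;
      unsigned shift = 0, opcode_length = 1;  // 1 for the prefix byte
      uint8_t b;
      do {
        b = code[opcode_length++];
        sub_opcode |= static_cast<uint32_t>(b & 0x7f) << shift;
        shift += 7;
      } while (b & 0x80);
      std::printf("sub-opcode %u, immediates at pc + %u\n", sub_opcode,
                  opcode_length);  // sub-opcode 300, immediates at pc + 3
    }
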
@@ -4087,15 +4247,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// size increase. Not inlining them should not create a performance
// degradation, because their invocations are guarded by V8_LIKELY.
V8_NOINLINE void PopTypeError(int index, Value val, ValueType expected) {
- this->errorf(val.pc, "%s[%d] expected type %s, found %s of type %s",
- SafeOpcodeNameAt(this->pc_), index, expected.name().c_str(),
- SafeOpcodeNameAt(val.pc), val.type.name().c_str());
+ this->DecodeError(val.pc(), "%s[%d] expected type %s, found %s of type %s",
+ SafeOpcodeNameAt(this->pc_), index,
+ expected.name().c_str(), SafeOpcodeNameAt(val.pc()),
+ val.type.name().c_str());
}
V8_NOINLINE void NotEnoughArgumentsError(int index) {
- this->errorf(this->pc_,
- "not enough arguments on the stack for %s, expected %d more",
- SafeOpcodeNameAt(this->pc_), index + 1);
+ this->DecodeError(
+ "not enough arguments on the stack for %s, expected %d more",
+ SafeOpcodeNameAt(this->pc_), index + 1);
}
V8_INLINE Value Pop(int index, ValueType expected) {
@@ -4133,6 +4294,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int index_offset = conditional_branch ? 1 : 0;
for (int i = arity - 1; i >= 0; --i) Pop(index_offset + i, merge[i].type);
// Push values of the correct type back on the stack.
+ EnsureStackSpace(arity);
for (int i = 0; i < arity; ++i) Push(merge[i].type);
return this->ok();
}
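
The new EnsureStackSpace(arity) call reserves capacity for all of a merge's pushes up front, so each subsequent Push can be a plain store without its own growth check. A hypothetical minimal stack illustrating that pattern; this is not V8's implementation:

    #include <cstdio>
    #include <cstdlib>

    struct Stack {
      int* base = nullptr;
      int* end = nullptr;  // one past the last pushed element
      int* capacity_end = nullptr;

      // Grow once for a known number of upcoming pushes.
      void EnsureSpace(int slots) {
        if (capacity_end - end >= slots) return;
        size_t size = end - base;
        size_t new_cap = (size + slots) * 2;
        base = static_cast<int*>(std::realloc(base, new_cap * sizeof(int)));
        end = base + size;
        capacity_end = base + new_cap;
      }

      void Push(int v) { *end++ = v; }  // caller has guaranteed capacity
    };

    int main() {
      Stack s;
      int arity = 3;
      s.EnsureSpace(arity);                       // one growth check...
      for (int i = 0; i < arity; ++i) s.Push(i);  // ...then unchecked pushes
      std::printf("%d values\n", static_cast<int>(s.end - s.base));
      std::free(s.base);
    }
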
@@ -4162,8 +4324,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value& val = stack_values[i];
Value& old = (*merge)[i];
if (!VALIDATE(IsSubtypeOf(val.type, old.type, this->module_))) {
- this->errorf(this->pc_, "type error in merge[%u] (expected %s, got %s)",
- i, old.type.name().c_str(), val.type.name().c_str());
+ this->DecodeError("type error in merge[%u] (expected %s, got %s)", i,
+ old.type.name().c_str(), val.type.name().c_str());
return false;
}
}
@@ -4179,8 +4341,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value& start = c->start_merge[i];
Value& end = c->end_merge[i];
if (!VALIDATE(IsSubtypeOf(start.type, end.type, this->module_))) {
- this->errorf(this->pc_, "type error in merge[%u] (expected %s, got %s)",
- i, end.type.name().c_str(), start.type.name().c_str());
+ this->DecodeError("type error in merge[%u] (expected %s, got %s)", i,
+ end.type.name().c_str(), start.type.name().c_str());
return false;
}
}
@@ -4197,10 +4359,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t actual = stack_size() - c.stack_depth;
// Fallthrus must match the arity of the control exactly.
if (!VALIDATE(actual == expected)) {
- this->errorf(
- this->pc_,
+ this->DecodeError(
"expected %u elements on the stack for fallthru to @%d, found %u",
- expected, startrel(c.pc), actual);
+ expected, startrel(c.pc()), actual);
return false;
}
if (expected == 0) return true; // Fast path.
@@ -4216,10 +4377,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int available = static_cast<int>(stack_size()) - c.stack_depth;
    // For fallthrus, no more than the needed values may be available.
if (!VALIDATE(available <= arity)) {
- this->errorf(
- this->pc_,
+ this->DecodeError(
"expected %u elements on the stack for fallthru to @%d, found %u",
- arity, startrel(c.pc), available);
+ arity, startrel(c.pc()), available);
return false;
}
// Pop all values from the stack for type checking of existing stack
@@ -4246,10 +4406,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t actual =
static_cast<uint32_t>(stack_size()) - control_.back().stack_depth;
if (!VALIDATE(actual >= expected)) {
- this->errorf(
- this->pc_,
+ this->DecodeError(
"expected %u elements on the stack for br to @%d, found %u",
- expected, startrel(c->pc), actual);
+ expected, startrel(c->pc()), actual);
return kInvalidStack;
}
return TypeCheckMergeValues(c, c->br_merge()) ? kReachableBranch
@@ -4270,9 +4429,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int num_available =
static_cast<int>(stack_size()) - control_.back().stack_depth;
if (!VALIDATE(num_available >= num_returns)) {
- this->errorf(this->pc_,
- "expected %u elements on the stack for return, found %u",
- num_returns, num_available);
+ this->DecodeError(
+ "expected %u elements on the stack for return, found %u", num_returns,
+ num_available);
return false;
}
@@ -4283,9 +4442,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value& val = stack_values[i];
ValueType expected_type = this->sig_->GetReturn(i);
if (!VALIDATE(IsSubtypeOf(val.type, expected_type, this->module_))) {
- this->errorf(this->pc_,
- "type error in return[%u] (expected %s, got %s)", i,
- expected_type.name().c_str(), val.type.name().c_str());
+ this->DecodeError("type error in return[%u] (expected %s, got %s)", i,
+ expected_type.name().c_str(),
+ val.type.name().c_str());
return false;
}
}
@@ -4350,9 +4509,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
class EmptyInterface {
public:
- static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
- using Value = ValueBase;
- using Control = ControlBase<Value>;
+ static constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
+ using Value = ValueBase<validate>;
+ using Control = ControlBase<Value, validate>;
using FullDecoder = WasmFullDecoder<validate, EmptyInterface>;
#define DEFINE_EMPTY_CALLBACK(name, ...) \
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index a7471c3a7b..77c84bd615 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -23,8 +23,8 @@ bool DecodeLocalDecls(const WasmFeatures& enabled, BodyLocalDecls* decls,
const byte* start, const byte* end) {
WasmFeatures no_features = WasmFeatures::None();
Zone* zone = decls->type_list.get_allocator().zone();
- WasmDecoder<Decoder::kValidate> decoder(zone, nullptr, enabled, &no_features,
- nullptr, start, end, 0);
+ WasmDecoder<Decoder::kFullValidation> decoder(
+ zone, nullptr, enabled, &no_features, nullptr, start, end, 0);
uint32_t length;
if (!decoder.DecodeLocals(decoder.pc(), &length, 0)) {
decls->encoded_size = 0;
@@ -54,7 +54,7 @@ DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const WasmModule* module, WasmFeatures* detected,
const FunctionBody& body) {
Zone zone(allocator, ZONE_NAME);
- WasmFullDecoder<Decoder::kValidate, EmptyInterface> decoder(
+ WasmFullDecoder<Decoder::kFullValidation, EmptyInterface> decoder(
&zone, module, enabled, detected, body);
decoder.Decode();
return decoder.toResult(nullptr);
@@ -65,9 +65,9 @@ unsigned OpcodeLength(const byte* pc, const byte* end) {
Zone* no_zone = nullptr;
WasmModule* no_module = nullptr;
FunctionSig* no_sig = nullptr;
- WasmDecoder<Decoder::kNoValidate> decoder(no_zone, no_module, no_features,
- &no_features, no_sig, pc, end, 0);
- return WasmDecoder<Decoder::kNoValidate>::OpcodeLength(&decoder, pc);
+ WasmDecoder<Decoder::kNoValidation> decoder(no_zone, no_module, no_features,
+ &no_features, no_sig, pc, end, 0);
+ return WasmDecoder<Decoder::kNoValidation>::OpcodeLength(&decoder, pc);
}
std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
@@ -75,7 +75,7 @@ std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
const byte* pc, const byte* end) {
WasmFeatures unused_detected_features = WasmFeatures::None();
Zone* no_zone = nullptr;
- WasmDecoder<Decoder::kNoValidate> decoder(
+ WasmDecoder<Decoder::kNoValidation> decoder(
no_zone, module, WasmFeatures::All(), &unused_detected_features, sig, pc,
end);
return decoder.StackEffect(pc);
@@ -124,9 +124,9 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
std::ostream& os, std::vector<int>* line_numbers) {
Zone zone(allocator, ZONE_NAME);
WasmFeatures unused_detected_features = WasmFeatures::None();
- WasmDecoder<Decoder::kNoValidate> decoder(&zone, module, WasmFeatures::All(),
- &unused_detected_features, body.sig,
- body.start, body.end);
+ WasmDecoder<Decoder::kNoValidation> decoder(
+ &zone, module, WasmFeatures::All(), &unused_detected_features, body.sig,
+ body.start, body.end);
int line_nr = 0;
constexpr int kNoByteCode = -1;
@@ -174,7 +174,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
unsigned control_depth = 0;
for (; i.has_next(); i.next()) {
unsigned length =
- WasmDecoder<Decoder::kNoValidate>::OpcodeLength(&decoder, i.pc());
+ WasmDecoder<Decoder::kNoValidation>::OpcodeLength(&decoder, i.pc());
unsigned offset = 1;
WasmOpcode opcode = i.current();
@@ -243,8 +243,8 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
case kExprIf:
case kExprBlock:
case kExprTry: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
- i.pc() + 1);
+ BlockTypeImmediate<Decoder::kNoValidation> imm(WasmFeatures::All(), &i,
+ i.pc() + 1);
os << " @" << i.pc_offset();
if (decoder.Complete(imm)) {
for (uint32_t i = 0; i < imm.out_arity(); i++) {
@@ -259,23 +259,23 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
control_depth--;
break;
case kExprBr: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ BranchDepthImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
os << " depth=" << imm.depth;
break;
}
case kExprBrIf: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ BranchDepthImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
os << " depth=" << imm.depth;
break;
}
case kExprBrTable: {
- BranchTableImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ BranchTableImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
os << " entries=" << imm.table_count;
break;
}
case kExprCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
- i.pc() + 1);
+ CallIndirectImmediate<Decoder::kNoValidation> imm(WasmFeatures::All(),
+ &i, i.pc() + 1);
os << " sig #" << imm.sig_index;
if (decoder.Complete(imm)) {
os << ": " << *imm.sig;
@@ -283,7 +283,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
break;
}
case kExprCallFunction: {
- CallFunctionImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ CallFunctionImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
os << " function #" << imm.index;
if (decoder.Complete(imm)) {
os << ": " << *imm.sig;
@@ -304,9 +304,9 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
const byte* start, const byte* end) {
WasmFeatures no_features = WasmFeatures::None();
- WasmDecoder<Decoder::kValidate> decoder(zone, nullptr, no_features,
- &no_features, nullptr, start, end, 0);
- return WasmDecoder<Decoder::kValidate>::AnalyzeLoopAssignment(
+ WasmDecoder<Decoder::kFullValidation> decoder(
+ zone, nullptr, no_features, &no_features, nullptr, start, end, 0);
+ return WasmDecoder<Decoder::kFullValidation>::AnalyzeLoopAssignment(
&decoder, start, static_cast<uint32_t>(num_locals), zone);
}
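
The kValidate/kNoValidate flags are renamed kFullValidation/kNoValidation here and throughout. Because the flag is a template parameter, validation checks are compiled out of non-validating decoder instantiations entirely. A simplified sketch of that technique, using C++17 if constexpr where V8 uses its VALIDATE macro:

    #include <cstdio>

    enum ValidateFlag { kNoValidation, kFullValidation };

    template <ValidateFlag validate>
    struct Decoder {
      bool ok = true;

      // The check disappears at compile time when validate == kNoValidation.
      bool CheckAligned(unsigned value, unsigned alignment) {
        if constexpr (validate == kFullValidation) {
          if (value % alignment != 0) ok = false;
        }
        return ok;
      }
    };

    int main() {
      Decoder<kFullValidation> checking;
      Decoder<kNoValidation> trusting;
      checking.CheckAligned(3, 4);  // detects the misalignment
      trusting.CheckAligned(3, 4);  // compiles to a no-op check
      std::printf("checking ok=%d, trusting ok=%d\n", checking.ok,
                  trusting.ok);  // checking ok=0, trusting ok=1
    }
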
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 6bc626cb18..d3144c9e46 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -163,7 +163,7 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
WasmOpcode current() {
return static_cast<WasmOpcode>(
- read_u8<Decoder::kNoValidate>(pc_, "expected bytecode"));
+ read_u8<Decoder::kNoValidation>(pc_, "expected bytecode"));
}
void next() {
@@ -176,7 +176,7 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
bool has_next() { return pc_ < end_; }
WasmOpcode prefixed_opcode() {
- return read_prefixed_opcode<Decoder::kNoValidate>(pc_);
+ return read_prefixed_opcode<Decoder::kNoValidation>(pc_);
}
};
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index 8b41a90992..0e4135f03a 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -267,7 +267,6 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
namespace {
bool UseGenericWrapper(const FunctionSig* sig) {
-// Work only for int32 parameters and 1 or 0 return value for now.
#if V8_TARGET_ARCH_X64
if (sig->returns().size() > 1) {
return false;
@@ -295,10 +294,11 @@ bool UseGenericWrapper(const FunctionSig* sig) {
JSToWasmWrapperCompilationUnit::JSToWasmWrapperCompilationUnit(
Isolate* isolate, WasmEngine* wasm_engine, const FunctionSig* sig,
const WasmModule* module, bool is_import,
- const WasmFeatures& enabled_features)
+ const WasmFeatures& enabled_features, AllowGeneric allow_generic)
: is_import_(is_import),
sig_(sig),
- use_generic_wrapper_(UseGenericWrapper(sig) && !is_import),
+ use_generic_wrapper_(allow_generic && UseGenericWrapper(sig) &&
+ !is_import),
job_(use_generic_wrapper_ ? nullptr
: compiler::NewJSToWasmCompilationJob(
isolate, wasm_engine, sig, module,
@@ -339,7 +339,21 @@ Handle<Code> JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
// Run the compilation unit synchronously.
WasmFeatures enabled_features = WasmFeatures::FromIsolate(isolate);
JSToWasmWrapperCompilationUnit unit(isolate, isolate->wasm_engine(), sig,
- module, is_import, enabled_features);
+ module, is_import, enabled_features,
+ kAllowGeneric);
+ unit.Execute();
+ return unit.Finalize(isolate);
+}
+
+// static
+Handle<Code> JSToWasmWrapperCompilationUnit::CompileSpecificJSToWasmWrapper(
+ Isolate* isolate, const FunctionSig* sig, const WasmModule* module) {
+ // Run the compilation unit synchronously.
+ const bool is_import = false;
+ WasmFeatures enabled_features = WasmFeatures::FromIsolate(isolate);
+ JSToWasmWrapperCompilationUnit unit(isolate, isolate->wasm_engine(), sig,
+ module, is_import, enabled_features,
+ kDontAllowGeneric);
unit.Execute();
return unit.Finalize(isolate);
}
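
The new AllowGeneric flag lets callers opt out of the builtin generic JS-to-Wasm wrapper, and CompileSpecificJSToWasmWrapper passes kDontAllowGeneric to force a signature-specific compilation. A standalone sketch of the selection logic; the Sig struct and this UseGenericWrapper body are toy stand-ins (the real conditions are platform- and signature-dependent):

    #include <cstdio>

    enum AllowGeneric : bool { kAllowGeneric = true, kDontAllowGeneric = false };

    struct Sig { int returns; bool params_supported; };

    // Stand-in for UseGenericWrapper: the generic wrapper handles only a
    // restricted set of signatures at this revision.
    bool UseGenericWrapper(const Sig& sig) {
      return sig.returns <= 1 && sig.params_supported;
    }

    bool PicksGenericWrapper(const Sig& sig, bool is_import, AllowGeneric allow) {
      return allow && UseGenericWrapper(sig) && !is_import;
    }

    int main() {
      Sig sig{1, true};
      std::printf("default: generic=%d\n",
                  PicksGenericWrapper(sig, false, kAllowGeneric));      // 1
      std::printf("specific requested: generic=%d\n",
                  PicksGenericWrapper(sig, false, kDontAllowGeneric));  // 0
    }
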
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index 3d232773e3..4894076303 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -32,6 +32,8 @@ struct WasmFunction;
class WasmInstructionBuffer final {
public:
WasmInstructionBuffer() = delete;
+ WasmInstructionBuffer(const WasmInstructionBuffer&) = delete;
+ WasmInstructionBuffer& operator=(const WasmInstructionBuffer&) = delete;
~WasmInstructionBuffer();
std::unique_ptr<AssemblerBuffer> CreateView();
std::unique_ptr<uint8_t[]> ReleaseBuffer();
@@ -43,9 +45,6 @@ class WasmInstructionBuffer final {
// Override {operator delete} to avoid implicit instantiation of {operator
// delete} with {size_t} argument. The {size_t} argument would be incorrect.
void operator delete(void* ptr) { ::operator delete(ptr); }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(WasmInstructionBuffer);
};
struct WasmCompilationResult {
@@ -113,10 +112,15 @@ STATIC_ASSERT(sizeof(WasmCompilationUnit) <= 2 * kSystemPointerSize);
class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
public:
+  // A flag marking whether the compilation unit may skip compilation and
+  // return the builtin (generic) wrapper when available.
+ enum AllowGeneric : bool { kAllowGeneric = true, kDontAllowGeneric = false };
+
JSToWasmWrapperCompilationUnit(Isolate* isolate, WasmEngine* wasm_engine,
const FunctionSig* sig,
const wasm::WasmModule* module, bool is_import,
- const WasmFeatures& enabled_features);
+ const WasmFeatures& enabled_features,
+ AllowGeneric allow_generic);
~JSToWasmWrapperCompilationUnit();
void Execute();
@@ -131,6 +135,12 @@ class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
const WasmModule* module,
bool is_import);
+  // Run a compilation unit synchronously, always compiling the specific
+  // (non-generic) wrapper.
+ static Handle<Code> CompileSpecificJSToWasmWrapper(Isolate* isolate,
+ const FunctionSig* sig,
+ const WasmModule* module);
+
private:
bool is_import_;
const FunctionSig* sig_;
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index 3fc6b066bb..ea071df575 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -74,11 +74,11 @@ constexpr uint32_t kNullCatch = static_cast<uint32_t>(-1);
class WasmGraphBuildingInterface {
public:
- static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
+ static constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
using FullDecoder = WasmFullDecoder<validate, WasmGraphBuildingInterface>;
using CheckForNull = compiler::WasmGraphBuilder::CheckForNull;
- struct Value : public ValueBase {
+ struct Value : public ValueBase<validate> {
TFNode* node = nullptr;
template <typename... Args>
@@ -97,7 +97,7 @@ class WasmGraphBuildingInterface {
explicit TryInfo(SsaEnv* c) : catch_env(c) {}
};
- struct Control : public ControlBase<Value> {
+ struct Control : public ControlBase<Value, validate> {
SsaEnv* end_env = nullptr; // end environment for the construct.
SsaEnv* false_env = nullptr; // false environment (only for if).
TryInfo* try_info = nullptr; // information about try statements.
@@ -436,6 +436,13 @@ class WasmGraphBuildingInterface {
index.node, imm.offset, imm.alignment, decoder->position());
}
+ void LoadLane(FullDecoder* decoder, LoadType type, const Value& value,
+ const Value& index, const MemoryAccessImmediate<validate>& imm,
+ const uint8_t laneidx, Value* result) {
+ result->node = BUILD(LoadLane, type.mem_type(), value.node, index.node,
+ imm.offset, laneidx, decoder->position());
+ }
+
void StoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm, const Value& index,
const Value& value) {
@@ -443,6 +450,13 @@ class WasmGraphBuildingInterface {
value.node, decoder->position(), type.value_type());
}
+ void StoreLane(FullDecoder* decoder, StoreType type,
+ const MemoryAccessImmediate<validate>& imm, const Value& index,
+ const Value& value, const uint8_t laneidx) {
+ BUILD(StoreLane, type.mem_rep(), index.node, imm.offset, imm.alignment,
+ value.node, laneidx, decoder->position(), type.value_type());
+ }
+
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
result->node = BUILD(CurrentMemoryPages);
}
@@ -1071,33 +1085,20 @@ class WasmGraphBuildingInterface {
BitVector* assigned = WasmDecoder<validate>::AnalyzeLoopAssignment(
decoder, decoder->pc(), decoder->num_locals() + 1, decoder->zone());
if (decoder->failed()) return;
- if (assigned != nullptr) {
- // Only introduce phis for variables assigned in this loop.
- int instance_cache_index = decoder->num_locals();
- for (int i = decoder->num_locals() - 1; i >= 0; i--) {
- if (!assigned->Contains(i)) continue;
- TFNode* inputs[] = {ssa_env_->locals[i], control()};
- ssa_env_->locals[i] = builder_->Phi(decoder->local_type(i), 1, inputs);
- }
- // Introduce phis for instance cache pointers if necessary.
- if (assigned->Contains(instance_cache_index)) {
- builder_->PrepareInstanceCacheForLoop(&ssa_env_->instance_cache,
- control());
- }
+ DCHECK_NOT_NULL(assigned);
- SetEnv(Split(decoder->zone(), ssa_env_));
- builder_->StackCheck(decoder->position());
- return;
- }
-
- // Conservatively introduce phis for all local variables.
+ // Only introduce phis for variables assigned in this loop.
+ int instance_cache_index = decoder->num_locals();
for (int i = decoder->num_locals() - 1; i >= 0; i--) {
+ if (!assigned->Contains(i)) continue;
TFNode* inputs[] = {ssa_env_->locals[i], control()};
ssa_env_->locals[i] = builder_->Phi(decoder->local_type(i), 1, inputs);
}
-
- // Conservatively introduce phis for instance cache.
- builder_->PrepareInstanceCacheForLoop(&ssa_env_->instance_cache, control());
+ // Introduce phis for instance cache pointers if necessary.
+ if (assigned->Contains(instance_cache_index)) {
+ builder_->PrepareInstanceCacheForLoop(&ssa_env_->instance_cache,
+ control());
+ }
SetEnv(Split(decoder->zone(), ssa_env_));
builder_->StackCheck(decoder->position());
@@ -1200,7 +1201,7 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
WasmFeatures* detected, const FunctionBody& body,
compiler::NodeOriginTable* node_origins) {
Zone zone(allocator, ZONE_NAME);
- WasmFullDecoder<Decoder::kValidate, WasmGraphBuildingInterface> decoder(
+ WasmFullDecoder<Decoder::kFullValidation, WasmGraphBuildingInterface> decoder(
&zone, module, enabled, detected, body, builder);
if (node_origins) {
builder->AddBytecodePositionDecorator(node_origins, &decoder);
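
Besides the new LoadLane/StoreLane hooks, the loop header above now asserts that AnalyzeLoopAssignment always produces a result and introduces phis only for locals actually assigned inside the loop; the conservative all-locals fallback is gone. A sketch of selective phi creation with toy SSA nodes in place of TurboFan's:

    #include <bitset>
    #include <cstdio>
    #include <vector>

    struct Node { const char* op; };

    Node* MakePhi() {
      static Node phi{"Phi"};
      return &phi;
    }

    int main() {
      const int kNumLocals = 4;
      static Node def{"Def"};
      std::vector<Node*> locals(kNumLocals, &def);

      std::bitset<32> assigned;  // result of the loop-assignment analysis
      assigned.set(1);           // only local #1 is written in the loop body

      for (int i = kNumLocals - 1; i >= 0; --i) {
        if (!assigned.test(i)) continue;  // loop-invariant: keep the single def
        locals[i] = MakePhi();
      }
      for (int i = 0; i < kNumLocals; ++i) {
        std::printf("local %d: %s\n", i, locals[i]->op);  // only #1 is a Phi
      }
    }
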
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
index 075a6e2f25..0d88c4b461 100644
--- a/deps/v8/src/wasm/memory-tracing.cc
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -19,14 +19,13 @@ void TraceMemoryOperation(base::Optional<ExecutionTier> tier,
int position, uint8_t* mem_start) {
EmbeddedVector<char, 91> value;
auto mem_rep = static_cast<MachineRepresentation>(info->mem_rep);
+ Address address = reinterpret_cast<Address>(mem_start) + info->offset;
switch (mem_rep) {
-#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
- case MachineRepresentation::rep: \
- SNPrintF(value, str ":" format, \
- base::ReadLittleEndianValue<ctype1>( \
- reinterpret_cast<Address>(mem_start) + info->address), \
- base::ReadLittleEndianValue<ctype2>( \
- reinterpret_cast<Address>(mem_start) + info->address)); \
+#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
+ case MachineRepresentation::rep: \
+ SNPrintF(value, str ":" format, \
+ base::ReadLittleEndianValue<ctype1>(address), \
+ base::ReadLittleEndianValue<ctype2>(address)); \
break;
TRACE_TYPE(kWord8, " i8", "%d / %02x", uint8_t, uint8_t)
TRACE_TYPE(kWord16, "i16", "%d / %04x", uint16_t, uint16_t)
@@ -37,30 +36,22 @@ void TraceMemoryOperation(base::Optional<ExecutionTier> tier,
#undef TRACE_TYPE
case MachineRepresentation::kSimd128:
SNPrintF(value, "s128:%d %d %d %d / %08x %08x %08x %08x",
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 4),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 8),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 12),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 4),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 8),
- base::ReadLittleEndianValue<uint32_t>(
- reinterpret_cast<Address>(mem_start) + info->address + 12));
+ base::ReadLittleEndianValue<uint32_t>(address),
+ base::ReadLittleEndianValue<uint32_t>(address + 4),
+ base::ReadLittleEndianValue<uint32_t>(address + 8),
+ base::ReadLittleEndianValue<uint32_t>(address + 12),
+ base::ReadLittleEndianValue<uint32_t>(address),
+ base::ReadLittleEndianValue<uint32_t>(address + 4),
+ base::ReadLittleEndianValue<uint32_t>(address + 8),
+ base::ReadLittleEndianValue<uint32_t>(address + 12));
break;
default:
SNPrintF(value, "???");
}
const char* eng =
tier.has_value() ? ExecutionTierToString(tier.value()) : "?";
- printf("%-11s func:%6d+0x%-6x%s %08x val: %s\n", eng, func_index, position,
- info->is_store ? " store to" : "load from", info->address,
+ printf("%-11s func:%6d+0x%-6x%s %016" PRIuPTR " val: %s\n", eng, func_index,
+ position, info->is_store ? " store to" : "load from", info->offset,
value.begin());
}
diff --git a/deps/v8/src/wasm/memory-tracing.h b/deps/v8/src/wasm/memory-tracing.h
index ca1b2f38c4..f025f07ded 100644
--- a/deps/v8/src/wasm/memory-tracing.h
+++ b/deps/v8/src/wasm/memory-tracing.h
@@ -17,7 +17,7 @@ namespace wasm {
// This struct is created in generated code, hence the low-level types.
struct MemoryTracingInfo {
- uint32_t address;
+ uintptr_t offset;
uint8_t is_store; // 0 or 1
uint8_t mem_rep;
static_assert(
@@ -25,8 +25,10 @@ struct MemoryTracingInfo {
std::underlying_type<MachineRepresentation>::type>::value,
"MachineRepresentation uses uint8_t");
- MemoryTracingInfo(uint32_t addr, bool is_store, MachineRepresentation rep)
- : address(addr), is_store(is_store), mem_rep(static_cast<uint8_t>(rep)) {}
+ MemoryTracingInfo(uintptr_t offset, bool is_store, MachineRepresentation rep)
+ : offset(offset),
+ is_store(is_store),
+ mem_rep(static_cast<uint8_t>(rep)) {}
};
// Callback for tracing a memory operation for debugging.
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 967e092b5b..82f86786a7 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -79,105 +79,24 @@ enum class CompileStrategy : uint8_t {
kDefault = kEager,
};
-// Background compile jobs hold a shared pointer to this token. The token is
-// used to notify them that they should stop. As soon as they see this (after
-// finishing their current compilation unit), they will stop.
-// This allows to already remove the NativeModule without having to synchronize
-// on background compile jobs.
-class BackgroundCompileToken {
- public:
- explicit BackgroundCompileToken(
- const std::shared_ptr<NativeModule>& native_module)
- : native_module_(native_module) {}
-
- void Cancel() {
- base::SharedMutexGuard<base::kExclusive> mutex_guard(
- &compilation_scope_mutex_);
- native_module_.reset();
- }
-
- private:
- friend class BackgroundCompileScope;
-
- std::shared_ptr<NativeModule> StartScope() {
- compilation_scope_mutex_.LockShared();
- return native_module_.lock();
- }
-
- // This private method can only be called via {BackgroundCompileScope}.
- void SchedulePublishCode(NativeModule* native_module,
- std::vector<std::unique_ptr<WasmCode>> codes) {
- {
- base::MutexGuard guard(&publish_mutex_);
- if (publisher_running_) {
- // Add new code to the queue and return.
- publish_queue_.reserve(publish_queue_.size() + codes.size());
- for (auto& c : codes) publish_queue_.emplace_back(std::move(c));
- return;
- }
- publisher_running_ = true;
- }
- while (true) {
- PublishCode(native_module, VectorOf(codes));
- codes.clear();
-
- // Keep publishing new code that came in.
- base::MutexGuard guard(&publish_mutex_);
- DCHECK(publisher_running_);
- if (publish_queue_.empty()) {
- publisher_running_ = false;
- return;
- }
- codes.swap(publish_queue_);
- }
- }
-
- void PublishCode(NativeModule*, Vector<std::unique_ptr<WasmCode>>);
-
- void ExitScope() { compilation_scope_mutex_.UnlockShared(); }
-
- // {compilation_scope_mutex_} protects {native_module_}.
- base::SharedMutex compilation_scope_mutex_;
- std::weak_ptr<NativeModule> native_module_;
-
- // {publish_mutex_} protects {publish_queue_} and {publisher_running_}.
- base::Mutex publish_mutex_;
- std::vector<std::unique_ptr<WasmCode>> publish_queue_;
- bool publisher_running_ = false;
-};
-
class CompilationStateImpl;
-// Keep these scopes short, as they hold the mutex of the token, which
-// sequentializes all these scopes. The mutex is also acquired from foreground
-// tasks, which should not be blocked for a long time.
class BackgroundCompileScope {
public:
- explicit BackgroundCompileScope(
- const std::shared_ptr<BackgroundCompileToken>& token)
- : token_(token.get()), native_module_(token->StartScope()) {}
-
- ~BackgroundCompileScope() { token_->ExitScope(); }
-
- bool cancelled() const { return native_module_ == nullptr; }
+ explicit BackgroundCompileScope(std::weak_ptr<NativeModule> native_module)
+ : native_module_(native_module.lock()) {}
- NativeModule* native_module() {
- DCHECK(!cancelled());
+ NativeModule* native_module() const {
+ DCHECK(native_module_);
return native_module_.get();
}
+ inline CompilationStateImpl* compilation_state() const;
- inline CompilationStateImpl* compilation_state();
-
- // Call {SchedulePublishCode} via the {BackgroundCompileScope} to guarantee
- // that the {NativeModule} stays alive.
- void SchedulePublishCode(std::vector<std::unique_ptr<WasmCode>> codes) {
- token_->SchedulePublishCode(native_module_.get(), std::move(codes));
- }
+ bool cancelled() const;
private:
- BackgroundCompileToken* const token_;
// Keep the native module alive while in this scope.
- std::shared_ptr<NativeModule> const native_module_;
+ std::shared_ptr<NativeModule> native_module_;
};
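// Illustrative usage, mirroring the call sites later in this file: background
// jobs hold only a std::weak_ptr<NativeModule>; each unit of work locks it via
// the scope, which keeps the module alive exactly for that unit:
//
//   BackgroundCompileScope compile_scope(native_module);
//   if (compile_scope.cancelled()) return kNoMoreUnits;
//   auto* compilation_state = compile_scope.compilation_state();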
enum CompileBaselineOnly : bool {
@@ -190,33 +109,74 @@ enum CompileBaselineOnly : bool {
// runs empty.
class CompilationUnitQueues {
public:
- explicit CompilationUnitQueues(int max_tasks, int num_declared_functions)
- : queues_(max_tasks), top_tier_priority_units_queues_(max_tasks) {
- DCHECK_LT(0, max_tasks);
- for (int task_id = 0; task_id < max_tasks; ++task_id) {
- queues_[task_id].next_steal_task_id = next_task_id(task_id);
- }
+ // Public API for QueueImpl.
+ struct Queue {
+ bool ShouldPublish(int num_processed_units) const;
+ };
+
+ explicit CompilationUnitQueues(int num_declared_functions)
+ : num_declared_functions_(num_declared_functions) {
+ // Start with a single queue to which units can be added.
+ queues_.emplace_back(std::make_unique<QueueImpl>(0));
+
for (auto& atomic_counter : num_units_) {
std::atomic_init(&atomic_counter, size_t{0});
}
- treated_ = std::make_unique<std::atomic<bool>[]>(num_declared_functions);
+ top_tier_compiled_ =
+ std::make_unique<std::atomic<bool>[]>(num_declared_functions);
for (int i = 0; i < num_declared_functions; i++) {
- std::atomic_init(&treated_.get()[i], false);
+ std::atomic_init(&top_tier_compiled_.get()[i], false);
}
}
- base::Optional<WasmCompilationUnit> GetNextUnit(
- int task_id, CompileBaselineOnly baseline_only) {
- DCHECK_LE(0, task_id);
- DCHECK_GT(queues_.size(), task_id);
+ Queue* GetQueueForTask(int task_id) {
+ int required_queues = task_id + 1;
+ {
+ base::SharedMutexGuard<base::kShared> queues_guard(&queues_mutex_);
+ if (V8_LIKELY(static_cast<int>(queues_.size()) >= required_queues)) {
+ return queues_[task_id].get();
+ }
+ }
+
+ // Otherwise increase the number of queues.
+ base::SharedMutexGuard<base::kExclusive> queues_guard(&queues_mutex_);
+ int num_queues = static_cast<int>(queues_.size());
+ while (num_queues < required_queues) {
+ int steal_from = num_queues + 1;
+ queues_.emplace_back(std::make_unique<QueueImpl>(steal_from));
+ ++num_queues;
+ }
+
+ // Update the {publish_limit}s of all queues.
+
+ // We want background threads to publish regularly (to avoid contention when
+ // they are all publishing at the end). On the other hand, each publish has
+ // some overhead (part of it for synchronizing between threads), so it
+ // should not happen *too* often. Thus aim for 4-8 publishes per thread, but
+ // distribute them such that publishing is likely to happen at different
+ // times.
+ int units_per_thread = num_declared_functions_ / num_queues;
+ int min = std::max(10, units_per_thread / 8);
+ int queue_id = 0;
+ for (auto& queue : queues_) {
+ // Set a limit between {min} and {2*min}, but not smaller than {10}.
+ int limit = min + (min * queue_id / num_queues);
+ queue->publish_limit.store(limit, std::memory_order_relaxed);
+ ++queue_id;
+ }
+
+ return queues_[task_id].get();
+ }
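// Worked example with illustrative numbers: for num_declared_functions_ = 1000
// and num_queues = 4, units_per_thread = 250 and min = max(10, 250 / 8) = 31,
// so the per-queue limits become 31, 38, 46 and 54. The staggering makes the
// four workers likely to publish at different times.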
+ base::Optional<WasmCompilationUnit> GetNextUnit(
+ Queue* queue, CompileBaselineOnly baseline_only) {
// As long as any lower-tier units are outstanding we need to steal them
// before executing our own higher-tier units.
int max_tier = baseline_only ? kBaseline : kTopTier;
for (int tier = GetLowestTierWithUnits(); tier <= max_tier; ++tier) {
- if (auto unit = GetNextUnitOfTier(task_id, tier)) {
+ if (auto unit = GetNextUnitOfTier(queue, tier)) {
size_t old_units_count =
num_units_[tier].fetch_sub(1, std::memory_order_relaxed);
DCHECK_LE(1, old_units_count);
@@ -233,13 +193,18 @@ class CompilationUnitQueues {
DCHECK_LT(0, baseline_units.size() + top_tier_units.size());
// Add to the individual queues in a round-robin fashion. No special care is
// taken to balance them; they will be balanced by work stealing.
- int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
- while (!next_queue_to_add.compare_exchange_weak(
- queue_to_add, next_task_id(queue_to_add), std::memory_order_relaxed)) {
- // Retry with updated {queue_to_add}.
+ QueueImpl* queue;
+ {
+ int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
+ base::SharedMutexGuard<base::kShared> queues_guard(&queues_mutex_);
+ while (!next_queue_to_add.compare_exchange_weak(
+ queue_to_add, next_task_id(queue_to_add, queues_.size()),
+ std::memory_order_relaxed)) {
+ // Retry with updated {queue_to_add}.
+ }
+ queue = queues_[queue_to_add].get();
}
- Queue* queue = &queues_[queue_to_add];
base::MutexGuard guard(&queue->mutex);
base::Optional<base::MutexGuard> big_units_guard;
for (auto pair : {std::make_pair(int{kBaseline}, baseline_units),
@@ -265,22 +230,24 @@ class CompilationUnitQueues {
}
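// The round-robin distribution above reduces to a lock-free "advance an atomic
// index modulo N" step. A minimal standalone sketch (assumed helper, not V8
// code):
#include <atomic>

int NextRoundRobin(std::atomic<int>& next, int num_queues) {
  int cur = next.load(std::memory_order_relaxed);
  while (!next.compare_exchange_weak(
      cur, cur + 1 == num_queues ? 0 : cur + 1, std::memory_order_relaxed)) {
    // {cur} was reloaded by compare_exchange_weak; retry with the new value.
  }
  return cur;  // the queue index to add units to
}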
void AddTopTierPriorityUnit(WasmCompilationUnit unit, size_t priority) {
+ base::SharedMutexGuard<base::kShared> queues_guard(&queues_mutex_);
// Add to the individual queues in a round-robin fashion. No special care is
// taken to balance them; they will be balanced by work stealing. We use
// the same counter for this reason.
int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
while (!next_queue_to_add.compare_exchange_weak(
- queue_to_add, next_task_id(queue_to_add), std::memory_order_relaxed)) {
+ queue_to_add, next_task_id(queue_to_add, queues_.size()),
+ std::memory_order_relaxed)) {
// Retry with updated {queue_to_add}.
}
- TopTierPriorityUnitsQueue* queue =
- &top_tier_priority_units_queues_[queue_to_add];
- base::MutexGuard guard(&queue->mutex);
-
+ {
+ auto* queue = queues_[queue_to_add].get();
+ base::MutexGuard guard(&queue->mutex);
+ queue->top_tier_priority_units.emplace(priority, unit);
+ }
num_priority_units_.fetch_add(1, std::memory_order_relaxed);
num_units_[kTopTier].fetch_add(1, std::memory_order_relaxed);
- queue->units.emplace(priority, unit);
}
// Get the current total number of units in all queues. This is only a
@@ -304,15 +271,6 @@ class CompilationUnitQueues {
// order of their function body size.
static constexpr size_t kBigUnitsLimit = 4096;
- struct Queue {
- base::Mutex mutex;
-
- // Protected by {mutex}:
- std::vector<WasmCompilationUnit> units[kNumTiers];
- int next_steal_task_id;
- // End of fields protected by {mutex}.
- };
-
struct BigUnit {
BigUnit(size_t func_size, WasmCompilationUnit unit)
: func_size{func_size}, unit(unit) {}
@@ -351,28 +309,27 @@ class CompilationUnitQueues {
std::priority_queue<BigUnit> units[kNumTiers];
};
- struct TopTierPriorityUnitsQueue {
+ struct QueueImpl : public Queue {
+ explicit QueueImpl(int next_steal_task_id)
+ : next_steal_task_id(next_steal_task_id) {}
+
+ // Number of units after which the task processing this queue should publish
+ // compilation results. Updated (reduced, using relaxed ordering) when new
+ // queues are allocated. If there is only one thread running, we can delay
+ // publishing arbitrarily.
+ std::atomic<int> publish_limit{kMaxInt};
+
base::Mutex mutex;
- // Protected by {mutex}:
- std::priority_queue<TopTierPriorityUnit> units;
+ // All fields below are protected by {mutex}.
+ std::vector<WasmCompilationUnit> units[kNumTiers];
+ std::priority_queue<TopTierPriorityUnit> top_tier_priority_units;
int next_steal_task_id;
- // End of fields protected by {mutex}.
};
- std::vector<Queue> queues_;
- BigUnitsQueue big_units_queue_;
-
- std::vector<TopTierPriorityUnitsQueue> top_tier_priority_units_queues_;
-
- std::atomic<size_t> num_units_[kNumTiers];
- std::atomic<size_t> num_priority_units_{0};
- std::unique_ptr<std::atomic<bool>[]> treated_;
- std::atomic<int> next_queue_to_add{0};
-
- int next_task_id(int task_id) const {
+ int next_task_id(int task_id, size_t num_queues) const {
int next = task_id + 1;
- return next == static_cast<int>(queues_.size()) ? 0 : next;
+ return next == static_cast<int>(num_queues) ? 0 : next;
}
int GetLowestTierWithUnits() const {
@@ -382,13 +339,13 @@ class CompilationUnitQueues {
return kNumTiers;
}
- base::Optional<WasmCompilationUnit> GetNextUnitOfTier(int task_id, int tier) {
- Queue* queue = &queues_[task_id];
+ base::Optional<WasmCompilationUnit> GetNextUnitOfTier(Queue* public_queue,
+ int tier) {
+ QueueImpl* queue = static_cast<QueueImpl*>(public_queue);
- // First check whether there is a priority unit. Execute that
- // first.
+ // First check whether there is a priority unit. Execute that first.
if (tier == kTopTier) {
- if (auto unit = GetTopTierPriorityUnit(task_id)) {
+ if (auto unit = GetTopTierPriorityUnit(queue)) {
return unit;
}
}
@@ -411,12 +368,16 @@ class CompilationUnitQueues {
// Try to steal from all other queues. If this succeeds, return one of the
// stolen units.
- size_t steal_trials = queues_.size();
- for (; steal_trials > 0;
- --steal_trials, steal_task_id = next_task_id(steal_task_id)) {
- if (steal_task_id == task_id) continue;
- if (auto unit = StealUnitsAndGetFirst(task_id, steal_task_id, tier)) {
- return unit;
+ {
+ base::SharedMutexGuard<base::kShared> guard(&queues_mutex_);
+ for (size_t steal_trials = 0; steal_trials < queues_.size();
+ ++steal_trials, ++steal_task_id) {
+ if (steal_task_id >= static_cast<int>(queues_.size())) {
+ steal_task_id = 0;
+ }
+ if (auto unit = StealUnitsAndGetFirst(queue, steal_task_id, tier)) {
+ return unit;
+ }
}
}
@@ -425,7 +386,7 @@ class CompilationUnitQueues {
}
base::Optional<WasmCompilationUnit> GetBigUnitOfTier(int tier) {
- // Fast-path without locking.
+ // Fast path without locking.
if (!big_units_queue_.has_units[tier].load(std::memory_order_relaxed)) {
return {};
}
@@ -439,25 +400,22 @@ class CompilationUnitQueues {
return unit;
}
- base::Optional<WasmCompilationUnit> GetTopTierPriorityUnit(int task_id) {
- // Fast-path without locking.
+ base::Optional<WasmCompilationUnit> GetTopTierPriorityUnit(QueueImpl* queue) {
+ // Fast path without locking.
if (num_priority_units_.load(std::memory_order_relaxed) == 0) {
return {};
}
- TopTierPriorityUnitsQueue* queue =
- &top_tier_priority_units_queues_[task_id];
-
int steal_task_id;
{
base::MutexGuard mutex_guard(&queue->mutex);
- while (!queue->units.empty()) {
- auto unit = queue->units.top().unit;
- queue->units.pop();
+ while (!queue->top_tier_priority_units.empty()) {
+ auto unit = queue->top_tier_priority_units.top().unit;
+ queue->top_tier_priority_units.pop();
num_priority_units_.fetch_sub(1, std::memory_order_relaxed);
- if (!treated_[unit.func_index()].exchange(true,
- std::memory_order_relaxed)) {
+ if (!top_tier_compiled_[unit.func_index()].exchange(
+ true, std::memory_order_relaxed)) {
return unit;
}
num_units_[kTopTier].fetch_sub(1, std::memory_order_relaxed);
@@ -467,28 +425,34 @@ class CompilationUnitQueues {
// Try to steal from all other queues. If this succeeds, return one of the
// stolen units.
- size_t steal_trials = queues_.size();
- for (; steal_trials > 0;
- --steal_trials, steal_task_id = next_task_id(steal_task_id)) {
- if (steal_task_id == task_id) continue;
- if (auto unit = StealTopTierPriorityUnit(task_id, steal_task_id)) {
- return unit;
+ {
+ base::SharedMutexGuard<base::kShared> guard(&queues_mutex_);
+ for (size_t steal_trials = 0; steal_trials < queues_.size();
+ ++steal_trials, ++steal_task_id) {
+ if (steal_task_id >= static_cast<int>(queues_.size())) {
+ steal_task_id = 0;
+ }
+ if (auto unit = StealTopTierPriorityUnit(queue, steal_task_id)) {
+ return unit;
+ }
}
}
return {};
}
- // Steal units of {wanted_tier} from {steal_from_task_id} to {task_id}. Return
+ // Steal units of {wanted_tier} from {steal_from_task_id} to {queue}. Return
// first stolen unit (the rest are put into {queue}), or {nullopt} if
// {steal_from_task_id} had no units of {wanted_tier}.
+ // Hold a shared lock on {queues_mutex_} when calling this method.
base::Optional<WasmCompilationUnit> StealUnitsAndGetFirst(
- int task_id, int steal_from_task_id, int wanted_tier) {
- DCHECK_NE(task_id, steal_from_task_id);
+ QueueImpl* queue, int steal_from_task_id, int wanted_tier) {
+ auto* steal_queue = queues_[steal_from_task_id].get();
+ // Cannot steal from own queue.
+ if (steal_queue == queue) return {};
std::vector<WasmCompilationUnit> stolen;
base::Optional<WasmCompilationUnit> returned_unit;
{
- Queue* steal_queue = &queues_[steal_from_task_id];
base::MutexGuard guard(&steal_queue->mutex);
auto* steal_from_vector = &steal_queue->units[wanted_tier];
if (steal_from_vector->empty()) return {};
@@ -498,81 +462,65 @@ class CompilationUnitQueues {
stolen.assign(steal_begin + 1, steal_from_vector->end());
steal_from_vector->erase(steal_begin, steal_from_vector->end());
}
- Queue* queue = &queues_[task_id];
base::MutexGuard guard(&queue->mutex);
auto* target_queue = &queue->units[wanted_tier];
target_queue->insert(target_queue->end(), stolen.begin(), stolen.end());
- queue->next_steal_task_id = next_task_id(steal_from_task_id);
+ queue->next_steal_task_id = steal_from_task_id + 1;
return returned_unit;
}
// Steal one priority unit from {steal_from_task_id} to {queue}. Return
// stolen unit, or {nullopt} if {steal_from_task_id} had no priority units.
+ // Hold a shared lock on {queues_mutex_} when calling this method.
base::Optional<WasmCompilationUnit> StealTopTierPriorityUnit(
- int task_id, int steal_from_task_id) {
- DCHECK_NE(task_id, steal_from_task_id);
-
+ QueueImpl* queue, int steal_from_task_id) {
+ auto* steal_queue = queues_[steal_from_task_id].get();
+ // Cannot steal from own queue.
+ if (steal_queue == queue) return {};
base::Optional<WasmCompilationUnit> returned_unit;
{
- TopTierPriorityUnitsQueue* steal_queue =
- &top_tier_priority_units_queues_[steal_from_task_id];
base::MutexGuard guard(&steal_queue->mutex);
while (true) {
- if (steal_queue->units.empty()) return {};
+ if (steal_queue->top_tier_priority_units.empty()) return {};
- auto unit = steal_queue->units.top().unit;
- steal_queue->units.pop();
+ auto unit = steal_queue->top_tier_priority_units.top().unit;
+ steal_queue->top_tier_priority_units.pop();
num_priority_units_.fetch_sub(1, std::memory_order_relaxed);
- if (!treated_[unit.func_index()].exchange(true,
- std::memory_order_relaxed)) {
+ if (!top_tier_compiled_[unit.func_index()].exchange(
+ true, std::memory_order_relaxed)) {
returned_unit = unit;
break;
}
num_units_[kTopTier].fetch_sub(1, std::memory_order_relaxed);
}
}
- TopTierPriorityUnitsQueue* queue =
- &top_tier_priority_units_queues_[task_id];
base::MutexGuard guard(&queue->mutex);
- queue->next_steal_task_id = next_task_id(steal_from_task_id);
+ queue->next_steal_task_id = steal_from_task_id + 1;
return returned_unit;
}
-};
-
-// {JobHandle} is not thread safe in general (at least both the
-// {DefaultJobHandle} and chromium's {base::JobHandle} are not). Hence, protect
-// concurrent accesses via a mutex.
-class ThreadSafeJobHandle {
- public:
- explicit ThreadSafeJobHandle(std::shared_ptr<JobHandle> job_handle)
- : job_handle_(std::move(job_handle)) {}
- void NotifyConcurrencyIncrease() {
- base::MutexGuard guard(&mutex_);
- job_handle_->NotifyConcurrencyIncrease();
- }
+ // {queues_mutex_} protects {queues_}.
+ base::SharedMutex queues_mutex_;
+ std::vector<std::unique_ptr<QueueImpl>> queues_;
- void Join() {
- base::MutexGuard guard(&mutex_);
- job_handle_->Join();
- }
+ const int num_declared_functions_;
- void Cancel() {
- base::MutexGuard guard(&mutex_);
- job_handle_->Cancel();
- }
-
- bool IsRunning() const {
- base::MutexGuard guard(&mutex_);
- return job_handle_->IsRunning();
- }
+ BigUnitsQueue big_units_queue_;
- private:
- mutable base::Mutex mutex_;
- std::shared_ptr<JobHandle> job_handle_;
+ std::atomic<size_t> num_units_[kNumTiers];
+ std::atomic<size_t> num_priority_units_{0};
+ std::unique_ptr<std::atomic<bool>[]> top_tier_compiled_;
+ std::atomic<int> next_queue_to_add{0};
};
+bool CompilationUnitQueues::Queue::ShouldPublish(
+ int num_processed_units) const {
+ auto* queue = static_cast<const QueueImpl*>(this);
+ return num_processed_units >=
+ queue->publish_limit.load(std::memory_order_relaxed);
+}
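// Illustrative use (see ExecuteCompilationUnits below): workers check the
// limit after each finished unit and publish early once it is reached:
//
//   if (queue->ShouldPublish(static_cast<int>(results_to_publish.size()))) {
//     // hand the batch over to SchedulePublishCompilationResults()
//   }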
+
// The {CompilationStateImpl} keeps track of the compilation state of the
// owning NativeModule, i.e. which functions are left to be compiled.
// It contains a task manager to allow parallel and asynchronous background
@@ -586,6 +534,7 @@ class CompilationStateImpl {
// Cancel all background compilation, without waiting for compile tasks to
// finish.
void CancelCompilation();
+ bool cancelled() const;
// Initialize compilation progress. Set compilation tiers to expect for
// baseline and top tier compilation. Must be set before {AddCompilationUnits}
@@ -618,8 +567,11 @@ class CompilationStateImpl {
js_to_wasm_wrapper_units);
void AddTopTierCompilationUnit(WasmCompilationUnit);
void AddTopTierPriorityCompilationUnit(WasmCompilationUnit, size_t);
+
+ CompilationUnitQueues::Queue* GetQueueForCompileTask(int task_id);
+
base::Optional<WasmCompilationUnit> GetNextCompilationUnit(
- int task_id, CompileBaselineOnly baseline_only);
+ CompilationUnitQueues::Queue*, CompileBaselineOnly);
std::shared_ptr<JSToWasmWrapperCompilationUnit>
GetNextJSToWasmWrapperCompilationUnit();
@@ -629,13 +581,13 @@ class CompilationStateImpl {
void OnFinishedUnits(Vector<WasmCode*>);
void OnFinishedJSToWasmWrapperUnits(int num);
- int GetFreeCompileTaskId();
- int GetUnpublishedUnitsLimits(int task_id);
- void OnCompilationStopped(int task_id, const WasmFeatures& detected);
+ void OnCompilationStopped(const WasmFeatures& detected);
void PublishDetectedFeatures(Isolate*);
+ void SchedulePublishCompilationResults(
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code);
// Ensure that a compilation job is running, and increase its concurrency if
// needed.
- void ScheduleCompileJobForNewUnits(int new_units);
+ void ScheduleCompileJobForNewUnits();
size_t NumOutstandingCompilations() const;
@@ -687,8 +639,12 @@ class CompilationStateImpl {
// Hold the {callbacks_mutex_} when calling this method.
void TriggerCallbacks(base::EnumSet<CompilationEvent> additional_events = {});
+ void PublishCompilationResults(
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code);
+ void PublishCode(Vector<std::unique_ptr<WasmCode>> codes);
+
NativeModule* const native_module_;
- const std::shared_ptr<BackgroundCompileToken> background_compile_token_;
+ std::weak_ptr<NativeModule> const native_module_weak_;
const CompileMode compile_mode_;
const std::shared_ptr<Counters> async_counters_;
@@ -696,20 +652,9 @@ class CompilationStateImpl {
// using relaxed semantics.
std::atomic<bool> compile_failed_{false};
- // The atomic counter is shared with the compilation job. It's increased if
- // more units are added, and decreased when the queue drops to zero. Hence
- // it's an approximation of the current number of available units in the
- // queue, but it's not updated after popping a single unit, because that
- // would create too much contention.
- // This counter is not used for synchronization, hence relaxed memory ordering
- // can be used. The thread that increases the counter is the same that calls
- // {NotifyConcurrencyIncrease} later. The only reduction of the counter is a
- // drop to zero after a worker does not find any unit in the queue, and after
- // that drop another check is executed to ensure that any left-over units are
- // still processed.
- std::shared_ptr<std::atomic<int>> scheduled_units_approximation_ =
- std::make_shared<std::atomic<int>>(0);
- const int max_compile_concurrency_ = 0;
+ // True if compilation was cancelled and worker threads should return. This
+ // flag can be updated and read using relaxed semantics.
+ std::atomic<bool> compile_cancelled_{false};
CompilationUnitQueues compilation_unit_queues_;
@@ -729,7 +674,7 @@ class CompilationStateImpl {
//////////////////////////////////////////////////////////////////////////////
// Protected by {mutex_}:
- std::shared_ptr<ThreadSafeJobHandle> current_compile_job_;
+ std::shared_ptr<JobHandle> current_compile_job_;
// Features detected to be used in this module. Features can be detected
// as a module is being compiled.
@@ -768,6 +713,11 @@ class CompilationStateImpl {
// End of fields protected by {callbacks_mutex_}.
//////////////////////////////////////////////////////////////////////////////
+ // {publish_mutex_} protects {publish_queue_} and {publisher_running_}.
+ base::Mutex publish_mutex_;
+ std::vector<std::unique_ptr<WasmCode>> publish_queue_;
+ bool publisher_running_ = false;
+
// Encoding of fields in the {compilation_progress_} vector.
using RequiredBaselineTierField = base::BitField8<ExecutionTier, 0, 2>;
using RequiredTopTierField = base::BitField8<ExecutionTier, 2, 2>;
@@ -782,21 +732,14 @@ const CompilationStateImpl* Impl(const CompilationState* compilation_state) {
return reinterpret_cast<const CompilationStateImpl*>(compilation_state);
}
-CompilationStateImpl* BackgroundCompileScope::compilation_state() {
- return Impl(native_module()->compilation_state());
+CompilationStateImpl* BackgroundCompileScope::compilation_state() const {
+ DCHECK(native_module_);
+ return Impl(native_module_->compilation_state());
}
-void BackgroundCompileToken::PublishCode(
- NativeModule* native_module, Vector<std::unique_ptr<WasmCode>> code) {
- WasmCodeRefScope code_ref_scope;
- std::vector<WasmCode*> published_code = native_module->PublishCode(code);
- // Defer logging code in case wire bytes were not fully received yet.
- if (native_module->HasWireBytes()) {
- native_module->engine()->LogCode(VectorOf(published_code));
- }
-
- Impl(native_module->compilation_state())
- ->OnFinishedUnits(VectorOf(published_code));
+bool BackgroundCompileScope::cancelled() const {
+ return native_module_ == nullptr ||
+ Impl(native_module_->compilation_state())->cancelled();
}
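// Minimal sketch of the std::weak_ptr behaviour relied on here (standard C++
// semantics, names illustrative):
//
//   std::shared_ptr<NativeModule> strong = ...;
//   std::weak_ptr<NativeModule> weak = strong;
//   strong.reset();                  // last shared owner is gone
//   assert(weak.lock() == nullptr);  // a new scope now reports cancelled()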
void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
@@ -877,8 +820,9 @@ bool CompilationState::recompilation_finished() const {
std::unique_ptr<CompilationState> CompilationState::New(
const std::shared_ptr<NativeModule>& native_module,
std::shared_ptr<Counters> async_counters) {
- return std::unique_ptr<CompilationState>(reinterpret_cast<CompilationState*>(
- new CompilationStateImpl(native_module, std::move(async_counters))));
+ return std::unique_ptr<CompilationState>(
+ reinterpret_cast<CompilationState*>(new CompilationStateImpl(
+ std::move(native_module), std::move(async_counters))));
}
// End of PIMPL implementation of {CompilationState}.
@@ -1215,31 +1159,31 @@ void TriggerTierUp(Isolate* isolate, NativeModule* native_module,
namespace {
void RecordStats(const Code code, Counters* counters) {
- counters->wasm_generated_code_size()->Increment(code.body_size());
+ counters->wasm_generated_code_size()->Increment(code.raw_body_size());
counters->wasm_reloc_size()->Increment(code.relocation_info().length());
}
enum CompilationExecutionResult : int8_t { kNoMoreUnits, kYield };
CompilationExecutionResult ExecuteJSToWasmWrapperCompilationUnits(
- const std::shared_ptr<BackgroundCompileToken>& token,
- JobDelegate* delegate) {
+ std::weak_ptr<NativeModule> native_module, JobDelegate* delegate) {
std::shared_ptr<JSToWasmWrapperCompilationUnit> wrapper_unit = nullptr;
int num_processed_wrappers = 0;
{
- BackgroundCompileScope compile_scope(token);
+ BackgroundCompileScope compile_scope(native_module);
if (compile_scope.cancelled()) return kNoMoreUnits;
wrapper_unit = compile_scope.compilation_state()
->GetNextJSToWasmWrapperCompilationUnit();
if (!wrapper_unit) return kNoMoreUnits;
}
+ TRACE_EVENT0("v8.wasm", "wasm.JSToWasmWrapperCompilation");
while (true) {
wrapper_unit->Execute();
++num_processed_wrappers;
bool yield = delegate && delegate->ShouldYield();
- BackgroundCompileScope compile_scope(token);
+ BackgroundCompileScope compile_scope(native_module);
if (compile_scope.cancelled()) return kNoMoreUnits;
if (yield ||
!(wrapper_unit = compile_scope.compilation_state()
@@ -1251,16 +1195,35 @@ CompilationExecutionResult ExecuteJSToWasmWrapperCompilationUnits(
}
}
+namespace {
+const char* GetCompilationEventName(const WasmCompilationUnit& unit,
+ const CompilationEnv& env) {
+ ExecutionTier tier = unit.tier();
+ if (tier == ExecutionTier::kLiftoff) {
+ return "wasm.BaselineCompilation";
+ }
+ if (tier == ExecutionTier::kTurbofan) {
+ return "wasm.TopTierCompilation";
+ }
+ if (unit.func_index() <
+ static_cast<int>(env.module->num_imported_functions)) {
+ return "wasm.WasmToJSWrapperCompilation";
+ }
+ return "wasm.OtherCompilation";
+}
+} // namespace
+
// Run by the {BackgroundCompileJob} (on any thread).
CompilationExecutionResult ExecuteCompilationUnits(
- const std::shared_ptr<BackgroundCompileToken>& token, Counters* counters,
+ std::weak_ptr<NativeModule> native_module, Counters* counters,
JobDelegate* delegate, CompileBaselineOnly baseline_only) {
TRACE_EVENT0("v8.wasm", "wasm.ExecuteCompilationUnits");
// Execute JS to Wasm wrapper units first, so that they are ready to be
// finalized by the main thread when the kFinishedBaselineCompilation event is
// triggered.
- if (ExecuteJSToWasmWrapperCompilationUnits(token, delegate) == kYield) {
+ if (ExecuteJSToWasmWrapperCompilationUnits(native_module, delegate) ==
+ kYield) {
return kYield;
}
@@ -1270,108 +1233,65 @@ CompilationExecutionResult ExecuteCompilationUnits(
std::shared_ptr<WireBytesStorage> wire_bytes;
std::shared_ptr<const WasmModule> module;
WasmEngine* wasm_engine;
- // The Jobs API guarantees that {GetTaskId} is less than the number of
- // workers, and that the number of workers is less than or equal to the max
- // compile concurrency, which makes the task_id safe to use as an index into
- // the worker queues.
- int task_id = delegate ? delegate->GetTaskId() : 0;
- int unpublished_units_limit;
+ // Task 0 is any main thread (there might be several, from different
+ // isolates); worker threads start at 1 (thus the "+ 1").
+ int task_id = delegate ? (int{delegate->GetTaskId()} + 1) : 0;
+ DCHECK_LE(0, task_id);
+ CompilationUnitQueues::Queue* queue;
base::Optional<WasmCompilationUnit> unit;
WasmFeatures detected_features = WasmFeatures::None();
- auto stop = [&detected_features,
- task_id](BackgroundCompileScope& compile_scope) {
- compile_scope.compilation_state()->OnCompilationStopped(task_id,
- detected_features);
- };
-
// Preparation (synchronized): Initialize the fields above and get the first
// compilation unit.
{
- BackgroundCompileScope compile_scope(token);
+ BackgroundCompileScope compile_scope(native_module);
if (compile_scope.cancelled()) return kNoMoreUnits;
auto* compilation_state = compile_scope.compilation_state();
env.emplace(compile_scope.native_module()->CreateCompilationEnv());
wire_bytes = compilation_state->GetWireBytesStorage();
module = compile_scope.native_module()->shared_module();
wasm_engine = compile_scope.native_module()->engine();
- unpublished_units_limit =
- compilation_state->GetUnpublishedUnitsLimits(task_id);
- unit = compilation_state->GetNextCompilationUnit(task_id, baseline_only);
- if (!unit) {
- stop(compile_scope);
- return kNoMoreUnits;
- }
+ queue = compilation_state->GetQueueForCompileTask(task_id);
+ unit = compilation_state->GetNextCompilationUnit(queue, baseline_only);
+ if (!unit) return kNoMoreUnits;
}
TRACE_COMPILE("ExecuteCompilationUnits (task id %d)\n", task_id);
std::vector<WasmCompilationResult> results_to_publish;
-
- auto publish_results = [&results_to_publish](
- BackgroundCompileScope* compile_scope) {
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
- "wasm.PublishCompilationResults", "num_results",
- results_to_publish.size());
- if (results_to_publish.empty()) return;
- std::vector<std::unique_ptr<WasmCode>> unpublished_code =
- compile_scope->native_module()->AddCompiledCode(
- VectorOf(results_to_publish));
- results_to_publish.clear();
-
- // For import wrapper compilation units, add result to the cache.
- const NativeModule* native_module = compile_scope->native_module();
- int num_imported_functions = native_module->num_imported_functions();
- WasmImportWrapperCache* cache = native_module->import_wrapper_cache();
- for (const auto& code : unpublished_code) {
- int func_index = code->index();
- DCHECK_LE(0, func_index);
- DCHECK_LT(func_index, native_module->num_functions());
- if (func_index < num_imported_functions) {
- const FunctionSig* sig =
- native_module->module()->functions[func_index].sig;
- WasmImportWrapperCache::CacheKey key(
- compiler::kDefaultImportCallKind, sig,
- static_cast<int>(sig->parameter_count()));
- // If two imported functions have the same key, only one of them should
- // have been added as a compilation unit. So it is always the first time
- // we compile a wrapper for this key here.
- DCHECK_NULL((*cache)[key]);
- (*cache)[key] = code.get();
- code->IncRef();
- }
- }
-
- compile_scope->SchedulePublishCode(std::move(unpublished_code));
- };
-
- bool compilation_failed = false;
while (true) {
- // (asynchronous): Execute the compilation.
- WasmCompilationResult result = unit->ExecuteCompilation(
- wasm_engine, &env.value(), wire_bytes, counters, &detected_features);
- results_to_publish.emplace_back(std::move(result));
-
- bool yield = delegate && delegate->ShouldYield();
-
- // (synchronized): Publish the compilation result and get the next unit.
- {
- BackgroundCompileScope compile_scope(token);
+ ExecutionTier current_tier = unit->tier();
+ const char* event_name = GetCompilationEventName(unit.value(), env.value());
+ TRACE_EVENT0("v8.wasm", event_name);
+ while (unit->tier() == current_tier) {
+ // (asynchronous): Execute the compilation.
+ WasmCompilationResult result = unit->ExecuteCompilation(
+ wasm_engine, &env.value(), wire_bytes, counters, &detected_features);
+ results_to_publish.emplace_back(std::move(result));
+
+ bool yield = delegate && delegate->ShouldYield();
+
+ // (synchronized): Publish the compilation result and get the next unit.
+ BackgroundCompileScope compile_scope(native_module);
if (compile_scope.cancelled()) return kNoMoreUnits;
+
if (!results_to_publish.back().succeeded()) {
- // Compile error.
compile_scope.compilation_state()->SetError();
- stop(compile_scope);
- compilation_failed = true;
- break;
+ return kNoMoreUnits;
}
- // Get next unit.
+ // Yield or get next unit.
if (yield ||
!(unit = compile_scope.compilation_state()->GetNextCompilationUnit(
- task_id, baseline_only))) {
- publish_results(&compile_scope);
- stop(compile_scope);
+ queue, baseline_only))) {
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code =
+ compile_scope.native_module()->AddCompiledCode(
+ VectorOf(std::move(results_to_publish)));
+ results_to_publish.clear();
+ compile_scope.compilation_state()->SchedulePublishCompilationResults(
+ std::move(unpublished_code));
+ compile_scope.compilation_state()->OnCompilationStopped(
+ detected_features);
return yield ? kYield : kNoMoreUnits;
}
@@ -1382,17 +1302,17 @@ CompilationExecutionResult ExecuteCompilationUnits(
// Also publish after finishing a certain number of units, to avoid
// contention when all threads publish at the end.
if (unit->tier() == ExecutionTier::kTurbofan ||
- static_cast<int>(results_to_publish.size()) >=
- unpublished_units_limit) {
- publish_results(&compile_scope);
+ queue->ShouldPublish(static_cast<int>(results_to_publish.size()))) {
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code =
+ compile_scope.native_module()->AddCompiledCode(
+ VectorOf(std::move(results_to_publish)));
+ results_to_publish.clear();
+ compile_scope.compilation_state()->SchedulePublishCompilationResults(
+ std::move(unpublished_code));
}
}
}
- // We only get here if compilation failed. Other exits return directly.
- DCHECK(compilation_failed);
- USE(compilation_failed);
- token->Cancel();
- return kNoMoreUnits;
+ UNREACHABLE();
}
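// Shape of the restructured loop above, in outline form:
//
//   while (true):                         // one iteration per tier change
//     open a TRACE_EVENT0 scope named after the current tier
//     while (unit->tier() == current_tier):
//       compile the unit (no lock held)
//       enter BackgroundCompileScope (locks the weak_ptr)
//       on compile error: SetError() and return kNoMoreUnits
//       on yield or empty queue: publish, OnCompilationStopped, return
//       publish eagerly for Turbofan units or when ShouldPublish() fires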
using JSToWasmWrapperKey = std::pair<bool, FunctionSig>;
@@ -1410,7 +1330,8 @@ int AddExportWrapperUnits(Isolate* isolate, WasmEngine* wasm_engine,
if (keys.insert(key).second) {
auto unit = std::make_shared<JSToWasmWrapperCompilationUnit>(
isolate, wasm_engine, function.sig, native_module->module(),
- function.imported, enabled_features);
+ function.imported, enabled_features,
+ JSToWasmWrapperCompilationUnit::kAllowGeneric);
builder->AddJSToWasmWrapperUnit(std::move(unit));
}
}
@@ -1529,6 +1450,7 @@ class CompilationTimeCallback {
histogram->AddSample(static_cast<int>(duration.InMicroseconds()));
}
+ // TODO(sartang@microsoft.com): Remove wall_clock_time_in_us field
v8::metrics::WasmModuleCompiled event{
(compile_mode_ != kSynchronous), // async
(compile_mode_ == kStreaming), // streamed
@@ -1538,7 +1460,8 @@ class CompilationTimeCallback {
true, // success
native_module->liftoff_code_size(), // code_size_in_bytes
native_module->liftoff_bailout_count(), // liftoff_bailout_count
- duration.InMicroseconds() // wall_clock_time_in_us
+ duration.InMicroseconds(), // wall_clock_time_in_us
+ duration.InMicroseconds() // wall_clock_duration_in_us
};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
@@ -1549,7 +1472,8 @@ class CompilationTimeCallback {
v8::metrics::WasmModuleTieredUp event{
FLAG_wasm_lazy_compilation, // lazy
native_module->turbofan_code_size(), // code_size_in_bytes
- duration.InMicroseconds() // wall_clock_time_in_us
+ duration.InMicroseconds(), // wall_clock_time_in_us
+ duration.InMicroseconds() // wall_clock_duration_in_us
};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
@@ -1563,7 +1487,8 @@ class CompilationTimeCallback {
false, // success
native_module->liftoff_code_size(), // code_size_in_bytes
native_module->liftoff_bailout_count(), // liftoff_bailout_count
- duration.InMicroseconds() // wall_clock_time_in_us
+ duration.InMicroseconds(), // wall_clock_time_in_us
+ duration.InMicroseconds() // wall_clock_duration_in_us
};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
@@ -1646,55 +1571,33 @@ void CompileNativeModule(Isolate* isolate,
}
}
-// The runnable task that performs compilations in the background.
-class BackgroundCompileJob : public JobTask {
+class BackgroundCompileJob final : public JobTask {
public:
- explicit BackgroundCompileJob(
- std::shared_ptr<BackgroundCompileToken> token,
- std::shared_ptr<Counters> async_counters,
- std::shared_ptr<std::atomic<int>> scheduled_units_approximation,
- size_t max_concurrency)
- : token_(std::move(token)),
- async_counters_(std::move(async_counters)),
- scheduled_units_approximation_(
- std::move(scheduled_units_approximation)),
- max_concurrency_(max_concurrency) {}
+ explicit BackgroundCompileJob(std::weak_ptr<NativeModule> native_module,
+ std::shared_ptr<Counters> async_counters)
+ : native_module_(std::move(native_module)),
+ async_counters_(std::move(async_counters)) {}
void Run(JobDelegate* delegate) override {
- if (ExecuteCompilationUnits(token_, async_counters_.get(), delegate,
- kBaselineOrTopTier) == kYield) {
- return;
- }
- // Otherwise we didn't find any more units to execute. Reduce the atomic
- // counter of the approximated number of available units to zero, but then
- // check whether any more units were added in the meantime, and increase
- // back if necessary.
- scheduled_units_approximation_->store(0, std::memory_order_relaxed);
-
- BackgroundCompileScope scope(token_);
- if (scope.cancelled()) return;
- size_t outstanding_units =
- scope.compilation_state()->NumOutstandingCompilations();
- if (outstanding_units == 0) return;
- // On a race between this thread and the thread which scheduled the units,
- // this might increase concurrency more than needed, which is fine. It
- // will be reduced again when the first task finds no more work to do.
- scope.compilation_state()->ScheduleCompileJobForNewUnits(
- static_cast<int>(outstanding_units));
+ ExecuteCompilationUnits(native_module_, async_counters_.get(), delegate,
+ kBaselineOrTopTier);
}
size_t GetMaxConcurrency(size_t worker_count) const override {
- // {current_concurrency_} does not reflect the units that running workers
- // are processing, thus add the current worker count to that number.
- return std::min(max_concurrency_,
- worker_count + scheduled_units_approximation_->load());
+ BackgroundCompileScope scope(native_module_);
+ if (scope.cancelled()) return 0;
+ // NumOutstandingCompilations() does not reflect the units that running
+ // workers are processing, thus add the current worker count to that number.
+ size_t flag_limit =
+ static_cast<size_t>(std::max(1, FLAG_wasm_num_compilation_tasks));
+ return std::min(
+ flag_limit,
+ worker_count + scope.compilation_state()->NumOutstandingCompilations());
}
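// Worked example with illustrative numbers: with
// FLAG_wasm_num_compilation_tasks = 8, worker_count = 3 and two outstanding
// compilations, the job reports std::min<size_t>(8, 3 + 2) = 5. Once the
// module dies or compilation is cancelled, the scope fails to lock and the
// job reports 0, letting the workers drain.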
private:
- const std::shared_ptr<BackgroundCompileToken> token_;
+ const std::weak_ptr<NativeModule> native_module_;
const std::shared_ptr<Counters> async_counters_;
- const std::shared_ptr<std::atomic<int>> scheduled_units_approximation_;
- const size_t max_concurrency_;
};
} // namespace
@@ -1974,7 +1877,8 @@ void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
!compilation_state->failed(), // success
native_module_->liftoff_code_size(), // code_size_in_bytes
native_module_->liftoff_bailout_count(), // liftoff_bailout_count
- duration.InMicroseconds() // wall_clock_time_in_us
+ duration.InMicroseconds(), // wall_clock_time_in_us
+ duration.InMicroseconds() // wall_clock_duration_in_us
};
isolate_->metrics_recorder()->DelayMainThreadEvent(event, context_id_);
}
@@ -2489,6 +2393,7 @@ void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(
job_->metrics_event_.module_size_in_bytes = job_->wire_bytes_.length();
job_->metrics_event_.function_count = num_functions_;
job_->metrics_event_.wall_clock_time_in_us = duration.InMicroseconds();
+ job_->metrics_event_.wall_clock_duration_in_us = duration.InMicroseconds();
job_->isolate_->metrics_recorder()->DelayMainThreadEvent(job_->metrics_event_,
job_->context_id_);
@@ -2580,6 +2485,8 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
return false;
}
+ decoder_.set_code_section(offset, static_cast<uint32_t>(code_section_length));
+
prefix_hash_ = base::hash_combine(prefix_hash_,
static_cast<uint32_t>(code_section_length));
if (!wasm_engine_->GetStreamingCompilationOwnership(prefix_hash_)) {
@@ -2601,7 +2508,6 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
job_->DoImmediately<AsyncCompileJob::PrepareAndStartCompile>(
decoder_.shared_module(), false, code_size_estimate);
- decoder_.set_code_section(offset, static_cast<uint32_t>(code_section_length));
auto* compilation_state = Impl(job_->native_module_->compilation_state());
compilation_state->SetWireBytesStorage(std::move(wire_bytes_storage));
DCHECK_EQ(job_->native_module_->module()->origin, kWasmOrigin);
@@ -2710,6 +2616,7 @@ void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
job_->metrics_event_.module_size_in_bytes = job_->wire_bytes_.length();
job_->metrics_event_.function_count = num_functions_;
job_->metrics_event_.wall_clock_time_in_us = duration.InMicroseconds();
+ job_->metrics_event_.wall_clock_duration_in_us = duration.InMicroseconds();
job_->isolate_->metrics_recorder()->DelayMainThreadEvent(job_->metrics_event_,
job_->context_id_);
@@ -2804,37 +2711,31 @@ bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
return true;
}
-// TODO(wasm): Try to avoid the {NumberOfWorkerThreads} calls, grow queues
-// dynamically instead.
-int GetMaxCompileConcurrency() {
- int num_worker_threads = V8::GetCurrentPlatform()->NumberOfWorkerThreads();
- return std::min(FLAG_wasm_num_compilation_tasks, num_worker_threads);
-}
-
CompilationStateImpl::CompilationStateImpl(
const std::shared_ptr<NativeModule>& native_module,
std::shared_ptr<Counters> async_counters)
: native_module_(native_module.get()),
- background_compile_token_(
- std::make_shared<BackgroundCompileToken>(native_module)),
+ native_module_weak_(std::move(native_module)),
compile_mode_(FLAG_wasm_tier_up &&
native_module->module()->origin == kWasmOrigin
? CompileMode::kTiering
: CompileMode::kRegular),
async_counters_(std::move(async_counters)),
- max_compile_concurrency_(std::max(GetMaxCompileConcurrency(), 1)),
- // Add one to the allowed number of parallel tasks, because the foreground
- // task sometimes also contributes.
- compilation_unit_queues_(max_compile_concurrency_ + 1,
- native_module->num_functions()) {}
+ compilation_unit_queues_(native_module->num_functions()) {}
void CompilationStateImpl::CancelCompilation() {
- background_compile_token_->Cancel();
// No more callbacks after abort.
base::MutexGuard callbacks_guard(&callbacks_mutex_);
+ // std::memory_order_relaxed is sufficient because no other state is
+ // synchronized with |compile_cancelled_|.
+ compile_cancelled_.store(true, std::memory_order_relaxed);
callbacks_.clear();
}
+bool CompilationStateImpl::cancelled() const {
+ return compile_cancelled_.load(std::memory_order_relaxed);
+}
+
void CompilationStateImpl::InitializeCompilationProgress(
bool lazy_module, int num_import_wrappers, int num_export_wrappers) {
DCHECK(!failed());
@@ -2909,6 +2810,9 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization() {
RequiredBaselineTierField::encode(ExecutionTier::kTurbofan) |
RequiredTopTierField::encode(ExecutionTier::kTurbofan) |
ReachedTierField::encode(ExecutionTier::kTurbofan);
+ finished_events_.Add(CompilationEvent::kFinishedExportWrappers);
+ finished_events_.Add(CompilationEvent::kFinishedBaselineCompilation);
+ finished_events_.Add(CompilationEvent::kFinishedTopTierCompilation);
compilation_progress_.assign(module->num_declared_functions,
kProgressAfterDeserialization);
}
@@ -2956,7 +2860,9 @@ void CompilationStateImpl::InitializeRecompilation(
// start yet, and new code will be kept tiered-down from the start. For
// streaming compilation, there is a special path to tier down later, when
// the module is complete. In any case, we don't need to recompile here.
+ base::Optional<CompilationUnitBuilder> builder;
if (compilation_progress_.size() > 0) {
+ builder.emplace(native_module_);
const WasmModule* module = native_module_->module();
DCHECK_EQ(module->num_declared_functions, compilation_progress_.size());
DCHECK_GE(module->num_declared_functions,
@@ -2971,15 +2877,13 @@ void CompilationStateImpl::InitializeRecompilation(
: ExecutionTier::kTurbofan;
int imported = module->num_imported_functions;
// Generate necessary compilation units on the fly.
- CompilationUnitBuilder builder(native_module_);
for (int function_index : recompile_function_indexes) {
DCHECK_LE(imported, function_index);
int slot_index = function_index - imported;
auto& progress = compilation_progress_[slot_index];
progress = MissingRecompilationField::update(progress, true);
- builder.AddRecompilationUnit(function_index, new_tier);
+ builder->AddRecompilationUnit(function_index, new_tier);
}
- builder.Commit();
}
// Trigger callback if module needs no recompilation.
@@ -2987,6 +2891,12 @@ void CompilationStateImpl::InitializeRecompilation(
TriggerCallbacks(base::EnumSet<CompilationEvent>(
{CompilationEvent::kFinishedRecompilation}));
}
+
+ if (builder.has_value()) {
+ // Avoid holding the lock while scheduling a compile job.
+ guard.reset();
+ builder->Commit();
+ }
}
void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
@@ -3017,13 +2927,15 @@ void CompilationStateImpl::AddCompilationUnits(
compilation_unit_queues_.AddUnits(baseline_units, top_tier_units,
native_module_->module());
}
- js_to_wasm_wrapper_units_.insert(js_to_wasm_wrapper_units_.end(),
- js_to_wasm_wrapper_units.begin(),
- js_to_wasm_wrapper_units.end());
-
- size_t total_units = baseline_units.size() + top_tier_units.size() +
- js_to_wasm_wrapper_units.size();
- ScheduleCompileJobForNewUnits(static_cast<int>(total_units));
+ if (!js_to_wasm_wrapper_units.empty()) {
+ // |js_to_wasm_wrapper_units_| can only be modified before background
+ // compilation has started.
+ DCHECK(!current_compile_job_ || !current_compile_job_->IsRunning());
+ js_to_wasm_wrapper_units_.insert(js_to_wasm_wrapper_units_.end(),
+ js_to_wasm_wrapper_units.begin(),
+ js_to_wasm_wrapper_units.end());
+ }
+ ScheduleCompileJobForNewUnits();
}
void CompilationStateImpl::AddTopTierCompilationUnit(WasmCompilationUnit unit) {
@@ -3033,7 +2945,7 @@ void CompilationStateImpl::AddTopTierCompilationUnit(WasmCompilationUnit unit) {
void CompilationStateImpl::AddTopTierPriorityCompilationUnit(
WasmCompilationUnit unit, size_t priority) {
compilation_unit_queues_.AddTopTierPriorityUnit(unit, priority);
- ScheduleCompileJobForNewUnits(1);
+ ScheduleCompileJobForNewUnits();
}
std::shared_ptr<JSToWasmWrapperCompilationUnit>
@@ -3055,7 +2967,7 @@ void CompilationStateImpl::FinalizeJSToWasmWrappers(
// optimization we keep the code space unlocked to avoid repeated unlocking
// because many such wrapper are allocated in sequence below.
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
- "wasm.FinalizeJSToWasmWrappers", "num_wrappers",
+ "wasm.FinalizeJSToWasmWrappers", "wrappers",
js_to_wasm_wrapper_units_.size());
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
for (auto& unit : js_to_wasm_wrapper_units_) {
@@ -3067,15 +2979,20 @@ void CompilationStateImpl::FinalizeJSToWasmWrappers(
}
}
+CompilationUnitQueues::Queue* CompilationStateImpl::GetQueueForCompileTask(
+ int task_id) {
+ return compilation_unit_queues_.GetQueueForTask(task_id);
+}
+
base::Optional<WasmCompilationUnit>
CompilationStateImpl::GetNextCompilationUnit(
- int task_id, CompileBaselineOnly baseline_only) {
- return compilation_unit_queues_.GetNextUnit(task_id, baseline_only);
+ CompilationUnitQueues::Queue* queue, CompileBaselineOnly baseline_only) {
+ return compilation_unit_queues_.GetNextUnit(queue, baseline_only);
}
void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
- "wasm.OnFinishedUnits", "num_units", code_vector.size());
+ "wasm.OnFinishedUnits", "units", code_vector.size());
base::MutexGuard guard(&callbacks_mutex_);
@@ -3230,24 +3147,7 @@ void CompilationStateImpl::TriggerCallbacks(
}
}
-int CompilationStateImpl::GetUnpublishedUnitsLimits(int task_id) {
- // We want background threads to publish regularly (to avoid contention when
- // they are all publishing at the end). On the other side, each publishing has
- // some overhead (part of it for synchronizing between threads), so it should
- // not happen *too* often.
- // Thus aim for 4-8 publishes per thread, but distribute it such that
- // publishing is likely to happen at different times.
- int units_per_thread =
- static_cast<int>(native_module_->module()->num_declared_functions /
- max_compile_concurrency_);
- int min = units_per_thread / 8;
- // Return something between {min} and {2*min}, but not smaller than {10}.
- return std::max(10, min + (min * task_id / max_compile_concurrency_));
-}
-
-void CompilationStateImpl::OnCompilationStopped(int task_id,
- const WasmFeatures& detected) {
- DCHECK_GE(max_compile_concurrency_, task_id);
+void CompilationStateImpl::OnCompilationStopped(const WasmFeatures& detected) {
base::MutexGuard guard(&mutex_);
detected_features_.Add(detected);
}
@@ -3260,40 +3160,104 @@ void CompilationStateImpl::PublishDetectedFeatures(Isolate* isolate) {
UpdateFeatureUseCounts(isolate, detected_features_);
}
-void CompilationStateImpl::ScheduleCompileJobForNewUnits(int new_units) {
- // Increase the {scheduled_units_approximation_} counter and remember the old
- // value to check whether it increased towards {max_compile_concurrency_}.
- // In that case, we need to notify the compile job about the increased
- // concurrency.
- DCHECK_LT(0, new_units);
- int old_units = scheduled_units_approximation_->fetch_add(
- new_units, std::memory_order_relaxed);
- bool concurrency_increased = old_units < max_compile_concurrency_;
+void CompilationStateImpl::PublishCompilationResults(
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code) {
+ if (unpublished_code.empty()) return;
- base::MutexGuard guard(&mutex_);
- if (current_compile_job_ && current_compile_job_->IsRunning()) {
- if (concurrency_increased) {
- current_compile_job_->NotifyConcurrencyIncrease();
+ // For import wrapper compilation units, add result to the cache.
+ int num_imported_functions = native_module_->num_imported_functions();
+ WasmImportWrapperCache* cache = native_module_->import_wrapper_cache();
+ for (const auto& code : unpublished_code) {
+ int func_index = code->index();
+ DCHECK_LE(0, func_index);
+ DCHECK_LT(func_index, native_module_->num_functions());
+ if (func_index < num_imported_functions) {
+ const FunctionSig* sig =
+ native_module_->module()->functions[func_index].sig;
+ WasmImportWrapperCache::CacheKey key(
+ compiler::kDefaultImportCallKind, sig,
+ static_cast<int>(sig->parameter_count()));
+ // If two imported functions have the same key, only one of them should
+ // have been added as a compilation unit. So it is always the first time
+ // we compile a wrapper for this key here.
+ DCHECK_NULL((*cache)[key]);
+ (*cache)[key] = code.get();
+ code->IncRef();
}
- return;
}
+ PublishCode(VectorOf(unpublished_code));
+}
+
+void CompilationStateImpl::PublishCode(Vector<std::unique_ptr<WasmCode>> code) {
+ WasmCodeRefScope code_ref_scope;
+ std::vector<WasmCode*> published_code =
+ native_module_->PublishCode(std::move(code));
+ // Defer logging code in case wire bytes were not fully received yet.
+ if (native_module_->HasWireBytes()) {
+ native_module_->engine()->LogCode(VectorOf(published_code));
+ }
+
+ OnFinishedUnits(VectorOf(std::move(published_code)));
+}
+
+void CompilationStateImpl::SchedulePublishCompilationResults(
+ std::vector<std::unique_ptr<WasmCode>> unpublished_code) {
+ {
+ base::MutexGuard guard(&publish_mutex_);
+ if (publisher_running_) {
+ // Add new code to the queue and return.
+ publish_queue_.reserve(publish_queue_.size() + unpublished_code.size());
+ for (auto& c : unpublished_code) {
+ publish_queue_.emplace_back(std::move(c));
+ }
+ return;
+ }
+ publisher_running_ = true;
+ }
+ while (true) {
+ PublishCompilationResults(std::move(unpublished_code));
+ unpublished_code.clear();
+
+ // Keep publishing new code that came in.
+ base::MutexGuard guard(&publish_mutex_);
+ DCHECK(publisher_running_);
+ if (publish_queue_.empty()) {
+ publisher_running_ = false;
+ return;
+ }
+ unpublished_code.swap(publish_queue_);
+ }
+}
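// The publisher pattern above, reduced to a generic standalone sketch (assumed
// helper, not V8 code): the first thread claims {running} and drains the
// queue; later threads only append under the mutex and return.
#include <mutex>
#include <vector>

template <typename T, typename PublishFn>
void PublishOrEnqueue(std::mutex& mutex, std::vector<T>& queue, bool& running,
                      std::vector<T> batch, PublishFn publish) {
  {
    std::lock_guard<std::mutex> guard(mutex);
    if (running) {
      for (auto& item : batch) queue.push_back(std::move(item));
      return;  // the active publisher will pick these up
    }
    running = true;
  }
  while (true) {
    publish(std::move(batch));  // the expensive part, done without the lock
    batch.clear();
    std::lock_guard<std::mutex> guard(mutex);
    if (queue.empty()) {
      running = false;
      return;
    }
    batch.swap(queue);  // take over whatever arrived in the meantime
  }
}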
+
+void CompilationStateImpl::ScheduleCompileJobForNewUnits() {
if (failed()) return;
- std::unique_ptr<JobTask> new_compile_job =
- std::make_unique<BackgroundCompileJob>(
- background_compile_token_, async_counters_,
- scheduled_units_approximation_, max_compile_concurrency_);
- // TODO(wasm): Lower priority for TurboFan-only jobs.
- std::shared_ptr<JobHandle> handle = V8::GetCurrentPlatform()->PostJob(
- has_priority_ ? TaskPriority::kUserBlocking : TaskPriority::kUserVisible,
- std::move(new_compile_job));
- native_module_->engine()->ShepherdCompileJobHandle(handle);
- current_compile_job_ =
- std::make_unique<ThreadSafeJobHandle>(std::move(handle));
+ std::shared_ptr<JobHandle> new_job_handle;
+ {
+ base::MutexGuard guard(&mutex_);
+ if (current_compile_job_ && current_compile_job_->IsValid()) {
+ current_compile_job_->NotifyConcurrencyIncrease();
+ return;
+ }
+
+ std::unique_ptr<JobTask> new_compile_job =
+ std::make_unique<BackgroundCompileJob>(native_module_weak_,
+ async_counters_);
+ // TODO(wasm): Lower priority for TurboFan-only jobs.
+ new_job_handle = V8::GetCurrentPlatform()->PostJob(
+ has_priority_ ? TaskPriority::kUserBlocking
+ : TaskPriority::kUserVisible,
+ std::move(new_compile_job));
+ current_compile_job_ = new_job_handle;
+ // Reset the priority. Later uses of the compilation state, e.g. for
+ // debugging, should compile with the default priority again.
+ has_priority_ = false;
+ }
- // Reset the priority. Later uses of the compilation state, e.g. for
- // debugging, should compile with the default priority again.
- has_priority_ = false;
+ if (new_job_handle) {
+ native_module_->engine()->ShepherdCompileJobHandle(
+ std::move(new_job_handle));
+ }
}
size_t CompilationStateImpl::NumOutstandingCompilations() const {
@@ -3307,12 +3271,14 @@ size_t CompilationStateImpl::NumOutstandingCompilations() const {
}
void CompilationStateImpl::SetError() {
+ compile_cancelled_.store(true, std::memory_order_relaxed);
if (compile_failed_.exchange(true, std::memory_order_relaxed)) {
return; // Already failed before.
}
base::MutexGuard callbacks_guard(&callbacks_mutex_);
TriggerCallbacks();
+ callbacks_.clear();
}
void CompilationStateImpl::WaitForCompilationEvent(
@@ -3330,7 +3296,7 @@ void CompilationStateImpl::WaitForCompilationEvent(
}
constexpr JobDelegate* kNoDelegate = nullptr;
- ExecuteCompilationUnits(background_compile_token_, async_counters_.get(),
+ ExecuteCompilationUnits(native_module_weak_, async_counters_.get(),
kNoDelegate, kBaselineOnly);
compilation_event_semaphore->Wait();
}
@@ -3350,7 +3316,6 @@ class CompileJSToWasmWrapperJob final : public JobTask {
size_t max_concurrency)
: queue_(queue),
compilation_units_(compilation_units),
- max_concurrency_(max_concurrency),
outstanding_units_(queue->size()) {}
void Run(JobDelegate* delegate) override {
@@ -3366,14 +3331,15 @@ class CompileJSToWasmWrapperJob final : public JobTask {
// {outstanding_units_} includes the units that other workers are currently
// working on, so we can safely ignore the {worker_count} and just return
// the current number of outstanding units.
- return std::min(max_concurrency_,
+ size_t flag_limit =
+ static_cast<size_t>(std::max(1, FLAG_wasm_num_compilation_tasks));
+ return std::min(flag_limit,
outstanding_units_.load(std::memory_order_relaxed));
}
private:
JSToWasmWrapperQueue* const queue_;
JSToWasmWrapperUnitMap* const compilation_units_;
- const size_t max_concurrency_;
std::atomic<size_t> outstanding_units_;
};
} // namespace
@@ -3395,7 +3361,8 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
if (queue.insert(key)) {
auto unit = std::make_unique<JSToWasmWrapperCompilationUnit>(
isolate, isolate->wasm_engine(), function.sig, module,
- function.imported, enabled_features);
+ function.imported, enabled_features,
+ JSToWasmWrapperCompilationUnit::kAllowGeneric);
compilation_units.emplace(key, std::move(unit));
}
}
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 6206d11986..e688bb9479 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -68,9 +68,6 @@ bool CompileLazy(Isolate*, NativeModule*, int func_index);
void TriggerTierUp(Isolate*, NativeModule*, int func_index);
-// Get the maximum concurrency for parallel compilation.
-int GetMaxCompileConcurrency();
-
template <typename Key, typename Hash>
class WrapperQueue {
public:
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index dea4e1cb69..6d684d3534 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -635,7 +635,8 @@ class ModuleDecoderImpl : public Decoder {
case kExternalMemory: {
// ===== Imported memory =============================================
if (!AddMemory(module_.get())) break;
- uint8_t flags = validate_memory_flags(&module_->has_shared_memory);
+ uint8_t flags = validate_memory_flags(&module_->has_shared_memory,
+ &module_->is_memory64);
consume_resizable_limits("memory", "pages", max_mem_pages(),
&module_->initial_pages,
&module_->has_maximum_pages, max_mem_pages(),
@@ -735,7 +736,8 @@ class ModuleDecoderImpl : public Decoder {
for (uint32_t i = 0; ok() && i < memory_count; i++) {
if (!AddMemory(module_.get())) break;
- uint8_t flags = validate_memory_flags(&module_->has_shared_memory);
+ uint8_t flags = validate_memory_flags(&module_->has_shared_memory,
+ &module_->is_memory64);
consume_resizable_limits("memory", "pages", max_mem_pages(),
&module_->initial_pages,
&module_->has_maximum_pages, max_mem_pages(),
@@ -1531,7 +1533,7 @@ class ModuleDecoderImpl : public Decoder {
return flags;
}
- uint8_t validate_memory_flags(bool* has_shared_memory) {
+ uint8_t validate_memory_flags(bool* has_shared_memory, bool* is_memory64) {
uint8_t flags = consume_u8("memory limits flags");
*has_shared_memory = false;
switch (flags) {
@@ -1542,8 +1544,9 @@ class ModuleDecoderImpl : public Decoder {
case kSharedWithMaximum:
if (!enabled_features_.has_threads()) {
errorf(pc() - 1,
- "invalid memory limits flags (enable via "
- "--experimental-wasm-threads)");
+ "invalid memory limits flags 0x%x (enable via "
+ "--experimental-wasm-threads)",
+ flags);
}
*has_shared_memory = true;
// V8 does not support shared memory without a maximum.
@@ -1557,9 +1560,14 @@ class ModuleDecoderImpl : public Decoder {
case kMemory64WithMaximum:
if (!enabled_features_.has_memory64()) {
errorf(pc() - 1,
- "invalid memory limits flags (enable via "
- "--experimental-wasm-memory64)");
+ "invalid memory limits flags 0x%x (enable via "
+ "--experimental-wasm-memory64)",
+ flags);
}
+ *is_memory64 = true;
+ break;
+ default:
+ errorf(pc() - 1, "invalid memory limits flags 0x%x", flags);
break;
}
return flags;
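
For reference, the limits flags byte consumed above selects between plain, shared, and 64-bit memories. A hedged sketch of the decoding; the concrete flag values follow the threads/memory64 proposals and are assumptions here, not quoted from V8:

    #include <cstdint>
    #include <stdexcept>

    enum LimitsFlags : uint8_t {
      kNoMaximum = 0x00,
      kWithMaximum = 0x01,
      kSharedWithMaximum = 0x03,   // shared memory always carries a maximum
      kMemory64NoMaximum = 0x04,
      kMemory64WithMaximum = 0x05,
    };

    void DecodeMemoryFlags(uint8_t flags, bool* has_shared_memory,
                           bool* is_memory64) {
      *has_shared_memory = false;
      *is_memory64 = false;
      switch (flags) {
        case kNoMaximum:
        case kWithMaximum:
          break;
        case kSharedWithMaximum:
          *has_shared_memory = true;
          break;
        case kMemory64NoMaximum:
        case kMemory64WithMaximum:
          *is_memory64 = true;
          break;
        default:
          throw std::runtime_error("invalid memory limits flags");
      }
    }
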
@@ -1618,7 +1626,8 @@ class ModuleDecoderImpl : public Decoder {
// TODO(manoskouk): This is copy-modified from function-body-decoder-impl.h.
// We should find a way to share this code.
- V8_INLINE bool Validate(const byte* pc, HeapTypeImmediate<kValidate>& imm) {
+ V8_INLINE bool Validate(const byte* pc,
+ HeapTypeImmediate<kFullValidation>& imm) {
if (V8_UNLIKELY(imm.type.is_bottom())) {
error(pc, "invalid heap type");
return false;
@@ -1633,7 +1642,7 @@ class ModuleDecoderImpl : public Decoder {
WasmInitExpr consume_init_expr(WasmModule* module, ValueType expected,
size_t current_global_index) {
- constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
+ constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
WasmOpcode opcode = kExprNop;
std::vector<WasmInitExpr> stack;
while (pc() < end() && opcode != kExprEnd) {
@@ -1670,25 +1679,25 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kExprI32Const: {
- ImmI32Immediate<Decoder::kValidate> imm(this, pc() + 1);
+ ImmI32Immediate<Decoder::kFullValidation> imm(this, pc() + 1);
stack.emplace_back(imm.value);
len = 1 + imm.length;
break;
}
case kExprF32Const: {
- ImmF32Immediate<Decoder::kValidate> imm(this, pc() + 1);
+ ImmF32Immediate<Decoder::kFullValidation> imm(this, pc() + 1);
stack.emplace_back(imm.value);
len = 1 + imm.length;
break;
}
case kExprI64Const: {
- ImmI64Immediate<Decoder::kValidate> imm(this, pc() + 1);
+ ImmI64Immediate<Decoder::kFullValidation> imm(this, pc() + 1);
stack.emplace_back(imm.value);
len = 1 + imm.length;
break;
}
case kExprF64Const: {
- ImmF64Immediate<Decoder::kValidate> imm(this, pc() + 1);
+ ImmF64Immediate<Decoder::kFullValidation> imm(this, pc() + 1);
stack.emplace_back(imm.value);
len = 1 + imm.length;
break;
@@ -1702,8 +1711,8 @@ class ModuleDecoderImpl : public Decoder {
kExprRefNull);
return {};
}
- HeapTypeImmediate<Decoder::kValidate> imm(enabled_features_, this,
- pc() + 1);
+ HeapTypeImmediate<Decoder::kFullValidation> imm(enabled_features_,
+ this, pc() + 1);
len = 1 + imm.length;
if (!Validate(pc() + 1, imm)) return {};
stack.push_back(
@@ -1719,7 +1728,7 @@ class ModuleDecoderImpl : public Decoder {
return {};
}
- FunctionIndexImmediate<Decoder::kValidate> imm(this, pc() + 1);
+ FunctionIndexImmediate<Decoder::kFullValidation> imm(this, pc() + 1);
len = 1 + imm.length;
if (V8_UNLIKELY(module->functions.size() <= imm.index)) {
errorf(pc(), "invalid function index: %u", imm.index);
@@ -1741,8 +1750,8 @@ class ModuleDecoderImpl : public Decoder {
return {};
}
- Simd128Immediate<validate> imm(this, pc() + len + 1);
- len += 1 + kSimd128Size;
+ Simd128Immediate<validate> imm(this, pc() + len);
+ len += kSimd128Size;
stack.emplace_back(imm.value);
break;
}
@@ -1755,8 +1764,8 @@ class ModuleDecoderImpl : public Decoder {
case kExprRttCanon: {
HeapTypeImmediate<validate> imm(enabled_features_, this,
pc() + 2);
- len += 1 + imm.length;
- if (!Validate(pc() + 2, imm)) return {};
+ len += imm.length;
+ if (!Validate(pc() + len, imm)) return {};
stack.push_back(
WasmInitExpr::RttCanon(imm.type.representation()));
break;
@@ -1764,8 +1773,8 @@ class ModuleDecoderImpl : public Decoder {
case kExprRttSub: {
HeapTypeImmediate<validate> imm(enabled_features_, this,
pc() + 2);
- len += 1 + imm.length;
- if (!Validate(pc() + 2, imm)) return {};
+ len += imm.length;
+ if (!Validate(pc() + len, imm)) return {};
if (stack.empty()) {
error(pc(), "calling rtt.sub without arguments");
return {};
@@ -1836,7 +1845,7 @@ class ModuleDecoderImpl : public Decoder {
ValueType consume_value_type() {
uint32_t type_length;
- ValueType result = value_type_reader::read_value_type<kValidate>(
+ ValueType result = value_type_reader::read_value_type<kFullValidation>(
this, this->pc(), &type_length,
origin_ == kWasmOrigin ? enabled_features_ : WasmFeatures::None());
if (result == kWasmBottom) error(pc_, "invalid value type");
@@ -1850,7 +1859,7 @@ class ModuleDecoderImpl : public Decoder {
}
ValueType consume_storage_type() {
- uint8_t opcode = read_u8<kValidate>(this->pc());
+ uint8_t opcode = read_u8<kFullValidation>(this->pc());
switch (opcode) {
case kI8Code:
consume_bytes(1, "i8");
@@ -1961,10 +1970,10 @@ class ModuleDecoderImpl : public Decoder {
ValueType* type, uint32_t* table_index,
WasmInitExpr* offset) {
const byte* pos = pc();
- uint8_t flag;
+ uint32_t flag;
if (enabled_features_.has_bulk_memory() ||
enabled_features_.has_reftypes()) {
- flag = consume_u8("flag");
+ flag = consume_u32v("flag");
} else {
uint32_t table_index = consume_u32v("table index");
// The only valid flag value without bulk_memory or externref is '0'.
@@ -2133,7 +2142,8 @@ class ModuleDecoderImpl : public Decoder {
if (failed()) return index;
switch (opcode) {
case kExprRefNull: {
- HeapTypeImmediate<kValidate> imm(WasmFeatures::All(), this, this->pc());
+ HeapTypeImmediate<kFullValidation> imm(WasmFeatures::All(), this,
+ this->pc());
consume_bytes(imm.length, "ref.null immediate");
index = WasmElemSegment::kNullIndex;
break;
@@ -2172,13 +2182,14 @@ ModuleResult DecodeWasmModule(
// as the {module}.
ModuleDecoderImpl decoder(enabled, module_start, module_end, origin);
v8::metrics::WasmModuleDecoded metrics_event;
- metrics::TimedScope<v8::metrics::WasmModuleDecoded> metrics_event_scope(
- &metrics_event, &v8::metrics::WasmModuleDecoded::wall_clock_time_in_us);
+ base::ElapsedTimer timer;
+ timer.Start();
ModuleResult result =
decoder.DecodeModule(counters, allocator, verify_functions);
// Record event metrics.
- metrics_event_scope.Stop();
+ metrics_event.wall_clock_duration_in_us = timer.Elapsed().InMicroseconds();
+ timer.Stop();
metrics_event.success = decoder.ok() && result.ok();
metrics_event.async = decoding_method == DecodingMethod::kAsync ||
decoding_method == DecodingMethod::kAsyncStream;
@@ -2438,14 +2449,8 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
// Extract from export table.
for (const WasmExport& exp : export_table) {
- switch (exp.kind) {
- case kExternalFunction:
- if (names->count(exp.index) == 0) {
- names->insert(std::make_pair(exp.index, exp.name));
- }
- break;
- default:
- break;
+ if (exp.kind == kExternalFunction && names->count(exp.index) == 0) {
+ names->insert(std::make_pair(exp.index, exp.name));
}
}
}
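
The metrics change in DecodeWasmModule swaps the TimedScope helper for an explicit base::ElapsedTimer, so the duration is written exactly once, after the result is known. The same pattern, sketched with std::chrono as a portable stand-in for the V8 timer:

    #include <chrono>
    #include <cstdint>

    // Illustrative stand-in for v8::metrics::WasmModuleDecoded.
    struct DecodeEvent {
      int64_t wall_clock_duration_in_us = 0;
      bool success = false;
    };

    template <typename DecodeFn>
    DecodeEvent TimedDecode(DecodeFn decode) {
      DecodeEvent event;
      auto start = std::chrono::steady_clock::now();
      event.success = decode();
      event.wall_clock_duration_in_us =
          std::chrono::duration_cast<std::chrono::microseconds>(
              std::chrono::steady_clock::now() - start).count();
      return event;
    }
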
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index d31bafb294..e8b0a4f8e6 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -58,25 +58,32 @@ uint32_t EvalUint32InitExpr(Handle<WasmInstanceObject> instance,
using ImportWrapperQueue = WrapperQueue<WasmImportWrapperCache::CacheKey,
WasmImportWrapperCache::CacheKeyHash>;
-class CompileImportWrapperTask final : public CancelableTask {
+class CompileImportWrapperJob final : public JobTask {
public:
- CompileImportWrapperTask(
- CancelableTaskManager* task_manager, WasmEngine* engine,
- Counters* counters, NativeModule* native_module,
+ CompileImportWrapperJob(
+ WasmEngine* engine, Counters* counters, NativeModule* native_module,
ImportWrapperQueue* queue,
WasmImportWrapperCache::ModificationScope* cache_scope)
- : CancelableTask(task_manager),
- engine_(engine),
+ : engine_(engine),
counters_(counters),
native_module_(native_module),
queue_(queue),
cache_scope_(cache_scope) {}
- void RunInternal() override {
+ size_t GetMaxConcurrency(size_t worker_count) const override {
+ size_t flag_limit =
+ static_cast<size_t>(std::max(1, FLAG_wasm_num_compilation_tasks));
+ // Add {worker_count} to the queue size because workers might still be
+ // processing units that have already been popped from the queue.
+ return std::min(flag_limit, worker_count + queue_->size());
+ }
+
+ void Run(JobDelegate* delegate) override {
while (base::Optional<WasmImportWrapperCache::CacheKey> key =
queue_->pop()) {
CompileImportWrapper(engine_, native_module_, counters_, key->kind,
key->signature, key->expected_arity, cache_scope_);
+ if (delegate->ShouldYield()) return;
}
}
@@ -410,10 +417,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
TimedHistogramScope wasm_instantiate_module_time_scope(SELECT_WASM_COUNTER(
isolate_->counters(), module_->origin, wasm_instantiate, module_time));
v8::metrics::WasmModuleInstantiated wasm_module_instantiated;
- metrics::TimedScope<v8::metrics::WasmModuleInstantiated>
- wasm_module_instantiated_timed_scope(
- &wasm_module_instantiated,
- &v8::metrics::WasmModuleInstantiated::wall_clock_time_in_us);
+ base::ElapsedTimer timer;
+ timer.Start();
NativeModule* native_module = module_object_->native_module();
//--------------------------------------------------------------------------
@@ -745,7 +750,9 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
TRACE("Successfully built instance for module %p\n",
module_object_->native_module());
wasm_module_instantiated.success = true;
- wasm_module_instantiated_timed_scope.Stop();
+ wasm_module_instantiated.wall_clock_duration_in_us =
+ timer.Elapsed().InMicroseconds();
+ timer.Stop();
isolate_->metrics_recorder()->DelayMainThreadEvent(wasm_module_instantiated,
context_id_);
return instance;
@@ -1074,8 +1081,7 @@ bool InstanceBuilder::ProcessImportedFunction(
// The imported function is a callable.
int expected_arity = static_cast<int>(expected_sig->parameter_count());
- if (kind ==
- compiler::WasmImportCallKind::kJSFunctionArityMismatchSkipAdaptor) {
+ if (kind == compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
Handle<JSFunction> function = Handle<JSFunction>::cast(js_receiver);
SharedFunctionInfo shared = function->shared();
expected_arity = shared.internal_formal_parameter_count();
@@ -1450,7 +1456,7 @@ void InstanceBuilder::CompileImportWrappers(
int expected_arity = static_cast<int>(sig->parameter_count());
if (resolved.first ==
- compiler::WasmImportCallKind::kJSFunctionArityMismatchSkipAdaptor) {
+ compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
Handle<JSFunction> function = Handle<JSFunction>::cast(resolved.second);
SharedFunctionInfo shared = function->shared();
expected_arity = shared.internal_formal_parameter_count();
@@ -1464,24 +1470,14 @@ void InstanceBuilder::CompileImportWrappers(
import_wrapper_queue.insert(key);
}
- CancelableTaskManager task_manager;
- // TODO(wasm): Switch this to the Jobs API.
- const int max_background_tasks = GetMaxCompileConcurrency();
- for (int i = 0; i < max_background_tasks; ++i) {
- auto task = std::make_unique<CompileImportWrapperTask>(
- &task_manager, isolate_->wasm_engine(), isolate_->counters(),
- native_module, &import_wrapper_queue, &cache_scope);
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
- }
+ auto compile_job_task = std::make_unique<CompileImportWrapperJob>(
+ isolate_->wasm_engine(), isolate_->counters(), native_module,
+ &import_wrapper_queue, &cache_scope);
+ auto compile_job = V8::GetCurrentPlatform()->PostJob(
+ TaskPriority::kUserVisible, std::move(compile_job_task));
- // Also compile in the current thread, in case there are no worker threads.
- while (base::Optional<WasmImportWrapperCache::CacheKey> key =
- import_wrapper_queue.pop()) {
- CompileImportWrapper(isolate_->wasm_engine(), native_module,
- isolate_->counters(), key->kind, key->signature,
- key->expected_arity, &cache_scope);
- }
- task_manager.CancelAndWait();
+ // Wait for the job to finish, while contributing work on this thread.
+ compile_job->Join();
}
// Process the imports, including functions, tables, globals, and memory, in
@@ -1947,7 +1943,7 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
// Update the local dispatch table first if necessary.
if (IsSubtypeOf(table_object->type(), kWasmFuncRef, module)) {
- uint32_t sig_id = module->signature_ids[function->sig_index];
+ uint32_t sig_id = module->canonicalized_type_ids[function->sig_index];
IndirectFunctionTableEntry(instance, table_index, entry_index)
.Set(sig_id, instance, func_index);
}
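
Replacing the hand-rolled pool of CancelableTasks with a single job is the standard migration to the v8::Platform Jobs API: the platform sizes the worker pool through GetMaxConcurrency, and Join() lets the posting thread contribute until the queue drains. A minimal sketch against the public v8-platform.h interface, with a hypothetical stand-in queue:

    #include <cstddef>
    #include <memory>
    #include <mutex>
    #include <optional>
    #include <queue>

    #include "v8-platform.h"  // v8::JobTask, v8::JobDelegate

    // Hypothetical thread-safe queue of compilation units.
    class UnitQueue {
     public:
      std::optional<int> Pop() {
        std::lock_guard<std::mutex> lock(mutex_);
        if (units_.empty()) return std::nullopt;
        int unit = units_.front();
        units_.pop();
        return unit;
      }
      size_t Size() const {
        std::lock_guard<std::mutex> lock(mutex_);
        return units_.size();
      }

     private:
      mutable std::mutex mutex_;
      std::queue<int> units_;
    };

    class CompileWrappersJob final : public v8::JobTask {
     public:
      explicit CompileWrappersJob(UnitQueue* queue) : queue_(queue) {}

      void Run(v8::JobDelegate* delegate) override {
        while (std::optional<int> unit = queue_->Pop()) {
          CompileOne(*unit);                    // hypothetical helper
          if (delegate->ShouldYield()) return;  // cooperate with the scheduler
        }
      }

      size_t GetMaxConcurrency(size_t worker_count) const override {
        // Workers may still hold popped units, so count them in.
        return worker_count + queue_->Size();
      }

     private:
      static void CompileOne(int) { /* compile a single wrapper */ }
      UnitQueue* const queue_;
    };

    // Usage: post, then block while helping on this thread.
    //   auto handle = platform->PostJob(v8::TaskPriority::kUserVisible,
    //       std::make_unique<CompileWrappersJob>(&queue));
    //   handle->Join();
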
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index c9f984aaee..d1312edd33 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -28,6 +28,8 @@ namespace wasm {
class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
public:
explicit AsyncStreamingDecoder(std::unique_ptr<StreamingProcessor> processor);
+ AsyncStreamingDecoder(const AsyncStreamingDecoder&) = delete;
+ AsyncStreamingDecoder& operator=(const AsyncStreamingDecoder&) = delete;
// The buffer passed into OnBytesReceived is owned by the caller.
void OnBytesReceived(Vector<const uint8_t> bytes) override;
@@ -218,8 +220,6 @@ class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
// We need wire bytes in an array for deserializing cached modules.
std::vector<uint8_t> wire_bytes_for_deserializing_;
-
- DISALLOW_COPY_AND_ASSIGN(AsyncStreamingDecoder);
};
void AsyncStreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
@@ -517,10 +517,6 @@ size_t AsyncStreamingDecoder::DecodeVarInt32::ReadBytes(
Decoder decoder(buf,
streaming->module_offset() - static_cast<uint32_t>(offset()));
value_ = decoder.consume_u32v(field_name_);
- // The number of bytes we actually needed to read.
- DCHECK_GT(decoder.pc(), buffer().begin());
- bytes_consumed_ = static_cast<size_t>(decoder.pc() - buf.begin());
- TRACE_STREAMING(" ==> %zu bytes consumed\n", bytes_consumed_);
if (decoder.failed()) {
if (new_bytes == remaining_buf.size()) {
@@ -531,6 +527,11 @@ size_t AsyncStreamingDecoder::DecodeVarInt32::ReadBytes(
return new_bytes;
}
+ // The number of bytes we actually needed to read.
+ DCHECK_GT(decoder.pc(), buffer().begin());
+ bytes_consumed_ = static_cast<size_t>(decoder.pc() - buf.begin());
+ TRACE_STREAMING(" ==> %zu bytes consumed\n", bytes_consumed_);
+
// We read all the bytes we needed.
DCHECK_GT(bytes_consumed_, offset());
new_bytes = bytes_consumed_ - offset();
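
This file also starts a pattern repeated throughout the update (WasmCode, NativeModule, WasmCodeManager, WasmEngine, and others): the DISALLOW_COPY_AND_ASSIGN macro gives way to explicitly deleted members. The two forms are equivalent; the deleted declarations are simply idiomatic C++11 and keep the intent visible next to the constructors:

    // Before: macro hidden at the bottom of the class, in the private section.
    //   DISALLOW_COPY_AND_ASSIGN(AsyncStreamingDecoder);

    // After: explicit deletions in the public section.
    class StreamingDecoderLike {
     public:
      StreamingDecoderLike() = default;
      StreamingDecoderLike(const StreamingDecoderLike&) = delete;
      StreamingDecoderLike& operator=(const StreamingDecoderLike&) = delete;
    };
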
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 2e9a2a8d06..3731511c24 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -178,38 +178,7 @@ class ValueType {
#undef DEF_ENUM
};
- constexpr bool is_reference_type() const {
- return kind() == kRef || kind() == kOptRef || kind() == kRtt;
- }
-
- constexpr bool is_object_reference_type() const {
- return kind() == kRef || kind() == kOptRef;
- }
-
- constexpr bool is_packed() const { return kind() == kI8 || kind() == kI16; }
-
- constexpr bool is_nullable() const { return kind() == kOptRef; }
-
- constexpr bool is_reference_to(uint32_t htype) const {
- return (kind() == kRef || kind() == kOptRef) &&
- heap_representation() == htype;
- }
-
- constexpr bool is_defaultable() const {
- CONSTEXPR_DCHECK(kind() != kBottom && kind() != kStmt);
- return kind() != kRef && kind() != kRtt;
- }
-
- constexpr ValueType Unpacked() const {
- return is_packed() ? Primitive(kI32) : *this;
- }
-
- constexpr bool has_index() const {
- return is_reference_type() && heap_type().is_index();
- }
- constexpr bool is_rtt() const { return kind() == kRtt; }
- constexpr bool has_depth() const { return is_rtt(); }
-
+ /******************************* Constructors *******************************/
constexpr ValueType() : bit_field_(KindField::encode(kStmt)) {}
static constexpr ValueType Primitive(Kind kind) {
CONSTEXPR_DCHECK(kind == kBottom || kind <= kI16);
@@ -242,6 +211,43 @@ class ValueType {
return ValueType(bit_field);
}
+ /******************************** Type checks *******************************/
+ constexpr bool is_reference_type() const {
+ return kind() == kRef || kind() == kOptRef || kind() == kRtt;
+ }
+
+ constexpr bool is_object_reference_type() const {
+ return kind() == kRef || kind() == kOptRef;
+ }
+
+ constexpr bool is_nullable() const { return kind() == kOptRef; }
+
+ constexpr bool is_reference_to(uint32_t htype) const {
+ return (kind() == kRef || kind() == kOptRef) &&
+ heap_representation() == htype;
+ }
+
+ constexpr bool is_rtt() const { return kind() == kRtt; }
+ constexpr bool has_depth() const { return is_rtt(); }
+
+ constexpr bool has_index() const {
+ return is_reference_type() && heap_type().is_index();
+ }
+
+ constexpr bool is_defaultable() const {
+ CONSTEXPR_DCHECK(kind() != kBottom && kind() != kStmt);
+ return kind() != kRef && kind() != kRtt;
+ }
+
+ constexpr bool is_bottom() const { return kind() == kBottom; }
+
+ constexpr bool is_packed() const { return kind() == kI8 || kind() == kI16; }
+
+ constexpr ValueType Unpacked() const {
+ return is_packed() ? Primitive(kI32) : *this;
+ }
+
+ /***************************** Field Accessors ******************************/
constexpr Kind kind() const { return KindField::decode(bit_field_); }
constexpr HeapType::Representation heap_representation() const {
CONSTEXPR_DCHECK(is_reference_type());
@@ -263,6 +269,14 @@ class ValueType {
// Useful when serializing this type to store it into a runtime object.
constexpr uint32_t raw_bit_field() const { return bit_field_; }
+ /*************************** Other utility methods **************************/
+ constexpr bool operator==(ValueType other) const {
+ return bit_field_ == other.bit_field_;
+ }
+ constexpr bool operator!=(ValueType other) const {
+ return bit_field_ != other.bit_field_;
+ }
+
static constexpr size_t bit_field_offset() {
return offsetof(ValueType, bit_field_);
}
@@ -292,13 +306,7 @@ class ValueType {
return size;
}
- constexpr bool operator==(ValueType other) const {
- return bit_field_ == other.bit_field_;
- }
- constexpr bool operator!=(ValueType other) const {
- return bit_field_ != other.bit_field_;
- }
-
+ /*************************** Machine-type related ***************************/
constexpr MachineType machine_type() const {
CONSTEXPR_DCHECK(kBottom != kind());
@@ -316,6 +324,29 @@ class ValueType {
return machine_type().representation();
}
+ static ValueType For(MachineType type) {
+ switch (type.representation()) {
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return Primitive(kI32);
+ case MachineRepresentation::kWord64:
+ return Primitive(kI64);
+ case MachineRepresentation::kFloat32:
+ return Primitive(kF32);
+ case MachineRepresentation::kFloat64:
+ return Primitive(kF64);
+ case MachineRepresentation::kTaggedPointer:
+ return Ref(HeapType::kExtern, kNullable);
+ case MachineRepresentation::kSimd128:
+ return Primitive(kS128);
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ /********************************* Encoding *********************************/
+
// Returns the first byte of this type's representation in the wasm binary
// format.
// For compatibility with the reftypes and exception-handling proposals, this
@@ -365,27 +396,9 @@ class ValueType {
heap_representation() == HeapType::kI31));
}
- static ValueType For(MachineType type) {
- switch (type.representation()) {
- case MachineRepresentation::kWord8:
- case MachineRepresentation::kWord16:
- case MachineRepresentation::kWord32:
- return Primitive(kI32);
- case MachineRepresentation::kWord64:
- return Primitive(kI64);
- case MachineRepresentation::kFloat32:
- return Primitive(kF32);
- case MachineRepresentation::kFloat64:
- return Primitive(kF64);
- case MachineRepresentation::kTaggedPointer:
- return Ref(HeapType::kExtern, kNullable);
- case MachineRepresentation::kSimd128:
- return Primitive(kS128);
- default:
- UNREACHABLE();
- }
- }
+ static constexpr int kLastUsedBit = 30;
+ /****************************** Pretty-printing *****************************/
constexpr char short_name() const {
constexpr char kShortName[] = {
#define SHORT_NAME(kind, log2Size, code, machineType, shortName, ...) shortName,
@@ -425,8 +438,6 @@ class ValueType {
return buf.str();
}
- static constexpr int kLastUsedBit = 30;
-
private:
// We only use 31 bits so ValueType fits in a Smi. This can be changed if
// needed.
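
The reshuffled ValueType keeps the invariant stated in this comment: the bit field may only use bits 0 through kLastUsedBit = 30, so an encoded type fits in a 31-bit Smi payload. Restated as a compile-time check (a sketch, not code from the header):

    #include <cstdint>

    constexpr int kLastUsedBit = 30;
    constexpr uint32_t kUsedBitsMask =
        (uint32_t{1} << (kLastUsedBit + 1)) - 1;

    // All used bits must lie within the 31 bits a Smi payload can hold.
    static_assert(kUsedBitsMask <= uint32_t{0x7FFFFFFF},
                  "ValueType bit field must fit in a 31-bit Smi");
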
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index ac68dc970c..cd90524599 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -323,8 +323,12 @@ void WasmCode::Validate() const {
void WasmCode::MaybePrint(const char* name) const {
// Determines whether flags want this code to be printed.
- if ((FLAG_print_wasm_code && kind() == kFunction) ||
- (FLAG_print_wasm_stub_code && kind() != kFunction) || FLAG_print_code) {
+ bool function_index_matches =
+ (!IsAnonymous() &&
+ FLAG_print_wasm_code_function_index == static_cast<int>(index()));
+ if (FLAG_print_code ||
+ (kind() == kFunction ? (FLAG_print_wasm_code || function_index_matches)
+ : FLAG_print_wasm_stub_code)) {
Print(name);
}
}
@@ -854,7 +858,7 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
void NativeModule::LogWasmCodes(Isolate* isolate) {
if (!WasmCode::ShouldBeLogged(isolate)) return;
- TRACE_EVENT1("v8.wasm", "wasm.LogWasmCodes", "num_functions",
+ TRACE_EVENT1("v8.wasm", "wasm.LogWasmCodes", "functions",
module_->num_declared_functions);
// TODO(titzer): we skip the logging of the import wrappers
@@ -874,11 +878,7 @@ CompilationEnv NativeModule::CreateCompilationEnv() const {
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
CODE_SPACE_WRITE_SCOPE
- // For off-heap builtins, we create a copy of the off-heap instruction stream
- // instead of the on-heap code object containing the trampoline. Ensure that
- // we do not apply the on-heap reloc info to the off-heap instructions.
- const size_t relocation_size =
- code->is_off_heap_trampoline() ? 0 : code->relocation_size();
+ const size_t relocation_size = code->relocation_size();
OwnedVector<byte> reloc_info;
if (relocation_size > 0) {
reloc_info = OwnedVector<byte>::Of(
@@ -892,19 +892,25 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
source_pos_table->copy_out(0, source_pos.start(),
source_pos_table->length());
}
+ CHECK(!code->is_off_heap_trampoline());
+ STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
Vector<const byte> instructions(
- reinterpret_cast<byte*>(code->InstructionStart()),
- static_cast<size_t>(code->InstructionSize()));
+ reinterpret_cast<byte*>(code->raw_body_start()),
+ static_cast<size_t>(code->raw_body_size()));
const int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
+ // Metadata offsets in Code objects are relative to the start of the metadata
+ // section, whereas WasmCode expects offsets relative to InstructionStart.
+ const int base_offset = code->raw_instruction_size();
// TODO(jgruber,v8:8758): Remove this translation. It exists only because
// Code objects contain real offsets but WasmCode expects an offset of 0 to
// mean 'empty'.
const int safepoint_table_offset =
- code->has_safepoint_table() ? code->safepoint_table_offset() : 0;
- const int handler_table_offset = code->handler_table_offset();
- const int constant_pool_offset = code->constant_pool_offset();
- const int code_comments_offset = code->code_comments_offset();
+ code->has_safepoint_table() ? base_offset + code->safepoint_table_offset()
+ : 0;
+ const int handler_table_offset = base_offset + code->handler_table_offset();
+ const int constant_pool_offset = base_offset + code->constant_pool_offset();
+ const int code_comments_offset = base_offset + code->code_comments_offset();
Vector<uint8_t> dst_code_bytes =
code_allocator_.AllocateForCode(this, instructions.size());
@@ -912,7 +918,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
// Apply the relocation delta by iterating over the RelocInfo.
intptr_t delta = reinterpret_cast<Address>(dst_code_bytes.begin()) -
- code->InstructionStart();
+ code->raw_instruction_start();
int mode_mask =
RelocInfo::kApplyMask | RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
auto jump_tables_ref =
@@ -1081,12 +1087,16 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
}
WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.PublishCode");
base::MutexGuard lock(&allocation_mutex_);
return PublishCodeLocked(std::move(code));
}
std::vector<WasmCode*> NativeModule::PublishCode(
Vector<std::unique_ptr<WasmCode>> codes) {
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.PublishCode", "number", codes.size());
std::vector<WasmCode*> published_code;
published_code.reserve(codes.size());
base::MutexGuard lock(&allocation_mutex_);
@@ -1362,10 +1372,10 @@ void NativeModule::AddCodeSpace(
WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
#undef RUNTIME_STUB
#undef RUNTIME_STUB_TRAP
+ STATIC_ASSERT(Builtins::kAllBuiltinsAreIsolateIndependent);
Address builtin_addresses[WasmCode::kRuntimeStubCount];
for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
Builtins::Name builtin = stub_names[i];
- CHECK(embedded_data.ContainsBuiltin(builtin));
builtin_addresses[i] = embedded_data.InstructionStartOfBuiltin(builtin);
}
JumpTableAssembler::GenerateFarJumpTable(
@@ -1468,7 +1478,11 @@ NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegion(
size_t max_distance = std::max(
code_region.end() > table_start ? code_region.end() - table_start : 0,
table_end > code_region.begin() ? table_end - code_region.begin() : 0);
- return max_distance < WasmCodeAllocator::kMaxCodeSpaceSize;
+ // We can allow a max_distance that is equal to kMaxCodeSpaceSize, because
+ // every call or jump will target an address *within* the region, but never
+ // exactly the end of the region. So all occuring offsets are actually
+ // smaller than max_distance.
+ return max_distance <= WasmCodeAllocator::kMaxCodeSpaceSize;
};
// Fast path: Try to use {main_jump_table_} and {main_far_jump_table_}.
@@ -1881,6 +1895,8 @@ std::unique_ptr<WasmCode> NativeModule::AddCompiledCode(
std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
Vector<WasmCompilationResult> results) {
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.AddCompiledCode", "num", results.size());
DCHECK(!results.empty());
// First, allocate code space for all the results.
size_t total_code_space = 0;
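
The relaxed bound in FindJumpTablesForRegion leans on the fact that calls and jumps target addresses strictly inside the region, never its one-past-the-end address, so every occurring offset is strictly smaller than max_distance. A worked boundary check (the constant's value is an assumption for illustration):

    #include <cassert>
    #include <cstddef>

    // Illustrative value; the real constant is WasmCodeAllocator's.
    constexpr size_t kMaxCodeSpaceSize = size_t{1} << 30;

    bool RegionReachable(size_t max_distance) {
      // Offsets are strictly below max_distance, so equality is still safe.
      return max_distance <= kMaxCodeSpaceSize;
    }

    int main() {
      assert(RegionReachable(kMaxCodeSpaceSize));       // boundary now accepted
      assert(!RegionReachable(kMaxCodeSpaceSize + 1));  // one past is rejected
    }
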
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 5e8ed5475b..f017b977b5 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -57,6 +57,7 @@ struct WasmModule;
V(WasmFloat64ToNumber) \
V(WasmTaggedToFloat64) \
V(WasmAllocateJSArray) \
+ V(WasmAllocatePair) \
V(WasmAtomicNotify) \
V(WasmI32AtomicWait32) \
V(WasmI32AtomicWait64) \
@@ -200,6 +201,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
static bool ShouldBeLogged(Isolate* isolate);
void LogCode(Isolate* isolate) const;
+ WasmCode(const WasmCode&) = delete;
+ WasmCode& operator=(const WasmCode&) = delete;
~WasmCode();
void IncRef() {
@@ -348,8 +351,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
// from (3) and all (2)), the code object is deleted and the memory for the
// machine code is freed.
std::atomic<int> ref_count_{1};
-
- DISALLOW_COPY_AND_ASSIGN(WasmCode);
};
// Check that {WasmCode} objects are sufficiently small. We create many of them,
@@ -476,6 +477,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = false;
#endif
+ NativeModule(const NativeModule&) = delete;
+ NativeModule& operator=(const NativeModule&) = delete;
+ ~NativeModule();
+
// {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
// code below, i.e. it can be called concurrently from background threads.
// The returned code still needs to be published via {PublishCode}.
@@ -612,8 +617,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
return import_wrapper_cache_.get();
}
- ~NativeModule();
-
const WasmFeatures& enabled_features() const { return enabled_features_; }
// Returns the runtime stub id that corresponds to the given address (which
@@ -794,13 +797,13 @@ class V8_EXPORT_PRIVATE NativeModule final {
std::atomic<size_t> liftoff_bailout_count_{0};
std::atomic<size_t> liftoff_code_size_{0};
std::atomic<size_t> turbofan_code_size_{0};
-
- DISALLOW_COPY_AND_ASSIGN(NativeModule);
};
class V8_EXPORT_PRIVATE WasmCodeManager final {
public:
explicit WasmCodeManager(size_t max_committed);
+ WasmCodeManager(const WasmCodeManager&) = delete;
+ WasmCodeManager& operator=(const WasmCodeManager&) = delete;
#ifdef DEBUG
~WasmCodeManager() {
@@ -872,8 +875,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// End of fields protected by {native_modules_mutex_}.
//////////////////////////////////////////////////////////////////////////////
-
- DISALLOW_COPY_AND_ASSIGN(WasmCodeManager);
};
// Within the scope, the native_module is writable and not executable.
@@ -901,6 +902,8 @@ class NativeModuleModificationScope final {
class V8_EXPORT_PRIVATE WasmCodeRefScope {
public:
WasmCodeRefScope();
+ WasmCodeRefScope(const WasmCodeRefScope&) = delete;
+ WasmCodeRefScope& operator=(const WasmCodeRefScope&) = delete;
~WasmCodeRefScope();
// Register a {WasmCode} reference in the current {WasmCodeRefScope}. Fails if
@@ -910,8 +913,6 @@ class V8_EXPORT_PRIVATE WasmCodeRefScope {
private:
WasmCodeRefScope* const previous_scope_;
std::unordered_set<WasmCode*> code_ptrs_;
-
- DISALLOW_COPY_AND_ASSIGN(WasmCodeRefScope);
};
// Similarly to a global handle, a {GlobalWasmCodeRef} stores a single
@@ -924,6 +925,9 @@ class GlobalWasmCodeRef {
code_->IncRef();
}
+ GlobalWasmCodeRef(const GlobalWasmCodeRef&) = delete;
+ GlobalWasmCodeRef& operator=(const GlobalWasmCodeRef&) = delete;
+
~GlobalWasmCodeRef() { WasmCode::DecrementRefCount({&code_, 1}); }
// Get a pointer to the contained {WasmCode} object. This is only guaranteed
@@ -934,7 +938,6 @@ class GlobalWasmCodeRef {
WasmCode* const code_;
// Also keep the {NativeModule} alive.
const std::shared_ptr<NativeModule> native_module_;
- DISALLOW_COPY_AND_ASSIGN(GlobalWasmCodeRef);
};
const char* GetRuntimeStubName(WasmCode::RuntimeStubId);
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index 4e701599fc..31a519ee2e 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -123,6 +123,11 @@ constexpr uint32_t kExceptionAttribute = 0;
constexpr int kAnonymousFuncIndex = -1;
+// The number of calls to an exported wasm function that will be handled
+// by the generic wrapper. Once this threshold is reached, a specific wrapper
+// is compiled for the function's signature.
+constexpr uint32_t kGenericWrapperThreshold = 6;
+
} // namespace wasm
} // namespace internal
} // namespace v8
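
The new constant documents a tier-up point for exported functions: the first kGenericWrapperThreshold calls go through the generic wrapper, after which a wrapper specialized for the signature is compiled. A hedged sketch of that counting scheme (the struct and method are illustrative, not V8's actual bookkeeping):

    #include <cstdint>

    constexpr uint32_t kGenericWrapperThreshold = 6;

    struct ExportedFunctionState {
      uint32_t remaining_generic_calls = kGenericWrapperThreshold;

      // Called once per invocation through the generic wrapper; returns true
      // exactly once, when the budget is exhausted.
      bool ShouldCompileSpecificWrapper() {
        if (remaining_generic_calls == 0) return false;  // already triggered
        return --remaining_generic_calls == 0;
      }
    };
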
diff --git a/deps/v8/src/wasm/wasm-debug-evaluate.cc b/deps/v8/src/wasm/wasm-debug-evaluate.cc
index d8abe49679..bbd75f6b18 100644
--- a/deps/v8/src/wasm/wasm-debug-evaluate.cc
+++ b/deps/v8/src/wasm/wasm-debug-evaluate.cc
@@ -81,7 +81,7 @@ static bool CheckRangeOutOfBounds(uint32_t offset, uint32_t size,
class DebugEvaluatorProxy {
public:
- explicit DebugEvaluatorProxy(Isolate* isolate, StandardFrame* frame)
+ explicit DebugEvaluatorProxy(Isolate* isolate, CommonFrame* frame)
: isolate_(isolate), frame_(frame) {}
static void GetMemoryTrampoline(
@@ -283,7 +283,7 @@ class DebugEvaluatorProxy {
}
Isolate* isolate_;
- StandardFrame* frame_;
+ CommonFrame* frame_;
Handle<WasmInstanceObject> evaluator_;
Handle<WasmInstanceObject> debuggee_;
};
@@ -356,7 +356,7 @@ static bool VerifyEvaluatorInterface(const WasmModule* raw_module,
Maybe<std::string> DebugEvaluateImpl(
Vector<const byte> snippet, Handle<WasmInstanceObject> debuggee_instance,
- StandardFrame* frame) {
+ CommonFrame* frame) {
Isolate* isolate = debuggee_instance->GetIsolate();
HandleScope handle_scope(isolate);
WasmEngine* engine = isolate->wasm_engine();
@@ -433,7 +433,7 @@ Maybe<std::string> DebugEvaluateImpl(
MaybeHandle<String> DebugEvaluate(Vector<const byte> snippet,
Handle<WasmInstanceObject> debuggee_instance,
- StandardFrame* frame) {
+ CommonFrame* frame) {
Maybe<std::string> result =
DebugEvaluateImpl(snippet, debuggee_instance, frame);
if (result.IsNothing()) return {};
diff --git a/deps/v8/src/wasm/wasm-debug-evaluate.h b/deps/v8/src/wasm/wasm-debug-evaluate.h
index f4e3aef175..ab84a736a8 100644
--- a/deps/v8/src/wasm/wasm-debug-evaluate.h
+++ b/deps/v8/src/wasm/wasm-debug-evaluate.h
@@ -13,9 +13,9 @@ namespace v8 {
namespace internal {
namespace wasm {
-MaybeHandle<String> V8_EXPORT_PRIVATE DebugEvaluate(
- Vector<const byte> snippet, Handle<WasmInstanceObject> debuggee_instance,
- StandardFrame* frame);
+MaybeHandle<String> V8_EXPORT_PRIVATE
+DebugEvaluate(Vector<const byte> snippet,
+ Handle<WasmInstanceObject> debuggee_instance, CommonFrame* frame);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index d05caa4144..5da5525045 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -51,60 +51,6 @@ Handle<String> PrintFToOneByteString(Isolate* isolate, const char* format,
: isolate->factory()->NewStringFromOneByte(name).ToHandleChecked();
}
-MaybeHandle<JSObject> CreateFunctionTablesObject(
- Handle<WasmInstanceObject> instance) {
- Isolate* isolate = instance->GetIsolate();
- auto tables = handle(instance->tables(), isolate);
- if (tables->length() == 0) return MaybeHandle<JSObject>();
-
- const char* table_label = "table%d";
- Handle<JSObject> tables_obj = isolate->factory()->NewJSObjectWithNullProto();
- for (int table_index = 0; table_index < tables->length(); ++table_index) {
- auto func_table =
- handle(WasmTableObject::cast(tables->get(table_index)), isolate);
- if (!IsSubtypeOf(func_table->type(), kWasmFuncRef, instance->module()))
- continue;
-
- Handle<String> table_name;
- if (!WasmInstanceObject::GetTableNameOrNull(isolate, instance, table_index)
- .ToHandle(&table_name)) {
- table_name =
- PrintFToOneByteString<true>(isolate, table_label, table_index);
- }
-
- Handle<JSObject> func_table_obj =
- isolate->factory()->NewJSObjectWithNullProto();
- JSObject::AddProperty(isolate, tables_obj, table_name, func_table_obj,
- NONE);
- for (int i = 0; i < func_table->current_length(); ++i) {
- Handle<Object> func = WasmTableObject::Get(isolate, func_table, i);
- DCHECK(!WasmCapiFunction::IsWasmCapiFunction(*func));
- if (func->IsNull(isolate)) continue;
-
- Handle<String> func_name;
- Handle<JSObject> func_obj =
- isolate->factory()->NewJSObjectWithNullProto();
-
- if (WasmExportedFunction::IsWasmExportedFunction(*func)) {
- auto target_func = Handle<WasmExportedFunction>::cast(func);
- auto target_instance = handle(target_func->instance(), isolate);
- auto module = handle(target_instance->module_object(), isolate);
- func_name = WasmModuleObject::GetFunctionName(
- isolate, module, target_func->function_index());
- } else if (WasmJSFunction::IsWasmJSFunction(*func)) {
- auto target_func = Handle<JSFunction>::cast(func);
- func_name = JSFunction::GetName(target_func);
- if (func_name->length() == 0) {
- func_name = isolate->factory()->InternalizeUtf8String("anonymous");
- }
- }
- JSObject::AddProperty(isolate, func_obj, func_name, func, NONE);
- JSObject::AddDataElement(func_table_obj, i, func_obj, NONE);
- }
- }
- return tables_obj;
-}
-
Handle<Object> WasmValueToValueObject(Isolate* isolate, WasmValue value) {
Handle<ByteArray> bytes;
switch (value.type().kind()) {
@@ -164,8 +110,8 @@ MaybeHandle<String> GetLocalNameString(Isolate* isolate,
ModuleWireBytes wire_bytes{native_module->wire_bytes()};
// Bounds were checked during decoding.
DCHECK(wire_bytes.BoundsCheck(name_ref));
- Vector<const char> name = wire_bytes.GetNameOrNull(name_ref);
- if (name.begin() == nullptr) return {};
+ WasmName name = wire_bytes.GetNameOrNull(name_ref);
+ if (name.size() == 0) return {};
return isolate->factory()->NewStringFromUtf8(name);
}
@@ -272,14 +218,6 @@ Handle<JSObject> GetModuleScopeObject(Handle<WasmInstanceObject> instance) {
NONE);
}
- Handle<JSObject> function_tables_obj;
- if (CreateFunctionTablesObject(instance).ToHandle(&function_tables_obj)) {
- Handle<String> tables_name = isolate->factory()->InternalizeString(
- StaticCharVector("function tables"));
- JSObject::AddProperty(isolate, module_scope_object, tables_name,
- function_tables_obj, NONE);
- }
-
auto& globals = instance->module()->globals;
if (globals.size() > 0) {
Handle<JSObject> globals_obj =
@@ -310,6 +248,9 @@ class DebugInfoImpl {
explicit DebugInfoImpl(NativeModule* native_module)
: native_module_(native_module) {}
+ DebugInfoImpl(const DebugInfoImpl&) = delete;
+ DebugInfoImpl& operator=(const DebugInfoImpl&) = delete;
+
int GetNumLocals(Address pc) {
FrameInspectionScope scope(this, pc);
if (!scope.is_inspectable()) return 0;
@@ -340,6 +281,12 @@ class DebugInfoImpl {
debug_break_fp);
}
+ const WasmFunction& GetFunctionAtAddress(Address pc) {
+ FrameInspectionScope scope(this, pc);
+ auto* module = native_module_->module();
+ return module->functions[scope.code->index()];
+ }
+
Handle<JSObject> GetLocalScopeObject(Isolate* isolate, Address pc, Address fp,
Address debug_break_fp) {
FrameInspectionScope scope(this, pc);
@@ -886,8 +833,6 @@ class DebugInfoImpl {
// Isolate-specific data.
std::unordered_map<Isolate*, PerIsolateDebugData> per_isolate_data_;
-
- DISALLOW_COPY_AND_ASSIGN(DebugInfoImpl);
};
DebugInfo::DebugInfo(NativeModule* native_module)
@@ -909,6 +854,10 @@ WasmValue DebugInfo::GetStackValue(int index, Address pc, Address fp,
return impl_->GetStackValue(index, pc, fp, debug_break_fp);
}
+const wasm::WasmFunction& DebugInfo::GetFunctionAtAddress(Address pc) {
+ return impl_->GetFunctionAtAddress(pc);
+}
+
Handle<JSObject> DebugInfo::GetLocalScopeObject(Isolate* isolate, Address pc,
Address fp,
Address debug_break_fp) {
diff --git a/deps/v8/src/wasm/wasm-debug.h b/deps/v8/src/wasm/wasm-debug.h
index 6050cb3a58..82fe974952 100644
--- a/deps/v8/src/wasm/wasm-debug.h
+++ b/deps/v8/src/wasm/wasm-debug.h
@@ -34,6 +34,7 @@ class NativeModule;
class WasmCode;
class WireBytesRef;
class WasmValue;
+struct WasmFunction;
// Side table storing information used to inspect Liftoff frames at runtime.
// This table is only created on demand for debugging, so it is not optimized
@@ -153,6 +154,9 @@ class V8_EXPORT_PRIVATE DebugInfo {
WasmValue GetLocalValue(int local, Address pc, Address fp,
Address debug_break_fp);
int GetStackDepth(Address pc);
+
+ const wasm::WasmFunction& GetFunctionAtAddress(Address pc);
+
WasmValue GetStackValue(int index, Address pc, Address fp,
Address debug_break_fp);
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 9699516c27..9f962f76bd 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -419,7 +419,7 @@ WasmEngine::~WasmEngine() {
compile_job_handles = compile_job_handles_;
}
for (auto& job_handle : compile_job_handles) {
- if (job_handle->IsRunning()) job_handle->Cancel();
+ if (job_handle->IsValid()) job_handle->Cancel();
}
// All AsyncCompileJobs have been canceled.
@@ -1036,8 +1036,7 @@ void WasmEngine::LogOutstandingCodesForIsolate(Isolate* isolate) {
DCHECK_EQ(1, isolates_.count(isolate));
code_to_log.swap(isolates_[isolate]->code_to_log);
}
- TRACE_EVENT1("v8.wasm", "wasm.LogCode", "num_code_objects",
- code_to_log.size());
+ TRACE_EVENT1("v8.wasm", "wasm.LogCode", "codeObjects", code_to_log.size());
if (code_to_log.empty()) return;
for (WasmCode* code : code_to_log) {
code->LogCode(isolate);
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 2d96111462..a38308110b 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -137,6 +137,8 @@ class NativeModuleCache {
class V8_EXPORT_PRIVATE WasmEngine {
public:
WasmEngine();
+ WasmEngine(const WasmEngine&) = delete;
+ WasmEngine& operator=(const WasmEngine&) = delete;
~WasmEngine();
// Synchronously validates the given bytes that represent an encoded Wasm
@@ -413,8 +415,6 @@ class V8_EXPORT_PRIVATE WasmEngine {
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
-
- DISALLOW_COPY_AND_ASSIGN(WasmEngine);
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index de1dd5e9df..e8e8cf8d50 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -5,12 +5,13 @@
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
+
#include <limits>
#include "include/v8config.h"
-
#include "src/base/bits.h"
#include "src/base/ieee754.h"
+#include "src/base/safe_conversions.h"
#include "src/common/assert-scope.h"
#include "src/utils/memcopy.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -179,12 +180,8 @@ void uint64_to_float64_wrapper(Address data) {
}
int32_t float32_to_int64_wrapper(Address data) {
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within int64 range which are actually
- // not within int64 range.
float input = ReadUnalignedValue<float>(data);
- if (input >= static_cast<float>(std::numeric_limits<int64_t>::min()) &&
- input < static_cast<float>(std::numeric_limits<int64_t>::max())) {
+ if (base::IsValueInRangeForNumericType<int64_t>(input)) {
WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
return 1;
}
@@ -193,11 +190,7 @@ int32_t float32_to_int64_wrapper(Address data) {
int32_t float32_to_uint64_wrapper(Address data) {
float input = ReadUnalignedValue<float>(data);
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within uint64 range which are actually
- // not within uint64 range.
- if (input > -1.0 &&
- input < static_cast<float>(std::numeric_limits<uint64_t>::max())) {
+ if (base::IsValueInRangeForNumericType<uint64_t>(input)) {
WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
return 1;
}
@@ -205,12 +198,8 @@ int32_t float32_to_uint64_wrapper(Address data) {
}
int32_t float64_to_int64_wrapper(Address data) {
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within int64 range which are actually
- // not within int64 range.
double input = ReadUnalignedValue<double>(data);
- if (input >= static_cast<double>(std::numeric_limits<int64_t>::min()) &&
- input < static_cast<double>(std::numeric_limits<int64_t>::max())) {
+ if (base::IsValueInRangeForNumericType<int64_t>(input)) {
WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
return 1;
}
@@ -218,12 +207,8 @@ int32_t float64_to_int64_wrapper(Address data) {
}
int32_t float64_to_uint64_wrapper(Address data) {
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within uint64 range which are actually
- // not within uint64 range.
double input = ReadUnalignedValue<double>(data);
- if (input > -1.0 &&
- input < static_cast<double>(std::numeric_limits<uint64_t>::max())) {
+ if (base::IsValueInRangeForNumericType<uint64_t>(input)) {
WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
return 1;
}
@@ -232,11 +217,7 @@ int32_t float64_to_uint64_wrapper(Address data) {
void float32_to_int64_sat_wrapper(Address data) {
float input = ReadUnalignedValue<float>(data);
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within int64 range which are actually
- // not within int64 range.
- if (input < static_cast<float>(std::numeric_limits<int64_t>::max()) &&
- input >= static_cast<float>(std::numeric_limits<int64_t>::min())) {
+ if (base::IsValueInRangeForNumericType<int64_t>(input)) {
WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
return;
}
@@ -253,11 +234,7 @@ void float32_to_int64_sat_wrapper(Address data) {
void float32_to_uint64_sat_wrapper(Address data) {
float input = ReadUnalignedValue<float>(data);
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within uint64 range which are actually
- // not within uint64 range.
- if (input < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
- input >= 0.0) {
+ if (base::IsValueInRangeForNumericType<uint64_t>(input)) {
WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
return;
}
@@ -270,11 +247,7 @@ void float32_to_uint64_sat_wrapper(Address data) {
void float64_to_int64_sat_wrapper(Address data) {
double input = ReadUnalignedValue<double>(data);
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within int64 range which are actually
- // not within int64 range.
- if (input < static_cast<double>(std::numeric_limits<int64_t>::max()) &&
- input >= static_cast<double>(std::numeric_limits<int64_t>::min())) {
+ if (base::IsValueInRangeForNumericType<int64_t>(input)) {
WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
return;
}
@@ -291,11 +264,7 @@ void float64_to_int64_sat_wrapper(Address data) {
void float64_to_uint64_sat_wrapper(Address data) {
double input = ReadUnalignedValue<double>(data);
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within int64 range which are actually
- // not within int64 range.
- if (input < static_cast<double>(std::numeric_limits<uint64_t>::max()) &&
- input >= 0.0) {
+ if (base::IsValueInRangeForNumericType<uint64_t>(input)) {
WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
return;
}
@@ -405,9 +374,12 @@ template <typename T, T (*float_round_op)(T)>
void simd_float_round_wrapper(Address data) {
constexpr int n = kSimd128Size / sizeof(T);
for (int i = 0; i < n; i++) {
- WriteUnalignedValue<T>(
- data + (i * sizeof(T)),
- float_round_op(ReadUnalignedValue<T>(data + (i * sizeof(T)))));
+ T input = ReadUnalignedValue<T>(data + (i * sizeof(T)));
+ T value = float_round_op(input);
+#if V8_OS_AIX
+ value = FpOpWorkaround<T>(input, value);
+#endif
+ WriteUnalignedValue<T>(data + (i * sizeof(T)), value);
}
}
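
All eight conversion wrappers above collapse onto base::IsValueInRangeForNumericType. The subtlety the deleted comments described survives inside that helper: static_cast<float>(INT64_MAX) rounds up to exactly 2^63, which is itself out of range, so the upper bound must be checked with "<" rather than "<=". A simplified sketch of the float-to-int64 case (an assumption about the helper's behavior, not its actual implementation):

    #include <cstdint>
    #include <limits>

    bool FloatFitsInt64(float input) {
      // INT64_MIN (-2^63) is exactly representable as a float, so ">=" is
      // correct at the bottom. INT64_MAX rounds up to 2^63 as a float, so
      // "<" is required at the top; "<=" would wrongly admit 2^63.
      return input >=
                 static_cast<float>(std::numeric_limits<int64_t>::min()) &&
             input < static_cast<float>(std::numeric_limits<int64_t>::max());
    }
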
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 16f4c5d3f9..4edd23eecf 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -10,25 +10,31 @@
#include "src/api/api-inl.h"
#include "src/api/api-natives.h"
#include "src/ast/ast.h"
+#include "src/base/logging.h"
#include "src/base/overflowing-math.h"
#include "src/common/assert-scope.h"
#include "src/execution/execution.h"
+#include "src/execution/frames-inl.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/init/v8.h"
+#include "src/objects/js-collection-inl.h"
#include "src/objects/js-promise-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/templates.h"
#include "src/parsing/parse-info.h"
#include "src/tasks/task-utils.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/function-compiler.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/value-type.h"
+#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-serialization.h"
+#include "src/wasm/wasm-value.h"
using v8::internal::wasm::ErrorThrower;
using v8::internal::wasm::ScheduledErrorThrower;
@@ -102,7 +108,7 @@ WasmStreaming::WasmStreaming(std::unique_ptr<WasmStreamingImpl> impl)
WasmStreaming::~WasmStreaming() = default;
void WasmStreaming::OnBytesReceived(const uint8_t* bytes, size_t size) {
- TRACE_EVENT1("v8.wasm", "wasm.OnBytesReceived", "num_bytes", size);
+ TRACE_EVENT1("v8.wasm", "wasm.OnBytesReceived", "bytes", size);
impl_->OnBytesReceived(bytes, size);
}
@@ -1581,7 +1587,7 @@ constexpr const char* kName_WasmTableObject = "WebAssembly.Table";
}
void WebAssemblyInstanceGetExports(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
@@ -2020,9 +2026,9 @@ Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
}
Handle<JSFunction> InstallConstructorFunc(Isolate* isolate,
- Handle<JSObject> object,
- const char* str,
- FunctionCallback func) {
+ Handle<JSObject> object,
+ const char* str,
+ FunctionCallback func) {
return InstallFunc(isolate, object, str, func, 1, true, DONT_ENUM);
}
@@ -2281,6 +2287,775 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
runtime_error, DONT_ENUM);
}
+namespace {
+void SetMapValue(Isolate* isolate, Handle<JSMap> map, Handle<Object> key,
+ Handle<Object> value) {
+ DCHECK(!map.is_null() && !key.is_null() && !value.is_null());
+ Handle<Object> argv[] = {key, value};
+ Execution::CallBuiltin(isolate, isolate->map_set(), map, arraysize(argv),
+ argv)
+ .Check();
+}
+
+Handle<Object> GetMapValue(Isolate* isolate, Handle<JSMap> map,
+ Handle<Object> key) {
+ DCHECK(!map.is_null() && !key.is_null());
+ Handle<Object> argv[] = {key};
+ return Execution::CallBuiltin(isolate, isolate->map_get(), map,
+ arraysize(argv), argv)
+ .ToHandleChecked();
+}
+
+// Look up a name in a name table. Name tables are stored under the "names"
+// property of the handler and map names to indices.
+base::Optional<int> ResolveValueSelector(Isolate* isolate,
+ Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ size_t index = 0;
+ if (enable_index_lookup && property->AsIntegerIndex(&index) &&
+ index < kMaxInt) {
+ return static_cast<int>(index);
+ }
+
+ Handle<Object> name_table =
+ JSObject::GetProperty(isolate, handler, "names").ToHandleChecked();
+ DCHECK(name_table->IsJSMap());
+
+ Handle<Object> object =
+ GetMapValue(isolate, Handle<JSMap>::cast(name_table), property);
+ if (object->IsUndefined()) return {};
+ DCHECK(object->IsNumeric());
+ return NumberToInt32(*object);
+}
+
+// Helper for unpacking a maybe name: generates a default name with an index
+// if the name is empty; otherwise the name is prefixed with a $.
+Handle<String> GetNameOrDefault(Isolate* isolate,
+ MaybeHandle<String> maybe_name,
+ const char* default_name_prefix, int index) {
+ Handle<String> name;
+ if (maybe_name.ToHandle(&name)) {
+ return isolate->factory()
+ ->NewConsString(isolate->factory()->NewStringFromAsciiChecked("$"),
+ name)
+ .ToHandleChecked();
+ }
+
+ // Maximum length of the default names: $memory-2147483648\0
+ static constexpr int kMaxStrLen = 19;
+ EmbeddedVector<char, kMaxStrLen> value;
+ DCHECK_LT(strlen(default_name_prefix) + /*strlen(kMinInt)*/ 11, kMaxStrLen);
+ int len = SNPrintF(value, "%s%d", default_name_prefix, index);
+ return isolate->factory()->InternalizeString(value.SubVector(0, len));
+}
+
+// Generate names for the locals. Names come from the name table if present;
+// otherwise the default $varX is used.
+std::vector<Handle<String>> GetLocalNames(Handle<WasmInstanceObject> instance,
+ Address pc) {
+ wasm::NativeModule* native_module = instance->module_object().native_module();
+ wasm::DebugInfo* debug_info = native_module->GetDebugInfo();
+ int num_locals = debug_info->GetNumLocals(pc);
+ auto* isolate = instance->GetIsolate();
+
+ wasm::ModuleWireBytes module_wire_bytes(
+ instance->module_object().native_module()->wire_bytes());
+ const wasm::WasmFunction& function = debug_info->GetFunctionAtAddress(pc);
+
+ std::vector<Handle<String>> names;
+ for (int i = 0; i < num_locals; ++i) {
+ wasm::WireBytesRef local_name_ref =
+ debug_info->GetLocalName(function.func_index, i);
+ DCHECK(module_wire_bytes.BoundsCheck(local_name_ref));
+ Vector<const char> name_vec =
+ module_wire_bytes.GetNameOrNull(local_name_ref);
+ names.emplace_back(GetNameOrDefault(
+ isolate,
+ name_vec.empty() ? MaybeHandle<String>()
+ : isolate->factory()->NewStringFromUtf8(name_vec),
+ "$var", i));
+ }
+
+ return names;
+}
+
+// Generate names for the globals. Names come from the name table if present;
+// otherwise the default $globalX is used.
+std::vector<Handle<String>> GetGlobalNames(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ auto& globals = instance->module()->globals;
+ std::vector<Handle<String>> names;
+ for (uint32_t i = 0; i < globals.size(); ++i) {
+ names.emplace_back(GetNameOrDefault(
+ isolate, WasmInstanceObject::GetGlobalNameOrNull(isolate, instance, i),
+ "$global", i));
+ }
+ return names;
+}
+
+// Generate names for the functions.
+std::vector<Handle<String>> GetFunctionNames(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ auto* module = instance->module();
+
+ wasm::ModuleWireBytes wire_bytes(
+ instance->module_object().native_module()->wire_bytes());
+
+ std::vector<Handle<String>> names;
+ for (auto& function : module->functions) {
+ DCHECK_EQ(function.func_index, names.size());
+ wasm::WireBytesRef name_ref =
+ module->lazily_generated_names.LookupFunctionName(
+ wire_bytes, function.func_index, VectorOf(module->export_table));
+ DCHECK(wire_bytes.BoundsCheck(name_ref));
+ Vector<const char> name_vec = wire_bytes.GetNameOrNull(name_ref);
+ names.emplace_back(GetNameOrDefault(
+ isolate,
+ name_vec.empty() ? MaybeHandle<String>()
+ : isolate->factory()->NewStringFromUtf8(name_vec),
+ "$func", function.func_index));
+ }
+
+ return names;
+}
+
+// Generate names for the imports.
+std::vector<Handle<String>> GetImportNames(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ const wasm::WasmModule* module = instance->module();
+ Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
+ int num_imports = static_cast<int>(module->import_table.size());
+
+ std::vector<Handle<String>> names;
+ for (int index = 0; index < num_imports; ++index) {
+ const wasm::WasmImport& import = module->import_table[index];
+
+ names.emplace_back(WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate, module_object, import.field_name, kInternalize));
+ }
+
+ return names;
+}
+
+// Generate names for the memories.
+std::vector<Handle<String>> GetMemoryNames(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+
+ std::vector<Handle<String>> names;
+ uint32_t memory_count = instance->has_memory_object() ? 1 : 0;
+ for (uint32_t memory_index = 0; memory_index < memory_count; ++memory_index) {
+ names.emplace_back(GetNameOrDefault(isolate,
+ WasmInstanceObject::GetMemoryNameOrNull(
+ isolate, instance, memory_index),
+ "$memory", memory_index));
+ }
+
+ return names;
+}
+
+// Generate names for the tables.
+std::vector<Handle<String>> GetTableNames(Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ auto tables = handle(instance->tables(), isolate);
+
+ std::vector<Handle<String>> names;
+ for (int table_index = 0; table_index < tables->length(); ++table_index) {
+ auto func_table =
+ handle(WasmTableObject::cast(tables->get(table_index)), isolate);
+ if (!func_table->type().is_reference_to(wasm::HeapType::kFunc)) continue;
+
+ names.emplace_back(GetNameOrDefault(
+ isolate,
+ WasmInstanceObject::GetTableNameOrNull(isolate, instance, table_index),
+ "$table", table_index));
+ }
+ return names;
+}
+
+// Generate names for the exports
+std::vector<Handle<String>> GetExportNames(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ const wasm::WasmModule* module = instance->module();
+ Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
+ int num_exports = static_cast<int>(module->export_table.size());
+
+ std::vector<Handle<String>> names;
+
+ for (int index = 0; index < num_exports; ++index) {
+ const wasm::WasmExport& exp = module->export_table[index];
+
+ names.emplace_back(WasmModuleObject::ExtractUtf8StringFromModuleBytes(
+ isolate, module_object, exp.name, kInternalize));
+ }
+ return names;
+}
+
+Handle<WasmInstanceObject> GetInstance(Isolate* isolate,
+ Handle<JSObject> handler) {
+ Handle<Object> instance =
+ JSObject::GetProperty(isolate, handler, "instance").ToHandleChecked();
+ DCHECK(instance->IsWasmInstanceObject());
+ return Handle<WasmInstanceObject>::cast(instance);
+}
+
+Address GetPC(Isolate* isolate, Handle<JSObject> handler) {
+ Handle<Object> pc =
+ JSObject::GetProperty(isolate, handler, "pc").ToHandleChecked();
+ DCHECK(pc->IsBigInt());
+ return Handle<BigInt>::cast(pc)->AsUint64();
+}
+
+Address GetFP(Isolate* isolate, Handle<JSObject> handler) {
+ Handle<Object> fp =
+ JSObject::GetProperty(isolate, handler, "fp").ToHandleChecked();
+ DCHECK(fp->IsBigInt());
+ return Handle<BigInt>::cast(fp)->AsUint64();
+}
+
+Address GetCalleeFP(Isolate* isolate, Handle<JSObject> handler) {
+ Handle<Object> callee_fp =
+ JSObject::GetProperty(isolate, handler, "callee_fp").ToHandleChecked();
+ DCHECK(callee_fp->IsBigInt());
+ return Handle<BigInt>::cast(callee_fp)->AsUint64();
+}
+
+// Convert a WasmValue to an appropriate JS representation.
+static Handle<Object> WasmValueToObject(Isolate* isolate,
+ wasm::WasmValue value) {
+ auto* factory = isolate->factory();
+ switch (value.type().kind()) {
+ case wasm::ValueType::kI32:
+ return factory->NewNumberFromInt(value.to_i32());
+ case wasm::ValueType::kI64:
+ return BigInt::FromInt64(isolate, value.to_i64());
+ case wasm::ValueType::kF32:
+ return factory->NewNumber(value.to_f32());
+ case wasm::ValueType::kF64:
+ return factory->NewNumber(value.to_f64());
+ case wasm::ValueType::kS128: {
+ wasm::Simd128 s128 = value.to_s128();
+ Handle<JSArrayBuffer> buffer;
+ if (!isolate->factory()
+ ->NewJSArrayBufferAndBackingStore(
+ kSimd128Size, InitializedFlag::kUninitialized)
+ .ToHandle(&buffer)) {
+ isolate->FatalProcessOutOfHeapMemory(
+ "failed to allocate backing store");
+ }
+
+ memcpy(buffer->allocation_base(), s128.bytes(), buffer->byte_length());
+ return isolate->factory()->NewJSTypedArray(kExternalUint8Array, buffer, 0,
+ buffer->byte_length());
+ }
+ case wasm::ValueType::kRef:
+ return value.to_externref();
+ default:
+ break;
+ }
+ return factory->undefined_value();
+}
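+
+// For example (illustrative): converting an i32 value such as
+// wasm::WasmValue(int32_t{7}) yields the JS number 7; an i64 value yields a
+// BigInt; an s128 value is exposed as a 16-byte Uint8Array over a fresh
+// ArrayBuffer; kRef values pass through as externref; anything else maps to
+// undefined.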
+
+base::Optional<int> HasLocalImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+ Address pc = GetPC(isolate, handler);
+
+ wasm::DebugInfo* debug_info =
+ instance->module_object().native_module()->GetDebugInfo();
+ int num_locals = debug_info->GetNumLocals(pc);
+ if (0 <= index && index < num_locals) return index;
+ return {};
+}
+
+Handle<Object> GetLocalImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Factory* factory = isolate->factory();
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+
+ base::Optional<int> index =
+ HasLocalImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return factory->undefined_value();
+ Address pc = GetPC(isolate, handler);
+ Address fp = GetFP(isolate, handler);
+ Address callee_fp = GetCalleeFP(isolate, handler);
+
+ wasm::DebugInfo* debug_info =
+ instance->module_object().native_module()->GetDebugInfo();
+ wasm::WasmValue value = debug_info->GetLocalValue(*index, pc, fp, callee_fp);
+ return WasmValueToObject(isolate, value);
+}
+
+base::Optional<int> HasGlobalImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+
+ const std::vector<wasm::WasmGlobal>& globals = instance->module()->globals;
+ if (globals.size() <= kMaxInt && 0 <= *index &&
+ *index < static_cast<int>(globals.size())) {
+ return index;
+ }
+ return {};
+}
+
+Handle<Object> GetGlobalImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasGlobalImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return isolate->factory()->undefined_value();
+
+ const std::vector<wasm::WasmGlobal>& globals = instance->module()->globals;
+ return WasmValueToObject(
+ isolate, WasmInstanceObject::GetGlobalValue(instance, globals[*index]));
+}
+
+base::Optional<int> HasMemoryImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (index && *index == 0 && instance->has_memory_object()) return index;
+ return {};
+}
+
+Handle<Object> GetMemoryImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasMemoryImpl(isolate, property, handler, enable_index_lookup);
+ if (index) return handle(instance->memory_object(), isolate);
+ return isolate->factory()->undefined_value();
+}
+
+base::Optional<int> HasFunctionImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+ const std::vector<wasm::WasmFunction>& functions =
+ instance->module()->functions;
+ if (functions.size() <= kMaxInt && 0 <= *index &&
+ *index < static_cast<int>(functions.size())) {
+ return index;
+ }
+ return {};
+}
+
+Handle<Object> GetFunctionImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasFunctionImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return isolate->factory()->undefined_value();
+
+ return WasmInstanceObject::GetOrCreateWasmExternalFunction(isolate, instance,
+ *index);
+}
+
+base::Optional<int> HasTableImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+ Handle<FixedArray> tables(instance->tables(), isolate);
+ int num_tables = tables->length();
+ if (*index < 0 || *index >= num_tables) return {};
+
+ Handle<WasmTableObject> func_table(WasmTableObject::cast(tables->get(*index)),
+ isolate);
+ if (func_table->type().is_reference_to(wasm::HeapType::kFunc)) return index;
+ return {};
+}
+
+Handle<Object> GetTableImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasTableImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return isolate->factory()->undefined_value();
+
+ Handle<WasmTableObject> func_table(
+ WasmTableObject::cast(instance->tables().get(*index)), isolate);
+ return func_table;
+}
+
+base::Optional<int> HasImportImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+ const wasm::WasmModule* module = instance->module();
+ Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
+ int num_imports = static_cast<int>(module->import_table.size());
+ if (0 <= *index && *index < num_imports) return index;
+ return {};
+}
+
+Handle<JSObject> GetExternalObject(Isolate* isolate,
+ wasm::ImportExportKindCode kind,
+ uint32_t index) {
+ Handle<JSObject> result = isolate->factory()->NewJSObjectWithNullProto();
+ Handle<Object> value = isolate->factory()->NewNumberFromUint(index);
+ switch (kind) {
+ case wasm::kExternalFunction:
+ JSObject::AddProperty(isolate, result, "func", value, NONE);
+ break;
+ case wasm::kExternalGlobal:
+ JSObject::AddProperty(isolate, result, "global", value, NONE);
+ break;
+ case wasm::kExternalTable:
+ JSObject::AddProperty(isolate, result, "table", value, NONE);
+ break;
+ case wasm::kExternalMemory:
+ JSObject::AddProperty(isolate, result, "mem", value, NONE);
+ break;
+ case wasm::kExternalException:
+ JSObject::AddProperty(isolate, result, "exn", value, NONE);
+ break;
+ }
+ return result;
+}
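+
+// For example, GetExternalObject(isolate, wasm::kExternalGlobal, 2) yields a
+// null-prototype object equivalent to {global: 2}; memories use the key
+// "mem" and exceptions use "exn", matching the interface described below.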
+
+Handle<Object> GetImportImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasImportImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return isolate->factory()->undefined_value();
+
+ const wasm::WasmImport& imp = instance->module()->import_table[*index];
+ return GetExternalObject(isolate, imp.kind, imp.index);
+}
+
+base::Optional<int> HasExportImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ ResolveValueSelector(isolate, property, handler, enable_index_lookup);
+ if (!index) return index;
+
+ const wasm::WasmModule* module = instance->module();
+ Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
+ int num_exports = static_cast<int>(module->export_table.size());
+ if (0 <= *index && *index < num_exports) return index;
+ return {};
+}
+
+Handle<Object> GetExportImpl(Isolate* isolate, Handle<Name> property,
+ Handle<JSObject> handler,
+ bool enable_index_lookup) {
+ Handle<WasmInstanceObject> instance = GetInstance(isolate, handler);
+ base::Optional<int> index =
+ HasExportImpl(isolate, property, handler, enable_index_lookup);
+ if (!index) return isolate->factory()->undefined_value();
+
+ const wasm::WasmExport& exp = instance->module()->export_table[*index];
+ return GetExternalObject(isolate, exp.kind, exp.index);
+}
+
+// Generic has trap callback for the index space proxies.
+template <base::Optional<int> Impl(Isolate*, Handle<Name>, Handle<JSObject>,
+ bool)>
+void HasTrapCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DCHECK_GE(args.Length(), 2);
+ Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
+ DCHECK(args.This()->IsObject());
+ Handle<JSObject> handler =
+ Handle<JSObject>::cast(Utils::OpenHandle(*args.This()));
+
+ DCHECK(args[1]->IsName());
+ Handle<Name> property = Handle<Name>::cast(Utils::OpenHandle(*args[1]));
+ args.GetReturnValue().Set(Impl(isolate, property, handler, true).has_value());
+}
+
+// Generic get trap callback for the index space proxies.
+template <Handle<Object> Impl(Isolate*, Handle<Name>, Handle<JSObject>, bool)>
+void GetTrapCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DCHECK_GE(args.Length(), 2);
+ Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
+ DCHECK(args.This()->IsObject());
+ Handle<JSObject> handler =
+ Handle<JSObject>::cast(Utils::OpenHandle(*args.This()));
+
+ DCHECK(args[1]->IsName());
+ Handle<Name> property = Handle<Name>::cast(Utils::OpenHandle(*args[1]));
+ args.GetReturnValue().Set(
+ Utils::ToLocal(Impl(isolate, property, handler, true)));
+}
+
+template <typename ReturnT>
+ReturnT DelegateToplevelCall(Isolate* isolate, Handle<JSObject> target,
+ Handle<Name> property, const char* index_space,
+ ReturnT (*impl)(Isolate*, Handle<Name>,
+ Handle<JSObject>, bool)) {
+ Handle<Object> namespace_proxy =
+ JSObject::GetProperty(isolate, target, index_space).ToHandleChecked();
+ DCHECK(namespace_proxy->IsJSProxy());
+ Handle<JSObject> namespace_handler(
+ JSObject::cast(Handle<JSProxy>::cast(namespace_proxy)->handler()),
+ isolate);
+ return impl(isolate, property, namespace_handler, false);
+}
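+
+// For example, DelegateToplevelCall(isolate, target, property, "memories",
+// HasMemoryImpl) fetches the "memories" proxy from the target and asks its
+// handler whether {property} names a memory; index lookup is disabled, so
+// plain indices only resolve inside the index space proxies themselves.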
+
+template <typename ReturnT>
+using DelegateCallback = ReturnT (*)(Isolate*, Handle<Name>, Handle<JSObject>,
+ bool);
+
+// Has trap callback for the top-level proxy.
+void ToplevelHasTrapCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DCHECK_GE(args.Length(), 2);
+ Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
+ DCHECK(args[0]->IsObject());
+ Handle<JSObject> target = Handle<JSObject>::cast(Utils::OpenHandle(*args[0]));
+
+ DCHECK(args[1]->IsName());
+ Handle<Name> property = Handle<Name>::cast(Utils::OpenHandle(*args[1]));
+
+ // First check if the property exists on the target.
+ if (JSObject::HasProperty(target, property).FromMaybe(false)) {
+ args.GetReturnValue().Set(true);
+ return;
+ }
+
+ // Now check, in order, whether any of the index space proxies knows the
+ // property.
+ constexpr std::pair<const char*, DelegateCallback<base::Optional<int>>>
+ kDelegates[] = {{"memories", HasMemoryImpl},
+ {"locals", HasLocalImpl},
+ {"tables", HasTableImpl},
+ {"functions", HasFunctionImpl},
+ {"globals", HasGlobalImpl}};
+ for (auto& delegate : kDelegates) {
+ if (DelegateToplevelCall(isolate, target, property, delegate.first,
+ delegate.second)) {
+ args.GetReturnValue().Set(true);
+ return;
+ }
+ }
+ args.GetReturnValue().Set(false);
+}
+
+// Get trap callback for the top-level proxy.
+void ToplevelGetTrapCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DCHECK_GE(args.Length(), 2);
+ Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
+ DCHECK(args[0]->IsObject());
+ Handle<JSObject> target = Handle<JSObject>::cast(Utils::OpenHandle(*args[0]));
+
+ DCHECK(args[1]->IsName());
+ Handle<Name> property = Handle<Name>::cast(Utils::OpenHandle(*args[1]));
+
+ // First, check whether the property is an own property of the target. If
+ // so, return its value.
+ Handle<Object> value =
+ JSObject::GetProperty(isolate, target, property).ToHandleChecked();
+ if (!value->IsUndefined()) {
+ args.GetReturnValue().Set(Utils::ToLocal(value));
+ return;
+ }
+
+ // Try the index space proxies in the correct disambiguation order.
+ constexpr std::pair<const char*, DelegateCallback<Handle<Object>>>
+ kDelegates[] = {{"memories", GetMemoryImpl},
+ {"locals", GetLocalImpl},
+ {"tables", GetTableImpl},
+ {"functions", GetFunctionImpl},
+ {"globals", GetGlobalImpl}};
+ for (auto& delegate : kDelegates) {
+ value = DelegateToplevelCall(isolate, target, property, delegate.first,
+ delegate.second);
+ if (!value->IsUndefined()) {
+ args.GetReturnValue().Set(Utils::ToLocal(value));
+ return;
+ }
+ }
+}
+
+// Populate a JSMap with name->index mappings from an ordered list of names.
+Handle<JSMap> GetNameTable(Isolate* isolate,
+ const std::vector<Handle<String>>& names) {
+ Factory* factory = isolate->factory();
+ Handle<JSMap> name_table = factory->NewJSMap();
+
+ for (size_t i = 0; i < names.size(); ++i) {
+ SetMapValue(isolate, name_table, names[i], factory->NewNumberFromInt64(i));
+ }
+ return name_table;
+}
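+
+// For example, the ordered names {"$foo", "$bar"} produce a JSMap equivalent
+// to new Map([["$foo", 0], ["$bar", 1]]).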
+
+// Produce a JSProxy with a given name table and get and has trap handlers.
+Handle<JSProxy> GetJSProxy(
+ WasmFrame* frame, Handle<JSMap> name_table,
+ void (*get_callback)(const v8::FunctionCallbackInfo<v8::Value>&),
+ void (*has_callback)(const v8::FunctionCallbackInfo<v8::Value>&)) {
+ Isolate* isolate = frame->isolate();
+ Factory* factory = isolate->factory();
+ Handle<JSObject> target = factory->NewJSObjectWithNullProto();
+ Handle<JSObject> handler = factory->NewJSObjectWithNullProto();
+
+ // Besides the name table, the get and has traps need access to the instance
+ // and frame information.
+ JSObject::AddProperty(isolate, handler, "names", name_table, DONT_ENUM);
+ Handle<WasmInstanceObject> instance(frame->wasm_instance(), isolate);
+ JSObject::AddProperty(isolate, handler, "instance", instance, DONT_ENUM);
+ Handle<BigInt> pc = BigInt::FromInt64(isolate, frame->pc());
+ JSObject::AddProperty(isolate, handler, "pc", pc, DONT_ENUM);
+ Handle<BigInt> fp = BigInt::FromInt64(isolate, frame->fp());
+ JSObject::AddProperty(isolate, handler, "fp", fp, DONT_ENUM);
+ Handle<BigInt> callee_fp = BigInt::FromInt64(isolate, frame->callee_fp());
+ JSObject::AddProperty(isolate, handler, "callee_fp", callee_fp, DONT_ENUM);
+
+ InstallFunc(isolate, handler, "get", get_callback, 3, false, READ_ONLY);
+ InstallFunc(isolate, handler, "has", has_callback, 2, false, READ_ONLY);
+
+ return factory->NewJSProxy(target, handler);
+}
+
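+// Produce an object describing the current operand stack, mapping each slot
+// index to its converted value; for a stack of depth two this is equivalent
+// to {0: <value 0>, 1: <value 1>}.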
+Handle<JSObject> GetStackObject(WasmFrame* frame) {
+ Isolate* isolate = frame->isolate();
+ Handle<JSObject> object = isolate->factory()->NewJSObjectWithNullProto();
+ wasm::DebugInfo* debug_info =
+ frame->wasm_instance().module_object().native_module()->GetDebugInfo();
+ int num_values = debug_info->GetStackDepth(frame->pc());
+ for (int i = 0; i < num_values; ++i) {
+ wasm::WasmValue value = debug_info->GetStackValue(
+ i, frame->pc(), frame->fp(), frame->callee_fp());
+ JSObject::AddDataElement(object, i, WasmValueToObject(isolate, value),
+ NONE);
+ }
+ return object;
+}
+} // namespace
+
+// This function generates the JS debug proxy for a given Wasm frame. The debug
+// proxy is used when evaluating debug JS expressions on a wasm frame and lets
+// the developer inspect the engine state from JS. The proxy provides the
+// following interface:
+//
+// type WasmSimdValue = Uint8Array;
+// type WasmValue = number | bigint | object | WasmSimdValue;
+// type WasmFunction = (...args : WasmValue[]) => WasmValue;
+// type WasmExport = {name : string} & ({func : number} | {table : number} |
+// {mem : number} | {global : number});
+// type WasmImport = {name : string, module : string} &
+// ({func : number} | {table : number} | {mem : number} |
+// {global : number});
+// interface WasmInterface {
+// $globalX: WasmValue;
+// $varX: WasmValue;
+// $funcX(a : WasmValue /*, ...*/) : WasmValue;
+// readonly $memoryX : WebAssembly.Memory;
+// readonly $tableX : WebAssembly.Table;
+// readonly memories : {[nameOrIndex:string | number] : WebAssembly.Memory};
+// readonly tables : {[nameOrIndex:string | number] : WebAssembly.Table};
+// readonly stack : WasmValue[];
+// readonly imports : {[nameOrIndex:string | number] : WasmImport};
+// readonly exports : {[nameOrIndex:string | number] : WasmExport};
+// readonly globals : {[nameOrIndex:string | number] : WasmValue};
+// readonly locals : {[nameOrIndex:string | number] : WasmValue};
+// readonly functions : {[nameOrIndex:string | number] : WasmFunction};
+// }
+//
+// The wasm index spaces memories, tables, imports, exports, globals, locals,
+// and functions are JSProxies that lazily produce values either by index or
+// by name. A top-level JSProxy is wrapped around those for top-level lookup
+// of names in the disambiguation order memory, local, table, function,
+// global. Import and export names are not globally resolved.
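+//
+// For example, a debug evaluation on a paused frame might use:
+//   $func0(1, 2)   // call function 0 by its generated name
+//   locals[0]      // the first local, also reachable as $var0
+//   memories[0]    // the WebAssembly.Memory, if the instance defines one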
+
+Handle<JSProxy> WasmJs::GetJSDebugProxy(WasmFrame* frame) {
+ Isolate* isolate = frame->isolate();
+ Factory* factory = isolate->factory();
+ Handle<WasmInstanceObject> instance(frame->wasm_instance(), isolate);
+
+ // The top level proxy delegates lookups to the index space proxies.
+ Handle<JSObject> handler = factory->NewJSObjectWithNullProto();
+ InstallFunc(isolate, handler, "get", ToplevelGetTrapCallback, 3, false,
+ READ_ONLY);
+ InstallFunc(isolate, handler, "has", ToplevelHasTrapCallback, 2, false,
+ READ_ONLY);
+
+ Handle<JSObject> target = factory->NewJSObjectWithNullProto();
+
+ // Generate JSMaps per index space for name->index lookup. Every index space
+ // proxy is associated with its table for local name lookup.
+
+ auto local_name_table =
+ GetNameTable(isolate, GetLocalNames(instance, frame->pc()));
+ auto locals =
+ GetJSProxy(frame, local_name_table, GetTrapCallback<GetLocalImpl>,
+ HasTrapCallback<HasLocalImpl>);
+ JSObject::AddProperty(isolate, target, "locals", locals, READ_ONLY);
+
+ auto global_name_table = GetNameTable(isolate, GetGlobalNames(instance));
+ auto globals =
+ GetJSProxy(frame, global_name_table, GetTrapCallback<GetGlobalImpl>,
+ HasTrapCallback<HasGlobalImpl>);
+ JSObject::AddProperty(isolate, target, "globals", globals, READ_ONLY);
+
+ auto function_name_table = GetNameTable(isolate, GetFunctionNames(instance));
+ auto functions =
+ GetJSProxy(frame, function_name_table, GetTrapCallback<GetFunctionImpl>,
+ HasTrapCallback<HasFunctionImpl>);
+ JSObject::AddProperty(isolate, target, "functions", functions, READ_ONLY);
+
+ auto memory_name_table = GetNameTable(isolate, GetMemoryNames(instance));
+ auto memories =
+ GetJSProxy(frame, memory_name_table, GetTrapCallback<GetMemoryImpl>,
+ HasTrapCallback<HasMemoryImpl>);
+ JSObject::AddProperty(isolate, target, "memories", memories, READ_ONLY);
+
+ auto table_name_table = GetNameTable(isolate, GetTableNames(instance));
+ auto tables =
+ GetJSProxy(frame, table_name_table, GetTrapCallback<GetTableImpl>,
+ HasTrapCallback<HasTableImpl>);
+ JSObject::AddProperty(isolate, target, "tables", tables, READ_ONLY);
+
+ auto import_name_table = GetNameTable(isolate, GetImportNames(instance));
+ auto imports =
+ GetJSProxy(frame, import_name_table, GetTrapCallback<GetImportImpl>,
+ HasTrapCallback<HasImportImpl>);
+ JSObject::AddProperty(isolate, target, "imports", imports, READ_ONLY);
+
+ auto export_name_table = GetNameTable(isolate, GetExportNames(instance));
+ auto exports =
+ GetJSProxy(frame, export_name_table, GetTrapCallback<GetExportImpl>,
+ HasTrapCallback<HasExportImpl>);
+ JSObject::AddProperty(isolate, target, "exports", exports, READ_ONLY);
+
+ auto stack = GetStackObject(frame);
+ JSObject::AddProperty(isolate, target, "stack", stack, READ_ONLY);
+
+ return factory->NewJSProxy(target, handler);
+}
+
#undef ASSIGN
#undef EXTRACT_THIS
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
index 6f83ad6326..4c9ae9645b 100644
--- a/deps/v8/src/wasm/wasm-js.h
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -9,6 +9,8 @@
namespace v8 {
namespace internal {
+class JSProxy;
+class WasmFrame;
namespace wasm {
class StreamingDecoder;
@@ -19,6 +21,8 @@ class WasmJs {
public:
V8_EXPORT_PRIVATE static void Install(Isolate* isolate,
bool exposed_on_global_object);
+
+ V8_EXPORT_PRIVATE static Handle<JSProxy> GetJSDebugProxy(WasmFrame* frame);
};
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index de895e6429..f2e7d63f52 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -597,7 +597,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
size_t start = EmitSection(kTableSectionCode, buffer);
buffer->write_size(tables_.size());
for (const WasmTable& table : tables_) {
- buffer->write_u8(table.type.value_type_code());
+ WriteValueType(buffer, table.type);
buffer->write_u8(table.has_maximum ? kWithMaximum : kNoMaximum);
buffer->write_size(table.min_size);
if (table.has_maximum) buffer->write_size(table.max_size);
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index fdd64950df..7d6df375aa 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -236,6 +236,8 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
public:
explicit WasmModuleBuilder(Zone* zone);
+ WasmModuleBuilder(const WasmModuleBuilder&) = delete;
+ WasmModuleBuilder& operator=(const WasmModuleBuilder&) = delete;
// Building methods.
uint32_t AddImport(Vector<const char> name, FunctionSig* sig,
@@ -361,8 +363,6 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
// Indirect functions must be allocated before adding extra tables.
bool allocating_indirect_functions_allowed_ = true;
#endif
-
- DISALLOW_COPY_AND_ASSIGN(WasmModuleBuilder);
};
inline FunctionSig* WasmFunctionBuilder::signature() {
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 3d935f27be..afe192a3d3 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -137,7 +137,7 @@ void LazilyGeneratedNames::AddForTesting(int function_index,
AsmJsOffsetInformation::AsmJsOffsetInformation(
Vector<const byte> encoded_offsets)
- : encoded_offsets_(OwnedVector<uint8_t>::Of(encoded_offsets)) {}
+ : encoded_offsets_(OwnedVector<const uint8_t>::Of(encoded_offsets)) {}
AsmJsOffsetInformation::~AsmJsOffsetInformation() = default;
@@ -618,10 +618,11 @@ size_t EstimateStoredSize(const WasmModule* module) {
(module->signature_zone ? module->signature_zone->allocation_size()
: 0) +
VectorSize(module->types) + VectorSize(module->type_kinds) +
- VectorSize(module->signature_ids) + VectorSize(module->functions) +
- VectorSize(module->data_segments) + VectorSize(module->tables) +
- VectorSize(module->import_table) + VectorSize(module->export_table) +
- VectorSize(module->exceptions) + VectorSize(module->elem_segments);
+ VectorSize(module->canonicalized_type_ids) +
+ VectorSize(module->functions) + VectorSize(module->data_segments) +
+ VectorSize(module->tables) + VectorSize(module->import_table) +
+ VectorSize(module->export_table) + VectorSize(module->exceptions) +
+ VectorSize(module->elem_segments);
}
size_t PrintSignature(Vector<char> buffer, const wasm::FunctionSig* sig,
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 2ffc92e390..9c54f17b9c 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -266,6 +266,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
uint32_t maximum_pages = 0; // maximum size of the memory in 64k pages
bool has_shared_memory = false; // true if memory is a SharedArrayBuffer
bool has_maximum_pages = false; // true if there is a maximum memory size
+ bool is_memory64 = false; // true if the memory is 64 bit
bool has_memory = false; // true if the memory was defined or imported
bool mem_export = false; // true if the memory is exported
int start_function_index = -1; // start function, >= 0 if any
@@ -283,9 +284,12 @@ struct V8_EXPORT_PRIVATE WasmModule {
uint32_t num_declared_data_segments = 0; // From the DataCount section.
WireBytesRef code = {0, 0};
WireBytesRef name = {0, 0};
- std::vector<TypeDefinition> types; // by type index
- std::vector<uint8_t> type_kinds; // by type index
- std::vector<uint32_t> signature_ids; // by signature index
+ std::vector<TypeDefinition> types; // by type index
+ std::vector<uint8_t> type_kinds; // by type index
+ // Map from each type index to the index of its corresponding canonical type.
+ // Note: right now, only function types are canonicalized; structs and
+ // arrays get the placeholder entry 0 (see add_struct_type and
+ // add_array_type below).
+ std::vector<uint32_t> canonicalized_type_ids;
bool has_type(uint32_t index) const { return index < types.size(); }
@@ -293,37 +297,43 @@ struct V8_EXPORT_PRIVATE WasmModule {
types.push_back(TypeDefinition(sig));
type_kinds.push_back(kWasmFunctionTypeCode);
uint32_t canonical_id = sig ? signature_map.FindOrInsert(*sig) : 0;
- signature_ids.push_back(canonical_id);
- }
- const FunctionSig* signature(uint32_t index) const {
- DCHECK(type_kinds[index] == kWasmFunctionTypeCode);
- return types[index].function_sig;
+ canonicalized_type_ids.push_back(canonical_id);
}
bool has_signature(uint32_t index) const {
return index < types.size() && type_kinds[index] == kWasmFunctionTypeCode;
}
+ const FunctionSig* signature(uint32_t index) const {
+ DCHECK(has_signature(index));
+ return types[index].function_sig;
+ }
+
void add_struct_type(const StructType* type) {
types.push_back(TypeDefinition(type));
type_kinds.push_back(kWasmStructTypeCode);
- }
- const StructType* struct_type(uint32_t index) const {
- DCHECK(type_kinds[index] == kWasmStructTypeCode);
- return types[index].struct_type;
+ // No canonicalization for structs.
+ canonicalized_type_ids.push_back(0);
}
bool has_struct(uint32_t index) const {
return index < types.size() && type_kinds[index] == kWasmStructTypeCode;
}
+ const StructType* struct_type(uint32_t index) const {
+ DCHECK(has_struct(index));
+ return types[index].struct_type;
+ }
+
void add_array_type(const ArrayType* type) {
types.push_back(TypeDefinition(type));
type_kinds.push_back(kWasmArrayTypeCode);
- }
- const ArrayType* array_type(uint32_t index) const {
- DCHECK(type_kinds[index] == kWasmArrayTypeCode);
- return types[index].array_type;
+ // No canonicalization for arrays.
+ canonicalized_type_ids.push_back(0);
}
bool has_array(uint32_t index) const {
return index < types.size() && type_kinds[index] == kWasmArrayTypeCode;
}
+ const ArrayType* array_type(uint32_t index) const {
+ DCHECK(has_array(index));
+ return types[index].array_type;
+ }
std::vector<WasmFunction> functions;
std::vector<WasmDataSegment> data_segments;
@@ -344,9 +354,8 @@ struct V8_EXPORT_PRIVATE WasmModule {
std::unique_ptr<AsmJsOffsetInformation> asm_js_offset_information;
explicit WasmModule(std::unique_ptr<Zone> signature_zone = nullptr);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(WasmModule);
+ WasmModule(const WasmModule&) = delete;
+ WasmModule& operator=(const WasmModule&) = delete;
};
// Static representation of a wasm indirect call table.
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 984c6d0f5b..744a16c855 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -28,6 +28,8 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/wasm/wasm-objects-tq-inl.inc"
+
OBJECT_CONSTRUCTORS_IMPL(WasmExceptionObject, JSObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmExceptionTag)
OBJECT_CONSTRUCTORS_IMPL(WasmExportedFunctionData, Struct)
@@ -58,7 +60,8 @@ CAST_ACCESSOR(WasmArray)
Object value = TaggedField<Object, offset>::load(isolate, *this); \
return !value.IsUndefined(GetReadOnlyRoots(isolate)); \
} \
- ACCESSORS(holder, name, type, offset)
+ ACCESSORS_CHECKED2(holder, name, type, offset, \
+ !value.IsUndefined(GetReadOnlyRoots(isolate)), true)
#define PRIMITIVE_ACCESSORS(holder, name, type, offset) \
type holder::name() const { \
@@ -329,11 +332,12 @@ ACCESSORS(WasmExportedFunctionData, instance, WasmInstanceObject,
SMI_ACCESSORS(WasmExportedFunctionData, jump_table_offset,
kJumpTableOffsetOffset)
SMI_ACCESSORS(WasmExportedFunctionData, function_index, kFunctionIndexOffset)
+ACCESSORS(WasmExportedFunctionData, signature, Foreign, kSignatureOffset)
+SMI_ACCESSORS(WasmExportedFunctionData, call_count, kCallCountOffset)
ACCESSORS(WasmExportedFunctionData, c_wrapper_code, Object, kCWrapperCodeOffset)
ACCESSORS(WasmExportedFunctionData, wasm_call_target, Object,
kWasmCallTargetOffset)
SMI_ACCESSORS(WasmExportedFunctionData, packed_args_size, kPackedArgsSizeOffset)
-ACCESSORS(WasmExportedFunctionData, signature, Foreign, kSignatureOffset)
// WasmJSFunction
WasmJSFunction::WasmJSFunction(Address ptr) : JSFunction(ptr) {
@@ -352,6 +356,8 @@ ACCESSORS(WasmJSFunctionData, serialized_signature, PodArray<wasm::ValueType>,
kSerializedSignatureOffset)
ACCESSORS(WasmJSFunctionData, callable, JSReceiver, kCallableOffset)
ACCESSORS(WasmJSFunctionData, wrapper_code, Code, kWrapperCodeOffset)
+ACCESSORS(WasmJSFunctionData, wasm_to_js_wrapper_code, Code,
+ kWasmToJsWrapperCodeOffset)
// WasmCapiFunction
WasmCapiFunction::WasmCapiFunction(Address ptr) : JSFunction(ptr) {
@@ -359,16 +365,6 @@ WasmCapiFunction::WasmCapiFunction(Address ptr) : JSFunction(ptr) {
}
CAST_ACCESSOR(WasmCapiFunction)
-// WasmCapiFunctionData
-OBJECT_CONSTRUCTORS_IMPL(WasmCapiFunctionData, Struct)
-CAST_ACCESSOR(WasmCapiFunctionData)
-PRIMITIVE_ACCESSORS(WasmCapiFunctionData, call_target, Address,
- kCallTargetOffset)
-ACCESSORS(WasmCapiFunctionData, embedder_data, Foreign, kEmbedderDataOffset)
-ACCESSORS(WasmCapiFunctionData, wrapper_code, Code, kWrapperCodeOffset)
-ACCESSORS(WasmCapiFunctionData, serialized_signature, PodArray<wasm::ValueType>,
- kSerializedSignatureOffset)
-
// WasmExternalFunction
WasmExternalFunction::WasmExternalFunction(Address ptr) : JSFunction(ptr) {
SLOW_DCHECK(IsWasmExternalFunction(*this));
@@ -451,6 +447,11 @@ int WasmArray::SizeFor(Map map, int length) {
}
void WasmTypeInfo::clear_foreign_address(Isolate* isolate) {
+#ifdef V8_HEAP_SANDBOX
+ // Due to the type-specific pointer tags for external pointers, we need to
+ // allocate an entry in the table here even though it will just store nullptr.
+ AllocateExternalPointerEntries(isolate);
+#endif
set_foreign_address(isolate, 0);
}
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index cf78ab5ff3..d06caef486 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -1508,10 +1508,15 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
callable = resolved.second; // Update to ultimate target.
DCHECK_NE(compiler::WasmImportCallKind::kLinkError, kind);
wasm::CompilationEnv env = native_module->CreateCompilationEnv();
- SharedFunctionInfo shared = js_function->shared();
+ // {expected_arity} should only be used if kind != kJSFunctionArityMismatch.
+ int expected_arity = -1;
+ if (kind == compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
+ expected_arity = Handle<JSFunction>::cast(callable)
+ ->shared()
+ .internal_formal_parameter_count();
+ }
wasm::WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
- isolate->wasm_engine(), &env, kind, sig, false,
- shared.internal_formal_parameter_count());
+ isolate->wasm_engine(), &env, kind, sig, false, expected_arity);
std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
result.func_index, result.code_desc, result.frame_slot_count,
result.tagged_parameter_slots,
@@ -1674,6 +1679,7 @@ Handle<WasmExceptionObject> WasmExceptionObject::New(
return exception;
}
+// TODO(9495): Update this if function type variance is introduced.
bool WasmExceptionObject::MatchesSignature(const wasm::FunctionSig* sig) {
DCHECK_EQ(0, sig->return_count());
DCHECK_LE(sig->parameter_count(), std::numeric_limits<int>::max());
@@ -1687,6 +1693,7 @@ bool WasmExceptionObject::MatchesSignature(const wasm::FunctionSig* sig) {
return true;
}
+// TODO(9495): Update this if function type variance is introduced.
bool WasmCapiFunction::MatchesSignature(const wasm::FunctionSig* sig) const {
// TODO(jkummerow): Unify with "SignatureHelper" in c-api.cc.
int param_count = static_cast<int>(sig->parameter_count());
@@ -1833,16 +1840,14 @@ bool WasmCapiFunction::IsWasmCapiFunction(Object object) {
Handle<WasmCapiFunction> WasmCapiFunction::New(
Isolate* isolate, Address call_target, Handle<Foreign> embedder_data,
Handle<PodArray<wasm::ValueType>> serialized_signature) {
- Handle<WasmCapiFunctionData> fun_data =
- Handle<WasmCapiFunctionData>::cast(isolate->factory()->NewStruct(
- WASM_CAPI_FUNCTION_DATA_TYPE, AllocationType::kOld));
- fun_data->set_call_target(call_target);
- fun_data->set_embedder_data(*embedder_data);
- fun_data->set_serialized_signature(*serialized_signature);
// TODO(jkummerow): Install a JavaScript wrapper. For now, calling
// these functions directly is unsupported; they can only be called
// from Wasm code.
- fun_data->set_wrapper_code(isolate->builtins()->builtin(Builtins::kIllegal));
+ Handle<WasmCapiFunctionData> fun_data =
+ isolate->factory()->NewWasmCapiFunctionData(
+ call_target, embedder_data,
+ isolate->builtins()->builtin_handle(Builtins::kIllegal),
+ serialized_signature, AllocationType::kOld);
Handle<SharedFunctionInfo> shared =
isolate->factory()->NewSharedFunctionInfoForWasmCapiFunction(fun_data);
return Handle<WasmCapiFunction>::cast(
@@ -1884,10 +1889,11 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
function_data->set_instance(*instance);
function_data->set_jump_table_offset(jump_table_offset);
function_data->set_function_index(func_index);
+ function_data->set_signature(*sig_foreign);
+ function_data->set_call_count(0);
function_data->set_c_wrapper_code(Smi::zero(), SKIP_WRITE_BARRIER);
function_data->set_wasm_call_target(Smi::zero(), SKIP_WRITE_BARRIER);
function_data->set_packed_args_size(0);
- function_data->set_signature(*sig_foreign);
MaybeHandle<String> maybe_name;
bool is_asm_js_module = instance->module_object().is_asm_js();
@@ -1948,6 +1954,23 @@ const wasm::FunctionSig* WasmExportedFunction::sig() {
return instance().module()->functions[function_index()].sig;
}
+bool WasmExportedFunction::MatchesSignature(
+ const WasmModule* other_module, const wasm::FunctionSig* other_sig) {
+ const wasm::FunctionSig* sig = this->sig();
+ if (sig->parameter_count() != other_sig->parameter_count() ||
+ sig->return_count() != other_sig->return_count()) {
+ return false;
+ }
+
+ for (size_t i = 0; i < sig->all().size(); i++) {
+ if (!wasm::EquivalentTypes(sig->all()[i], other_sig->all()[i],
+ this->instance().module(), other_module)) {
+ return false;
+ }
+ }
+ return true;
+}
+
// static
bool WasmJSFunction::IsWasmJSFunction(Object object) {
if (!object.IsJSFunction()) return false;
@@ -1955,8 +1978,6 @@ bool WasmJSFunction::IsWasmJSFunction(Object object) {
return js_function.shared().HasWasmJSFunctionData();
}
-// TODO(7748): WasmJSFunctions should compile/find and store an import wrapper
-// in case they are called from within wasm.
Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
const wasm::FunctionSig* sig,
Handle<JSReceiver> callable) {
@@ -1973,6 +1994,7 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
// signature instead of compiling a new one for every instantiation.
Handle<Code> wrapper_code =
compiler::CompileJSToJSWrapper(isolate, sig, nullptr).ToHandleChecked();
+
Handle<WasmJSFunctionData> function_data =
Handle<WasmJSFunctionData>::cast(isolate->factory()->NewStruct(
WASM_JS_FUNCTION_DATA_TYPE, AllocationType::kOld));
@@ -1981,6 +2003,30 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
function_data->set_serialized_signature(*serialized_sig);
function_data->set_callable(*callable);
function_data->set_wrapper_code(*wrapper_code);
+ // Use Abort() as a default value (it will never be called if not overwritten
+ // below).
+ function_data->set_wasm_to_js_wrapper_code(
+ isolate->heap()->builtin(Builtins::kAbort));
+
+ if (wasm::WasmFeatures::FromIsolate(isolate).has_typed_funcref()) {
+ using CK = compiler::WasmImportCallKind;
+ int expected_arity = parameter_count;
+ CK kind = compiler::kDefaultImportCallKind;
+ if (callable->IsJSFunction()) {
+ SharedFunctionInfo shared = Handle<JSFunction>::cast(callable)->shared();
+ expected_arity = shared.internal_formal_parameter_count();
+ if (expected_arity != parameter_count) {
+ kind = CK::kJSFunctionArityMismatch;
+ }
+ }
+ // TODO(wasm): Think about caching and sharing the wasm-to-JS wrappers per
+ // signature instead of compiling a new one for every instantiation.
+ Handle<Code> wasm_to_js_wrapper_code =
+ compiler::CompileWasmToJSWrapper(isolate, sig, kind, expected_arity)
+ .ToHandleChecked();
+ function_data->set_wasm_to_js_wrapper_code(*wasm_to_js_wrapper_code);
+ }
+
Handle<String> name = isolate->factory()->Function_string();
if (callable->IsJSFunction()) {
name = JSFunction::GetName(Handle<JSFunction>::cast(callable));
@@ -2012,6 +2058,7 @@ const wasm::FunctionSig* WasmJSFunction::GetSignature(Zone* zone) {
return zone->New<wasm::FunctionSig>(return_count, parameter_count, types);
}
+// TODO(9495): Update this if function type variance is introduced.
bool WasmJSFunction::MatchesSignature(const wasm::FunctionSig* sig) {
DCHECK_LE(sig->all().size(), kMaxInt);
int sig_size = static_cast<int>(sig->all().size());
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index d269c8df4f..dcef1aec8b 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -16,7 +16,6 @@
#include "src/objects/objects.h"
#include "src/wasm/struct-types.h"
#include "src/wasm/value-type.h"
-#include "torque-generated/class-definitions.h"
// Has to be the last include (doesn't have include guards)
#include "src/objects/object-macros.h"
@@ -44,13 +43,14 @@ class WasmExternalFunction;
class WasmInstanceObject;
class WasmJSFunction;
class WasmModuleObject;
-class WasmIndirectFunctionTable;
enum class SharedFlag : uint8_t;
template <class CppType>
class Managed;
+#include "torque-generated/src/wasm/wasm-objects-tq.inc"
+
#define DECL_OPTIONAL_ACCESSORS(name, type) \
DECL_GETTER(has_##name, bool) \
DECL_ACCESSORS(name, type)
@@ -666,6 +666,9 @@ class WasmExportedFunction : public JSFunction {
V8_EXPORT_PRIVATE const wasm::FunctionSig* sig();
+ bool MatchesSignature(const wasm::WasmModule* other_module,
+ const wasm::FunctionSig* other_sig);
+
DECL_CAST(WasmExportedFunction)
OBJECT_CONSTRUCTORS(WasmExportedFunction, JSFunction);
};
@@ -750,27 +753,6 @@ class WasmIndirectFunctionTable : public Struct {
OBJECT_CONSTRUCTORS(WasmIndirectFunctionTable, Struct);
};
-class WasmCapiFunctionData : public Struct {
- public:
- DECL_PRIMITIVE_ACCESSORS(call_target, Address)
- DECL_ACCESSORS(embedder_data, Foreign)
- DECL_ACCESSORS(wrapper_code, Code)
- DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
-
- DECL_CAST(WasmCapiFunctionData)
-
- DECL_PRINTER(WasmCapiFunctionData)
- DECL_VERIFIER(WasmCapiFunctionData)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_WASM_CAPI_FUNCTION_DATA_FIELDS)
-
- STATIC_ASSERT(kStartOfStrongFieldsOffset == kEmbedderDataOffset);
- using BodyDescriptor = FlexibleBodyDescriptor<kStartOfStrongFieldsOffset>;
-
- OBJECT_CONSTRUCTORS(WasmCapiFunctionData, Struct);
-};
-
// Information for a WasmExportedFunction which is referenced as the function
// data of the SharedFunctionInfo underlying the function. For details please
// see the {SharedFunctionInfo::HasWasmExportedFunctionData} predicate.
@@ -780,10 +762,11 @@ class WasmExportedFunctionData : public Struct {
DECL_ACCESSORS(instance, WasmInstanceObject)
DECL_INT_ACCESSORS(jump_table_offset)
DECL_INT_ACCESSORS(function_index)
+ DECL_ACCESSORS(signature, Foreign)
+ DECL_INT_ACCESSORS(call_count)
DECL_ACCESSORS(c_wrapper_code, Object)
DECL_ACCESSORS(wasm_call_target, Object)
DECL_INT_ACCESSORS(packed_args_size)
- DECL_ACCESSORS(signature, Foreign)
DECL_CAST(WasmExportedFunctionData)
@@ -809,6 +792,7 @@ class WasmJSFunctionData : public Struct {
DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
DECL_ACCESSORS(callable, JSReceiver)
DECL_ACCESSORS(wrapper_code, Code)
+ DECL_ACCESSORS(wasm_to_js_wrapper_code, Code)
DECL_CAST(WasmJSFunctionData)
diff --git a/deps/v8/src/wasm/wasm-objects.tq b/deps/v8/src/wasm/wasm-objects.tq
index bd1fdfd783..fc5cfd6985 100644
--- a/deps/v8/src/wasm/wasm-objects.tq
+++ b/deps/v8/src/wasm/wasm-objects.tq
@@ -2,10 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+@useParentTypeChecker
type PodArrayOfWasmValueType extends ByteArray
-constexpr 'PodArray<wasm::ValueType>';
+ constexpr 'PodArray<wasm::ValueType>';
+@useParentTypeChecker
type ManagedWasmNativeModule extends Foreign
-constexpr 'Managed<wasm::NativeModule>';
+ constexpr 'Managed<wasm::NativeModule>';
type WasmValueType extends uint8 constexpr 'wasm::ValueType::Kind';
extern class WasmInstanceObject extends JSObject;
@@ -15,23 +17,26 @@ extern class WasmExportedFunctionData extends Struct {
instance: WasmInstanceObject;
jump_table_offset: Smi;
function_index: Smi;
+ signature: Foreign;
+ call_count: Smi;
// The remaining fields are for fast calling from C++. The contract is
// that they are lazily populated, and either all will be present or none.
c_wrapper_code: Object;
wasm_call_target: Smi|Foreign;
packed_args_size: Smi;
- signature: Foreign;
}
extern class WasmJSFunctionData extends Struct {
callable: JSReceiver;
wrapper_code: Code;
+ wasm_to_js_wrapper_code: Code;
serialized_return_count: Smi;
serialized_parameter_count: Smi;
serialized_signature: PodArrayOfWasmValueType;
}
-extern class WasmCapiFunctionData extends Struct {
+@export
+class WasmCapiFunctionData extends HeapObject {
call_target: RawPtr;
embedder_data: Foreign; // Managed<wasm::FuncData>
wrapper_code: Code;
diff --git a/deps/v8/src/wasm/wasm-opcodes-inl.h b/deps/v8/src/wasm/wasm-opcodes-inl.h
index e050d12947..5e0f172bd5 100644
--- a/deps/v8/src/wasm/wasm-opcodes-inl.h
+++ b/deps/v8/src/wasm/wasm-opcodes-inl.h
@@ -35,7 +35,6 @@ namespace wasm {
#define CASE_S64x2_OP(name, str) CASE_OP(S64x2##name, "s64x2." str)
#define CASE_S32x4_OP(name, str) CASE_OP(S32x4##name, "s32x4." str)
#define CASE_S16x8_OP(name, str) CASE_OP(S16x8##name, "s16x8." str)
-#define CASE_V64x2_OP(name, str) CASE_OP(V64x2##name, "v64x2." str)
#define CASE_V32x4_OP(name, str) CASE_OP(V32x4##name, "v32x4." str)
#define CASE_V16x8_OP(name, str) CASE_OP(V16x8##name, "v16x8." str)
#define CASE_V8x16_OP(name, str) CASE_OP(V8x16##name, "v8x16." str)
@@ -235,7 +234,8 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIMD_OP(Neg, "neg")
CASE_SIMDF_OP(Sqrt, "sqrt")
CASE_SIMD_OP(Eq, "eq")
- CASE_SIMD_OP(Ne, "ne")
+ CASE_SIMDF_OP(Ne, "ne")
+ CASE_SIMDI_OP(Ne, "ne")
CASE_SIMD_OP(Add, "add")
CASE_SIMD_OP(Sub, "sub")
CASE_SIMD_OP(Mul, "mul")
@@ -267,27 +267,23 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIGN_OP(I8x16, ExtractLane, "extract_lane")
CASE_SIMDI_OP(ReplaceLane, "replace_lane")
CASE_SIGN_OP(SIMDI, Min, "min")
- CASE_SIGN_OP(I64x2, Min, "min")
CASE_SIGN_OP(SIMDI, Max, "max")
- CASE_SIGN_OP(I64x2, Max, "max")
CASE_SIGN_OP(SIMDI, Lt, "lt")
- CASE_SIGN_OP(I64x2, Lt, "lt")
CASE_SIGN_OP(SIMDI, Le, "le")
- CASE_SIGN_OP(I64x2, Le, "le")
CASE_SIGN_OP(SIMDI, Gt, "gt")
- CASE_SIGN_OP(I64x2, Gt, "gt")
CASE_SIGN_OP(SIMDI, Ge, "ge")
- CASE_SIGN_OP(I64x2, Ge, "ge")
+ CASE_CONVERT_OP(Convert, I64x2, I32x4Low, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I64x2, I32x4High, "i32", "convert")
CASE_SIGN_OP(SIMDI, Shr, "shr")
CASE_SIGN_OP(I64x2, Shr, "shr")
CASE_SIMDI_OP(Shl, "shl")
CASE_I64x2_OP(Shl, "shl")
CASE_I32x4_OP(AddHoriz, "add_horizontal")
CASE_I16x8_OP(AddHoriz, "add_horizontal")
- CASE_SIGN_OP(I16x8, AddSaturate, "add_saturate")
- CASE_SIGN_OP(I8x16, AddSaturate, "add_saturate")
- CASE_SIGN_OP(I16x8, SubSaturate, "sub_saturate")
- CASE_SIGN_OP(I8x16, SubSaturate, "sub_saturate")
+ CASE_SIGN_OP(I16x8, AddSat, "add_sat")
+ CASE_SIGN_OP(I8x16, AddSat, "add_sat")
+ CASE_SIGN_OP(I16x8, SubSat, "sub_sat")
+ CASE_SIGN_OP(I8x16, SubSat, "sub_sat")
CASE_S128_OP(And, "and")
CASE_S128_OP(Or, "or")
CASE_S128_OP(Xor, "xor")
@@ -298,13 +294,11 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I8x16_OP(Shuffle, "shuffle")
CASE_SIMDV_OP(AnyTrue, "any_true")
CASE_SIMDV_OP(AllTrue, "all_true")
- CASE_V64x2_OP(AnyTrue, "any_true")
- CASE_V64x2_OP(AllTrue, "all_true")
CASE_SIMDF_OP(Qfma, "qfma")
CASE_SIMDF_OP(Qfms, "qfms")
- CASE_S128_OP(LoadMem32Zero, "load32_zero")
- CASE_S128_OP(LoadMem64Zero, "load64_zero")
+ CASE_S128_OP(Load32Zero, "load32_zero")
+ CASE_S128_OP(Load64Zero, "load64_zero")
CASE_S128_OP(Load8Splat, "load8_splat")
CASE_S128_OP(Load16Splat, "load16_splat")
CASE_S128_OP(Load32Splat, "load32_splat")
@@ -315,17 +309,28 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S128_OP(Load16x4U, "load16x4_u")
CASE_S128_OP(Load32x2S, "load32x2_s")
CASE_S128_OP(Load32x2U, "load32x2_u")
+ CASE_S128_OP(Load8Lane, "load8_lane")
+ CASE_S128_OP(Load16Lane, "load16_lane")
+ CASE_S128_OP(Load32Lane, "load32_lane")
+ CASE_S128_OP(Load64Lane, "load64_lane")
+ CASE_S128_OP(Store8Lane, "store8_lane")
+ CASE_S128_OP(Store16Lane, "store16_lane")
+ CASE_S128_OP(Store32Lane, "store32_lane")
+ CASE_S128_OP(Store64Lane, "store64_lane")
CASE_I8x16_OP(RoundingAverageU, "avgr_u")
CASE_I16x8_OP(RoundingAverageU, "avgr_u")
+ CASE_I16x8_OP(Q15MulRSatS, "q15mulr_sat_s")
CASE_I8x16_OP(Abs, "abs")
+ CASE_I8x16_OP(Popcnt, "popcnt")
CASE_I16x8_OP(Abs, "abs")
CASE_I32x4_OP(Abs, "abs")
CASE_I8x16_OP(BitMask, "bitmask")
CASE_I16x8_OP(BitMask, "bitmask")
CASE_I32x4_OP(BitMask, "bitmask")
+ CASE_I64x2_OP(BitMask, "bitmask")
CASE_F32x4_OP(Pmin, "pmin")
CASE_F32x4_OP(Pmax, "pmax")
@@ -343,6 +348,18 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I32x4_OP(DotI16x8S, "dot_i16x8_s")
+ CASE_SIGN_OP(I16x8, ExtMulLowI8x16, "extmul_low_i8x16")
+ CASE_SIGN_OP(I16x8, ExtMulHighI8x16, "extmul_high_i8x16")
+ CASE_SIGN_OP(I32x4, ExtMulLowI16x8, "extmul_low_i16x8")
+ CASE_SIGN_OP(I32x4, ExtMulHighI16x8, "extmul_high_i16x8")
+ CASE_SIGN_OP(I64x2, ExtMulLowI32x4, "extmul_low_i32x4")
+ CASE_SIGN_OP(I64x2, ExtMulHighI32x4, "extmul_high_i32x4")
+ CASE_SIMDI_OP(SignSelect, "signselect")
+ CASE_I64x2_OP(SignSelect, "signselect")
+
+ CASE_SIGN_OP(I32x4, ExtAddPairwiseI16x8, "extadd_pairwise_i16x8")
+ CASE_SIGN_OP(I16x8, ExtAddPairwiseI8x16, "extadd_pairwise_i8x16")
+
// Atomic operations.
CASE_OP(AtomicNotify, "atomic.notify")
CASE_INT_OP(AtomicWait, "atomic.wait")
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 04767f53a2..76812446a9 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -21,8 +21,9 @@ class WasmFeatures;
struct WasmModule;
std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
-bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
- const WasmFeatures&);
+bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
+ const WasmModule* module,
+ const WasmFeatures&);
// Control expressions and blocks.
#define FOREACH_CONTROL_OPCODE(V) \
@@ -287,7 +288,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(S128Load16Splat, 0xfd08, s_i) \
V(S128Load32Splat, 0xfd09, s_i) \
V(S128Load64Splat, 0xfd0a, s_i) \
- V(S128StoreMem, 0xfd0b, v_is)
+ V(S128StoreMem, 0xfd0b, v_is) \
+ V(S128Load32Zero, 0xfdfc, s_i) \
+ V(S128Load64Zero, 0xfdfd, s_i)
#define FOREACH_SIMD_CONST_OPCODE(V) V(S128Const, 0xfd0c, _)
@@ -360,11 +363,11 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(I8x16ShrS, 0xfd6c, s_si) \
V(I8x16ShrU, 0xfd6d, s_si) \
V(I8x16Add, 0xfd6e, s_ss) \
- V(I8x16AddSaturateS, 0xfd6f, s_ss) \
- V(I8x16AddSaturateU, 0xfd70, s_ss) \
+ V(I8x16AddSatS, 0xfd6f, s_ss) \
+ V(I8x16AddSatU, 0xfd70, s_ss) \
V(I8x16Sub, 0xfd71, s_ss) \
- V(I8x16SubSaturateS, 0xfd72, s_ss) \
- V(I8x16SubSaturateU, 0xfd73, s_ss) \
+ V(I8x16SubSatS, 0xfd72, s_ss) \
+ V(I8x16SubSatU, 0xfd73, s_ss) \
V(I8x16MinS, 0xfd76, s_ss) \
V(I8x16MinU, 0xfd77, s_ss) \
V(I8x16MaxS, 0xfd78, s_ss) \
@@ -385,11 +388,11 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(I16x8ShrS, 0xfd8c, s_si) \
V(I16x8ShrU, 0xfd8d, s_si) \
V(I16x8Add, 0xfd8e, s_ss) \
- V(I16x8AddSaturateS, 0xfd8f, s_ss) \
- V(I16x8AddSaturateU, 0xfd90, s_ss) \
+ V(I16x8AddSatS, 0xfd8f, s_ss) \
+ V(I16x8AddSatU, 0xfd90, s_ss) \
V(I16x8Sub, 0xfd91, s_ss) \
- V(I16x8SubSaturateS, 0xfd92, s_ss) \
- V(I16x8SubSaturateU, 0xfd93, s_ss) \
+ V(I16x8SubSatS, 0xfd92, s_ss) \
+ V(I16x8SubSatU, 0xfd93, s_ss) \
V(I16x8Mul, 0xfd95, s_ss) \
V(I16x8MinS, 0xfd96, s_ss) \
V(I16x8MinU, 0xfd97, s_ss) \
@@ -415,6 +418,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(I32x4MinU, 0xfdb7, s_ss) \
V(I32x4MaxS, 0xfdb8, s_ss) \
V(I32x4MaxU, 0xfdb9, s_ss) \
+ V(I32x4DotI16x8S, 0xfdba, s_ss) \
V(I64x2Neg, 0xfdc1, s_s) \
V(I64x2Shl, 0xfdcb, s_si) \
V(I64x2ShrS, 0xfdcc, s_si) \
@@ -458,36 +462,53 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(F64x2NearestInt, 0xfddf, s_s)
#define FOREACH_SIMD_POST_MVP_MEM_OPCODE(V) \
- V(S128LoadMem32Zero, 0xfdfc, s_i) \
- V(S128LoadMem64Zero, 0xfdfd, s_i)
-
-#define FOREACH_SIMD_POST_MVP_OPCODE(V) \
- V(I8x16Mul, 0xfd75, s_ss) \
- V(V64x2AnyTrue, 0xfdc2, i_s) \
- V(V64x2AllTrue, 0xfdc3, i_s) \
- V(I64x2Eq, 0xfdc0, s_ss) \
- V(I64x2Ne, 0xfdc4, s_ss) \
- V(I64x2LtS, 0xfdc5, s_ss) \
- V(I64x2LtU, 0xfdc6, s_ss) \
- V(I64x2GtS, 0xfdc7, s_ss) \
- V(I64x2GtU, 0xfdc8, s_ss) \
- V(I64x2LeS, 0xfdc9, s_ss) \
- V(I64x2LeU, 0xfdca, s_ss) \
- V(I64x2GeS, 0xfdcf, s_ss) \
- V(I64x2GeU, 0xfdd0, s_ss) \
- V(I64x2MinS, 0xfdd6, s_ss) \
- V(I64x2MinU, 0xfdd7, s_ss) \
- V(I64x2MaxS, 0xfde2, s_ss) \
- V(I64x2MaxU, 0xfdee, s_ss) \
- V(F32x4Qfma, 0xfdb4, s_sss) \
- V(F32x4Qfms, 0xfdd4, s_sss) \
- V(F64x2Qfma, 0xfdfe, s_sss) \
- V(F64x2Qfms, 0xfdff, s_sss) \
- V(I16x8AddHoriz, 0xfdaf, s_ss) \
- V(I32x4AddHoriz, 0xfdb0, s_ss) \
- V(I32x4DotI16x8S, 0xfdba, s_ss) \
- V(F32x4AddHoriz, 0xfdb2, s_ss) \
- V(F32x4RecipApprox, 0xfdb3, s_s) \
+ V(S128Load8Lane, 0xfd58, s_is) \
+ V(S128Load16Lane, 0xfd59, s_is) \
+ V(S128Load32Lane, 0xfd5a, s_is) \
+ V(S128Load64Lane, 0xfd5b, s_is) \
+ V(S128Store8Lane, 0xfd5c, v_is) \
+ V(S128Store16Lane, 0xfd5d, v_is) \
+ V(S128Store32Lane, 0xfd5e, v_is) \
+ V(S128Store64Lane, 0xfd5f, v_is)
+
+#define FOREACH_SIMD_POST_MVP_OPCODE(V) \
+ V(I8x16Mul, 0xfd75, s_ss) \
+ V(I8x16Popcnt, 0xfd7c, s_s) \
+ V(I8x16SignSelect, 0xfd7d, s_sss) \
+ V(I16x8SignSelect, 0xfd7e, s_sss) \
+ V(I32x4SignSelect, 0xfd7f, s_sss) \
+ V(I64x2SignSelect, 0xfd94, s_sss) \
+ V(I16x8Q15MulRSatS, 0xfd9c, s_ss) \
+ V(I16x8ExtMulLowI8x16S, 0xfd9a, s_ss) \
+ V(I16x8ExtMulHighI8x16S, 0xfd9d, s_ss) \
+ V(I16x8ExtMulLowI8x16U, 0xfd9e, s_ss) \
+ V(I16x8ExtMulHighI8x16U, 0xfd9f, s_ss) \
+ V(I32x4ExtMulLowI16x8S, 0xfdbb, s_ss) \
+ V(I32x4ExtMulHighI16x8S, 0xfdbd, s_ss) \
+ V(I32x4ExtMulLowI16x8U, 0xfdbe, s_ss) \
+ V(I32x4ExtMulHighI16x8U, 0xfdbf, s_ss) \
+ V(I64x2ExtMulLowI32x4S, 0xfdd2, s_ss) \
+ V(I64x2ExtMulHighI32x4S, 0xfdd3, s_ss) \
+ V(I64x2ExtMulLowI32x4U, 0xfdd6, s_ss) \
+ V(I64x2ExtMulHighI32x4U, 0xfdd7, s_ss) \
+ V(I32x4ExtAddPairwiseI16x8S, 0xfda5, s_s) \
+ V(I32x4ExtAddPairwiseI16x8U, 0xfda6, s_s) \
+ V(I16x8ExtAddPairwiseI8x16S, 0xfdc2, s_s) \
+ V(I16x8ExtAddPairwiseI8x16U, 0xfdc3, s_s) \
+ V(I64x2Eq, 0xfdc0, s_ss) \
+ V(F32x4Qfma, 0xfdb4, s_sss) \
+ V(I64x2BitMask, 0xfdc4, i_s) \
+ V(I64x2SConvertI32x4Low, 0xfdc7, s_s) \
+ V(I64x2SConvertI32x4High, 0xfdc8, s_s) \
+ V(I64x2UConvertI32x4Low, 0xfdc9, s_s) \
+ V(I64x2UConvertI32x4High, 0xfdca, s_s) \
+ V(F32x4Qfms, 0xfdd4, s_sss) \
+ V(F64x2Qfma, 0xfdfe, s_sss) \
+ V(F64x2Qfms, 0xfdff, s_sss) \
+ V(I16x8AddHoriz, 0xfdaf, s_ss) \
+ V(I32x4AddHoriz, 0xfdb0, s_ss) \
+ V(F32x4AddHoriz, 0xfdb2, s_ss) \
+ V(F32x4RecipApprox, 0xfdb3, s_s) \
V(F32x4RecipSqrtApprox, 0xfdbc, s_s)
#define FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
@@ -700,17 +721,18 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
V(i_ci, kWasmI32, kWasmFuncRef, kWasmI32) \
V(i_qq, kWasmI32, kWasmEqRef, kWasmEqRef)
-#define FOREACH_SIMD_SIGNATURE(V) \
- V(s_s, kWasmS128, kWasmS128) \
- V(s_f, kWasmS128, kWasmF32) \
- V(s_d, kWasmS128, kWasmF64) \
- V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
- V(s_i, kWasmS128, kWasmI32) \
- V(s_l, kWasmS128, kWasmI64) \
- V(s_si, kWasmS128, kWasmS128, kWasmI32) \
- V(i_s, kWasmI32, kWasmS128) \
- V(v_is, kWasmStmt, kWasmI32, kWasmS128) \
- V(s_sss, kWasmS128, kWasmS128, kWasmS128, kWasmS128)
+#define FOREACH_SIMD_SIGNATURE(V) \
+ V(s_s, kWasmS128, kWasmS128) \
+ V(s_f, kWasmS128, kWasmF32) \
+ V(s_d, kWasmS128, kWasmF64) \
+ V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
+ V(s_i, kWasmS128, kWasmI32) \
+ V(s_l, kWasmS128, kWasmI64) \
+ V(s_si, kWasmS128, kWasmS128, kWasmI32) \
+ V(i_s, kWasmI32, kWasmS128) \
+ V(v_is, kWasmStmt, kWasmI32, kWasmS128) \
+ V(s_sss, kWasmS128, kWasmS128, kWasmS128, kWasmS128) \
+ V(s_is, kWasmS128, kWasmI32, kWasmS128)
#define FOREACH_PREFIX(V) \
V(Numeric, 0xfc) \
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 784dd0f615..8f0d5427aa 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -63,6 +63,8 @@ template <typename T>
class Result {
public:
Result() = default;
+ Result(const Result&) = delete;
+ Result& operator=(const Result&) = delete;
template <typename S>
explicit Result(S&& value) : value_(std::forward<S>(value)) {}
@@ -104,8 +106,6 @@ class Result {
T value_ = T{};
WasmError error_;
-
- DISALLOW_COPY_AND_ASSIGN(Result);
};
// A helper for generating error messages that bubble up to JS exceptions.
@@ -113,8 +113,10 @@ class V8_EXPORT_PRIVATE ErrorThrower {
public:
ErrorThrower(Isolate* isolate, const char* context)
: isolate_(isolate), context_(context) {}
- // Explicitly allow move-construction. Disallow copy (below).
+ // Explicitly allow move-construction. Disallow copy.
ErrorThrower(ErrorThrower&& other) V8_NOEXCEPT;
+ ErrorThrower(const ErrorThrower&) = delete;
+ ErrorThrower& operator=(const ErrorThrower&) = delete;
~ErrorThrower();
PRINTF_FORMAT(2, 3) void TypeError(const char* fmt, ...);
@@ -165,7 +167,6 @@ class V8_EXPORT_PRIVATE ErrorThrower {
// ErrorThrower should always be stack-allocated, since it constitutes a scope
// (things happen in the destructor).
DISALLOW_NEW_AND_DELETE()
- DISALLOW_COPY_AND_ASSIGN(ErrorThrower);
};
// Like an ErrorThrower, but turns all pending exceptions into scheduled
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index f4f5f99268..1c73fc5c41 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -208,6 +208,9 @@ constexpr size_t kCodeHeaderSize = sizeof(bool) + // whether code is present
// a tag from the Address of an external reference and vice versa.
class ExternalReferenceList {
public:
+ ExternalReferenceList(const ExternalReferenceList&) = delete;
+ ExternalReferenceList& operator=(const ExternalReferenceList&) = delete;
+
uint32_t tag_from_address(Address ext_ref_address) const {
auto tag_addr_less_than = [this](uint32_t tag, Address searched_addr) {
return external_reference_by_tag_[tag] < searched_addr;
@@ -263,7 +266,6 @@ class ExternalReferenceList {
#undef RUNTIME_ADDR
};
uint32_t tags_ordered_by_address_[kNumExternalReferences];
- DISALLOW_COPY_AND_ASSIGN(ExternalReferenceList);
};
static_assert(std::is_trivially_destructible<ExternalReferenceList>::value,
@@ -273,8 +275,9 @@ static_assert(std::is_trivially_destructible<ExternalReferenceList>::value,
class V8_EXPORT_PRIVATE NativeModuleSerializer {
public:
- NativeModuleSerializer() = delete;
NativeModuleSerializer(const NativeModule*, Vector<WasmCode* const>);
+ NativeModuleSerializer(const NativeModuleSerializer&) = delete;
+ NativeModuleSerializer& operator=(const NativeModuleSerializer&) = delete;
size_t Measure() const;
bool Write(Writer* writer);
@@ -287,8 +290,6 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
const NativeModule* const native_module_;
Vector<WasmCode* const> code_table_;
bool write_called_;
-
- DISALLOW_COPY_AND_ASSIGN(NativeModuleSerializer);
};
NativeModuleSerializer::NativeModuleSerializer(
@@ -468,8 +469,9 @@ bool WasmSerializer::SerializeNativeModule(Vector<byte> buffer) const {
class V8_EXPORT_PRIVATE NativeModuleDeserializer {
public:
- NativeModuleDeserializer() = delete;
explicit NativeModuleDeserializer(NativeModule*);
+ NativeModuleDeserializer(const NativeModuleDeserializer&) = delete;
+ NativeModuleDeserializer& operator=(const NativeModuleDeserializer&) = delete;
bool Read(Reader* reader);
@@ -479,8 +481,6 @@ class V8_EXPORT_PRIVATE NativeModuleDeserializer {
NativeModule* const native_module_;
bool read_called_;
-
- DISALLOW_COPY_AND_ASSIGN(NativeModuleDeserializer);
};
NativeModuleDeserializer::NativeModuleDeserializer(NativeModule* native_module)
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index 3926a4c7e9..81dbd3e9cb 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -46,10 +46,21 @@ class Simd128 {
const uint8_t* bytes() { return val_; }
+ template <typename T>
+ inline T to();
+
private:
uint8_t val_[16] = {0};
};
+#define DECLARE_CAST(cType, sType, name, size) \
+ template <> \
+ inline sType Simd128::to() { \
+ return to_##name(); \
+ }
+FOREACH_SIMD_TYPE(DECLARE_CAST)
+#undef DECLARE_CAST
+
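The DECLARE_CAST block is an X-macro: FOREACH_SIMD_TYPE expands DECLARE_CAST once per SIMD shape, stamping out one explicit specialization of Simd128::to<T>() per type. A reduced sketch of the technique with a hypothetical two-entry list (names here are illustrative, not V8's):

    #include <cstdint>

    #define FOREACH_DEMO_TYPE(V) \
      V(int32_t, i32)            \
      V(int64_t, i64)

    class Value {
     public:
      template <typename T>
      T to();  // Only the macro-generated specializations are defined.

     private:
      int32_t to_i32() { return 0; }
      int64_t to_i64() { return 0; }
    };

    #define DECLARE_CAST(cType, name) \
      template <>                     \
      inline cType Value::to() {      \
        return to_##name();           \
      }
    FOREACH_DEMO_TYPE(DECLARE_CAST)
    #undef DECLARE_CAST

Calling value.to<int64_t>() then resolves to to_i64(); any T outside the list fails to link, which keeps the whole cast surface in one place.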
// Macro for defining WasmValue methods for different types.
// Elements:
// - name (for to_<name>() method)
diff --git a/deps/v8/src/zone/zone-containers.h b/deps/v8/src/zone/zone-containers.h
index 638ddd2213..c2b4dfa703 100644
--- a/deps/v8/src/zone/zone-containers.h
+++ b/deps/v8/src/zone/zone-containers.h
@@ -176,9 +176,9 @@ class ZoneUnorderedSet
: public std::unordered_set<K, Hash, KeyEqual, ZoneAllocator<K>> {
public:
// Constructs an empty set.
- explicit ZoneUnorderedSet(Zone* zone)
+ explicit ZoneUnorderedSet(Zone* zone, size_t bucket_count = 100)
: std::unordered_set<K, Hash, KeyEqual, ZoneAllocator<K>>(
- 100, Hash(), KeyEqual(), ZoneAllocator<K>(zone)) {}
+ bucket_count, Hash(), KeyEqual(), ZoneAllocator<K>(zone)) {}
};
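
ZoneUnorderedSet previously pinned the bucket count to 100; the constructor now forwards a bucket_count parameter, defaulted to the old value so existing call sites are unchanged. The std::unordered_set overload it targets is (bucket_count, hash, key_equal, allocator); a minimal stand-in sketch using std::allocator in place of ZoneAllocator:

    #include <cstddef>
    #include <functional>
    #include <memory>
    #include <unordered_set>

    template <typename K, typename Hash = std::hash<K>,
              typename KeyEqual = std::equal_to<K>>
    class DemoUnorderedSet
        : public std::unordered_set<K, Hash, KeyEqual, std::allocator<K>> {
     public:
      // The default preserves the historical bucket count.
      explicit DemoUnorderedSet(size_t bucket_count = 100)
          : std::unordered_set<K, Hash, KeyEqual, std::allocator<K>>(
                bucket_count, Hash(), KeyEqual(), std::allocator<K>()) {}
    };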
// A wrapper subclass for std::multimap to make it easy to construct one that
diff --git a/deps/v8/test/BUILD.gn b/deps/v8/test/BUILD.gn
index c6045d1bd7..fb872ad39f 100644
--- a/deps/v8/test/BUILD.gn
+++ b/deps/v8/test/BUILD.gn
@@ -152,6 +152,7 @@ v8_header_set("common_test_headers") {
sources = [
"common/assembler-tester.h",
+ "common/flag-utils.h",
"common/types-fuzz.h",
"common/wasm/flag-utils.h",
"common/wasm/test-signatures.h",
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index d2a1c1fad5..30fa6c533a 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -72,9 +72,11 @@ v8_source_set("cctest_sources") {
sources = [
### gcmole(all) ###
"../common/assembler-tester.h",
+ "../common/flag-utils.h",
"../common/wasm/flag-utils.h",
"../common/wasm/test-signatures.h",
"../common/wasm/wasm-macro-gen.h",
+ "cctest-utils.h",
"collector.h",
"compiler/c-signature.h",
"compiler/call-tester.h",
@@ -204,6 +206,7 @@ v8_source_set("cctest_sources") {
"test-code-stub-assembler.cc",
"test-compiler.cc",
"test-concurrent-descriptor-array.cc",
+ "test-concurrent-feedback-vector.cc",
"test-concurrent-prototype.cc",
"test-concurrent-script-context-table.cc",
"test-concurrent-transition-array.cc",
@@ -276,6 +279,7 @@ v8_source_set("cctest_sources") {
"test-unwinder-code-pages.cc",
"test-usecounters.cc",
"test-utils.cc",
+ "test-verifiers.cc",
"test-version.cc",
"test-weakmaps.cc",
"test-weaksets.cc",
@@ -303,6 +307,7 @@ v8_source_set("cctest_sources") {
"wasm/test-run-wasm-simd-liftoff.cc",
"wasm/test-run-wasm-simd-scalar-lowering.cc",
"wasm/test-run-wasm-simd.cc",
+ "wasm/test-run-wasm-wrappers.cc",
"wasm/test-run-wasm.cc",
"wasm/test-streaming-compilation.cc",
"wasm/test-wasm-breakpoints.cc",
diff --git a/deps/v8/test/cctest/assembler-helper-arm.cc b/deps/v8/test/cctest/assembler-helper-arm.cc
index 0df6360260..d28ae3b6c6 100644
--- a/deps/v8/test/cctest/assembler-helper-arm.cc
+++ b/deps/v8/test/cctest/assembler-helper-arm.cc
@@ -22,7 +22,7 @@ Handle<Code> AssembleCodeImpl(std::function<void(MacroAssembler&)> assemble) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
if (FLAG_print_code) {
code->Print();
}
diff --git a/deps/v8/test/cctest/cctest-utils.h b/deps/v8/test/cctest/cctest-utils.h
new file mode 100644
index 0000000000..804d458b0c
--- /dev/null
+++ b/deps/v8/test/cctest/cctest-utils.h
@@ -0,0 +1,60 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+
+#include "src/base/build_config.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef V8_CC_GNU
+
+#if V8_HOST_ARCH_X64
+#define GET_STACK_POINTER_TO(sp_addr) \
+ __asm__ __volatile__("mov %%rsp, %0" : "=g"(sp_addr))
+#elif V8_HOST_ARCH_IA32
+#define GET_STACK_POINTER_TO(sp_addr) \
+ __asm__ __volatile__("mov %%esp, %0" : "=g"(sp_addr))
+#elif V8_HOST_ARCH_ARM
+#define GET_STACK_POINTER_TO(sp_addr) \
+ __asm__ __volatile__("str sp, %0" : "=g"(sp_addr))
+#elif V8_HOST_ARCH_ARM64
+#define GET_STACK_POINTER_TO(sp_addr) \
+ __asm__ __volatile__("mov x16, sp; str x16, %0" : "=g"(sp_addr))
+#elif V8_HOST_ARCH_MIPS
+#define GET_STACK_POINTER_TO(sp_addr) \
+ __asm__ __volatile__("sw $sp, %0" : "=g"(sp_addr))
+#elif V8_HOST_ARCH_MIPS64
+#define GET_STACK_POINTER_TO(sp_addr) \
+ __asm__ __volatile__("sd $sp, %0" : "=g"(sp_addr))
+#elif defined(__s390x__) || defined(_ARCH_S390X)
+#define GET_STACK_POINTER_TO(sp_addr) \
+ __asm__ __volatile__("stg %%r15, %0" : "=m"(sp_addr))
+#elif defined(__s390__) || defined(_ARCH_S390)
+#define GET_STACK_POINTER_TO(sp_addr) \
+ __asm__ __volatile__("st 15, %0" : "=m"(sp_addr))
+#elif defined(__PPC64__) || defined(_ARCH_PPC64)
+#define GET_STACK_POINTER_TO(sp_addr) \
+ __asm__ __volatile__("std 1, %0" : "=m"(sp_addr))
+#elif defined(__PPC__) || defined(_ARCH_PPC)
+#define GET_STACK_POINTER_TO(sp_addr) \
+ __asm__ __volatile__("stw 1, %0" : "=m"(sp_addr))
+#else
+#error Host architecture was not detected as supported by v8
+#endif
+
+DISABLE_ASAN inline uintptr_t GetStackPointer() {
+ // MSAN doesn't seem to treat initializing stores in inline assembly as such,
+ // so we initialize this value here.
+ uintptr_t sp_addr = 0;
+ GET_STACK_POINTER_TO(sp_addr);
+ return sp_addr;
+}
+
+#endif // V8_CC_GNU
+
+} // namespace internal
+} // namespace v8
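
GetStackPointer reads the stack pointer through per-architecture inline assembly; MSAN does not model the store performed inside the asm statement, hence the explicit zero-initialization beforehand. A hypothetical usage sketch (not from the V8 tests; assumes a GNU-compatible compiler, since the helper only exists under V8_CC_GNU):

    #include <cstdio>

    void ReportStackPointer() {
      uintptr_t sp = v8::internal::GetStackPointer();
      // Useful when a test needs a concrete stack bound, e.g. as a
      // conservative "top of stack" marker for unwinding checks.
      std::printf("current sp: %p\n", reinterpret_cast<void*>(sp));
    }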
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 7fce7444b8..44a0763f6b 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -269,7 +269,7 @@ i::Handle<i::JSFunction> Optimize(
CHECK_NOT_NULL(zone);
i::OptimizedCompilationInfo info(zone, isolate, shared, function,
- i::CodeKind::OPTIMIZED_FUNCTION);
+ i::CodeKind::TURBOFAN);
if (flags & i::OptimizedCompilationInfo::kInlining) {
info.set_inlining();
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 8d86b8b338..50cccab689 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -685,6 +685,9 @@ class ManualGCScope {
// of construction.
class TestPlatform : public v8::Platform {
public:
+ TestPlatform(const TestPlatform&) = delete;
+ TestPlatform& operator=(const TestPlatform&) = delete;
+
// v8::Platform implementation.
v8::PageAllocator* GetPageAllocator() override {
return old_platform_->GetPageAllocator();
@@ -746,8 +749,6 @@ class TestPlatform : public v8::Platform {
private:
v8::Platform* old_platform_;
-
- DISALLOW_COPY_AND_ASSIGN(TestPlatform);
};
#if defined(USE_SIMULATOR)
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 0c4bd967b8..b8c8cbde03 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -32,15 +32,16 @@
##############################################################################
- # This test is so detailed in it's look at the literals array, I can't
- # maintain it until the CL is done.
- 'test-heap-profiler/AllocationSitesAreVisible': [FAIL, ['lite_mode == True', SKIP]],
-
# These tests always fail. They are here to test test.py. If
# they don't fail then test.py has failed.
'test-serialize/TestThatAlwaysFails': [FAIL],
'test-api/SealHandleScope': [FAIL],
+ # These tests are expected to hit a CHECK (i.e. a FAIL result actually means
+ # the test passed).
+ 'test-api/RegExpInterruptAndReenterIrregexp': [FAIL, CRASH],
+ 'test-verifiers/Fail*': [FAIL, CRASH],
+
# This test always fails. It tests that LiveEdit causes abort when turned off.
'test-debug/LiveEditDisabled': [FAIL],
@@ -112,6 +113,10 @@
# https://crbug.com/v8/8919
'test-platform/StackAlignment': [PASS, ['not is_clang', SKIP]],
+ # Test that misuse of PopAndReturn does not compile.
+ 'test-code-stub-assembler/PopAndReturnFromJSBuiltinWithStackParameters': [FAIL],
+ 'test-code-stub-assembler/PopAndReturnFromTFCBuiltinWithStackParameters': [FAIL],
+
############################################################################
# Slow tests.
'test-debug/CallFunctionInDebugger': [PASS, ['mode == debug', SLOW]],
@@ -193,12 +198,6 @@
}], # variant == nooptimization
##############################################################################
-['variant == no_lfa', {
- # https://crbug.com/v8/10219
- 'test-compiler/DecideToPretenureDuringCompilation': [SKIP],
-}], # variant == no_lfa
-
-##############################################################################
['asan == True', {
# Skip tests not suitable for ASAN.
'test-assembler-x64/AssemblerX64XchglOperations': [SKIP],
@@ -250,9 +249,6 @@
# BUG(v8:8744).
'test-cpu-profiler/FunctionCallSample': [SKIP],
- # BUG(5920): Flaky crash.
- 'test-serialize/ContextSerializerContext': [PASS, ['arch == x64 and mode == debug', SKIP]],
-
# BUG(10107): Failing flakily
'test-cpu-profiler/Inlining2': ['arch == ia32 and mode == debug', SKIP],
'test-cpu-profiler/CrossScriptInliningCallerLineNumbers2': ['arch == ia32 and mode == debug', SKIP],
@@ -387,13 +383,6 @@
# TODO(ppc): Implement load/store reverse byte instructions
'test-run-wasm-simd/RunWasm_SimdLoadStoreLoad': [SKIP],
'test-run-wasm-simd/RunWasm_SimdLoadStoreLoad_turbofan': [SKIP],
- # TODO(miladfarca): remove once aix gcc bug is fixed.
- # gcc on aix has a bug when using ceilf or truncf:
- # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=97086
- 'test-run-wasm-simd/RunWasm_F32x4Ceil_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_F32x4Trunc_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_F64x2Ceil_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_F64x2Trunc_interpreter': [SKIP],
}], # 'system == aix or (arch == ppc64 and byteorder == big)'
@@ -475,18 +464,6 @@
'test-unwinder-code-pages/*': [SKIP]
}],
-# TODO(solanes, v8:10833): Re-enable these tests or create specific ones for
-# Arm. They are disabled because the stack is not being set up the way it does
-# in the wild.
-['arch == arm64 or arch == arm', {
- 'test-unwinder-code-pages/Unwind_CodeObjectPCInMiddle_Success_CodePagesAPI': [SKIP],
- 'test-unwinder-code-pages/Unwind_JSEntryBeforeFrame_Fail_CodePagesAPI': [SKIP],
- 'test-unwinder-code-pages/Unwind_OneJSFrame_Success_CodePagesAPI': [SKIP],
- 'test-unwinder-code-pages/Unwind_TwoJSFrames_Success_CodePagesAPI': [SKIP],
- 'test-unwinder-code-pages/Unwind_StackBounds_Basic_CodePagesAPI': [SKIP],
- 'test-unwinder-code-pages/Unwind_StackBounds_WithUnwinding_CodePagesAPI': [SKIP],
-}],
-
##############################################################################
['lite_mode or variant == jitless', {
@@ -518,6 +495,7 @@
'test-run-wasm-simd-liftoff/*': [SKIP],
'test-run-wasm-simd-scalar-lowering/*': [SKIP],
'test-run-wasm-simd/*': [SKIP],
+ 'test-run-wasm-wrappers/*': [SKIP],
'test-streaming-compilation/*': [SKIP],
'test-wasm-breakpoints/*': [SKIP],
'test-wasm-codegen/*': [SKIP],
diff --git a/deps/v8/test/cctest/compiler/code-assembler-tester.h b/deps/v8/test/cctest/compiler/code-assembler-tester.h
index 7e19237f30..ec404dcfa5 100644
--- a/deps/v8/test/cctest/compiler/code-assembler-tester.h
+++ b/deps/v8/test/cctest/compiler/code-assembler-tester.h
@@ -18,12 +18,17 @@ namespace compiler {
class CodeAssemblerTester {
public:
- // Test generating code for a stub. Assumes VoidDescriptor call interface.
- explicit CodeAssemblerTester(Isolate* isolate, const char* name = "test")
+ CodeAssemblerTester(Isolate* isolate,
+ const CallInterfaceDescriptor& descriptor,
+ const char* name = "test")
: zone_(isolate->allocator(), ZONE_NAME, kCompressGraphZone),
scope_(isolate),
- state_(isolate, &zone_, VoidDescriptor{}, CodeKind::STUB, name,
- PoisoningMitigationLevel::kDontPoison) {}
+ state_(isolate, &zone_, descriptor, CodeKind::FOR_TESTING, name,
+ PoisoningMitigationLevel::kDontPoison, Builtins::kNoBuiltinId) {}
+
+ // Test generating code for a stub. Assumes VoidDescriptor call interface.
+ explicit CodeAssemblerTester(Isolate* isolate, const char* name = "test")
+ : CodeAssemblerTester(isolate, VoidDescriptor{}, name) {}
// Test generating code for a JS function (e.g. builtins).
CodeAssemblerTester(Isolate* isolate, int parameter_count,
@@ -36,16 +41,13 @@ class CodeAssemblerTester {
CodeAssemblerTester(Isolate* isolate, CodeKind kind,
const char* name = "test")
- : zone_(isolate->allocator(), ZONE_NAME, kCompressGraphZone),
- scope_(isolate),
- state_(isolate, &zone_, 0, kind, name,
- PoisoningMitigationLevel::kDontPoison) {}
+ : CodeAssemblerTester(isolate, 0, kind, name) {}
CodeAssemblerTester(Isolate* isolate, CallDescriptor* call_descriptor,
const char* name = "test")
: zone_(isolate->allocator(), ZONE_NAME, kCompressGraphZone),
scope_(isolate),
- state_(isolate, &zone_, call_descriptor, CodeKind::STUB, name,
+ state_(isolate, &zone_, call_descriptor, CodeKind::FOR_TESTING, name,
PoisoningMitigationLevel::kDontPoison, Builtins::kNoBuiltinId) {}
CodeAssemblerState* state() { return &state_; }
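
All CodeAssemblerTester overloads now funnel into a single primary constructor via C++11 delegating constructors, so CodeKind::FOR_TESTING, the poisoning level, and Builtins::kNoBuiltinId are spelled once. The shape of the idiom, reduced to standard C++ (names illustrative):

    class Tester {
     public:
      // Primary constructor: the one place that knows every default.
      Tester(int descriptor, const char* name)
          : descriptor_(descriptor), name_(name) {}

      // Convenience overloads delegate instead of duplicating init lists.
      explicit Tester(const char* name = "test") : Tester(0, name) {}

     private:
      int descriptor_;
      const char* name_;
    };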
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index d8fe21b787..f1e75f8b55 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -91,7 +91,7 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
}
private:
- CodeKind kind_ = CodeKind::STUB;
+ CodeKind kind_ = CodeKind::FOR_TESTING;
MaybeHandle<Code> code_;
};
diff --git a/deps/v8/test/cctest/compiler/function-tester.cc b/deps/v8/test/cctest/compiler/function-tester.cc
index 9beddf406e..1bc31c34a0 100644
--- a/deps/v8/test/cctest/compiler/function-tester.cc
+++ b/deps/v8/test/cctest/compiler/function-tester.cc
@@ -151,7 +151,7 @@ Handle<JSFunction> FunctionTester::CompileGraph(Graph* graph) {
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
Zone zone(isolate->allocator(), ZONE_NAME);
OptimizedCompilationInfo info(&zone, isolate, shared, function,
- CodeKind::OPTIMIZED_FUNCTION);
+ CodeKind::TURBOFAN);
auto call_descriptor = Linkage::ComputeIncoming(&zone, &info);
Handle<Code> code =
diff --git a/deps/v8/test/cctest/compiler/test-code-assembler.cc b/deps/v8/test/cctest/compiler/test-code-assembler.cc
index 7085468c72..4de20ead40 100644
--- a/deps/v8/test/cctest/compiler/test-code-assembler.cc
+++ b/deps/v8/test/cctest/compiler/test-code-assembler.cc
@@ -8,6 +8,7 @@
#include "src/compiler/opcodes.h"
#include "src/execution/isolate.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/js-function.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
@@ -151,10 +152,10 @@ TEST(SimpleCallJSFunction0Arg) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
CodeAssembler m(asm_tester.state());
{
- Node* function = m.Parameter(1);
- Node* context = m.Parameter(kContextOffset);
+ auto function = m.Parameter<JSFunction>(1);
+ auto context = m.Parameter<Context>(kContextOffset);
- Node* receiver = SmiTag(&m, m.Int32Constant(42));
+ auto receiver = SmiTag(&m, m.Int32Constant(42));
Callable callable = CodeFactory::Call(isolate);
TNode<Object> result = m.CallJS(callable, context, function, receiver);
@@ -174,8 +175,8 @@ TEST(SimpleCallJSFunction1Arg) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
CodeAssembler m(asm_tester.state());
{
- Node* function = m.Parameter(1);
- Node* context = m.Parameter(kContextOffset);
+ auto function = m.Parameter<JSFunction>(1);
+ auto context = m.Parameter<Context>(kContextOffset);
Node* receiver = SmiTag(&m, m.Int32Constant(42));
Node* a = SmiTag(&m, m.Int32Constant(13));
@@ -198,8 +199,8 @@ TEST(SimpleCallJSFunction2Arg) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
CodeAssembler m(asm_tester.state());
{
- Node* function = m.Parameter(1);
- Node* context = m.Parameter(kContextOffset);
+ auto function = m.Parameter<JSFunction>(1);
+ auto context = m.Parameter<Context>(kContextOffset);
Node* receiver = SmiTag(&m, m.Int32Constant(42));
Node* a = SmiTag(&m, m.Int32Constant(13));
@@ -422,14 +423,12 @@ TEST(TestOutOfScopeVariable) {
CodeAssemblerLabel block2(&m);
CodeAssemblerLabel block3(&m);
CodeAssemblerLabel block4(&m);
- m.Branch(m.WordEqual(m.UncheckedCast<IntPtrT>(m.Parameter(0)),
- m.IntPtrConstant(0)),
+ m.Branch(m.WordEqual(m.UncheckedParameter<IntPtrT>(0), m.IntPtrConstant(0)),
&block1, &block4);
m.Bind(&block4);
{
TVariable<IntPtrT> var_object(&m);
- m.Branch(m.WordEqual(m.UncheckedCast<IntPtrT>(m.Parameter(0)),
- m.IntPtrConstant(0)),
+ m.Branch(m.WordEqual(m.UncheckedParameter<IntPtrT>(0), m.IntPtrConstant(0)),
&block2, &block3);
m.Bind(&block2);
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index 22bc456933..105fa630ce 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -82,7 +82,7 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
CodeStubAssembler assembler(tester.state());
std::vector<Node*> params;
// The first parameter is always the callee.
- params.push_back(__ Parameter(1));
+ params.push_back(__ Parameter<Object>(1));
params.push_back(__ HeapConstant(
BuildTeardownFunction(isolate, call_descriptor, parameters)));
// First allocate the FixedArray which will hold the final results. Here we
@@ -114,7 +114,7 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
}
params.push_back(state_out);
// Then take each element of the initial state and pass them as arguments.
- TNode<FixedArray> state_in = __ Cast(__ Parameter(2));
+ auto state_in = __ Parameter<FixedArray>(2);
for (int i = 0; i < static_cast<int>(parameters.size()); i++) {
Node* element = __ LoadFixedArrayElement(state_in, __ IntPtrConstant(i));
// Unbox all elements before passing them as arguments.
@@ -123,10 +123,11 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
case MachineRepresentation::kTagged:
break;
case MachineRepresentation::kFloat32:
- element = __ TruncateFloat64ToFloat32(__ LoadHeapNumberValue(element));
+ element = __ TruncateFloat64ToFloat32(
+ __ LoadHeapNumberValue(__ CAST(element)));
break;
case MachineRepresentation::kFloat64:
- element = __ LoadHeapNumberValue(element);
+ element = __ LoadHeapNumberValue(__ CAST(element));
break;
case MachineRepresentation::kSimd128: {
Node* vector = tester.raw_assembler_for_testing()->AddNode(
@@ -203,10 +204,10 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate,
std::vector<AllocatedOperand> parameters) {
CodeAssemblerTester tester(isolate, call_descriptor, "teardown");
CodeStubAssembler assembler(tester.state());
- TNode<FixedArray> result_array = __ Cast(__ Parameter(1));
+ auto result_array = __ Parameter<FixedArray>(1);
for (int i = 0; i < static_cast<int>(parameters.size()); i++) {
// The first argument is not used and the second is "result_array".
- Node* param = __ Parameter(i + 2);
+ Node* param = __ UntypedParameter(i + 2);
switch (parameters[i].representation()) {
case MachineRepresentation::kTagged:
__ StoreFixedArrayElement(result_array, i, param,
@@ -964,10 +965,11 @@ class CodeGeneratorTester {
explicit CodeGeneratorTester(TestEnvironment* environment,
int extra_stack_space = 0)
: zone_(environment->main_zone()),
- info_(ArrayVector("test"), environment->main_zone(), CodeKind::STUB),
+ info_(ArrayVector("test"), environment->main_zone(),
+ CodeKind::FOR_TESTING),
linkage_(environment->test_descriptor()),
frame_(environment->test_descriptor()->CalculateFixedFrameSize(
- CodeKind::STUB)) {
+ CodeKind::FOR_TESTING)) {
// Pick half of the stack parameters at random and move them into spill
// slots, separated by `extra_stack_space` bytes.
// When testing a move with stack slots using CheckAssembleMove or
diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
index 079923967c..8006ca43f2 100644
--- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
+++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
@@ -74,8 +74,8 @@ void ContextSpecializationTester::CheckChangesToValue(
Reduction r = spec()->Reduce(node);
CHECK(r.Changed());
HeapObjectMatcher match(r.replacement());
- CHECK(match.HasValue());
- CHECK_EQ(*match.Value(), *expected_value);
+ CHECK(match.HasResolvedValue());
+ CHECK_EQ(*match.ResolvedValue(), *expected_value);
}
void ContextSpecializationTester::CheckContextInputAndDepthChanges(
@@ -88,7 +88,7 @@ void ContextSpecializationTester::CheckContextInputAndDepthChanges(
Node* new_context = NodeProperties::GetContextInput(r.replacement());
CHECK_EQ(IrOpcode::kHeapConstant, new_context->opcode());
HeapObjectMatcher match(new_context);
- CHECK_EQ(Context::cast(*match.Value()), *expected_new_context_object);
+ CHECK_EQ(Context::cast(*match.ResolvedValue()), *expected_new_context_object);
ContextAccess new_access = ContextAccessOf(r.replacement()->op());
CHECK_EQ(new_access.depth(), expected_new_depth);
@@ -160,7 +160,7 @@ TEST(ReduceJSLoadContext0) {
Node* new_context_input = NodeProperties::GetContextInput(r.replacement());
CHECK_EQ(IrOpcode::kHeapConstant, new_context_input->opcode());
HeapObjectMatcher match(new_context_input);
- CHECK_EQ(*native, Context::cast(*match.Value()));
+ CHECK_EQ(*native, Context::cast(*match.ResolvedValue()));
ContextAccess access = ContextAccessOf(r.replacement()->op());
CHECK_EQ(Context::GLOBAL_EVAL_FUN_INDEX, static_cast<int>(access.index()));
CHECK_EQ(0, static_cast<int>(access.depth()));
@@ -176,8 +176,8 @@ TEST(ReduceJSLoadContext0) {
CHECK(r.replacement() != load);
HeapObjectMatcher match(r.replacement());
- CHECK(match.HasValue());
- CHECK_EQ(*expected, *match.Value());
+ CHECK(match.HasResolvedValue());
+ CHECK_EQ(*expected, *match.ResolvedValue());
}
// Clean up so that verifiers don't complain.
@@ -474,7 +474,7 @@ TEST(ReduceJSStoreContext0) {
Node* new_context_input = NodeProperties::GetContextInput(r.replacement());
CHECK_EQ(IrOpcode::kHeapConstant, new_context_input->opcode());
HeapObjectMatcher match(new_context_input);
- CHECK_EQ(*native, Context::cast(*match.Value()));
+ CHECK_EQ(*native, Context::cast(*match.ResolvedValue()));
ContextAccess access = ContextAccessOf(r.replacement()->op());
CHECK_EQ(Context::GLOBAL_EVAL_FUN_INDEX, static_cast<int>(access.index()));
CHECK_EQ(0, static_cast<int>(access.depth()));
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 48be24073a..a4414ce276 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -49,7 +49,7 @@ TEST(TestLinkageCreate) {
Handle<JSFunction> function = Compile("a + b");
Handle<SharedFunctionInfo> shared(function->shared(), handles.main_isolate());
OptimizedCompilationInfo info(handles.main_zone(), function->GetIsolate(),
- shared, function, CodeKind::OPTIMIZED_FUNCTION);
+ shared, function, CodeKind::TURBOFAN);
auto call_descriptor = Linkage::ComputeIncoming(info.zone(), &info);
CHECK(call_descriptor);
}
@@ -67,8 +67,7 @@ TEST(TestLinkageJSFunctionIncoming) {
Handle<SharedFunctionInfo> shared(function->shared(),
handles.main_isolate());
OptimizedCompilationInfo info(handles.main_zone(), function->GetIsolate(),
- shared, function,
- CodeKind::OPTIMIZED_FUNCTION);
+ shared, function, CodeKind::TURBOFAN);
auto call_descriptor = Linkage::ComputeIncoming(info.zone(), &info);
CHECK(call_descriptor);
@@ -85,7 +84,7 @@ TEST(TestLinkageJSCall) {
Handle<JSFunction> function = Compile("a + c");
Handle<SharedFunctionInfo> shared(function->shared(), handles.main_isolate());
OptimizedCompilationInfo info(handles.main_zone(), function->GetIsolate(),
- shared, function, CodeKind::OPTIMIZED_FUNCTION);
+ shared, function, CodeKind::TURBOFAN);
for (int i = 0; i < 32; i++) {
auto call_descriptor = Linkage::GetJSCallDescriptor(
@@ -109,7 +108,8 @@ TEST(TestLinkageStubCall) {
Isolate* isolate = CcTest::InitIsolateOnce();
Zone zone(isolate->allocator(), ZONE_NAME);
Callable callable = Builtins::CallableFor(isolate, Builtins::kToNumber);
- OptimizedCompilationInfo info(ArrayVector("test"), &zone, CodeKind::STUB);
+ OptimizedCompilationInfo info(ArrayVector("test"), &zone,
+ CodeKind::FOR_TESTING);
auto call_descriptor = Linkage::GetStubCallDescriptor(
&zone, callable.descriptor(), 0, CallDescriptor::kNoFlags,
Operator::kNoProperties);
@@ -129,7 +129,8 @@ TEST(TestFPLinkageStubCall) {
Zone zone(isolate->allocator(), ZONE_NAME);
Callable callable =
Builtins::CallableFor(isolate, Builtins::kWasmFloat64ToNumber);
- OptimizedCompilationInfo info(ArrayVector("test"), &zone, CodeKind::STUB);
+ OptimizedCompilationInfo info(ArrayVector("test"), &zone,
+ CodeKind::FOR_TESTING);
auto call_descriptor = Linkage::GetStubCallDescriptor(
&zone, callable.descriptor(), 0, CallDescriptor::kNoFlags,
Operator::kNoProperties);
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index 9ef1ff68b3..8693d5fa76 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -131,14 +131,12 @@ std::shared_ptr<wasm::NativeModule> AllocateNativeModule(Isolate* isolate,
return native_module;
}
-void TestReturnMultipleValues(MachineType type) {
- const int kMaxCount = 20;
- const int kMaxParamCount = 9;
- // Use 9 parameters as a regression test or https://crbug.com/838098.
- for (int param_count : {2, kMaxParamCount}) {
- for (int count = 0; count < kMaxCount; ++count) {
- printf("\n==== type = %s, count = %d ====\n\n\n",
- MachineReprToString(type.representation()), count);
+template <int kMinParamCount, int kMaxParamCount>
+void TestReturnMultipleValues(MachineType type, int min_count, int max_count) {
+ for (int param_count : {kMinParamCount, kMaxParamCount}) {
+ for (int count = min_count; count < max_count; ++count) {
+ printf("\n==== type = %s, parameter_count = %d, count = %d ====\n\n\n",
+ MachineReprToString(type.representation()), param_count, count);
v8::internal::AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
CallDescriptor* desc =
@@ -230,8 +228,15 @@ void TestReturnMultipleValues(MachineType type) {
} // namespace
+// Use 9 parameters as a regression test for https://crbug.com/838098.
#define TEST_MULTI(Type, type) \
- TEST(ReturnMultiple##Type) { TestReturnMultipleValues(type); }
+ TEST(ReturnMultiple##Type) { TestReturnMultipleValues<2, 9>(type, 0, 20); }
+
+// Create a frame larger than UINT16_MAX to force TF to use an extra register
+// when popping the frame.
+TEST(TestReturnMultipleValuesLargeFrame) {
+ TestReturnMultipleValues<20000, 20000>(MachineType::Int32(), 2, 3);
+}
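
Hoisting the parameter counts into non-type template parameters lets the large-frame test instantiate a 20000-parameter variant of the same loop without disturbing the common 2-and-9-parameter configuration. A reduced sketch of the pattern:

    #include <cstdio>

    // Compile-time counts pick the shape; runtime arguments pick the range.
    template <int kMinParamCount, int kMaxParamCount>
    void RunMatrix(int min_count, int max_count) {
      for (int param_count : {kMinParamCount, kMaxParamCount}) {
        for (int count = min_count; count < max_count; ++count) {
          std::printf("param_count=%d count=%d\n", param_count, count);
        }
      }
    }

    int main() {
      RunMatrix<2, 9>(0, 20);         // Common configuration.
      RunMatrix<20000, 20000>(2, 3);  // Large-frame stress configuration.
    }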
TEST_MULTI(Int32, MachineType::Int32())
#if (!V8_TARGET_ARCH_32_BIT)
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index 42e048af6b..4f1ebb7f7d 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -48,26 +48,26 @@ class RepresentationChangerTester : public HandleAndZoneScope,
// TODO(titzer): use ValueChecker / ValueUtil
void CheckInt32Constant(Node* n, int32_t expected) {
Int32Matcher m(n);
- CHECK(m.HasValue());
- CHECK_EQ(expected, m.Value());
+ CHECK(m.HasResolvedValue());
+ CHECK_EQ(expected, m.ResolvedValue());
}
void CheckInt64Constant(Node* n, int64_t expected) {
Int64Matcher m(n);
- CHECK(m.HasValue());
- CHECK_EQ(expected, m.Value());
+ CHECK(m.HasResolvedValue());
+ CHECK_EQ(expected, m.ResolvedValue());
}
void CheckUint32Constant(Node* n, uint32_t expected) {
Uint32Matcher m(n);
- CHECK(m.HasValue());
- CHECK_EQ(static_cast<int>(expected), static_cast<int>(m.Value()));
+ CHECK(m.HasResolvedValue());
+ CHECK_EQ(static_cast<int>(expected), static_cast<int>(m.ResolvedValue()));
}
void CheckFloat64Constant(Node* n, double expected) {
Float64Matcher m(n);
- CHECK(m.HasValue());
- CHECK_DOUBLE_EQ(expected, m.Value());
+ CHECK(m.HasResolvedValue());
+ CHECK_DOUBLE_EQ(expected, m.ResolvedValue());
}
void CheckFloat32Constant(Node* n, float expected) {
@@ -78,15 +78,15 @@ class RepresentationChangerTester : public HandleAndZoneScope,
void CheckHeapConstant(Node* n, HeapObject expected) {
HeapObjectMatcher m(n);
- CHECK(m.HasValue());
- CHECK_EQ(expected, *m.Value());
+ CHECK(m.HasResolvedValue());
+ CHECK_EQ(expected, *m.ResolvedValue());
}
void CheckNumberConstant(Node* n, double expected) {
NumberMatcher m(n);
CHECK_EQ(IrOpcode::kNumberConstant, n->opcode());
- CHECK(m.HasValue());
- CHECK_DOUBLE_EQ(expected, m.Value());
+ CHECK(m.HasResolvedValue());
+ CHECK_DOUBLE_EQ(expected, m.ResolvedValue());
}
Node* Parameter(int index = 0) {
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index 74ac9bc0f3..eac874480c 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -80,6 +80,8 @@ class BytecodeGraphTester {
i::FLAG_allow_natives_syntax = true;
}
virtual ~BytecodeGraphTester() = default;
+ BytecodeGraphTester(const BytecodeGraphTester&) = delete;
+ BytecodeGraphTester& operator=(const BytecodeGraphTester&) = delete;
template <class... A>
BytecodeGraphCallable<A...> GetCallable(
@@ -123,7 +125,7 @@ class BytecodeGraphTester {
Zone zone(isolate_->allocator(), ZONE_NAME);
Handle<SharedFunctionInfo> shared(function->shared(), isolate_);
OptimizedCompilationInfo compilation_info(&zone, isolate_, shared, function,
- CodeKind::OPTIMIZED_FUNCTION);
+ CodeKind::TURBOFAN);
// Compiler relies on canonicalized handles, let's create
// a canonicalized scope and migrate existing handles there.
@@ -137,8 +139,6 @@ class BytecodeGraphTester {
return function;
}
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeGraphTester);
};
#define SPACE()
diff --git a/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc b/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc
index 5944fc9155..7a76839081 100644
--- a/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc
+++ b/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc
@@ -17,7 +17,7 @@ template <typename InType, typename OutType, typename Iterable>
void TestExternalReference_ConvertOp(
BufferedRawMachineAssemblerTester<int32_t>* m, ExternalReference ref,
void (*wrapper)(Address), Iterable inputs) {
- constexpr size_t kBufferSize = Max(sizeof(InType), sizeof(OutType));
+ constexpr size_t kBufferSize = std::max(sizeof(InType), sizeof(OutType));
uint8_t buffer[kBufferSize] = {0};
Address buffer_addr = reinterpret_cast<Address>(buffer);
@@ -45,7 +45,7 @@ template <typename InType, typename OutType, typename Iterable>
void TestExternalReference_ConvertOpWithOutputAndReturn(
BufferedRawMachineAssemblerTester<int32_t>* m, ExternalReference ref,
int32_t (*wrapper)(Address), Iterable inputs) {
- constexpr size_t kBufferSize = Max(sizeof(InType), sizeof(OutType));
+ constexpr size_t kBufferSize = std::max(sizeof(InType), sizeof(OutType));
uint8_t buffer[kBufferSize] = {0};
Address buffer_addr = reinterpret_cast<Address>(buffer);
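
Max here was V8's own helper; std::max has been constexpr since C++14, so it can size a stack buffer directly, which is what the two hunks above switch to:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // std::max is constexpr in C++14 and later, so the array bound is a
    // constant expression.
    constexpr size_t kBufferSize = std::max(sizeof(int32_t), sizeof(double));
    uint8_t buffer[kBufferSize] = {0};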
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index f6369b519c..b3ec64e712 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -9,6 +9,7 @@
#include "src/base/bits.h"
#include "src/base/ieee754.h"
#include "src/base/overflowing-math.h"
+#include "src/base/safe_conversions.h"
#include "src/base/utils/random-number-generator.h"
#include "src/common/ptr-compr-inl.h"
#include "src/objects/objects-inl.h"
@@ -6464,10 +6465,9 @@ TEST(RunChangeFloat64ToInt64) {
BufferedRawMachineAssemblerTester<int64_t> m(MachineType::Float64());
m.Return(m.ChangeFloat64ToInt64(m.Parameter(0)));
- FOR_INT64_INPUTS(i) {
- double input = static_cast<double>(i);
- if (static_cast<int64_t>(input) == i) {
- CHECK_EQ(static_cast<int64_t>(input), m.Call(input));
+ FOR_FLOAT64_INPUTS(i) {
+ if (base::IsValueInRangeForNumericType<int64_t>(i)) {
+ CHECK_EQ(static_cast<int64_t>(i), m.Call(i));
}
}
}
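
The rewritten RunChangeFloat64ToInt64 iterates double inputs and only exercises the conversion when the value fits in int64_t: casting an out-of-range (or NaN) double to an integer type is undefined behavior in C++, which is the hazard base::IsValueInRangeForNumericType guards against. An equivalent standalone guard, for illustration:

    #include <cstdint>

    // True when static_cast<int64_t>(d) is well defined. 2^63 is exactly
    // representable as a double, so the valid range is [-2^63, 2^63).
    bool FitsInInt64(double d) {
      constexpr double kLower = -9223372036854775808.0;  // -2^63
      constexpr double kUpper = 9223372036854775808.0;   //  2^63
      return d >= kLower && d < kUpper;  // NaN fails the comparisons.
    }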
@@ -6477,9 +6477,7 @@ TEST(RunChangeInt64ToFloat64) {
m.Return(m.ChangeInt64ToFloat64(m.Parameter(0)));
FOR_INT64_INPUTS(i) {
double output = static_cast<double>(i);
- if (static_cast<int64_t>(output) == i) {
- CHECK_EQ(output, m.Call(i));
- }
+ CHECK_EQ(output, m.Call(i));
}
}
@@ -6548,9 +6546,11 @@ TEST(RunTryTruncateFloat64ToInt64WithoutCheck) {
BufferedRawMachineAssemblerTester<int64_t> m(MachineType::Float64());
m.Return(m.TryTruncateFloat64ToInt64(m.Parameter(0)));
- FOR_INT64_INPUTS(i) {
- double input = static_cast<double>(i);
- CHECK_EQ(static_cast<int64_t>(input), m.Call(input));
+ FOR_FLOAT64_INPUTS(i) {
+ if (base::IsValueInRangeForNumericType<int64_t>(i)) {
+ double input = static_cast<double>(i);
+ CHECK_EQ(static_cast<int64_t>(input), m.Call(input));
+ }
}
}
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index 1920579855..c103d37aae 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -246,7 +246,7 @@ Handle<Code> CompileGraph(const char* name, CallDescriptor* call_descriptor,
Graph* graph, Schedule* schedule = nullptr) {
Isolate* isolate = CcTest::InitIsolateOnce();
OptimizedCompilationInfo info(ArrayVector("testing"), graph->zone(),
- CodeKind::STUB);
+ CodeKind::FOR_TESTING);
Handle<Code> code = Pipeline::GenerateCodeForTesting(
&info, isolate, call_descriptor, graph,
AssemblerOptions::Default(isolate), schedule)
diff --git a/deps/v8/test/cctest/compiler/test-run-retpoline.cc b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
index d5eb9b5428..ccdc4821e0 100644
--- a/deps/v8/test/cctest/compiler/test-run-retpoline.cc
+++ b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
@@ -27,8 +27,8 @@ Handle<Code> BuildCallee(Isolate* isolate, CallDescriptor* call_descriptor) {
int param_count = static_cast<int>(call_descriptor->StackParameterCount());
TNode<IntPtrT> sum = __ IntPtrConstant(0);
for (int i = 0; i < param_count; ++i) {
- TNode<IntPtrT> product =
- __ Signed(__ IntPtrMul(__ Parameter(i), __ IntPtrConstant(i + 1)));
+ TNode<IntPtrT> product = __ Signed(__ IntPtrMul(
+ __ UncheckedParameter<IntPtrT>(i), __ IntPtrConstant(i + 1)));
sum = __ IntPtrAdd(sum, product);
}
__ Return(sum);
diff --git a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
index ca7eb6a86e..2b1ee39f6f 100644
--- a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
@@ -28,8 +28,8 @@ Handle<Code> BuildCallee(Isolate* isolate, CallDescriptor* call_descriptor) {
int param_count = static_cast<int>(call_descriptor->StackParameterCount());
TNode<IntPtrT> sum = __ IntPtrConstant(0);
for (int i = 0; i < param_count; ++i) {
- TNode<WordT> product =
- __ IntPtrMul(__ Parameter(i), __ IntPtrConstant(i + 1));
+ TNode<WordT> product = __ IntPtrMul(__ UncheckedParameter<IntPtrT>(i),
+ __ IntPtrConstant(i + 1));
sum = __ Signed(__ IntPtrAdd(sum, product));
}
__ Return(sum);
diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h
index cda606ec16..e395c885ac 100644
--- a/deps/v8/test/cctest/compiler/value-helper.h
+++ b/deps/v8/test/cctest/compiler/value-helper.h
@@ -273,6 +273,8 @@ class ValueHelper {
0x000007FFFFFFFFFF, 0x000003FFFFFFFFFF, 0x000001FFFFFFFFFF,
0x8000008000000000, 0x8000008000000001, 0x8000000000000400,
0x8000000000000401, 0x0000000000000020,
+ 0x8000000000000000, // int64_t min
+ 0x7FFFFFFFFFFFFFFF, // int64_t max
// Bit pattern of a quiet NaN and signaling NaN, with or without
// additional payload.
0x7FF8000000000000, 0x7FF0000000000000, 0x7FF8123456789ABC,
diff --git a/deps/v8/test/cctest/heap/heap-tester.h b/deps/v8/test/cctest/heap/heap-tester.h
index 92ee4d7055..83de6ec828 100644
--- a/deps/v8/test/cctest/heap/heap-tester.h
+++ b/deps/v8/test/cctest/heap/heap-tester.h
@@ -12,6 +12,7 @@
// Those tests need to be defined using HEAP_TEST(Name) { ... }.
#define HEAP_TEST_METHODS(V) \
V(CodeLargeObjectSpace) \
+ V(CodeLargeObjectSpace64k) \
V(CompactionFullAbortedPage) \
V(CompactionPartiallyAbortedPage) \
V(CompactionPartiallyAbortedPageIntraAbortedPointers) \
@@ -47,7 +48,6 @@
V(StressHandles) \
V(TestMemoryReducerSampleJsCalls) \
V(TestSizeOfObjects) \
- V(Regress5831) \
V(Regress10560) \
V(Regress538257) \
V(Regress587004) \
diff --git a/deps/v8/test/cctest/heap/test-alloc.cc b/deps/v8/test/cctest/heap/test-alloc.cc
index d50287ee30..001c8eb0c3 100644
--- a/deps/v8/test/cctest/heap/test-alloc.cc
+++ b/deps/v8/test/cctest/heap/test-alloc.cc
@@ -87,8 +87,7 @@ Handle<Object> HeapTester::TestAllocateAfterFailures() {
heap::SimulateFullSpace(heap->code_space());
size = CcTest::i_isolate()->builtins()->builtin(Builtins::kIllegal).Size();
obj =
- heap->AllocateRaw(size, AllocationType::kCode, AllocationOrigin::kRuntime,
- AllocationAlignment::kCodeAligned)
+ heap->AllocateRaw(size, AllocationType::kCode, AllocationOrigin::kRuntime)
.ToObjectChecked();
heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
return CcTest::i_isolate()->factory()->true_value();
@@ -149,8 +148,8 @@ TEST(StressJS) {
// Patch the map to have an accessor for "get".
Handle<Map> map(function->initial_map(), isolate);
- Handle<DescriptorArray> instance_descriptors(map->instance_descriptors(),
- isolate);
+ Handle<DescriptorArray> instance_descriptors(
+ map->instance_descriptors(kRelaxedLoad), isolate);
CHECK_EQ(0, instance_descriptors->number_of_descriptors());
PropertyAttributes attrs = NONE;
diff --git a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
index 19f08e0cc3..d3eb8c84ec 100644
--- a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
@@ -361,7 +361,7 @@ UNINITIALIZED_TEST(ArrayBuffer_SemiSpaceCopyMultipleTasks) {
// Test allocates JSArrayBuffer on different pages before triggering a
// full GC that performs the semispace copy. If parallelized, this test
// ensures proper synchronization in TSAN configurations.
- FLAG_min_semi_space_size = Max(2 * Page::kPageSize / MB, 1);
+ FLAG_min_semi_space_size = std::max(2 * Page::kPageSize / MB, 1);
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
diff --git a/deps/v8/test/cctest/heap/test-compaction.cc b/deps/v8/test/cctest/heap/test-compaction.cc
index e836f37db5..5746b98ee5 100644
--- a/deps/v8/test/cctest/heap/test-compaction.cc
+++ b/deps/v8/test/cctest/heap/test-compaction.cc
@@ -54,6 +54,11 @@ HEAP_TEST(CompactionFullAbortedPage) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
+ auto reset_oom = [](void* heap, size_t limit, size_t) -> size_t {
+ reinterpret_cast<Heap*>(heap)->set_force_oom(false);
+ return limit;
+ };
+ heap->AddNearHeapLimitCallback(reset_oom, heap);
{
HandleScope scope1(isolate);
@@ -84,6 +89,7 @@ HEAP_TEST(CompactionFullAbortedPage) {
CheckInvariantsOfAbortedPage(to_be_aborted_page);
}
}
+ heap->RemoveNearHeapLimitCallback(reset_oom, 0u);
}
namespace {
@@ -115,6 +121,11 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
+ auto reset_oom = [](void* heap, size_t limit, size_t) -> size_t {
+ reinterpret_cast<Heap*>(heap)->set_force_oom(false);
+ return limit;
+ };
+ heap->AddNearHeapLimitCallback(reset_oom, heap);
{
HandleScope scope1(isolate);
@@ -171,6 +182,7 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
}
}
}
+ heap->RemoveNearHeapLimitCallback(reset_oom, 0u);
}
HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
@@ -189,6 +201,12 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
+ auto reset_oom = [](void* heap, size_t limit, size_t) -> size_t {
+ reinterpret_cast<Heap*>(heap)->set_force_oom(false);
+ return limit;
+ };
+ heap->AddNearHeapLimitCallback(reset_oom, heap);
+
{
HandleScope scope1(isolate);
@@ -247,6 +265,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
}
}
}
+ heap->RemoveNearHeapLimitCallback(reset_oom, 0u);
}
HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
@@ -267,6 +286,11 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
+ auto reset_oom = [](void* heap, size_t limit, size_t) -> size_t {
+ reinterpret_cast<Heap*>(heap)->set_force_oom(false);
+ return limit;
+ };
+ heap->AddNearHeapLimitCallback(reset_oom, heap);
{
HandleScope scope1(isolate);
Handle<FixedArray> root_array =
@@ -334,6 +358,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
CheckInvariantsOfAbortedPage(to_be_aborted_page);
}
}
+ heap->RemoveNearHeapLimitCallback(reset_oom, 0u);
}
HEAP_TEST(CompactionPartiallyAbortedPageWithRememberedSetEntries) {
@@ -357,6 +382,11 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithRememberedSetEntries) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
+ auto reset_oom = [](void* heap, size_t limit, size_t) -> size_t {
+ reinterpret_cast<Heap*>(heap)->set_force_oom(false);
+ return limit;
+ };
+ heap->AddNearHeapLimitCallback(reset_oom, heap);
{
HandleScope scope1(isolate);
Handle<FixedArray> root_array =
@@ -458,6 +488,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithRememberedSetEntries) {
CcTest::CollectGarbage(NEW_SPACE);
}
}
+ heap->RemoveNearHeapLimitCallback(reset_oom, 0u);
}
} // namespace heap
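
Each compaction test now brackets its body with the same near-heap-limit callback so the simulated OOM clears itself when the heap approaches its limit, instead of killing the test. The callback shape matches v8::NearHeapLimitCallback: it receives a data pointer plus the current and initial limits and returns the limit to keep using. The shared lambda, written once as a free function for clarity:

    // Equivalent to the reset_oom lambda above: clear the forced-OOM flag
    // and leave the heap limit unchanged.
    size_t ResetOom(void* data, size_t current_heap_limit,
                    size_t initial_heap_limit) {
      reinterpret_cast<Heap*>(data)->set_force_oom(false);
      return current_heap_limit;
    }

    // heap->AddNearHeapLimitCallback(ResetOom, heap);
    // ... test body that forces OOM during compaction ...
    // heap->RemoveNearHeapLimitCallback(ResetOom, 0u);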
diff --git a/deps/v8/test/cctest/heap/test-concurrent-allocation.cc b/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
index b83b55551a..12c9f162e3 100644
--- a/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
+++ b/deps/v8/test/cctest/heap/test-concurrent-allocation.cc
@@ -14,6 +14,7 @@
#include "src/codegen/macro-assembler.h"
#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
+#include "src/handles/handles.h"
#include "src/handles/local-handles-inl.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator-inl.h"
@@ -28,6 +29,7 @@
namespace v8 {
namespace internal {
+namespace {
void CreateFixedArray(Heap* heap, Address start, int size) {
HeapObject object = HeapObject::FromAddress(start);
object.set_map_after_allocation(ReadOnlyRoots(heap).fixed_array_map(),
@@ -43,6 +45,23 @@ const int kNumIterations = 2000;
const int kSmallObjectSize = 10 * kTaggedSize;
const int kMediumObjectSize = 8 * KB;
+void AllocateSomeObjects(LocalHeap* local_heap) {
+ for (int i = 0; i < kNumIterations; i++) {
+ Address address = local_heap->AllocateRawOrFail(
+ kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
+ AllocationAlignment::kWordAligned);
+ CreateFixedArray(local_heap->heap(), address, kSmallObjectSize);
+ address = local_heap->AllocateRawOrFail(
+ kMediumObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
+ AllocationAlignment::kWordAligned);
+ CreateFixedArray(local_heap->heap(), address, kMediumObjectSize);
+ if (i % 10 == 0) {
+ local_heap->Safepoint();
+ }
+ }
+}
+} // namespace
+
class ConcurrentAllocationThread final : public v8::base::Thread {
public:
explicit ConcurrentAllocationThread(Heap* heap, std::atomic<int>* pending)
@@ -51,22 +70,9 @@ class ConcurrentAllocationThread final : public v8::base::Thread {
pending_(pending) {}
void Run() override {
- LocalHeap local_heap(heap_);
-
- for (int i = 0; i < kNumIterations; i++) {
- Address address = local_heap.AllocateRawOrFail(
- kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
- AllocationAlignment::kWordAligned);
- CreateFixedArray(heap_, address, kSmallObjectSize);
- address = local_heap.AllocateRawOrFail(
- kMediumObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
- AllocationAlignment::kWordAligned);
- CreateFixedArray(heap_, address, kMediumObjectSize);
- if (i % 10 == 0) {
- local_heap.Safepoint();
- }
- }
-
+ LocalHeap local_heap(heap_, ThreadKind::kBackground);
+ UnparkedScope unparked_scope(&local_heap);
+ AllocateSomeObjects(&local_heap);
pending_->fetch_sub(1);
}
@@ -109,6 +115,26 @@ UNINITIALIZED_TEST(ConcurrentAllocationInOldSpace) {
isolate->Dispose();
}
+UNINITIALIZED_TEST(ConcurrentAllocationInOldSpaceFromMainThread) {
+ FLAG_max_old_space_size = 4;
+ FLAG_concurrent_allocation = true;
+ FLAG_local_heaps = true;
+ FLAG_stress_concurrent_allocation = false;
+
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+ {
+ LocalHeap local_heap(i_isolate->heap(), ThreadKind::kMain);
+ UnparkedScope unparked_scope(&local_heap);
+ AllocateSomeObjects(&local_heap);
+ }
+
+ isolate->Dispose();
+}
+
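LocalHeap construction now names the thread kind explicitly, and any thread that allocates must be unparked first. The pattern every thread in this file adopts, extracted for clarity (a sketch of the in-file usage, not new API):

    // Background threads: explicit kind plus an UnparkedScope around the
    // allocating region; AllocateSomeObjects safepoints every 10 iterations.
    void BackgroundWork(Heap* heap) {
      LocalHeap local_heap(heap, ThreadKind::kBackground);
      UnparkedScope unparked_scope(&local_heap);
      AllocateSomeObjects(&local_heap);
    }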
class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
public:
explicit LargeObjectConcurrentAllocationThread(Heap* heap,
@@ -118,14 +144,20 @@ class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
pending_(pending) {}
void Run() override {
- LocalHeap local_heap(heap_);
+ LocalHeap local_heap(heap_, ThreadKind::kBackground);
+ UnparkedScope unparked_scope(&local_heap);
const size_t kLargeObjectSize = kMaxRegularHeapObjectSize * 2;
for (int i = 0; i < kNumIterations; i++) {
- Address address = local_heap.AllocateRawOrFail(
+ AllocationResult result = local_heap.AllocateRaw(
kLargeObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned);
- CreateFixedArray(heap_, address, kLargeObjectSize);
+ if (result.IsRetry()) {
+ local_heap.PerformCollection();
+ } else {
+ Address address = result.ToAddress();
+ CreateFixedArray(heap_, address, kLargeObjectSize);
+ }
local_heap.Safepoint();
}
@@ -185,7 +217,8 @@ class ConcurrentBlackAllocationThread final : public v8::base::Thread {
sema_marking_started_(sema_marking_started) {}
void Run() override {
- LocalHeap local_heap(heap_);
+ LocalHeap local_heap(heap_, ThreadKind::kBackground);
+ UnparkedScope unparked_scope(&local_heap);
for (int i = 0; i < kNumIterations; i++) {
if (i == kWhiteIterations) {
@@ -264,7 +297,8 @@ class ConcurrentWriteBarrierThread final : public v8::base::Thread {
value_(value) {}
void Run() override {
- LocalHeap local_heap(heap_);
+ LocalHeap local_heap(heap_, ThreadKind::kBackground);
+ UnparkedScope unparked_scope(&local_heap);
fixed_array_.set(0, value_);
}
@@ -325,7 +359,8 @@ class ConcurrentRecordRelocSlotThread final : public v8::base::Thread {
value_(value) {}
void Run() override {
- LocalHeap local_heap(heap_);
+ LocalHeap local_heap(heap_, ThreadKind::kBackground);
+ UnparkedScope unparked_scope(&local_heap);
int mode_mask = RelocInfo::EmbeddedObjectModeMask();
for (RelocIterator it(code_, mode_mask); !it.done(); it.next()) {
DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));
@@ -365,7 +400,7 @@ UNINITIALIZED_TEST(ConcurrentRecordRelocSlot) {
CodeDesc desc;
masm.GetCode(i_isolate, &desc);
Handle<Code> code_handle =
- Factory::CodeBuilder(i_isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(i_isolate, desc, CodeKind::FOR_TESTING).Build();
heap::AbandonCurrentlyFreeMemory(heap->old_space());
Handle<HeapNumber> value_handle(
i_isolate->factory()->NewHeapNumber<AllocationType::kOld>(1.1));
diff --git a/deps/v8/test/cctest/heap/test-concurrent-marking.cc b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
index 375890a2b5..71d46e0827 100644
--- a/deps/v8/test/cctest/heap/test-concurrent-marking.cc
+++ b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
@@ -44,9 +44,8 @@ TEST(ConcurrentMarking) {
new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
PublishSegment(marking_worklists.shared(),
ReadOnlyRoots(heap).undefined_value());
- concurrent_marking->ScheduleTasks();
- concurrent_marking->Stop(
- ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
+ concurrent_marking->ScheduleJob();
+ concurrent_marking->Join();
delete concurrent_marking;
}
@@ -67,14 +66,12 @@ TEST(ConcurrentMarkingReschedule) {
new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
PublishSegment(marking_worklists.shared(),
ReadOnlyRoots(heap).undefined_value());
- concurrent_marking->ScheduleTasks();
- concurrent_marking->Stop(
- ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
+ concurrent_marking->ScheduleJob();
+ concurrent_marking->Join();
PublishSegment(marking_worklists.shared(),
ReadOnlyRoots(heap).undefined_value());
- concurrent_marking->RescheduleTasksIfNeeded();
- concurrent_marking->Stop(
- ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
+ concurrent_marking->RescheduleJobIfNeeded();
+ concurrent_marking->Join();
delete concurrent_marking;
}
@@ -96,14 +93,13 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
for (int i = 0; i < 5000; i++)
PublishSegment(marking_worklists.shared(),
ReadOnlyRoots(heap).undefined_value());
- concurrent_marking->ScheduleTasks();
- concurrent_marking->Stop(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
+ concurrent_marking->ScheduleJob();
+ concurrent_marking->Pause();
for (int i = 0; i < 5000; i++)
PublishSegment(marking_worklists.shared(),
ReadOnlyRoots(heap).undefined_value());
- concurrent_marking->RescheduleTasksIfNeeded();
- concurrent_marking->Stop(
- ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
+ concurrent_marking->RescheduleJobIfNeeded();
+ concurrent_marking->Join();
delete concurrent_marking;
}
@@ -117,8 +113,7 @@ TEST(ConcurrentMarkingMarkedBytes) {
CcTest::CollectAllGarbage();
if (!heap->incremental_marking()->IsStopped()) return;
heap::SimulateIncrementalMarking(heap, false);
- heap->concurrent_marking()->Stop(
- ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
+ heap->concurrent_marking()->Join();
CHECK_GE(heap->concurrent_marking()->TotalMarkedBytes(), root->Size());
}
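
ConcurrentMarking's ScheduleTasks/Stop(StopRequest) pairs are replaced with ScheduleJob, Join, and Pause, which mirror the v8::JobHandle vocabulary from the platform Job API. For embedders, the public counterpart is v8::Platform::PostJob with a v8::JobTask; a hedged sketch (the include path and the concurrency cap of 4 are assumptions, not from this patch):

    #include <atomic>
    #include <memory>

    #include "v8-platform.h"  // Exact path varies by embedder setup.

    class DemoJob : public v8::JobTask {
     public:
      void Run(v8::JobDelegate* delegate) override {
        while (work_.fetch_sub(1) > 0) {
          if (delegate->ShouldYield()) return;  // Cooperative preemption.
        }
      }
      size_t GetMaxConcurrency(size_t /*worker_count*/) const override {
        return work_.load() > 0 ? 4 : 0;  // Returning 0 lets the job end.
      }

     private:
      std::atomic<int> work_{1000};
    };

    // auto handle = platform->PostJob(v8::TaskPriority::kUserVisible,
    //                                 std::make_unique<DemoJob>());
    // handle->Join();  // Same role as ConcurrentMarking::Join() above.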
diff --git a/deps/v8/test/cctest/heap/test-embedder-tracing.cc b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
index 70778f51ee..2e92805edb 100644
--- a/deps/v8/test/cctest/heap/test-embedder-tracing.cc
+++ b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
@@ -324,13 +324,13 @@ void TracedGlobalTest(v8::Isolate* isolate,
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- v8::TracedGlobal<v8::Object> global;
- construct_function(isolate, context, &global);
- CHECK(InCorrectGeneration(isolate, global));
- modifier_function(global);
+ auto global = std::make_unique<v8::TracedGlobal<v8::Object>>();
+ construct_function(isolate, context, global.get());
+ CHECK(InCorrectGeneration(isolate, *global));
+ modifier_function(*global);
gc_function();
- CHECK_IMPLIES(survives == SurvivalMode::kSurvives, !global.IsEmpty());
- CHECK_IMPLIES(survives == SurvivalMode::kDies, global.IsEmpty());
+ CHECK_IMPLIES(survives == SurvivalMode::kSurvives, !global->IsEmpty());
+ CHECK_IMPLIES(survives == SurvivalMode::kDies, global->IsEmpty());
}
} // namespace
@@ -371,33 +371,33 @@ TEST(TracedGlobalCopyWithDestructor) {
i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
const size_t initial_count = global_handles->handles_count();
- v8::TracedGlobal<v8::Object> global1;
+ auto global1 = std::make_unique<v8::TracedGlobal<v8::Object>>();
{
v8::HandleScope scope(isolate);
- global1.Reset(isolate, v8::Object::New(isolate));
+ global1->Reset(isolate, v8::Object::New(isolate));
}
- v8::TracedGlobal<v8::Object> global2(global1);
- v8::TracedGlobal<v8::Object> global3;
- global3 = global2;
+ auto global2 = std::make_unique<v8::TracedGlobal<v8::Object>>(*global1);
+ auto global3 = std::make_unique<v8::TracedGlobal<v8::Object>>();
+ *global3 = *global2;
CHECK_EQ(initial_count + 3, global_handles->handles_count());
- CHECK(!global1.IsEmpty());
- CHECK_EQ(global1, global2);
- CHECK_EQ(global2, global3);
+ CHECK(!global1->IsEmpty());
+ CHECK_EQ(*global1, *global2);
+ CHECK_EQ(*global2, *global3);
{
v8::HandleScope scope(isolate);
- auto tmp = v8::Local<v8::Object>::New(isolate, global3);
+ auto tmp = v8::Local<v8::Object>::New(isolate, *global3);
CHECK(!tmp.IsEmpty());
InvokeMarkSweep();
}
CHECK_EQ(initial_count + 3, global_handles->handles_count());
- CHECK(!global1.IsEmpty());
- CHECK_EQ(global1, global2);
- CHECK_EQ(global2, global3);
+ CHECK(!global1->IsEmpty());
+ CHECK_EQ(*global1, *global2);
+ CHECK_EQ(*global2, *global3);
InvokeMarkSweep();
CHECK_EQ(initial_count, global_handles->handles_count());
- CHECK(global1.IsEmpty());
- CHECK_EQ(global1, global2);
- CHECK_EQ(global2, global3);
+ CHECK(global1->IsEmpty());
+ CHECK_EQ(*global1, *global2);
+ CHECK_EQ(*global2, *global3);
}
TEST(TracedGlobalCopyNoDestructor) {
@@ -408,28 +408,28 @@ TEST(TracedGlobalCopyNoDestructor) {
i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
const size_t initial_count = global_handles->handles_count();
- v8::TracedReference<v8::Value> global1;
+ auto global1 = std::make_unique<v8::TracedReference<v8::Value>>();
{
v8::HandleScope scope(isolate);
- global1.Reset(isolate, v8::Object::New(isolate));
+ global1->Reset(isolate, v8::Object::New(isolate));
}
- v8::TracedReference<v8::Value> global2(global1);
- v8::TracedReference<v8::Value> global3;
- global3 = global2;
+ auto global2 = std::make_unique<v8::TracedReference<v8::Value>>(*global1);
+ auto global3 = std::make_unique<v8::TracedReference<v8::Value>>();
+ *global3 = *global2;
CHECK_EQ(initial_count + 3, global_handles->handles_count());
- CHECK(!global1.IsEmpty());
- CHECK_EQ(global1, global2);
- CHECK_EQ(global2, global3);
+ CHECK(!global1->IsEmpty());
+ CHECK_EQ(*global1, *global2);
+ CHECK_EQ(*global2, *global3);
{
v8::HandleScope scope(isolate);
- auto tmp = v8::Local<v8::Value>::New(isolate, global3);
+ auto tmp = v8::Local<v8::Value>::New(isolate, *global3);
CHECK(!tmp.IsEmpty());
InvokeMarkSweep();
}
CHECK_EQ(initial_count + 3, global_handles->handles_count());
- CHECK(!global1.IsEmpty());
- CHECK_EQ(global1, global2);
- CHECK_EQ(global2, global3);
+ CHECK(!global1->IsEmpty());
+ CHECK_EQ(*global1, *global2);
+ CHECK_EQ(*global2, *global3);
InvokeMarkSweep();
CHECK_EQ(initial_count, global_handles->handles_count());
}
@@ -544,15 +544,15 @@ TEST(TracedReferenceHandlesMarking) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::TracedReference<v8::Value> live;
- v8::TracedReference<v8::Value> dead;
- live.Reset(isolate, v8::Undefined(isolate));
- dead.Reset(isolate, v8::Undefined(isolate));
+ auto live = std::make_unique<v8::TracedReference<v8::Value>>();
+ auto dead = std::make_unique<v8::TracedReference<v8::Value>>();
+ live->Reset(isolate, v8::Undefined(isolate));
+ dead->Reset(isolate, v8::Undefined(isolate));
i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
{
TestEmbedderHeapTracer tracer;
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- tracer.AddReferenceForTracing(&live);
+ tracer.AddReferenceForTracing(live.get());
const size_t initial_count = global_handles->handles_count();
InvokeMarkSweep();
const size_t final_count = global_handles->handles_count();
@@ -563,7 +563,7 @@ TEST(TracedReferenceHandlesMarking) {
{
TestEmbedderHeapTracer tracer;
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- tracer.AddReferenceForTracing(&live);
+ tracer.AddReferenceForTracing(live.get());
const size_t initial_count = global_handles->handles_count();
InvokeMarkSweep();
const size_t final_count = global_handles->handles_count();
@@ -579,8 +579,8 @@ TEST(TracedReferenceHandlesDoNotLeak) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::TracedReference<v8::Value> ref;
- ref.Reset(isolate, v8::Undefined(isolate));
+ auto ref = std::make_unique<v8::TracedReference<v8::Value>>();
+ ref->Reset(isolate, v8::Undefined(isolate));
i::GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
const size_t initial_count = global_handles->handles_count();
// We need two GCs because handles are black-allocated.
@@ -635,10 +635,10 @@ TEST(TracedGlobalIteration) {
TestEmbedderHeapTracer tracer;
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- v8::TracedGlobal<v8::Object> traced;
- ConstructJSObject(isolate, isolate->GetCurrentContext(), &traced);
- CHECK(!traced.IsEmpty());
- traced.SetWrapperClassId(57);
+ auto traced = std::make_unique<v8::TracedGlobal<v8::Object>>();
+ ConstructJSObject(isolate, isolate->GetCurrentContext(), traced.get());
+ CHECK(!traced->IsEmpty());
+ traced->SetWrapperClassId(57);
TracedGlobalVisitor visitor;
{
v8::HandleScope scope(isolate);
@@ -669,18 +669,18 @@ TEST(TracedGlobalSetFinalizationCallbackScavenge) {
tracer.ConsiderTracedGlobalAsRoot(false);
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- v8::TracedGlobal<v8::Object> traced;
- ConstructJSApiObject(isolate, isolate->GetCurrentContext(), &traced);
- CHECK(!traced.IsEmpty());
+ auto traced = std::make_unique<v8::TracedGlobal<v8::Object>>();
+ ConstructJSApiObject(isolate, isolate->GetCurrentContext(), traced.get());
+ CHECK(!traced->IsEmpty());
{
v8::HandleScope scope(isolate);
- auto local = traced.Get(isolate);
+ auto local = traced->Get(isolate);
local->SetAlignedPointerInInternalField(0, reinterpret_cast<void*>(0x4));
local->SetAlignedPointerInInternalField(1, reinterpret_cast<void*>(0x8));
}
- traced.SetFinalizationCallback(&traced, FinalizationCallback);
+ traced->SetFinalizationCallback(traced.get(), FinalizationCallback);
heap::InvokeScavenge();
- CHECK(traced.IsEmpty());
+ CHECK(traced->IsEmpty());
}
TEST(TracedGlobalSetFinalizationCallbackMarkSweep) {
@@ -691,18 +691,18 @@ TEST(TracedGlobalSetFinalizationCallbackMarkSweep) {
TestEmbedderHeapTracer tracer;
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- v8::TracedGlobal<v8::Object> traced;
- ConstructJSApiObject(isolate, isolate->GetCurrentContext(), &traced);
- CHECK(!traced.IsEmpty());
+ auto traced = std::make_unique<v8::TracedGlobal<v8::Object>>();
+ ConstructJSApiObject(isolate, isolate->GetCurrentContext(), traced.get());
+ CHECK(!traced->IsEmpty());
{
v8::HandleScope scope(isolate);
- auto local = traced.Get(isolate);
+ auto local = traced->Get(isolate);
local->SetAlignedPointerInInternalField(0, reinterpret_cast<void*>(0x4));
local->SetAlignedPointerInInternalField(1, reinterpret_cast<void*>(0x8));
}
- traced.SetFinalizationCallback(&traced, FinalizationCallback);
+ traced->SetFinalizationCallback(traced.get(), FinalizationCallback);
heap::InvokeMarkSweep();
- CHECK(traced.IsEmpty());
+ CHECK(traced->IsEmpty());
}
TEST(TracePrologueCallingIntoV8WriteBarrier) {
@@ -833,8 +833,8 @@ class EmbedderHeapTracerNoDestructorNonTracingClearing final
// Convention (for test): Objects that are optimized have their first field
// set as a back pointer.
- TracedReferenceBase<v8::Value>* original_handle =
- reinterpret_cast<TracedReferenceBase<v8::Value>*>(
+ BasicTracedReference<v8::Value>* original_handle =
+ reinterpret_cast<BasicTracedReference<v8::Value>*>(
v8::Object::GetAlignedPointerFromInternalField(
handle.As<v8::Object>(), 0));
original_handle->Reset();
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 1856417825..35936dc0b1 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -207,7 +207,7 @@ HEAP_TEST(TestNewSpaceRefsInCopiedCode) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Handle<Code> copy;
{
@@ -231,7 +231,7 @@ static void CheckFindCodeObject(Isolate* isolate) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
CHECK(code->IsCode());
HeapObject obj = HeapObject::cast(*code);
@@ -243,7 +243,7 @@ static void CheckFindCodeObject(Isolate* isolate) {
}
Handle<Code> copy =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
HeapObject obj_copy = HeapObject::cast(*copy);
Object not_right =
isolate->FindCodeObject(obj_copy.address() + obj_copy.Size() / 2);
@@ -1194,15 +1194,7 @@ HEAP_TEST(Regress10560) {
CHECK(function->shared().GetBytecodeArray().IsOld());
CHECK(function->shared().is_compiled());
- heap->set_force_oom(true);
- heap->AddNearHeapLimitCallback(
- [](void* data, size_t current_heap_limit,
- size_t initial_heap_limit) -> size_t {
- Heap* heap = static_cast<Heap*>(data);
- heap->set_force_oom(false);
- return 0;
- },
- heap);
+ heap->set_force_gc_on_next_allocation();
// Allocate feedback vector.
IsCompiledScope is_compiled_scope(
@@ -1259,7 +1251,8 @@ UNINITIALIZED_TEST(Regress10843) {
// Tests that spill slots from optimized code don't have weak pointers.
TEST(Regress10774) {
i::FLAG_allow_natives_syntax = true;
- i::FLAG_dynamic_map_checks = true;
+ i::FLAG_turboprop = true;
+ i::FLAG_turboprop_dynamic_map_checks = true;
#ifdef VERIFY_HEAP
i::FLAG_verify_heap = true;
#endif
@@ -3639,13 +3632,8 @@ TEST(DetailedErrorStackTraceBuiltinExit) {
FixedArray parameters = stack_trace->Parameters(0);
CHECK_EQ(parameters.length(), 2);
-#ifdef V8_REVERSE_JSARGS
CHECK(parameters.get(1).IsSmi());
CHECK_EQ(Smi::ToInt(parameters.get(1)), 9999);
-#else
- CHECK(parameters.get(0).IsSmi());
- CHECK_EQ(Smi::ToInt(parameters.get(0)), 9999);
-#endif
});
}
@@ -3752,7 +3740,7 @@ TEST(LargeObjectSlotRecording) {
FixedArray old_location = *lit;
// Allocate a large object.
- int size = Max(1000000, kMaxRegularHeapObjectSize + KB);
+ int size = std::max(1000000, kMaxRegularHeapObjectSize + KB);
CHECK_LT(kMaxRegularHeapObjectSize, size);
Handle<FixedArray> lo =
isolate->factory()->NewFixedArray(size, AllocationType::kOld);
@@ -4474,7 +4462,7 @@ TEST(NextCodeLinkInCodeDataContainerIsCleared) {
OptimizeDummyFunction(CcTest::isolate(), "mortal2");
CHECK_EQ(mortal2->code().next_code_link(), mortal1->code());
code_data_container = scope.CloseAndEscape(Handle<CodeDataContainer>(
- mortal2->code().code_data_container(), isolate));
+ mortal2->code().code_data_container(kAcquireLoad), isolate));
CompileRun("mortal1 = null; mortal2 = null;");
}
CcTest::CollectAllAvailableGarbage();
@@ -4490,10 +4478,9 @@ static Handle<Code> DummyOptimizedCode(Isolate* isolate) {
masm.Push(isolate->factory()->undefined_value());
masm.Drop(2);
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::OPTIMIZED_FUNCTION)
- .set_self_reference(masm.CodeObject())
- .Build();
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::TURBOFAN)
+ .set_self_reference(masm.CodeObject())
+ .Build();
CHECK(code->IsCode());
return code;
}
@@ -5597,6 +5584,11 @@ HEAP_TEST(Regress589413) {
// Add the array in root set.
handle(byte_array, isolate);
}
+ auto reset_oom = [](void* heap, size_t limit, size_t) -> size_t {
+ reinterpret_cast<Heap*>(heap)->set_force_oom(false);
+ return limit;
+ };
+ heap->AddNearHeapLimitCallback(reset_oom, heap);
{
// Ensure that incremental marking is not started unexpectedly.
@@ -5660,6 +5652,7 @@ HEAP_TEST(Regress589413) {
// Force allocation from the free list.
heap->set_force_oom(true);
CcTest::CollectGarbage(OLD_SPACE);
+ heap->RemoveNearHeapLimitCallback(reset_oom, 0);
}
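The rewritten OOM plumbing above uses the internal Heap::AddNearHeapLimitCallback / RemoveNearHeapLimitCallback pair. The same hook exists on the public v8::Isolate API; a hedged sketch for embedders (the growth policy is illustrative, and the semantics of passing 0 to Remove follow the v8.h comment of this era):

#include "include/v8.h"

// Illustrative policy: grow the limit by half whenever the heap nears it.
size_t OnNearHeapLimit(void* data, size_t current_heap_limit,
                       size_t initial_heap_limit) {
  return current_heap_limit + current_heap_limit / 2;
}

void InstallNearHeapLimitHook(v8::Isolate* isolate) {
  isolate->AddNearHeapLimitCallback(OnNearHeapLimit, /*data=*/nullptr);
  // ... run the workload ...
  // A heap_limit of 0 is ignored, i.e. the current limit stays in effect.
  isolate->RemoveNearHeapLimitCallback(OnNearHeapLimit, /*heap_limit=*/0);
}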
TEST(Regress598319) {
@@ -5674,7 +5667,8 @@ TEST(Regress598319) {
Isolate* isolate = heap->isolate();
// The size of the array should be larger than kProgressBarScanningChunk.
- const int kNumberOfObjects = Max(FixedArray::kMaxRegularLength + 1, 128 * KB);
+ const int kNumberOfObjects =
+ std::max(FixedArray::kMaxRegularLength + 1, 128 * KB);
struct Arr {
Arr(Isolate* isolate, int number_of_objects) {
@@ -6286,7 +6280,7 @@ TEST(RememberedSet_InsertInLargePage) {
HandleScope scope(isolate);
// Allocate an object in Large space.
- const int count = Max(FixedArray::kMaxRegularLength + 1, 128 * KB);
+ const int count = std::max(FixedArray::kMaxRegularLength + 1, 128 * KB);
Handle<FixedArray> arr = factory->NewFixedArray(count, AllocationType::kOld);
CHECK(heap->lo_space()->Contains(*arr));
CHECK_EQ(0, GetRememberedSetSize<OLD_TO_NEW>(*arr));
@@ -6523,67 +6517,6 @@ HEAP_TEST(Regress670675) {
DCHECK(marking->IsStopped());
}
-namespace {
-Handle<Code> GenerateDummyImmovableCode(Isolate* isolate) {
- Assembler assm(AssemblerOptions{});
-
- const int kNumberOfNops = 1 << 10;
- for (int i = 0; i < kNumberOfNops; i++) {
- assm.nop(); // supported on all architectures
- }
-
- CodeDesc desc;
- assm.GetCode(isolate, &desc);
- Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::STUB)
- .set_immovable()
- .Build();
- CHECK(code->IsCode());
-
- return code;
-}
-} // namespace
-
-HEAP_TEST(Regress5831) {
- CcTest::InitializeVM();
- Heap* heap = CcTest::heap();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope handle_scope(isolate);
-
- // Used to ensure that the generated code is not collected.
- const int kInitialSize = 32;
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(kInitialSize);
-
- // Ensure that all immovable code space pages are full and we overflow into
- // LO_SPACE.
- const int kMaxIterations = 1 << 16;
- bool overflowed_into_lospace = false;
- for (int i = 0; i < kMaxIterations; i++) {
- Handle<Code> code = GenerateDummyImmovableCode(isolate);
- array = FixedArray::SetAndGrow(isolate, array, i, code);
- CHECK(heap->code_space()->Contains(*code) ||
- heap->code_lo_space()->Contains(*code));
- if (heap->code_lo_space()->Contains(*code)) {
- overflowed_into_lospace = true;
- break;
- }
- }
-
- CHECK(overflowed_into_lospace);
-
- // Fake a serializer run.
- isolate->serializer_enabled_ = true;
-
- // Generate the code.
- Handle<Code> code = GenerateDummyImmovableCode(isolate);
- CHECK_GE(i::kMaxRegularHeapObjectSize, code->Size());
- CHECK(!heap->code_space()->first_page()->Contains(code->address()));
-
- // Ensure it's not in large object space.
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(*code);
- CHECK(chunk->owner_identity() != LO_SPACE);
- CHECK(chunk->NeverEvacuate());
-}
-
HEAP_TEST(RegressMissingWriteBarrierInAllocate) {
if (!FLAG_incremental_marking) return;
ManualGCScope manual_gc_scope;
@@ -6898,7 +6831,10 @@ UNINITIALIZED_TEST(OutOfMemoryLargeObjects) {
factory->NewFixedArray(kFixedArrayLength);
}
}
- CHECK_LE(state.old_generation_capacity_at_oom, kOldGenerationLimit);
+ CHECK_LE(state.old_generation_capacity_at_oom,
+ kOldGenerationLimit + state.new_space_capacity_at_oom +
+ state.new_lo_space_size_at_oom +
+ FixedArray::SizeFor(kFixedArrayLength));
CHECK_LE(kOldGenerationLimit, state.old_generation_capacity_at_oom +
state.new_space_capacity_at_oom +
state.new_lo_space_size_at_oom +
@@ -7063,7 +6999,7 @@ TEST(CodeObjectRegistry) {
{
// Ensure that both code objects end up on the same page.
CHECK(HeapTester::CodeEnsureLinearAllocationArea(
- heap, kMaxRegularHeapObjectSize));
+ heap, MemoryChunkLayout::MaxRegularCodeObjectSize()));
code1 = DummyOptimizedCode(isolate);
Handle<Code> code2 = DummyOptimizedCode(isolate);
code2_address = code2->address();
@@ -7185,7 +7121,8 @@ TEST(GarbageCollectionWithLocalHeap) {
Heap* heap = CcTest::i_isolate()->heap();
- LocalHeap local_heap(heap);
+ LocalHeap local_heap(heap, ThreadKind::kMain);
+ UnparkedScope unparked_scope(&local_heap);
CcTest::CollectGarbage(OLD_SPACE);
{ ParkedScope parked_scope(&local_heap); }
CcTest::CollectGarbage(OLD_SPACE);
@@ -7232,20 +7169,85 @@ class TestAllocationTracker : public HeapObjectAllocationTracker {
HEAP_TEST(CodeLargeObjectSpace) {
Heap* heap = CcTest::heap();
- int size_in_bytes = kMaxRegularHeapObjectSize + kSystemPointerSize;
+ int size_in_bytes =
+ MemoryChunkLayout::MaxRegularCodeObjectSize() + kTaggedSize;
TestAllocationTracker allocation_tracker{size_in_bytes};
heap->AddHeapObjectAllocationTracker(&allocation_tracker);
- AllocationResult allocation = heap->AllocateRaw(
- size_in_bytes, AllocationType::kCode, AllocationOrigin::kGeneratedCode,
- AllocationAlignment::kCodeAligned);
+ HeapObject obj;
+ {
+ AllocationResult allocation = heap->AllocateRaw(
+ size_in_bytes, AllocationType::kCode, AllocationOrigin::kRuntime);
+ CHECK(allocation.To(&obj));
+ CHECK_EQ(allocation.ToAddress(), allocation_tracker.address());
- CHECK(allocation.ToAddress() == allocation_tracker.address());
- heap->CreateFillerObjectAt(allocation.ToAddress(), size_in_bytes,
- ClearRecordedSlots::kNo);
+ heap->CreateFillerObjectAt(obj.address(), size_in_bytes,
+ ClearRecordedSlots::kNo);
+ }
+
+ CHECK(Heap::IsLargeObject(obj));
heap->RemoveHeapObjectAllocationTracker(&allocation_tracker);
}
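Replacing the flat kMaxRegularHeapObjectSize with MemoryChunkLayout::MaxRegularCodeObjectSize() makes the code-space large-object cutoff depend on the OS page size, which is exactly what the 64k-page test below pins down. A sketch of the boundary, where AllocateCodeObjectOfSize is a hypothetical helper standing in for the AllocateRaw sequence used in the tests:

void SketchCodeSizeClasses(Heap* heap) {
  const int max_regular = MemoryChunkLayout::MaxRegularCodeObjectSize();
  // One tagged word below the cutoff stays in regular code space; one word
  // above must land in the code large-object space.
  CHECK(!Heap::IsLargeObject(
      AllocateCodeObjectOfSize(heap, max_regular - kTaggedSize)));
  CHECK(Heap::IsLargeObject(
      AllocateCodeObjectOfSize(heap, max_regular + kTaggedSize)));
}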
+UNINITIALIZED_HEAP_TEST(CodeLargeObjectSpace64k) {
+ // Simulate having a system with 64k OS pages.
+ i::FLAG_v8_os_page_size = 64;
+
+ // Initialize the isolate manually to make sure --v8-os-page-size is taken
+ // into account.
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+
+ Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+
+ // Allocate a regular code object.
+ {
+ int size_in_bytes =
+ MemoryChunkLayout::MaxRegularCodeObjectSize() - kTaggedSize;
+ TestAllocationTracker allocation_tracker{size_in_bytes};
+ heap->AddHeapObjectAllocationTracker(&allocation_tracker);
+
+ HeapObject obj;
+ {
+ AllocationResult allocation = heap->AllocateRaw(
+ size_in_bytes, AllocationType::kCode, AllocationOrigin::kRuntime);
+ CHECK(allocation.To(&obj));
+ CHECK_EQ(allocation.ToAddress(), allocation_tracker.address());
+
+ heap->CreateFillerObjectAt(obj.address(), size_in_bytes,
+ ClearRecordedSlots::kNo);
+ }
+
+ CHECK(!Heap::IsLargeObject(obj));
+ heap->RemoveHeapObjectAllocationTracker(&allocation_tracker);
+ }
+
+ // Allocate a large code object.
+ {
+ int size_in_bytes =
+ MemoryChunkLayout::MaxRegularCodeObjectSize() + kTaggedSize;
+ TestAllocationTracker allocation_tracker{size_in_bytes};
+ heap->AddHeapObjectAllocationTracker(&allocation_tracker);
+
+ HeapObject obj;
+ {
+ AllocationResult allocation = heap->AllocateRaw(
+ size_in_bytes, AllocationType::kCode, AllocationOrigin::kRuntime);
+ CHECK(allocation.To(&obj));
+ CHECK_EQ(allocation.ToAddress(), allocation_tracker.address());
+
+ heap->CreateFillerObjectAt(obj.address(), size_in_bytes,
+ ClearRecordedSlots::kNo);
+ }
+
+ CHECK(Heap::IsLargeObject(obj));
+ heap->RemoveHeapObjectAllocationTracker(&allocation_tracker);
+ }
+
+ isolate->Dispose();
+}
+
TEST(Regress10900) {
FLAG_always_compact = true;
CcTest::InitializeVM();
@@ -7260,7 +7262,7 @@ TEST(Regress10900) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
{
// Generate multiple code pages.
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
diff --git a/deps/v8/test/cctest/heap/test-memory-measurement.cc b/deps/v8/test/cctest/heap/test-memory-measurement.cc
index 861337e38f..d72c70725b 100644
--- a/deps/v8/test/cctest/heap/test-memory-measurement.cc
+++ b/deps/v8/test/cctest/heap/test-memory-measurement.cc
@@ -91,7 +91,6 @@ TEST(NativeContextStatsArrayBuffers) {
*i_array_buffer, 10);
CHECK_EQ(1010, stats.Get(native_context->ptr()));
}
-
namespace {
class TestResource : public v8::String::ExternalStringResource {
@@ -229,6 +228,64 @@ TEST(LazyMemoryMeasurement) {
CHECK(!platform.TaskPosted());
}
+TEST(PartiallyInitializedJSFunction) {
+ LocalContext env;
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+ Handle<JSFunction> js_function =
+ factory->NewFunctionForTest(factory->NewStringFromAsciiChecked("test"));
+ Handle<Context> context = handle(js_function->context(), isolate);
+
+ // 1. Start simulating deserialization.
+ isolate->RegisterDeserializerStarted();
+ // 2. Set the context field to the uninitialized sentinel.
+ TaggedField<Object, JSFunction::kContextOffset>::store(
+ *js_function, Deserializer::uninitialized_field_value());
+ // 3. Request memory measurement and run all tasks. The GC that runs as
+ // part of the measurement should not crash.
+ CcTest::isolate()->MeasureMemory(
+ std::make_unique<MockMeasureMemoryDelegate>(),
+ v8::MeasureMemoryExecution::kEager);
+ while (v8::platform::PumpMessageLoop(v8::internal::V8::GetCurrentPlatform(),
+ CcTest::isolate())) {
+ }
+ // 4. Restore the value and complete deserialization.
+ TaggedField<Object, JSFunction::kContextOffset>::store(*js_function,
+ *context);
+ isolate->RegisterDeserializerFinished();
+}
+
+TEST(PartiallyInitializedContext) {
+ LocalContext env;
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+ Handle<ScopeInfo> scope_info =
+ ReadOnlyRoots(isolate).global_this_binding_scope_info_handle();
+ Handle<Context> context = factory->NewScriptContext(
+ GetNativeContext(isolate, env.local()), scope_info);
+ Handle<Map> map = handle(context->map(), isolate);
+ Handle<NativeContext> native_context = handle(map->native_context(), isolate);
+ // 1. Start simulating deserialization.
+ isolate->RegisterDeserializerStarted();
+ // 2. Set the native context field to the uninitialized sentinel.
+ TaggedField<Object, Map::kConstructorOrBackPointerOrNativeContextOffset>::
+ store(*map, Deserializer::uninitialized_field_value());
+ // 3. Request memory measurement and run all tasks. The GC that runs as
+ // part of the measurement should not crash.
+ CcTest::isolate()->MeasureMemory(
+ std::make_unique<MockMeasureMemoryDelegate>(),
+ v8::MeasureMemoryExecution::kEager);
+ while (v8::platform::PumpMessageLoop(v8::internal::V8::GetCurrentPlatform(),
+ CcTest::isolate())) {
+ }
+ // 4. Restore the value and complete deserialization.
+ TaggedField<Object, Map::kConstructorOrBackPointerOrNativeContextOffset>::
+ store(*map, *native_context);
+ isolate->RegisterDeserializerFinished();
+}
+
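Both tests drive v8::Isolate::MeasureMemory with a mock delegate while a deserialization sentinel is in place. For orientation, a minimal embedder-side delegate could look like the sketch below; the virtuals are assumed to match the v8::MeasureMemoryDelegate interface shipped with this V8 version:

#include <cstdio>
#include <memory>
#include <utility>
#include <vector>
#include "include/v8.h"

class PrintingDelegate final : public v8::MeasureMemoryDelegate {
 public:
  bool ShouldMeasure(v8::Local<v8::Context> context) override { return true; }
  void MeasurementComplete(
      const std::vector<std::pair<v8::Local<v8::Context>, size_t>>&
          context_sizes_in_bytes,
      size_t unattributed_size_in_bytes) override {
    std::printf("unattributed: %zu bytes\n", unattributed_size_in_bytes);
  }
};

// Usage:
//   isolate->MeasureMemory(std::make_unique<PrintingDelegate>(),
//                          v8::MeasureMemoryExecution::kEager);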
} // namespace heap
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 5106086e8d..a02ebffd9f 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -75,11 +75,13 @@ class TestMemoryAllocatorScope {
isolate_->GetCodePages()->swap(code_pages_);
}
+ TestMemoryAllocatorScope(const TestMemoryAllocatorScope&) = delete;
+ TestMemoryAllocatorScope& operator=(const TestMemoryAllocatorScope&) = delete;
+
private:
Isolate* isolate_;
std::unique_ptr<MemoryAllocator> old_allocator_;
std::vector<MemoryRange> code_pages_;
- DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};
// Temporarily sets a given code page allocator in an isolate.
@@ -98,12 +100,13 @@ class TestCodePageAllocatorScope {
isolate_->heap()->memory_allocator()->code_page_allocator_ =
old_code_page_allocator_;
}
+ TestCodePageAllocatorScope(const TestCodePageAllocatorScope&) = delete;
+ TestCodePageAllocatorScope& operator=(const TestCodePageAllocatorScope&) =
+ delete;
private:
Isolate* isolate_;
v8::PageAllocator* old_code_page_allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(TestCodePageAllocatorScope);
};
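This hunk, and several like it below, replace the DISALLOW_COPY_AND_ASSIGN macro with explicitly deleted members, in line with the broader Chromium migration away from the macro. The idiom in isolation (the class name is a placeholder):

class ScopedHelper {
 public:
  ScopedHelper() = default;
  // The macro effectively expanded to these two deleted declarations, but
  // hid them in the private section; the new style states them publicly.
  ScopedHelper(const ScopedHelper&) = delete;
  ScopedHelper& operator=(const ScopedHelper&) = delete;
};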
static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
@@ -344,6 +347,7 @@ TEST(OldLargeObjectSpace) {
// messages are also not stable if files are moved and modified during the build
// process (jumbo builds).
TEST(SizeOfInitialHeap) {
+ ManualGCScope manual_gc_scope;
if (i::FLAG_always_opt) return;
// Bootstrapping without a snapshot causes more allocations.
CcTest::InitializeVM();
@@ -374,26 +378,16 @@ TEST(SizeOfInitialHeap) {
// Freshly initialized VM gets by with the snapshot size (which is below
// kMaxInitialSizePerSpace per space).
Heap* heap = isolate->heap();
- int page_count[LAST_GROWABLE_PAGED_SPACE + 1] = {0, 0, 0, 0};
for (int i = FIRST_GROWABLE_PAGED_SPACE; i <= LAST_GROWABLE_PAGED_SPACE;
i++) {
// Debug code can be very large, so skip CODE_SPACE if we are generating it.
if (i == CODE_SPACE && i::FLAG_debug_code) continue;
- page_count[i] = heap->paged_space(i)->CountTotalPages();
// Check that the initial heap is also below the limit.
CHECK_LE(heap->paged_space(i)->CommittedMemory(), kMaxInitialSizePerSpace);
}
- // Executing the empty script gets by with the same number of pages, i.e.,
- // requires no extra space.
CompileRun("/*empty*/");
- for (int i = FIRST_GROWABLE_PAGED_SPACE; i <= LAST_GROWABLE_PAGED_SPACE;
- i++) {
- // Skip CODE_SPACE, since we had to generate code even for an empty script.
- if (i == CODE_SPACE) continue;
- CHECK_EQ(page_count[i], isolate->heap()->paged_space(i)->CountTotalPages());
- }
// No large objects required to perform the above steps.
CHECK_EQ(initial_lo_space,
diff --git a/deps/v8/test/cctest/heap/test-unmapper.cc b/deps/v8/test/cctest/heap/test-unmapper.cc
index fa4768bd72..164de7571c 100644
--- a/deps/v8/test/cctest/heap/test-unmapper.cc
+++ b/deps/v8/test/cctest/heap/test-unmapper.cc
@@ -50,14 +50,24 @@ class MockPlatformForUnmapper : public TestPlatform {
v8::Platform* old_platform_;
};
-TEST(EagerUnmappingInCollectAllAvailableGarbage) {
+UNINITIALIZED_TEST(EagerUnmappingInCollectAllAvailableGarbage) {
FLAG_stress_concurrent_allocation = false; // For SimulateFullSpace.
- CcTest::InitializeVM();
MockPlatformForUnmapper platform;
- Heap* heap = CcTest::heap();
- i::heap::SimulateFullSpace(heap->old_space());
- CcTest::CollectAllAvailableGarbage();
- CHECK_EQ(0, heap->memory_allocator()->unmapper()->NumberOfChunks());
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context = CcTest::NewContext(isolate);
+ v8::Context::Scope context_scope(context);
+ Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ Heap* heap = i_isolate->heap();
+ i::heap::SimulateFullSpace(heap->old_space());
+ CcTest::CollectAllAvailableGarbage(i_isolate);
+ CHECK_EQ(0, heap->memory_allocator()->unmapper()->NumberOfChunks());
+ }
+ isolate->Dispose();
}
} // namespace heap
diff --git a/deps/v8/test/cctest/heap/test-weak-references.cc b/deps/v8/test/cctest/heap/test-weak-references.cc
index 8e0e713ad6..b7f6d680dc 100644
--- a/deps/v8/test/cctest/heap/test-weak-references.cc
+++ b/deps/v8/test/cctest/heap/test-weak-references.cc
@@ -51,7 +51,7 @@ TEST(WeakReferencesBasic) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
CHECK(code->IsCode());
lh->set_data1(HeapObjectReference::Weak(*code));
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
index b4425f57fe..880cba2b23 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
@@ -104,12 +104,13 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 39
+bytecode array length: 41
bytecodes: [
B(Mov), R(closure), R(1),
/* 118 S> */ B(Ldar), R(1),
B(GetSuperConstructor), R(3),
B(LdaSmi), I8(1),
+ B(ThrowIfNotSuperConstructor), R(3),
B(Star), R(4),
B(Ldar), R(0),
/* 118 E> */ B(Construct), R(3), R(4), U8(1), U8(0),
@@ -146,11 +147,12 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 35
+bytecode array length: 37
bytecodes: [
B(Mov), R(closure), R(1),
/* 117 S> */ B(Ldar), R(1),
B(GetSuperConstructor), R(3),
+ B(ThrowIfNotSuperConstructor), R(3),
B(Ldar), R(0),
/* 117 E> */ B(Construct), R(3), R(0), U8(0), U8(0),
B(Star), R(4),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
index 7e925f9c91..b43a8e3d0e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
@@ -84,7 +84,7 @@ bytecodes: [
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 48 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 53 S> */ B(Wide), B(LdaSmi), I16(268),
+ /* 53 S> */ B(Wide), B(LdaSmi), I16(272),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
@@ -115,7 +115,7 @@ bytecodes: [
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 41 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 46 S> */ B(Wide), B(LdaSmi), I16(267),
+ /* 46 S> */ B(Wide), B(LdaSmi), I16(271),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
@@ -146,7 +146,7 @@ bytecodes: [
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 48 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 53 S> */ B(Wide), B(LdaSmi), I16(268),
+ /* 53 S> */ B(Wide), B(LdaSmi), I16(272),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
@@ -177,7 +177,7 @@ bytecodes: [
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 41 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 46 S> */ B(Wide), B(LdaSmi), I16(267),
+ /* 46 S> */ B(Wide), B(LdaSmi), I16(271),
B(Star), R(4),
B(LdaConstant), U8(0),
B(Star), R(5),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
index a36806c05b..d660433560 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
@@ -57,7 +57,7 @@ bytecodes: [
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 44 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 49 S> */ B(Wide), B(LdaSmi), I16(266),
+ /* 49 S> */ B(Wide), B(LdaSmi), I16(270),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
@@ -89,7 +89,7 @@ bytecodes: [
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 44 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 49 S> */ B(Wide), B(LdaSmi), I16(266),
+ /* 49 S> */ B(Wide), B(LdaSmi), I16(270),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
index 47f3280126..1cabd9b5e3 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
@@ -25,7 +25,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(1),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(264),
+ B(Wide), B(LdaSmi), I16(268),
B(Star), R(2),
B(LdaConstant), U8(0),
B(Star), R(3),
@@ -56,7 +56,7 @@ frame size: 2
parameter count: 1
bytecode array length: 16
bytecodes: [
- /* 56 S> */ B(Wide), B(LdaSmi), I16(266),
+ /* 56 S> */ B(Wide), B(LdaSmi), I16(270),
B(Star), R(0),
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -83,7 +83,7 @@ frame size: 2
parameter count: 1
bytecode array length: 16
bytecodes: [
- /* 56 S> */ B(Wide), B(LdaSmi), I16(266),
+ /* 56 S> */ B(Wide), B(LdaSmi), I16(270),
B(Star), R(0),
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -122,7 +122,7 @@ bytecodes: [
/* 94 E> */ B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(264),
+ B(Wide), B(LdaSmi), I16(268),
B(Star), R(2),
B(LdaConstant), U8(0),
B(Star), R(3),
@@ -144,7 +144,7 @@ bytecodes: [
/* 109 E> */ B(TestReferenceEqual), R(this),
B(Mov), R(this), R(1),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(265),
+ B(Wide), B(LdaSmi), I16(269),
B(Star), R(3),
B(LdaConstant), U8(0),
B(Star), R(4),
@@ -159,7 +159,7 @@ bytecodes: [
/* 133 E> */ B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(264),
+ B(Wide), B(LdaSmi), I16(268),
B(Star), R(2),
B(LdaConstant), U8(0),
B(Star), R(3),
@@ -189,7 +189,7 @@ frame size: 2
parameter count: 1
bytecode array length: 16
bytecodes: [
- /* 60 S> */ B(Wide), B(LdaSmi), I16(268),
+ /* 60 S> */ B(Wide), B(LdaSmi), I16(272),
B(Star), R(0),
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -215,7 +215,7 @@ frame size: 2
parameter count: 1
bytecode array length: 16
bytecodes: [
- /* 53 S> */ B(Wide), B(LdaSmi), I16(267),
+ /* 53 S> */ B(Wide), B(LdaSmi), I16(271),
B(Star), R(0),
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -241,7 +241,7 @@ frame size: 2
parameter count: 1
bytecode array length: 16
bytecodes: [
- /* 60 S> */ B(Wide), B(LdaSmi), I16(268),
+ /* 60 S> */ B(Wide), B(LdaSmi), I16(272),
B(Star), R(0),
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -267,7 +267,7 @@ frame size: 3
parameter count: 1
bytecode array length: 16
bytecodes: [
- /* 46 S> */ B(Wide), B(LdaSmi), I16(267),
+ /* 46 S> */ B(Wide), B(LdaSmi), I16(271),
B(Star), R(1),
B(LdaConstant), U8(0),
B(Star), R(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
index a316cfdceb..07feb7864a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
@@ -19,13 +19,14 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 18
+bytecode array length: 20
bytecodes: [
/* 93 E> */ B(CreateRestParameter),
B(Star), R(2),
B(Mov), R(closure), R(1),
/* 93 S> */ B(Ldar), R(1),
B(GetSuperConstructor), R(4),
+ B(ThrowIfNotSuperConstructor), R(4),
B(Ldar), R(0),
/* 93 E> */ B(ConstructWithSpread), R(4), R(2), U8(1), U8(0),
/* 93 S> */ B(Return),
@@ -50,7 +51,7 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 39
+bytecode array length: 41
bytecodes: [
/* 128 E> */ B(CreateRestParameter),
B(Star), R(3),
@@ -60,6 +61,7 @@ bytecodes: [
B(GetSuperConstructor), R(5),
B(LdaSmi), I8(1),
B(Star), R(6),
+ /* 152 E> */ B(ThrowIfNotSuperConstructor), R(5),
B(Ldar), R(0),
B(Mov), R(3), R(7),
/* 140 E> */ B(ConstructWithSpread), R(5), R(6), U8(2), U8(0),
@@ -91,7 +93,7 @@ snippet: "
"
frame size: 11
parameter count: 1
-bytecode array length: 118
+bytecode array length: 120
bytecodes: [
/* 128 E> */ B(CreateRestParameter),
B(Star), R(3),
@@ -129,6 +131,7 @@ bytecodes: [
B(JumpLoop), U8(33), I8(0),
B(LdaSmi), I8(1),
B(StaInArrayLiteral), R(7), R(6), U8(1),
+ B(ThrowIfNotSuperConstructor), R(5),
B(Mov), R(5), R(6),
B(Mov), R(0), R(8),
/* 140 E> */ B(CallJSRuntime), U8(%reflect_construct), R(6), U8(3),
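Each of these golden updates inserts a ThrowIfNotSuperConstructor bytecode between GetSuperConstructor and the construct call, so the "super is not a constructor" check is now emitted eagerly in bytecode. A cctest-style sketch of JS that would take that throwing path (illustrative, not part of the patch):

v8::TryCatch try_catch(CcTest::isolate());
CompileRun(
    "class A extends Object { constructor() { super(); } }"
    "Object.setPrototypeOf(A, Math);"  // Math is not a constructor.
    "new A();");
CHECK(try_catch.HasCaught());  // TypeError from ThrowIfNotSuperConstructor.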
diff --git a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
index 2fe3658813..b4ef8f3990 100644
--- a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
+++ b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
@@ -103,6 +103,8 @@ class V8InitializationScope final {
public:
explicit V8InitializationScope(const char* exec_path);
~V8InitializationScope();
+ V8InitializationScope(const V8InitializationScope&) = delete;
+ V8InitializationScope& operator=(const V8InitializationScope&) = delete;
v8::Platform* platform() const { return platform_.get(); }
v8::Isolate* isolate() const { return isolate_; }
@@ -111,8 +113,6 @@ class V8InitializationScope final {
std::unique_ptr<v8::Platform> platform_;
std::unique_ptr<v8::ArrayBuffer::Allocator> allocator_;
v8::Isolate* isolate_;
-
- DISALLOW_COPY_AND_ASSIGN(V8InitializationScope);
};
bool ParseBoolean(const char* string) {
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.h b/deps/v8/test/cctest/interpreter/interpreter-tester.h
index 00bb389b9e..4f3bc8e4e9 100644
--- a/deps/v8/test/cctest/interpreter/interpreter-tester.h
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.h
@@ -86,6 +86,8 @@ class InterpreterTester {
const char* filter = kFunctionName);
virtual ~InterpreterTester();
+ InterpreterTester(const InterpreterTester&) = delete;
+ InterpreterTester& operator=(const InterpreterTester&) = delete;
template <class... A>
InterpreterCallableUndefinedReceiver<A...> GetCallable() {
@@ -152,7 +154,8 @@ class InterpreterTester {
}
if (!bytecode_.is_null()) {
- function->shared().set_function_data(*bytecode_.ToHandleChecked());
+ function->shared().set_function_data(*bytecode_.ToHandleChecked(),
+ kReleaseStore);
is_compiled_scope = function->shared().is_compiled_scope(isolate_);
}
if (HasFeedbackMetadata()) {
@@ -165,8 +168,6 @@ class InterpreterTester {
}
return function;
}
-
- DISALLOW_COPY_AND_ASSIGN(InterpreterTester);
};
} // namespace interpreter
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index 13410c916c..6afccdfdfc 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -2591,6 +2591,10 @@ TEST(ClassDeclarations) {
}
TEST(ClassAndSuperClass) {
+ // Different bytecodes are temporarily generated with and without --future;
+ // see crbug.com/v8/9237. TODO(marja): remove this hack once --super-ic is
+ // on by default.
+ FLAG_super_ic = false;
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
index 6889747e17..a07ecd912d 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
@@ -29,7 +29,6 @@ class InvokeIntrinsicHelper {
Handle<Object> Invoke(A... args) {
CHECK(IntrinsicsHelper::IsSupported(function_id_));
int parameter_count = sizeof...(args);
-#ifdef V8_REVERSE_JSARGS
// Move the parameters to locals, since the order of the
// arguments on the stack is reversed.
BytecodeArrayBuilder builder(zone_, parameter_count + 1, parameter_count,
@@ -39,12 +38,6 @@ class InvokeIntrinsicHelper {
}
RegisterList reg_list =
InterpreterTester::NewRegisterList(0, parameter_count);
-#else
- // Add the receiver in the parameter count.
- BytecodeArrayBuilder builder(zone_, parameter_count + 1, 0, nullptr);
- RegisterList reg_list = InterpreterTester::NewRegisterList(
- builder.Parameter(0).index(), parameter_count);
-#endif
builder.CallRuntime(function_id_, reg_list).Return();
InterpreterTester tester(isolate_, builder.ToBytecodeArray(isolate_));
auto callable = tester.GetCallable<A...>();
diff --git a/deps/v8/test/cctest/libplatform/test-tracing.cc b/deps/v8/test/cctest/libplatform/test-tracing.cc
index 0d552c42b0..406f119f65 100644
--- a/deps/v8/test/cctest/libplatform/test-tracing.cc
+++ b/deps/v8/test/cctest/libplatform/test-tracing.cc
@@ -60,6 +60,9 @@ TEST(TestTraceObject) {
class ConvertableToTraceFormatMock : public v8::ConvertableToTraceFormat {
public:
+ ConvertableToTraceFormatMock(const ConvertableToTraceFormatMock&) = delete;
+ ConvertableToTraceFormatMock& operator=(const ConvertableToTraceFormatMock&) =
+ delete;
explicit ConvertableToTraceFormatMock(int value) : value_(value) {}
void AppendAsTraceFormat(std::string* out) const override {
*out += "[" + std::to_string(value_) + "," + std::to_string(value_) + "]";
@@ -67,8 +70,6 @@ class ConvertableToTraceFormatMock : public v8::ConvertableToTraceFormat {
private:
int value_;
-
- DISALLOW_COPY_AND_ASSIGN(ConvertableToTraceFormatMock);
};
class MockTraceWriter : public TraceWriter {
@@ -856,10 +857,11 @@ TEST(JsonIntegrationTest) {
std::vector<std::string> all_args;
GetJSONStrings(&all_args, json, "\"args\"", "{", "}");
- CHECK_EQ("\"1\":1e+100", all_args[0]);
- CHECK_EQ("\"2\":\"NaN\"", all_args[1]);
- CHECK_EQ("\"3\":\"Infinity\"", all_args[2]);
- CHECK_EQ("\"4\":\"-Infinity\"", all_args[3]);
+ // Ignore the first metadata event.
+ CHECK_EQ("\"1\":1e+100", all_args[1]);
+ CHECK_EQ("\"2\":\"NaN\"", all_args[2]);
+ CHECK_EQ("\"3\":\"Infinity\"", all_args[3]);
+ CHECK_EQ("\"4\":\"-Infinity\"", all_args[4]);
}
#endif // V8_USE_PERFETTO
diff --git a/deps/v8/test/cctest/test-accessor-assembler.cc b/deps/v8/test/cctest/test-accessor-assembler.cc
index 54d86725de..ba53304a40 100644
--- a/deps/v8/test/cctest/test-accessor-assembler.cc
+++ b/deps/v8/test/cctest/test-accessor-assembler.cc
@@ -28,8 +28,8 @@ void TestStubCacheOffsetCalculation(StubCache::Table table) {
AccessorAssembler m(data.state());
{
- TNode<Name> name = m.CAST(m.Parameter(1));
- TNode<Map> map = m.CAST(m.Parameter(2));
+ auto name = m.Parameter<Name>(1);
+ auto map = m.Parameter<Map>(2);
TNode<IntPtrT> primary_offset =
m.StubCachePrimaryOffsetForTesting(name, map);
Node* result;
@@ -128,10 +128,9 @@ TEST(TryProbeStubCache) {
stub_cache.Clear();
{
- TNode<Object> receiver = m.CAST(m.Parameter(1));
- TNode<Name> name = m.CAST(m.Parameter(2));
- TNode<MaybeObject> expected_handler =
- m.UncheckedCast<MaybeObject>(m.Parameter(3));
+ auto receiver = m.Parameter<Object>(1);
+ auto name = m.Parameter<Name>(2);
+ TNode<MaybeObject> expected_handler = m.UncheckedParameter<MaybeObject>(3);
Label passed(&m), failed(&m);
@@ -204,7 +203,7 @@ TEST(TryProbeStubCache) {
// Generate some number of handlers.
for (int i = 0; i < 30; i++) {
- handlers.push_back(CreateCodeOfKind(CodeKind::STUB));
+ handlers.push_back(CreateCodeOfKind(CodeKind::FOR_TESTING));
}
// Ensure that GC does happen because from now on we are going to fill our
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index ec14e8d0c2..bd4b8b68bc 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -882,3 +882,27 @@ TEST(ObjectSetLazyDataProperty) {
CHECK(result.FromJust());
ExpectInt32("obj.bar = -1; obj.bar;", -1);
}
+
+TEST(ObjectSetLazyDataPropertyForIndex) {
+ // Regression test for crbug.com/1136800.
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> obj = v8::Object::New(isolate);
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
+
+ static int getter_call_count;
+ getter_call_count = 0;
+ auto result = obj->SetLazyDataProperty(
+ env.local(), v8_str("1"),
+ [](Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ getter_call_count++;
+ info.GetReturnValue().Set(getter_call_count);
+ });
+ CHECK(result.FromJust());
+ CHECK_EQ(0, getter_call_count);
+ for (int i = 0; i < 2; i++) {
+ ExpectInt32("obj[1]", 1);
+ CHECK_EQ(1, getter_call_count);
+ }
+}
diff --git a/deps/v8/test/cctest/test-api-icu.cc b/deps/v8/test/cctest/test-api-icu.cc
index c5e617fdd2..8527ee72d3 100644
--- a/deps/v8/test/cctest/test-api-icu.cc
+++ b/deps/v8/test/cctest/test-api-icu.cc
@@ -47,7 +47,7 @@ TEST(LocaleConfigurationChangeNotification) {
SetIcuLocale("zh_CN");
isolate->LocaleConfigurationChangeNotification();
- CheckLocaleSpecificValues("zh-CN", "2020/2/14 下午1:45:00", "10,000.3");
+ CheckLocaleSpecificValues("zh-CN", "2020/2/14下午1:45:00", "10,000.3");
UErrorCode error_code = U_ZERO_ERROR;
icu::Locale::setDefault(default_locale, error_code);
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 299614c122..3264a1aa3b 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -64,12 +64,14 @@
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/string-inl.h"
+#include "src/objects/synthetic-module-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/strings/unicode-inl.h"
#include "src/utils/utils.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/flag-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
static const bool kLogThreading = false;
@@ -2655,9 +2657,13 @@ THREADED_TEST(AccessorIsPreservedOnAttributeChange) {
LocalContext env;
v8::Local<v8::Value> res = CompileRun("var a = []; a;");
i::Handle<i::JSReceiver> a(v8::Utils::OpenHandle(v8::Object::Cast(*res)));
- CHECK_EQ(1, a->map().instance_descriptors().number_of_descriptors());
+ CHECK_EQ(
+ 1,
+ a->map().instance_descriptors(v8::kRelaxedLoad).number_of_descriptors());
CompileRun("Object.defineProperty(a, 'length', { writable: false });");
- CHECK_EQ(0, a->map().instance_descriptors().number_of_descriptors());
+ CHECK_EQ(
+ 0,
+ a->map().instance_descriptors(v8::kRelaxedLoad).number_of_descriptors());
// But we should still have an AccessorInfo.
i::Handle<i::String> name = CcTest::i_isolate()->factory()->length_string();
i::LookupIterator it(CcTest::i_isolate(), a, name,
@@ -17560,6 +17566,33 @@ THREADED_TEST(FunctionGetBoundFunction) {
original_function->GetScriptColumnNumber());
}
+THREADED_TEST(FunctionProtoToString) {
+ LocalContext context;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ // Replace Function.prototype.toString.
+ CompileRun(R"(
+ Function.prototype.toString = function() {
+ return 'customized toString';
+ })");
+
+ constexpr char kTestFunction[] = "function testFunction() { return 7; }";
+ std::string wrapped_function("(");
+ wrapped_function.append(kTestFunction).append(")");
+ Local<Function> function =
+ CompileRun(wrapped_function.c_str()).As<Function>();
+
+ Local<String> value = function->ToString(context.local()).ToLocalChecked();
+ CHECK(value->IsString());
+ CHECK(
+ value->Equals(context.local(), v8_str("customized toString")).FromJust());
+
+ // FunctionProtoToString() should not call the replaced toString function.
+ value = function->FunctionProtoToString(context.local()).ToLocalChecked();
+ CHECK(value->IsString());
+ CHECK(value->Equals(context.local(), v8_str(kTestFunction)).FromJust());
+}
static void GetterWhichReturns42(
Local<String> name,
@@ -19422,7 +19455,7 @@ void CheckCodeGenerationDisallowed() {
char first_fourty_bytes[41];
v8::ModifyCodeGenerationFromStringsResult CodeGenerationAllowed(
- Local<Context> context, Local<Value> source) {
+ Local<Context> context, Local<Value> source, bool is_code_like) {
String::Utf8Value str(CcTest::isolate(), source);
size_t len = std::min(sizeof(first_fourty_bytes) - 1,
static_cast<size_t>(str.length()));
@@ -19433,13 +19466,13 @@ v8::ModifyCodeGenerationFromStringsResult CodeGenerationAllowed(
}
v8::ModifyCodeGenerationFromStringsResult CodeGenerationDisallowed(
- Local<Context> context, Local<Value> source) {
+ Local<Context> context, Local<Value> source, bool is_code_like) {
ApiTestFuzzer::Fuzz();
return {false, {}};
}
v8::ModifyCodeGenerationFromStringsResult ModifyCodeGeneration(
- Local<Context> context, Local<Value> source) {
+ Local<Context> context, Local<Value> source, bool is_code_like) {
// Allow (passthrough, unmodified) all objects that are not strings.
if (!source->IsString()) {
return {/* codegen_allowed= */ true, v8::MaybeLocal<String>()};
@@ -19535,7 +19568,7 @@ TEST(ModifyCodeGenFromStrings) {
}
v8::ModifyCodeGenerationFromStringsResult RejectStringsIncrementNumbers(
- Local<Context> context, Local<Value> source) {
+ Local<Context> context, Local<Value> source, bool is_code_like) {
if (source->IsString()) {
return {false, v8::MaybeLocal<String>()};
}
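Every ModifyCodeGenerationFromStrings callback in this file gains a third bool is_code_like parameter, presumably threading through the "code-like objects" proposal so embedders can base eval decisions on it. A hedged sketch with the new signature; whether the registration overload below accepts it in this exact V8 version is an assumption:

v8::ModifyCodeGenerationFromStringsResult AllowOnlyCodeLike(
    v8::Local<v8::Context> context, v8::Local<v8::Value> source,
    bool is_code_like) {
  // Permit code generation only when V8 marked the original input as
  // code-like; never rewrite the source.
  return {/*codegen_allowed=*/is_code_like, v8::MaybeLocal<v8::String>()};
}

// Assumed registration point:
//   isolate->SetModifyCodeGenerationFromStringsCallback(AllowOnlyCodeLike);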
@@ -21430,6 +21463,13 @@ class RegExpInterruptTest {
string->MakeExternal(&two_byte_string_resource);
}
+ static void ReenterIrregexp(v8::Isolate* isolate, void* data) {
+ v8::HandleScope scope(isolate);
+ v8::TryCatch try_catch(isolate);
+ // Irregexp is not reentrant. This should crash.
+ CompileRun("/((a*)*)*b/.exec('aaaaab')");
+ }
+
private:
static void SignalSemaphore(v8::Isolate* isolate, void* data) {
reinterpret_cast<RegExpInterruptTest*>(data)->sem_.Signal();
@@ -21521,21 +21561,52 @@ class RegExpInterruptTest {
} // namespace
TEST(RegExpInterruptAndCollectAllGarbage) {
- i::FLAG_always_compact = true; // Move all movable objects on GC.
+ // Move all movable objects on GC.
+ i::FLAG_always_compact = true;
+ // We want to be stuck in regexp execution, so no fallback to linear-time
+ // engine.
+ // TODO(mbid,v8:10765): Find a way to test interrupt support of the
+ // experimental engine.
+ i::FLAG_enable_experimental_regexp_engine_on_excessive_backtracks = false;
RegExpInterruptTest test;
test.RunTest(RegExpInterruptTest::CollectAllGarbage);
}
TEST(RegExpInterruptAndMakeSubjectOneByteExternal) {
+ // We want to be stuck in regexp execution, so no fallback to linear-time
+ // engine.
+ // TODO(mbid,v8:10765): Find a way to test interrupt support of the
+ // experimental engine.
+ i::FLAG_enable_experimental_regexp_engine_on_excessive_backtracks = false;
RegExpInterruptTest test;
test.RunTest(RegExpInterruptTest::MakeSubjectOneByteExternal);
}
TEST(RegExpInterruptAndMakeSubjectTwoByteExternal) {
+ // We want to be stuck in regexp execution, so no fallback to linear-time
+ // engine.
+ // TODO(mbid,v8:10765): Find a way to test interrupt support of the
+ // experimental engine.
+ i::FLAG_enable_experimental_regexp_engine_on_excessive_backtracks = false;
RegExpInterruptTest test;
test.RunTest(RegExpInterruptTest::MakeSubjectTwoByteExternal);
}
+TEST(RegExpInterruptAndReenterIrregexp) {
+ // We only check in the runtime entry to irregexp, so make sure we don't hit
+ // an interpreter.
+ i::FLAG_regexp_tier_up_ticks = 0;
+ i::FLAG_regexp_interpret_all = false;
+ i::FLAG_enable_experimental_regexp_engine = false;
+ // We want to be stuck in regexp execution, so no fallback to linear-time
+ // engine.
+ // TODO(mbid,v8:10765): Find a way to test interrupt support of the
+ // experimental engine.
+ i::FLAG_enable_experimental_regexp_engine_on_excessive_backtracks = false;
+ RegExpInterruptTest test;
+ test.RunTest(RegExpInterruptTest::ReenterIrregexp);
+}
+
class RequestInterruptTestBase {
public:
RequestInterruptTestBase()
@@ -23482,7 +23557,7 @@ void RunStreamingTest(const char** chunks,
v8::ScriptCompiler::StreamedSource source(
std::make_unique<TestSourceStream>(chunks), encoding);
v8::ScriptCompiler::ScriptStreamingTask* task =
- v8::ScriptCompiler::StartStreamingScript(isolate, &source);
+ v8::ScriptCompiler::StartStreaming(isolate, &source);
// TestSourceStream::GetMoreData won't block, so it's OK to just run the
// task here in the main thread.
@@ -23754,7 +23829,7 @@ TEST(StreamingWithDebuggingEnabledLate) {
std::make_unique<TestSourceStream>(chunks),
v8::ScriptCompiler::StreamedSource::ONE_BYTE);
v8::ScriptCompiler::ScriptStreamingTask* task =
- v8::ScriptCompiler::StartStreamingScript(isolate, &source);
+ v8::ScriptCompiler::StartStreaming(isolate, &source);
task->Run();
delete task;
@@ -23862,7 +23937,7 @@ TEST(StreamingWithHarmonyScopes) {
std::make_unique<TestSourceStream>(chunks),
v8::ScriptCompiler::StreamedSource::ONE_BYTE);
v8::ScriptCompiler::ScriptStreamingTask* task =
- v8::ScriptCompiler::StartStreamingScript(isolate, &source);
+ v8::ScriptCompiler::StartStreaming(isolate, &source);
task->Run();
delete task;
@@ -25989,7 +26064,6 @@ v8::MaybeLocal<v8::Promise> HostImportModuleDynamicallyCallbackResolve(
}
TEST(DynamicImport) {
- i::FLAG_harmony_dynamic_import = true;
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
@@ -26023,8 +26097,6 @@ void HostInitializeImportMetaObjectCallbackStatic(Local<Context> context,
}
TEST(ImportMeta) {
- i::FLAG_harmony_dynamic_import = true;
- i::FLAG_harmony_import_meta = true;
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
@@ -26075,8 +26147,6 @@ void HostInitializeImportMetaObjectCallbackThrow(Local<Context> context,
}
TEST(ImportMetaThrowUnhandled) {
- i::FLAG_harmony_dynamic_import = true;
- i::FLAG_harmony_import_meta = true;
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
@@ -26116,8 +26186,6 @@ TEST(ImportMetaThrowUnhandled) {
}
TEST(ImportMetaThrowHandled) {
- i::FLAG_harmony_dynamic_import = true;
- i::FLAG_harmony_import_meta = true;
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
@@ -27485,6 +27553,24 @@ struct ConvertJSValue<uint64_t> {
};
template <>
+struct ConvertJSValue<float> {
+ static Maybe<float> Get(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context) {
+ Maybe<double> val = value->NumberValue(context);
+ if (val.IsNothing()) return v8::Nothing<float>();
+ return v8::Just(static_cast<float>(val.ToChecked()));
+ }
+};
+
+template <>
+struct ConvertJSValue<double> {
+ static Maybe<double> Get(v8::Local<v8::Value> value,
+ v8::Local<v8::Context> context) {
+ return value->NumberValue(context);
+ }
+};
+
+template <>
struct ConvertJSValue<bool> {
static Maybe<bool> Get(v8::Local<v8::Value> value,
v8::Local<v8::Context> context) {
@@ -27503,11 +27589,12 @@ DEFINE_OPERATORS_FOR_FLAGS(ApiCheckerResultFlags)
template <typename Value, typename Impl>
struct BasicApiChecker {
static void FastCallback(v8::ApiObject receiver, Value argument,
- int* fallback) {
- Impl::FastCallback(receiver, argument, fallback);
+ v8::FastApiCallbackOptions& options) {
+ Impl::FastCallback(receiver, argument, options);
}
static void FastCallbackNoFallback(v8::ApiObject receiver, Value argument) {
- Impl::FastCallback(receiver, argument, nullptr);
+ v8::FastApiCallbackOptions options;
+ Impl::FastCallback(receiver, argument, options);
}
static void SlowCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
Impl::SlowCallback(info);
@@ -27535,16 +27622,32 @@ T* GetInternalField(v8::Object* wrapper) {
wrapper->GetAlignedPointerFromInternalField(offset));
}
+enum class Behavior {
+ kNoException,
+ kException, // An exception should be thrown by the callback function.
+};
+
+enum class FallbackPolicy {
+ kDontRequestFallback,
+ kRequestFallback, // The callback function should write a non-zero value
+ // to the fallback variable.
+};
+
template <typename T>
struct ApiNumberChecker : BasicApiChecker<T, ApiNumberChecker<T>> {
- explicit ApiNumberChecker(T value, bool raise_exception = false,
- int args_count = 1)
- : raise_exception_(raise_exception), args_count_(args_count) {}
-
- static void FastCallback(v8::ApiObject receiver, T argument, int* fallback) {
+ explicit ApiNumberChecker(
+ T value, Behavior raise_exception = Behavior::kNoException,
+ FallbackPolicy write_to_fallback = FallbackPolicy::kDontRequestFallback,
+ int args_count = 1)
+ : raise_exception_(raise_exception),
+ write_to_fallback_(write_to_fallback),
+ args_count_(args_count) {}
+
+ static void FastCallback(v8::ApiObject receiver, T argument,
+ v8::FastApiCallbackOptions& options) {
v8::Object* receiver_obj = reinterpret_cast<v8::Object*>(&receiver);
if (!IsValidUnwrapObject(receiver_obj)) {
- *fallback = 1;
+ options.fallback = 1;
return;
}
ApiNumberChecker<T>* receiver_ptr =
@@ -27552,8 +27655,13 @@ struct ApiNumberChecker : BasicApiChecker<T, ApiNumberChecker<T>> {
receiver_obj);
receiver_ptr->result_ |= ApiCheckerResult::kFastCalled;
receiver_ptr->fast_value_ = argument;
- if (receiver_ptr->raise_exception_) {
- *fallback = 1;
+ if (receiver_ptr->write_to_fallback_ == FallbackPolicy::kRequestFallback) {
+ // Anything != 0 has the same effect here, but we're writing 1 to match
+ // the default behavior expected from the embedder. The value is checked
+ // after being loaded from a stack slot, as defined in
+ // EffectControlLinearizer::LowerFastApiCall.
+ CHECK_EQ(options.fallback, 0);
+ options.fallback = 1;
}
}
@@ -27572,21 +27680,23 @@ struct ApiNumberChecker : BasicApiChecker<T, ApiNumberChecker<T>> {
LocalContext env;
checker->slow_value_ = ConvertJSValue<T>::Get(info[0], env.local());
- if (checker->raise_exception_) {
+ if (checker->raise_exception_ == Behavior::kException) {
+ CHECK(checker->write_to_fallback_ == FallbackPolicy::kRequestFallback);
info.GetIsolate()->ThrowException(v8_str("Callback error"));
}
}
T fast_value_ = T();
Maybe<T> slow_value_ = v8::Nothing<T>();
- bool raise_exception_ = false;
+ Behavior raise_exception_ = Behavior::kNoException;
+ FallbackPolicy write_to_fallback_ = FallbackPolicy::kDontRequestFallback;
int args_count_ = 1;
};
struct UnexpectedObjectChecker
: BasicApiChecker<v8::ApiObject, UnexpectedObjectChecker> {
static void FastCallback(v8::ApiObject receiver, v8::ApiObject argument,
- int* fallback) {
+ v8::FastApiCallbackOptions& options) {
v8::Object* receiver_obj = reinterpret_cast<v8::Object*>(&receiver);
UnexpectedObjectChecker* receiver_ptr =
GetInternalField<UnexpectedObjectChecker, kV8WrapperObjectIndex>(
@@ -27612,11 +27722,6 @@ struct UnexpectedObjectChecker
}
};
-enum class Behavior {
- kNoException,
- kException, // An exception should be thrown by the callback function.
-};
-
template <typename Value, typename Impl>
bool SetupTest(v8::Local<v8::Value> initial_value, LocalContext* env,
BasicApiChecker<Value, Impl>* checker, const char* source_code,
@@ -27626,7 +27731,7 @@ bool SetupTest(v8::Local<v8::Value> initial_value, LocalContext* env,
v8::CFunction c_func;
if (supports_fallback) {
- c_func = v8::CFunction::MakeWithErrorSupport(
+ c_func = v8::CFunction::MakeWithFallbackSupport(
BasicApiChecker<Value, Impl>::FastCallback);
} else {
c_func = v8::CFunction::Make(
@@ -27669,12 +27774,41 @@ bool SetupTest(v8::Local<v8::Value> initial_value, LocalContext* env,
}
template <typename T>
-void CallAndCheck(T expected_value, Behavior expected_behavior,
- ApiCheckerResultFlags expected_path,
- v8::Local<v8::Value> initial_value,
- bool raise_exception = false) {
+void CheckEqual(T actual, T expected) {
+ CHECK_EQ(actual, expected);
+}
+
+template <>
+void CheckEqual<float>(float actual, float expected) {
+ if (std::isnan(expected)) {
+ CHECK(std::isnan(actual));
+ } else {
+ // This differentiates between -0 and +0.
+ CHECK_EQ(std::signbit(actual), std::signbit(expected));
+ CHECK_EQ(actual, expected);
+ }
+}
+
+template <>
+void CheckEqual<double>(double actual, double expected) {
+ if (std::isnan(expected)) {
+ CHECK(std::isnan(actual));
+ } else {
+ // This differentiates between -0 and +0.
+ CHECK_EQ(std::signbit(actual), std::signbit(expected));
+ CHECK_EQ(actual, expected);
+ }
+}
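The float and double specializations exist because plain CHECK_EQ is not enough for floating point: IEEE-754 equality treats -0.0 and +0.0 as equal, and every comparison involving NaN is false. A two-line illustration:

static_assert(-0.0 == 0.0, "== cannot distinguish signed zeros");
// NaN != NaN, so NaN expectations must be matched with std::isnan, and the
// sign of zero must be compared via std::signbit, as done above.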
+
+template <typename T>
+void CallAndCheck(
+ T expected_value, Behavior expected_behavior,
+ ApiCheckerResultFlags expected_path, v8::Local<v8::Value> initial_value,
+ Behavior raise_exception = Behavior::kNoException,
+ FallbackPolicy write_to_fallback = FallbackPolicy::kDontRequestFallback) {
LocalContext env;
- ApiNumberChecker<T> checker(expected_value, raise_exception);
+ ApiNumberChecker<T> checker(expected_value, raise_exception,
+ write_to_fallback);
bool has_caught = SetupTest<T, ApiNumberChecker<T>>(
initial_value, &env, &checker,
@@ -27697,13 +27831,12 @@ void CallAndCheck(T expected_value, Behavior expected_behavior,
if (expected_path & ApiCheckerResult::kSlowCalled) {
if (expected_behavior != Behavior::kException) {
- T slow_value_typed = checker.slow_value_.ToChecked();
- CHECK_EQ(slow_value_typed, expected_value);
+ CheckEqual(checker.slow_value_.ToChecked(), expected_value);
}
}
if (expected_path & ApiCheckerResult::kFastCalled) {
CHECK(checker.DidCallFast());
- CHECK_EQ(checker.fast_value_, expected_value);
+ CheckEqual(checker.fast_value_, expected_value);
}
}
@@ -27733,7 +27866,7 @@ void CallAndDeopt() {
void CallNoFallback(int32_t expected_value) {
LocalContext env;
v8::Local<v8::Value> initial_value(v8_num(42));
- ApiNumberChecker<int32_t> checker(expected_value, false);
+ ApiNumberChecker<int32_t> checker(expected_value, Behavior::kNoException);
SetupTest(initial_value, &env, &checker,
"function func(arg) { return receiver.api_func(arg); }"
"%PrepareFunctionForOptimization(func);"
@@ -27749,7 +27882,7 @@ void CallNoFallback(int32_t expected_value) {
void CallNoConvertReceiver(int32_t expected_value) {
LocalContext env;
v8::Local<v8::Value> initial_value(v8_num(42));
- ApiNumberChecker<int32_t> checker(expected_value, false);
+ ApiNumberChecker<int32_t> checker(expected_value, Behavior::kNoException);
SetupTest(initial_value, &env, &checker,
"function func(arg) { return receiver.api_func(arg); }"
"%PrepareFunctionForOptimization(func);"
@@ -27765,7 +27898,8 @@ void CallNoConvertReceiver(int32_t expected_value) {
void CallWithLessArguments() {
LocalContext env;
v8::Local<v8::Value> initial_value(v8_num(42));
- ApiNumberChecker<int32_t> checker(42, false, 0);
+ ApiNumberChecker<int32_t> checker(42, Behavior::kNoException,
+ FallbackPolicy::kDontRequestFallback, 0);
SetupTest(initial_value, &env, &checker,
"function func() { return receiver.api_func(); }"
"%PrepareFunctionForOptimization(func);"
@@ -27780,7 +27914,8 @@ void CallWithLessArguments() {
void CallWithMoreArguments() {
LocalContext env;
v8::Local<v8::Value> initial_value(v8_num(42));
- ApiNumberChecker<int32_t> checker(42, false, 2);
+ ApiNumberChecker<int32_t> checker(42, Behavior::kNoException,
+ FallbackPolicy::kDontRequestFallback, 2);
SetupTest(initial_value, &env, &checker,
"function func(arg) { receiver.api_func(arg, arg); }"
"%PrepareFunctionForOptimization(func);"
@@ -27860,17 +27995,69 @@ void CheckDynamicTypeInfo() {
} // namespace
#endif // V8_LITE_MODE
+TEST(FastApiStackSlot) {
+#ifndef V8_LITE_MODE
+ if (i::FLAG_jitless) return;
+ if (i::FLAG_turboprop) return;
+
+ FLAG_SCOPE_EXTERNAL(opt);
+ FLAG_SCOPE_EXTERNAL(turbo_fast_api_calls);
+ FLAG_SCOPE_EXTERNAL(allow_natives_syntax);
+  // Disable --always_opt, otherwise we won't have generated the necessary
+  // feedback to go down the "best optimization" path for the fast call.
+ UNFLAG_SCOPE_EXTERNAL(always_opt);
+
+ v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i_isolate->set_embedder_wrapper_type_index(kV8WrapperTypeIndex);
+ i_isolate->set_embedder_wrapper_object_index(kV8WrapperObjectIndex);
+
+ v8::HandleScope scope(isolate);
+ LocalContext env;
+
+ int test_value = 42;
+ ApiNumberChecker<int32_t> checker(test_value, Behavior::kNoException,
+ FallbackPolicy::kRequestFallback);
+
+ bool has_caught = SetupTest<int32_t, ApiNumberChecker<int32_t>>(
+ v8_num(test_value), &env, &checker,
+ "function func(arg) {"
+ " let foo = 128;"
+ " for (let i = 0; i < 100; ++i) {"
+ " let bar = true;"
+ " if (i == 10) %OptimizeOsr();"
+ " try { receiver.api_func(arg) } catch(_) {};"
+ " try { receiver.api_func(arg) } catch(_) {};"
+ " };"
+ " return foo;"
+ "};");
+ checker.result_ = ApiCheckerResult::kNotCalled;
+
+ v8::TryCatch try_catch(isolate);
+ v8::Local<v8::Value> foo =
+ CompileRun("%PrepareFunctionForOptimization(func); func(value);");
+ CHECK(foo->IsNumber());
+ CHECK_EQ(128, foo->ToInt32(env.local()).ToLocalChecked()->Value());
+
+ CHECK(checker.DidCallFast() && checker.DidCallSlow());
+ CHECK_EQ(false, has_caught);
+ int32_t slow_value_typed = checker.slow_value_.ToChecked();
+ CHECK_EQ(slow_value_typed, test_value);
+ CHECK_EQ(checker.fast_value_, test_value);
+#endif
+}
+
TEST(FastApiCalls) {
#ifndef V8_LITE_MODE
if (i::FLAG_jitless) return;
if (i::FLAG_turboprop) return;
- i::FLAG_turbo_fast_api_calls = true;
- i::FLAG_opt = true;
- i::FLAG_allow_natives_syntax = true;
+ FLAG_SCOPE_EXTERNAL(opt);
+ FLAG_SCOPE_EXTERNAL(turbo_fast_api_calls);
+ FLAG_SCOPE_EXTERNAL(allow_natives_syntax);
  // Disable --always_opt, otherwise we won't have generated the necessary
  // feedback to go down the "best optimization" path for the fast call.
- i::FLAG_always_opt = false;
+ UNFLAG_SCOPE_EXTERNAL(always_opt);
v8::Isolate* isolate = CcTest::isolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -27893,6 +28080,18 @@ TEST(FastApiCalls) {
ApiCheckerResult::kFastCalled,
v8::Boolean::New(isolate, true));
+#ifdef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+ CallAndCheck<float>(3.14f, Behavior::kNoException,
+ ApiCheckerResult::kFastCalled, v8_num(3.14));
+ CallAndCheck<double>(3.14, Behavior::kNoException,
+ ApiCheckerResult::kFastCalled, v8_num(3.14));
+#else
+ CallAndCheck<float>(3.14f, Behavior::kNoException,
+ ApiCheckerResult::kSlowCalled, v8_num(3.14));
+ CallAndCheck<double>(3.14, Behavior::kNoException,
+ ApiCheckerResult::kSlowCalled, v8_num(3.14));
+#endif
+
// Corner cases (the value is out of bounds or of different type) - int32_t
CallAndCheck<int32_t>(0, Behavior::kNoException,
ApiCheckerResult::kFastCalled, v8_num(-0.0));
@@ -27950,33 +28149,9 @@ TEST(FastApiCalls) {
CallAndCheck<uint32_t>(3, Behavior::kNoException,
ApiCheckerResult::kFastCalled, v8_num(3.14));
+ // Both 32- and 64-bit platforms should execute the following tests
+ // through the slow path.
// Corner cases - int64
-#ifdef V8_TARGET_ARCH_X64
- CallAndCheck<int64_t>(static_cast<int64_t>(i::Smi::kMaxValue) + 1,
- Behavior::kNoException, ApiCheckerResult::kFastCalled,
- v8_num(static_cast<int64_t>(i::Smi::kMaxValue) + 1));
- CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
- Behavior::kNoException, ApiCheckerResult::kFastCalled,
- v8_num(std::numeric_limits<int64_t>::min()));
- CallAndCheck<int64_t>(1ll << 62, Behavior::kNoException,
- ApiCheckerResult::kFastCalled, v8_num(1ll << 62));
- CallAndCheck<int64_t>(i::kMaxSafeInteger, Behavior::kNoException,
- ApiCheckerResult::kFastCalled,
- v8_num(i::kMaxSafeInteger));
- CallAndCheck<int64_t>(-i::kMaxSafeInteger, Behavior::kNoException,
- ApiCheckerResult::kFastCalled,
- v8_num(-i::kMaxSafeInteger));
- CallAndCheck<int64_t>((1ull << 63) - 1024, Behavior::kNoException,
- ApiCheckerResult::kFastCalled,
- v8_num((1ull << 63) - 1024));
- // TODO(mslekova): We deopt for unsafe integers, but ultimately we want to
- // stay on the fast path.
- CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
- Behavior::kNoException, ApiCheckerResult::kSlowCalled,
- v8_num(static_cast<double>(1ull << 63)));
- CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
- Behavior::kNoException, ApiCheckerResult::kSlowCalled,
- v8_num(1ull << 63));
CallAndCheck<int64_t>(0, Behavior::kNoException,
ApiCheckerResult::kSlowCalled, v8_num(std::pow(2, 65)));
CallAndCheck<int64_t>(8192, Behavior::kNoException,
@@ -27986,8 +28161,6 @@ TEST(FastApiCalls) {
ApiCheckerResult::kSlowCalled,
v8_num(std::pow(2, 1023)));
CallAndCheck<int64_t>(0, Behavior::kNoException,
- ApiCheckerResult::kFastCalled, v8_num(-0.0));
- CallAndCheck<int64_t>(0, Behavior::kNoException,
ApiCheckerResult::kSlowCalled,
v8_num(std::numeric_limits<double>::quiet_NaN()));
CallAndCheck<int64_t>(0, Behavior::kNoException,
@@ -28008,19 +28181,9 @@ TEST(FastApiCalls) {
CallAndCheck<int64_t>(3, Behavior::kNoException,
ApiCheckerResult::kSlowCalled, v8_num(3.14));
CallAndCheck<int64_t>(
- std::numeric_limits<int64_t>::min(), Behavior::kNoException,
- ApiCheckerResult::kSlowCalled,
- v8_num(static_cast<double>(std::numeric_limits<int64_t>::max()) + 3.14));
- CallAndCheck<int64_t>(
0, Behavior::kNoException, ApiCheckerResult::kSlowCalled,
v8_num(static_cast<double>(std::numeric_limits<int64_t>::max()) * 2 +
3.14));
- CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
- Behavior::kNoException, ApiCheckerResult::kSlowCalled,
- v8_num(static_cast<double>(1ull << 63)));
- CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
- Behavior::kNoException, ApiCheckerResult::kSlowCalled,
- v8_num(-static_cast<double>(1ll << 63)));
CallAndCheck<int64_t>(0, Behavior::kNoException,
ApiCheckerResult::kSlowCalled,
v8_num(static_cast<double>(1ull << 63) * 2));
@@ -28032,28 +28195,6 @@ TEST(FastApiCalls) {
v8_num(static_cast<double>(1ull << 63) * 3 + 4096));
// Corner cases - uint64_t
- CallAndCheck<uint64_t>(static_cast<uint64_t>(i::Smi::kMaxValue) + 1,
- Behavior::kNoException, ApiCheckerResult::kFastCalled,
- v8_num(static_cast<uint64_t>(i::Smi::kMaxValue) + 1));
- CallAndCheck<uint64_t>(std::numeric_limits<uint64_t>::min(),
- Behavior::kNoException, ApiCheckerResult::kFastCalled,
- v8_num(std::numeric_limits<uint64_t>::min()));
- CallAndCheck<uint64_t>(1ll << 62, Behavior::kNoException,
- ApiCheckerResult::kFastCalled, v8_num(1ll << 62));
- CallAndCheck<uint64_t>(
- std::numeric_limits<uint64_t>::max() - ((1ll << 62) - 1),
- Behavior::kNoException, ApiCheckerResult::kFastCalled,
- v8_num(-(1ll << 62)));
- CallAndCheck<uint64_t>(i::kMaxSafeIntegerUint64, Behavior::kNoException,
- ApiCheckerResult::kFastCalled,
- v8_num(i::kMaxSafeInteger));
- CallAndCheck<uint64_t>(
- std::numeric_limits<uint64_t>::max() - (i::kMaxSafeIntegerUint64 - 1),
- Behavior::kNoException, ApiCheckerResult::kFastCalled,
- v8_num(-i::kMaxSafeInteger));
- CallAndCheck<uint64_t>(1ull << 63, Behavior::kNoException,
- ApiCheckerResult::kSlowCalled,
- v8_num(static_cast<double>(1ull << 63)));
CallAndCheck<uint64_t>(static_cast<double>(1ull << 63) * 2 - 2048,
Behavior::kNoException, ApiCheckerResult::kSlowCalled,
v8_num(static_cast<double>(1ull << 63) * 2 - 2048));
@@ -28063,8 +28204,6 @@ TEST(FastApiCalls) {
ApiCheckerResult::kSlowCalled,
v8_num(static_cast<double>(1ull << 63) * 2));
CallAndCheck<uint64_t>(0, Behavior::kNoException,
- ApiCheckerResult::kFastCalled, v8_num(-0.0));
- CallAndCheck<uint64_t>(0, Behavior::kNoException,
ApiCheckerResult::kSlowCalled,
v8_num(std::numeric_limits<double>::quiet_NaN()));
CallAndCheck<uint64_t>(0, Behavior::kNoException,
@@ -28091,7 +28230,183 @@ TEST(FastApiCalls) {
CallAndCheck<uint64_t>(static_cast<double>(1ull << 63) + 4096,
Behavior::kNoException, ApiCheckerResult::kSlowCalled,
v8_num(static_cast<double>(1ull << 63) * 3 + 4096));
-#endif // V8_TARGET_ARCH_X64
+
+  // The following int64/uint64 tests are platform-dependent, because TurboFan
+  // currently doesn't support 64-bit integers on 32-bit architectures.
+  // Attempting the fast path there leads to unsupported situations, e.g.
+  // calling IA32OperandConverter::ToImmediate with a 64-bit operand.
+#ifdef V8_TARGET_ARCH_64_BIT
+ ApiCheckerResult expected_path_for_64bit_test = ApiCheckerResult::kFastCalled;
+#else
+ ApiCheckerResult expected_path_for_64bit_test = ApiCheckerResult::kSlowCalled;
+#endif
+ // Corner cases - int64
+ CallAndCheck<int64_t>(static_cast<int64_t>(i::Smi::kMaxValue) + 1,
+ Behavior::kNoException, expected_path_for_64bit_test,
+ v8_num(static_cast<int64_t>(i::Smi::kMaxValue) + 1));
+ CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
+ Behavior::kNoException, expected_path_for_64bit_test,
+ v8_num(std::numeric_limits<int64_t>::min()));
+ CallAndCheck<int64_t>(1ll << 62, Behavior::kNoException,
+ expected_path_for_64bit_test, v8_num(1ll << 62));
+ CallAndCheck<int64_t>(i::kMaxSafeInteger, Behavior::kNoException,
+ expected_path_for_64bit_test,
+ v8_num(i::kMaxSafeInteger));
+ CallAndCheck<int64_t>(-i::kMaxSafeInteger, Behavior::kNoException,
+ expected_path_for_64bit_test,
+ v8_num(-i::kMaxSafeInteger));
+ CallAndCheck<int64_t>((1ull << 63) - 1024, Behavior::kNoException,
+ expected_path_for_64bit_test,
+ v8_num((1ull << 63) - 1024));
+ CallAndCheck<int64_t>(0, Behavior::kNoException, expected_path_for_64bit_test,
+ v8_num(-0.0));
+
+ // Corner cases - uint64_t
+ CallAndCheck<uint64_t>(static_cast<uint64_t>(i::Smi::kMaxValue) + 1,
+ Behavior::kNoException, expected_path_for_64bit_test,
+ v8_num(static_cast<uint64_t>(i::Smi::kMaxValue) + 1));
+ CallAndCheck<uint64_t>(std::numeric_limits<uint64_t>::min(),
+ Behavior::kNoException, expected_path_for_64bit_test,
+ v8_num(std::numeric_limits<uint64_t>::min()));
+ CallAndCheck<uint64_t>(1ll << 62, Behavior::kNoException,
+ expected_path_for_64bit_test, v8_num(1ll << 62));
+ CallAndCheck<uint64_t>(
+ std::numeric_limits<uint64_t>::max() - ((1ll << 62) - 1),
+ Behavior::kNoException, expected_path_for_64bit_test,
+ v8_num(-(1ll << 62)));
+ CallAndCheck<uint64_t>(i::kMaxSafeIntegerUint64, Behavior::kNoException,
+ expected_path_for_64bit_test,
+ v8_num(i::kMaxSafeInteger));
+ CallAndCheck<uint64_t>(
+ std::numeric_limits<uint64_t>::max() - (i::kMaxSafeIntegerUint64 - 1),
+ Behavior::kNoException, expected_path_for_64bit_test,
+ v8_num(-i::kMaxSafeInteger));
+ CallAndCheck<uint64_t>(0, Behavior::kNoException,
+ expected_path_for_64bit_test, v8_num(-0.0));
+
+#ifndef V8_TARGET_ARCH_ARM64
+ // TODO(mslekova): We deopt for unsafe integers, but ultimately we want to
+ // stay on the fast path.
+ CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
+ Behavior::kNoException, ApiCheckerResult::kSlowCalled,
+ v8_num(1ull << 63));
+ CallAndCheck<int64_t>(
+ std::numeric_limits<int64_t>::min(), Behavior::kNoException,
+ ApiCheckerResult::kSlowCalled,
+ v8_num(static_cast<double>(std::numeric_limits<int64_t>::max()) + 3.14));
+ CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
+ Behavior::kNoException, ApiCheckerResult::kSlowCalled,
+ v8_num(static_cast<double>(1ull << 63)));
+ CallAndCheck<int64_t>(std::numeric_limits<int64_t>::min(),
+ Behavior::kNoException, ApiCheckerResult::kSlowCalled,
+ v8_num(-static_cast<double>(1ll << 63)));
+ CallAndCheck<uint64_t>(1ull << 63, Behavior::kNoException,
+ ApiCheckerResult::kSlowCalled,
+ v8_num(static_cast<double>(1ull << 63)));
+#else
+  // TODO(v8:11121): Currently the tests above are only executed on non-arm64
+  // platforms, because on arm64 they would go down the fast path due to the
+  // incorrect behaviour of CheckedFloat64ToInt64 (see the linked issue for
+  // details). Eventually we want to remove the conditional compilation and
+  // ensure consistent behaviour on all platforms.
+#endif // V8_TARGET_ARCH_ARM64
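The unsafe-integer cases above sit exactly at the representability boundary: 2^63 is exactly representable as a double, while the largest int64_t is 2^63 - 1, so a checked Float64-to-Int64 conversion has no in-range result and has to bail out (here: deopt to the slow path). In code:

#include <cstdint>
#include <limits>
static_assert(static_cast<double>(1ull << 63) == 9223372036854775808.0,
              "2^63 is exact as a double");
static_assert(std::numeric_limits<int64_t>::max() == (1ull << 63) - 1,
              "but it is one past the largest int64_t");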
+
+ // Corner cases - float and double
+#ifdef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+ // Source:
+ // https://en.wikipedia.org/wiki/Single-precision_floating-point_format#Precision_limitations_on_integer_values
+ constexpr float kMaxSafeFloat = 16777215; // 2^24-1
+ CallAndCheck<float>(std::numeric_limits<float>::min(), Behavior::kNoException,
+ ApiCheckerResult::kFastCalled,
+ v8_num(std::numeric_limits<float>::min()));
+ CallAndCheck<float>(-kMaxSafeFloat, Behavior::kNoException,
+ ApiCheckerResult::kFastCalled, v8_num(-kMaxSafeFloat));
+ CallAndCheck<float>(-0.0f, Behavior::kNoException,
+ ApiCheckerResult::kFastCalled, v8_num(-0.0));
+ CallAndCheck<float>(0.0f, Behavior::kNoException,
+ ApiCheckerResult::kFastCalled, v8_num(0.0));
+ CallAndCheck<float>(kMaxSafeFloat, Behavior::kNoException,
+ ApiCheckerResult::kFastCalled, v8_num(kMaxSafeFloat));
+ CallAndCheck<float>(std::numeric_limits<float>::max(), Behavior::kNoException,
+ ApiCheckerResult::kFastCalled,
+ v8_num(std::numeric_limits<float>::max()));
+ CallAndCheck<float>(std::numeric_limits<float>::quiet_NaN(),
+ Behavior::kNoException, ApiCheckerResult::kFastCalled,
+ v8_num(std::numeric_limits<float>::quiet_NaN()));
+ CallAndCheck<float>(std::numeric_limits<float>::infinity(),
+ Behavior::kNoException, ApiCheckerResult::kFastCalled,
+ v8_num(std::numeric_limits<float>::infinity()));
+ CallAndCheck<float>(std::numeric_limits<float>::quiet_NaN(),
+ Behavior::kNoException, ApiCheckerResult::kSlowCalled,
+ v8_str("some_string"));
+ CallAndCheck<float>(std::numeric_limits<float>::quiet_NaN(),
+ Behavior::kNoException, ApiCheckerResult::kSlowCalled,
+ CompileRun("new Proxy({}, {});"));
+ CallAndCheck<float>(std::numeric_limits<float>::quiet_NaN(),
+ Behavior::kNoException, ApiCheckerResult::kSlowCalled,
+ v8::Object::New(isolate));
+ CallAndCheck<float>(0, Behavior::kNoException, ApiCheckerResult::kSlowCalled,
+ v8::Array::New(isolate));
+ CallAndCheck<float>(std::numeric_limits<float>::quiet_NaN(),
+ Behavior::kException, ApiCheckerResult::kSlowCalled,
+ v8::BigInt::New(isolate, 42));
+ CallAndCheck<float>(-std::numeric_limits<float>::infinity(),
+ Behavior::kNoException, ApiCheckerResult::kFastCalled,
+ v8_num(-std::numeric_limits<double>::max()));
+ CallAndCheck<float>(std::numeric_limits<float>::infinity(),
+ Behavior::kNoException, ApiCheckerResult::kFastCalled,
+ v8_num(std::numeric_limits<double>::max()));
+ CallAndCheck<float>(kMaxSafeFloat + 1.0f, Behavior::kNoException,
+ ApiCheckerResult::kFastCalled,
+ v8_num(static_cast<double>(kMaxSafeFloat) + 2.0));
+
+ CallAndCheck<double>(std::numeric_limits<double>::min(),
+ Behavior::kNoException, ApiCheckerResult::kFastCalled,
+ v8_num(std::numeric_limits<double>::min()));
+ CallAndCheck<double>(-i::kMaxSafeInteger, Behavior::kNoException,
+ ApiCheckerResult::kFastCalled,
+ v8_num(-i::kMaxSafeInteger));
+ CallAndCheck<double>(std::numeric_limits<float>::min(),
+ Behavior::kNoException, ApiCheckerResult::kFastCalled,
+ v8_num(std::numeric_limits<float>::min()));
+ CallAndCheck<double>(-0.0, Behavior::kNoException,
+ ApiCheckerResult::kFastCalled, v8_num(-0.0));
+ CallAndCheck<double>(0.0, Behavior::kNoException,
+ ApiCheckerResult::kFastCalled, v8_num(0.0));
+ CallAndCheck<double>(std::numeric_limits<float>::max(),
+ Behavior::kNoException, ApiCheckerResult::kFastCalled,
+ v8_num(std::numeric_limits<float>::max()));
+ CallAndCheck<double>(i::kMaxSafeInteger, Behavior::kNoException,
+ ApiCheckerResult::kFastCalled,
+ v8_num(i::kMaxSafeInteger));
+ CallAndCheck<double>(i::kMaxSafeInteger + 1, Behavior::kNoException,
+ ApiCheckerResult::kFastCalled,
+ v8_num(i::kMaxSafeInteger + 1));
+ CallAndCheck<double>(std::numeric_limits<double>::max(),
+ Behavior::kNoException, ApiCheckerResult::kFastCalled,
+ v8_num(std::numeric_limits<double>::max()));
+ CallAndCheck<double>(std::numeric_limits<double>::quiet_NaN(),
+ Behavior::kNoException, ApiCheckerResult::kFastCalled,
+ v8_num(std::numeric_limits<double>::quiet_NaN()));
+ CallAndCheck<double>(std::numeric_limits<double>::infinity(),
+ Behavior::kNoException, ApiCheckerResult::kFastCalled,
+ v8_num(std::numeric_limits<double>::infinity()));
+ CallAndCheck<double>(std::numeric_limits<double>::quiet_NaN(),
+ Behavior::kNoException, ApiCheckerResult::kSlowCalled,
+ v8_str("some_string"));
+ CallAndCheck<double>(std::numeric_limits<double>::quiet_NaN(),
+ Behavior::kNoException, ApiCheckerResult::kSlowCalled,
+ CompileRun("new Proxy({}, {});"));
+ CallAndCheck<double>(std::numeric_limits<double>::quiet_NaN(),
+ Behavior::kNoException, ApiCheckerResult::kSlowCalled,
+ v8::Object::New(isolate));
+ CallAndCheck<double>(0, Behavior::kNoException, ApiCheckerResult::kSlowCalled,
+ v8::Array::New(isolate));
+ CallAndCheck<double>(std::numeric_limits<double>::quiet_NaN(),
+ Behavior::kException, ApiCheckerResult::kSlowCalled,
+ v8::BigInt::New(isolate, 42));
+#endif
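The #ifdef gating above mirrors the build-time capability: only with V8_ENABLE_FP_PARAMS_IN_C_LINKAGE can TurboFan pass float/double arguments in FP registers to the C function. A hypothetical embedder callback like the sketch below (name and body are illustrative) would therefore take the fast path only under that build flag, and otherwise go through the slow path:

void FastSetOpacity(v8::ApiObject receiver, double opacity) {
  // ... fast-path work only; no exceptions can be thrown from here ...
}
v8::CFunction c_func = v8::CFunction::Make(FastSetOpacity);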
// Corner cases - bool
CallAndCheck<bool>(false, Behavior::kNoException,
@@ -28128,15 +28443,22 @@ TEST(FastApiCalls) {
CheckDynamicTypeInfo();
- // Fallback to slow call
+  // Fall back to the slow call and throw an exception.
CallAndCheck<int32_t>(
42, Behavior::kException,
ApiCheckerResult::kFastCalled | ApiCheckerResult::kSlowCalled, v8_num(42),
- true);
+ Behavior::kException, FallbackPolicy::kRequestFallback);
+
+  // Fall back to the slow call without throwing an exception.
+ CallAndCheck<int32_t>(
+ 42, Behavior::kNoException,
+ ApiCheckerResult::kFastCalled | ApiCheckerResult::kSlowCalled, v8_num(42),
+ Behavior::kNoException, FallbackPolicy::kRequestFallback);
- // Doesn't fallback to slow call
- CallAndCheck<int32_t>(42, Behavior::kNoException,
- ApiCheckerResult::kFastCalled, v8_num(42), false);
+  // Doesn't fall back to the slow call, so no exception is thrown.
+ CallAndCheck<int32_t>(
+ 42, Behavior::kNoException, ApiCheckerResult::kFastCalled, v8_num(42),
+ Behavior::kNoException, FallbackPolicy::kDontRequestFallback);
// Wrong number of arguments
CallWithLessArguments();
@@ -28233,7 +28555,7 @@ class MetricsRecorder : public v8::metrics::Recorder {
v8::metrics::Recorder::ContextId id) override {
if (v8::metrics::Recorder::GetContext(isolate_, id).IsEmpty()) return;
++count_;
- time_in_us_ = event.wall_clock_time_in_us;
+ time_in_us_ = event.wall_clock_duration_in_us;
}
void AddThreadSafeEvent(
@@ -28267,7 +28589,7 @@ TEST(TriggerMainThreadMetricsEvent) {
// Check that event submission works.
{
i::metrics::TimedScope<v8::metrics::WasmModuleDecoded> timed_scope(
- &event, &v8::metrics::WasmModuleDecoded::wall_clock_time_in_us);
+ &event);
v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(100));
}
i_iso->metrics_recorder()->AddMainThreadEvent(event, context_id);
@@ -28306,7 +28628,7 @@ TEST(TriggerDelayedMainThreadMetricsEvent) {
// Check that event submission works.
{
i::metrics::TimedScope<v8::metrics::WasmModuleDecoded> timed_scope(
- &event, &v8::metrics::WasmModuleDecoded::wall_clock_time_in_us);
+ &event);
v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(100));
}
i_iso->metrics_recorder()->DelayMainThreadEvent(event, context_id);
@@ -28349,12 +28671,136 @@ TEST(TriggerThreadSafeMetricsEvent) {
CHECK_EQ(recorder->module_count_, 42);
}
-THREADED_TEST(MicrotaskQueueOfContext) {
- auto microtask_queue = v8::MicrotaskQueue::New(CcTest::isolate());
- v8::HandleScope scope(CcTest::isolate());
- v8::Local<Context> context = Context::New(
- CcTest::isolate(), nullptr, v8::MaybeLocal<ObjectTemplate>(),
- v8::MaybeLocal<Value>(), v8::DeserializeInternalFieldsCallback(),
- microtask_queue.get());
- CHECK_EQ(context->GetMicrotaskQueue(), microtask_queue.get());
+void SetupCodeLike(LocalContext* env, const char* name,
+ v8::Local<v8::FunctionTemplate> to_string,
+ bool is_code_like) {
+  // Set up a JS constructor + object template for testing IsCodeLike.
+ v8::Local<FunctionTemplate> constructor =
+ v8::FunctionTemplate::New((*env)->GetIsolate());
+ constructor->SetClassName(v8_str(name));
+ constructor->InstanceTemplate()->Set((*env)->GetIsolate(), "toString",
+ to_string);
+ if (is_code_like) {
+ constructor->InstanceTemplate()->SetCodeLike();
+ }
+ CHECK_EQ(is_code_like, constructor->InstanceTemplate()->IsCodeLike());
+ CHECK((*env)
+ ->Global()
+ ->Set(env->local(), v8_str(name),
+ constructor->GetFunction(env->local()).ToLocalChecked())
+ .FromJust());
+}
+
+TEST(CodeLikeEval) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+  // Set up two object templates with an eval-able string representation.
+  // One code-like, one not, and otherwise identical.
+ auto string_fn = v8::FunctionTemplate::New(
+ isolate, [](const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(v8_str("2+2"));
+ });
+ SetupCodeLike(&env, "CodeLike", string_fn, true);
+ SetupCodeLike(&env, "Other", string_fn, false);
+
+ // Check v8::Object::IsCodeLike.
+ CHECK(CompileRun("new CodeLike()").As<v8::Object>()->IsCodeLike(isolate));
+ CHECK(!CompileRun("new Other()").As<v8::Object>()->IsCodeLike(isolate));
+
+ // Expected behaviour for normal objects:
+ // - eval returns them as-is
+ // - when pre-stringified, the string gets evaluated (of course)
+ ExpectString("eval(new Other()) + \"\"", "2+2");
+ ExpectInt32("eval(\"\" + new Other())", 4);
+
+ // Expected behaviour for 'code like': Is always evaluated.
+ ExpectInt32("eval(new CodeLike())", 4);
+ ExpectInt32("eval(\"\" + new CodeLike())", 4);
+
+  // Modify callback will always return a replacement string:
+ // Expected behaviour: Always execute the replacement string.
+ isolate->SetModifyCodeGenerationFromStringsCallback(
+ [](v8::Local<v8::Context> context, v8::Local<v8::Value> source,
+ bool is_code_like) -> v8::ModifyCodeGenerationFromStringsResult {
+ return {true, v8_str("3+3")};
+ });
+ ExpectInt32("eval(new Other())", 6);
+ ExpectInt32("eval(new CodeLike())", 6);
+
+ // Modify callback always disallows:
+ // Expected behaviour: Always fail to execute.
+ isolate->SetModifyCodeGenerationFromStringsCallback(
+ [](v8::Local<v8::Context> context, v8::Local<v8::Value> source,
+ bool is_code_like) -> v8::ModifyCodeGenerationFromStringsResult {
+ return {false, v8::Local<v8::String>()};
+ });
+ CHECK(CompileRun("eval(new Other())").IsEmpty());
+ CHECK(CompileRun("eval(new CodeLike())").IsEmpty());
+
+ // Modify callback allows only "code like":
+ // Expected behaviour: Only code-like executed, with replacement string.
+ isolate->SetModifyCodeGenerationFromStringsCallback(
+ [](v8::Local<v8::Context> context, v8::Local<v8::Value> source,
+ bool is_code_like) -> v8::ModifyCodeGenerationFromStringsResult {
+ bool ok = is_code_like ||
+ (source->IsObject() &&
+ source.As<v8::Object>()->IsCodeLike(context->GetIsolate()));
+ return {ok, v8_str("5+7")};
+ });
+ CHECK(CompileRun("eval(new Other())").IsEmpty());
+ ExpectInt32("eval(new CodeLike())", 12);
+}
+
+TEST(CodeLikeFunction) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ // These follow the pattern of the CodeLikeEval test above, but with
+ // "new Function" instead of eval.
+
+  // Set up two object templates with an eval-able string representation.
+  // One code-like, one not, and otherwise identical.
+ auto string_fn = v8::FunctionTemplate::New(
+ isolate, [](const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(v8_str("return 2+2"));
+ });
+ SetupCodeLike(&env, "CodeLike", string_fn, true);
+ SetupCodeLike(&env, "Other", string_fn, false);
+
+ ExpectInt32("new Function(new Other())()", 4);
+ ExpectInt32("new Function(new CodeLike())()", 4);
+
+ // Modify callback will always return a replacement string:
+ env.local()->AllowCodeGenerationFromStrings(false);
+ isolate->SetModifyCodeGenerationFromStringsCallback(
+ [](v8::Local<v8::Context> context, v8::Local<v8::Value> source,
+ bool is_code_like) -> v8::ModifyCodeGenerationFromStringsResult {
+ return {true, v8_str("(function anonymous(\n) {\nreturn 7;\n})\n")};
+ });
+ ExpectInt32("new Function(new Other())()", 7);
+ ExpectInt32("new Function(new CodeLike())()", 7);
+
+ // Modify callback always disallows:
+ isolate->SetModifyCodeGenerationFromStringsCallback(
+ [](v8::Local<v8::Context> context, v8::Local<v8::Value> source,
+ bool is_code_like) -> v8::ModifyCodeGenerationFromStringsResult {
+ return {false, v8::Local<v8::String>()};
+ });
+ CHECK(CompileRun("new Function(new Other())()").IsEmpty());
+ CHECK(CompileRun("new Function(new CodeLike())()").IsEmpty());
+
+  // Modify callback allows only "code like":
+ isolate->SetModifyCodeGenerationFromStringsCallback(
+ [](v8::Local<v8::Context> context, v8::Local<v8::Value> source,
+ bool is_code_like) -> v8::ModifyCodeGenerationFromStringsResult {
+ bool ok = is_code_like ||
+ (source->IsObject() &&
+ source.As<v8::Object>()->IsCodeLike(context->GetIsolate()));
+ return {ok, v8_str("(function anonymous(\n) {\nreturn 7;\n})\n")};
+ });
+ CHECK(CompileRun("new Function(new Other())()").IsEmpty());
+ ExpectInt32("new Function(new CodeLike())()", 7);
}
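Taken together, the two tests pin down the embedder-facing surface: an ObjectTemplate marked with SetCodeLike() produces instances that eval() and new Function() stringify and compile, subject to the ModifyCodeGenerationFromStringsCallback, which receives is_code_like and can also query v8::Object::IsCodeLike. A sketch of that usage (the replacement source string is illustrative):

v8::Local<v8::FunctionTemplate> tmpl = v8::FunctionTemplate::New(isolate);
tmpl->InstanceTemplate()->SetCodeLike();  // instances report IsCodeLike()

isolate->SetModifyCodeGenerationFromStringsCallback(
    [](v8::Local<v8::Context> context, v8::Local<v8::Value> source,
       bool is_code_like) -> v8::ModifyCodeGenerationFromStringsResult {
      bool allow = is_code_like ||
                   (source->IsObject() &&
                    source.As<v8::Object>()->IsCodeLike(context->GetIsolate()));
      // The string returned here is what actually gets compiled.
      return {allow,
              v8::String::NewFromUtf8Literal(context->GetIsolate(), "2 + 2")};
    });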
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 9204eb4eec..fa3ebdd86d 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -61,7 +61,7 @@ TEST(0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -97,7 +97,7 @@ TEST(1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -142,7 +142,7 @@ TEST(2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -188,7 +188,7 @@ TEST(3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -318,7 +318,7 @@ TEST(4) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -380,7 +380,7 @@ TEST(5) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -411,7 +411,7 @@ TEST(6) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -478,7 +478,7 @@ static void TestRoundingMode(VCVTTypes types,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -660,7 +660,7 @@ TEST(8) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -769,7 +769,7 @@ TEST(9) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -874,7 +874,7 @@ TEST(10) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -968,7 +968,7 @@ TEST(11) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -1093,7 +1093,7 @@ TEST(13) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -1165,7 +1165,7 @@ TEST(14) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -2045,7 +2045,7 @@ TEST(15) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -2321,7 +2321,7 @@ TEST(16) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -2399,7 +2399,7 @@ TEST(sdiv) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -2459,7 +2459,7 @@ TEST(udiv) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -2488,7 +2488,7 @@ TEST(smmla) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2513,7 +2513,7 @@ TEST(smmul) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2538,7 +2538,7 @@ TEST(sxtb) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2563,7 +2563,7 @@ TEST(sxtab) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2588,7 +2588,7 @@ TEST(sxth) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2613,7 +2613,7 @@ TEST(sxtah) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2638,7 +2638,7 @@ TEST(uxtb) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2663,7 +2663,7 @@ TEST(uxtab) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2688,7 +2688,7 @@ TEST(uxth) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2713,7 +2713,7 @@ TEST(uxtah) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2754,7 +2754,7 @@ TEST(rbit) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
@@ -2834,7 +2834,7 @@ TEST(code_relative_offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::STUB)
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING)
.set_self_reference(code_object)
.Build();
auto f = GeneratedCode<F_iiiii>::FromCode(*code);
@@ -2875,7 +2875,7 @@ TEST(msr_mrs) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -2972,7 +2972,7 @@ TEST(ARMv8_float32_vrintX) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -3073,7 +3073,7 @@ TEST(ARMv8_vrintX) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -3210,7 +3210,7 @@ TEST(ARMv8_vsel) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -3301,7 +3301,7 @@ TEST(ARMv8_vminmax_f64) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -3381,7 +3381,7 @@ TEST(ARMv8_vminmax_f32) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -3514,7 +3514,7 @@ static GeneratedCode<F_ppiii> GenerateMacroFloatMinMax(
CodeDesc desc;
assm.GetCode(assm.isolate(), &desc);
Handle<Code> code =
- Factory::CodeBuilder(assm.isolate(), desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(assm.isolate(), desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -3677,7 +3677,7 @@ TEST(unaligned_loads) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -3720,7 +3720,7 @@ TEST(unaligned_stores) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -3820,7 +3820,7 @@ TEST(vswp) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -4032,7 +4032,7 @@ TEST(split_add_immediate) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -4052,7 +4052,7 @@ TEST(split_add_immediate) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -4075,7 +4075,7 @@ TEST(split_add_immediate) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 52aaf3162b..7dbe07c924 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -158,16 +158,16 @@ static void InitializeVM() {
#define RUN() simulator.RunFrom(reinterpret_cast<Instruction*>(code->entry()))
-#define END() \
- __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \
- core.Dump(&masm); \
- __ PopCalleeSavedRegisters(); \
- __ Ret(); \
- { \
- CodeDesc desc; \
- __ GetCode(masm.isolate(), &desc); \
- code = Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build(); \
- if (FLAG_print_code) code->Print(); \
+#define END() \
+ __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \
+ core.Dump(&masm); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
+ { \
+ CodeDesc desc; \
+ __ GetCode(masm.isolate(), &desc); \
+ code = Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); \
+ if (FLAG_print_code) code->Print(); \
}
#else // ifdef USE_SIMULATOR.
@@ -204,15 +204,15 @@ static void InitializeVM() {
f.Call(); \
}
-#define END() \
- core.Dump(&masm); \
- __ PopCalleeSavedRegisters(); \
- __ Ret(); \
- { \
- CodeDesc desc; \
- __ GetCode(masm.isolate(), &desc); \
- code = Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build(); \
- if (FLAG_print_code) code->Print(); \
+#define END() \
+ core.Dump(&masm); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
+ { \
+ CodeDesc desc; \
+ __ GetCode(masm.isolate(), &desc); \
+ code = Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); \
+ if (FLAG_print_code) code->Print(); \
}
#endif // ifdef USE_SIMULATOR.
@@ -11784,6 +11784,9 @@ TEST(system_msr) {
}
TEST(system_pauth_b) {
+#ifdef DEBUG
+ i::FLAG_sim_abort_on_bad_auth = false;
+#endif
SETUP();
START();
@@ -14880,7 +14883,7 @@ TEST(pool_size) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- code = Factory::CodeBuilder(isolate, desc, CodeKind::STUB)
+ code = Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING)
.set_self_reference(masm.CodeObject())
.Build();
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index 193fae18da..9598930e2c 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -27,14 +27,14 @@
#include <stdlib.h>
-#include "src/init/v8.h"
-
#include "src/base/platform/platform.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/diagnostics/disassembler.h"
#include "src/heap/factory.h"
+#include "src/init/v8.h"
#include "src/utils/ostreams.h"
#include "test/cctest/cctest.h"
@@ -63,7 +63,7 @@ TEST(AssemblerIa320) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -101,7 +101,7 @@ TEST(AssemblerIa321) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -143,7 +143,7 @@ TEST(AssemblerIa322) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -172,7 +172,7 @@ TEST(AssemblerIa323) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -201,7 +201,7 @@ TEST(AssemblerIa324) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -229,7 +229,7 @@ TEST(AssemblerIa325) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
F0 f = FUNCTION_CAST<F0>(code->entry());
int res = f();
CHECK_EQ(42, res);
@@ -262,7 +262,7 @@ TEST(AssemblerIa326) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -294,7 +294,7 @@ TEST(AssemblerIa328) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -377,7 +377,7 @@ TEST(AssemblerMultiByteNop) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
CHECK(code->IsCode());
F0 f = FUNCTION_CAST<F0>(code->entry());
@@ -428,7 +428,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
F0 f = FUNCTION_CAST<F0>(code->entry());
int res = f();
@@ -493,7 +493,7 @@ TEST(AssemblerIa32Extractps) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -532,7 +532,7 @@ TEST(AssemblerIa32SSE) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -565,7 +565,7 @@ TEST(AssemblerIa32SSE3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -793,7 +793,7 @@ TEST(AssemblerX64FMA_sd) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1021,7 +1021,7 @@ TEST(AssemblerX64FMA_ss) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1129,7 +1129,7 @@ TEST(AssemblerIa32BMI1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1177,7 +1177,7 @@ TEST(AssemblerIa32LZCNT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1225,7 +1225,7 @@ TEST(AssemblerIa32POPCNT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1371,7 +1371,7 @@ TEST(AssemblerIa32BMI2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1415,7 +1415,7 @@ TEST(AssemblerIa32JumpTables1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1463,7 +1463,7 @@ TEST(AssemblerIa32JumpTables2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1506,7 +1506,7 @@ TEST(Regress621926) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
@@ -1517,6 +1517,29 @@ TEST(Regress621926) {
CHECK_EQ(1, f());
}
+TEST(DeoptExitSizeIsFixed) {
+ CHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+ v8::internal::byte buffer[256];
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ ExternalAssemblerBuffer(buffer, sizeof(buffer)));
+
+ STATIC_ASSERT(static_cast<int>(kFirstDeoptimizeKind) == 0);
+ for (int i = 0; i < kDeoptimizeKindCount; i++) {
+ DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
+ Builtins::Name target = Deoptimizer::GetDeoptimizationEntry(isolate, kind);
+ Label before_exit;
+ masm.bind(&before_exit);
+ masm.CallForDeoptimization(target, 42, &before_exit, kind, nullptr);
+ CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
+ kind == DeoptimizeKind::kLazy
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
+ }
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 49c4d4b7f8..e4ba08454f 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -65,7 +65,7 @@ TEST(MIPS0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
int res = reinterpret_cast<int>(f.Call(0xAB0, 0xC, 0, 0, 0));
CHECK_EQ(static_cast<int32_t>(0xABC), res);
@@ -100,7 +100,7 @@ TEST(MIPS1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F1>::FromCode(*code);
int res = reinterpret_cast<int>(f.Call(50, 0, 0, 0, 0));
CHECK_EQ(1275, res);
@@ -237,7 +237,7 @@ TEST(MIPS2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
int res = reinterpret_cast<int>(f.Call(0xAB0, 0xC, 0, 0, 0));
CHECK_EQ(static_cast<int32_t>(0x31415926), res);
@@ -337,7 +337,7 @@ TEST(MIPS3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
// Double test values.
t.a = 1.5e14;
@@ -439,7 +439,7 @@ TEST(MIPS4) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e22;
t.b = 2.75e11;
@@ -500,7 +500,7 @@ TEST(MIPS5) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e4;
t.b = 2.75e8;
@@ -568,7 +568,7 @@ TEST(MIPS6) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.ui = 0x11223344;
t.si = 0x99AABBCC;
@@ -660,7 +660,7 @@ TEST(MIPS7) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e14;
t.b = 2.75e11;
@@ -756,7 +756,7 @@ TEST(MIPS8) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.input = 0x12345678;
f.Call(&t, 0x0, 0, 0, 0);
@@ -801,7 +801,7 @@ TEST(MIPS9) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
USE(code);
}
@@ -852,7 +852,7 @@ TEST(MIPS10) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 2.147483646e+09; // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double.
t.b_word = 0x0FF00FF0; // 0x0FF00FF0 -> 0x as double.
@@ -979,7 +979,7 @@ TEST(MIPS11) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.reg_init = 0xAABBCCDD;
t.mem_init = 0x11223344;
@@ -1104,7 +1104,7 @@ TEST(MIPS12) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.x = 1;
t.y = 2;
@@ -1157,7 +1157,7 @@ TEST(MIPS13) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.cvt_big_in = 0xFFFFFFFF;
@@ -1277,7 +1277,7 @@ TEST(MIPS14) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.round_up_in = 123.51;
@@ -1381,7 +1381,7 @@ TEST(seleqz_selnez) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
(f.Call(&test, 0, 0, 0, 0));
@@ -1495,7 +1495,7 @@ TEST(min_max) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -1605,7 +1605,7 @@ TEST(rint_d) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
@@ -1652,7 +1652,7 @@ TEST(sel) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
const int test_size = 3;
@@ -1784,7 +1784,7 @@ TEST(rint_s) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
@@ -1828,7 +1828,7 @@ TEST(Cvt_d_uw) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.input = inputs[i];
@@ -1909,7 +1909,7 @@ TEST(mina_maxa) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -1989,7 +1989,7 @@ TEST(trunc_l) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2069,7 +2069,7 @@ TEST(movz_movn) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2170,7 +2170,7 @@ TEST(movt_movd) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
(f.Call(&test, 0, 0, 0, 0));
@@ -2255,7 +2255,7 @@ TEST(cvt_w_d) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
@@ -2322,7 +2322,7 @@ TEST(trunc_w) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2391,7 +2391,7 @@ TEST(round_w) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2462,7 +2462,7 @@ TEST(round_l) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2535,7 +2535,7 @@ TEST(sub) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
@@ -2614,7 +2614,7 @@ TEST(sqrt_rsqrt_recip) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
@@ -2694,7 +2694,7 @@ TEST(neg) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_S[i];
@@ -2751,7 +2751,7 @@ TEST(mul) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
@@ -2807,7 +2807,7 @@ TEST(mov) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2874,7 +2874,7 @@ TEST(floor_w) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2945,7 +2945,7 @@ TEST(floor_l) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3017,7 +3017,7 @@ TEST(ceil_w) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3088,7 +3088,7 @@ TEST(ceil_l) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3156,7 +3156,7 @@ TEST(jump_tables1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3221,7 +3221,7 @@ TEST(jump_tables2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3293,7 +3293,7 @@ TEST(jump_tables3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3344,7 +3344,7 @@ TEST(BITSWAP) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.r1 = 0x781A15C3;
t.r2 = 0x8B71FCDE;
@@ -3478,7 +3478,7 @@ TEST(class_fmt) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.dSignalingNan = std::numeric_limits<double>::signaling_NaN();
@@ -3569,7 +3569,7 @@ TEST(ABS) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.a = -2.0;
test.b = -2.0;
@@ -3662,7 +3662,7 @@ TEST(ADD_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.a = 2.0;
test.b = 3.0;
@@ -3816,7 +3816,7 @@ TEST(C_COND_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.dOp1 = 2.0;
test.dOp2 = 3.0;
@@ -4016,7 +4016,7 @@ TEST(CMP_COND_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
uint64_t dTrue = 0xFFFFFFFFFFFFFFFF;
uint64_t dFalse = 0x0000000000000000;
@@ -4202,7 +4202,7 @@ TEST(CVT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.cvt_d_s_in = -0.51;
@@ -4414,7 +4414,7 @@ TEST(DIV_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
@@ -4506,7 +4506,7 @@ uint32_t run_align(uint32_t rs_value, uint32_t rt_value, uint8_t bp) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -4561,7 +4561,7 @@ uint32_t run_aluipc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
PC = (uint32_t)code->entry(); // Set the program counter.
@@ -4614,7 +4614,7 @@ uint32_t run_auipc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
PC = (uint32_t)code->entry(); // Set the program counter.
@@ -4689,7 +4689,7 @@ uint32_t run_lwpc(int offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -4769,7 +4769,7 @@ uint32_t run_jic(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -4840,7 +4840,7 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -4946,7 +4946,7 @@ void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5116,7 +5116,7 @@ uint32_t run_jialc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5164,7 +5164,7 @@ static uint32_t run_addiupc(int32_t imm19) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
PC = (uint32_t)code->entry(); // Set the program counter.
@@ -5246,7 +5246,7 @@ int32_t run_bc(int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5327,7 +5327,7 @@ int32_t run_balc(int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5351,7 +5351,7 @@ uint32_t run_aui(uint32_t rs, uint16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5440,7 +5440,7 @@ uint32_t run_bal(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5492,7 +5492,7 @@ TEST(Trampoline) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
int32_t res = reinterpret_cast<int32_t>(f.Call(42, 42, 0, 0, 0));
@@ -5619,7 +5619,7 @@ void helper_madd_msub_maddf_msubf(F func) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub<T>);
@@ -5704,7 +5704,7 @@ uint32_t run_Subu(uint32_t imm, int32_t num_instr) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
@@ -5808,7 +5808,7 @@ TEST(MSA_fill_copy) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5876,7 +5876,7 @@ TEST(MSA_fill_copy_2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5933,7 +5933,7 @@ TEST(MSA_fill_copy_3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5978,7 +5978,7 @@ void run_msa_insert(int32_t rs_value, int n, msa_reg_t* w) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6078,7 +6078,7 @@ TEST(MSA_move_v) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6123,7 +6123,7 @@ void run_msa_sldi(OperFunc GenerateOperation,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6208,7 +6208,7 @@ void run_msa_ctc_cfc(uint32_t value) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6318,7 +6318,7 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6496,7 +6496,7 @@ uint32_t run_Ins(uint32_t imm, uint32_t source, uint16_t pos, uint16_t size) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
@@ -6546,7 +6546,7 @@ uint32_t run_Ext(uint32_t source, uint16_t pos, uint16_t size) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
@@ -6608,7 +6608,7 @@ void run_msa_i5(struct TestCaseMsaI5* input, bool i5_sign_ext,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7028,7 +7028,7 @@ void run_msa_2r(const struct TestCaseMsa2R* input,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -8078,7 +8078,7 @@ void run_msa_vector(struct TestCaseMsaVector* input,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -8166,7 +8166,7 @@ void run_msa_bit(struct TestCaseMsaBit* input, InstFunc GenerateInstructionFunc,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -8639,7 +8639,7 @@ void run_msa_i10(int32_t input, InstFunc GenerateVectorInstructionFunc,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -8717,7 +8717,7 @@ void run_msa_mi10(InstFunc GenerateVectorInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -8796,7 +8796,7 @@ void run_msa_3r(struct TestCaseMsa3R* input, InstFunc GenerateI5InstructionFunc,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -9802,7 +9802,7 @@ void run_msa_3rf(const struct TestCaseMsa3RF* input,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
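Every hunk in this file is the same one-token substitution: the code kind used to wrap hand-assembled test code, formerly CodeKind::STUB, is renamed to CodeKind::FOR_TESTING. For reference, a minimal sketch of the recurring cctest boilerplate these hunks touch, assembled from the surrounding diff context (isolate, assm, the F3 call signature, and the test struct t are assumed to be set up earlier in each test):

    // Finalize the hand-assembled instruction stream into a CodeDesc.
    CodeDesc desc;
    assm.GetCode(isolate, &desc);
    // Wrap the raw machine code in a Code object; FOR_TESTING is the
    // test-only kind that these hunks swap in for the retired STUB name.
    Handle<Code> code =
        Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
    // Bind a callable C signature to the generated entry point and run it.
    auto f = GeneratedCode<F3>::FromCode(*code);
    f.Call(&t, 0x0, 0, 0, 0);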
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index 2b5749c8fe..3835fa9028 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -66,7 +66,7 @@ TEST(MIPS0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0xAB0, 0xC, 0, 0, 0));
CHECK_EQ(0xABCL, res);
@@ -101,7 +101,7 @@ TEST(MIPS1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F1>::FromCode(*code);
int64_t res = reinterpret_cast<int64_t>(f.Call(50, 0, 0, 0, 0));
CHECK_EQ(1275L, res);
@@ -246,7 +246,7 @@ TEST(MIPS2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0xAB0, 0xC, 0, 0, 0));
@@ -347,7 +347,7 @@ TEST(MIPS3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
// Double test values.
t.a = 1.5e14;
@@ -439,7 +439,7 @@ TEST(MIPS4) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e22;
t.b = 2.75e11;
@@ -503,7 +503,7 @@ TEST(MIPS5) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e4;
t.b = 2.75e8;
@@ -571,7 +571,7 @@ TEST(MIPS6) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.ui = 0x11223344;
t.si = 0x99AABBCC;
@@ -657,7 +657,7 @@ TEST(MIPS7) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e14;
t.b = 2.75e11;
@@ -753,7 +753,7 @@ TEST(MIPS8) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.input = 0x12345678;
f.Call(&t, 0x0, 0, 0, 0);
@@ -798,7 +798,7 @@ TEST(MIPS9) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
USE(code);
}
@@ -874,7 +874,7 @@ TEST(MIPS10) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 2.147483647e9; // 0x7FFFFFFF -> 0x41DFFFFFFFC00000 as double.
t.b_long_hi = 0x000000FF; // 0xFF00FF00FF -> 0x426FE01FE01FE000 as double.
@@ -1008,7 +1008,7 @@ TEST(MIPS11) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.reg_init = 0xAABBCCDD;
t.mem_init = 0x11223344;
@@ -1132,7 +1132,7 @@ TEST(MIPS12) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.x = 1;
t.y = 2;
@@ -1185,7 +1185,7 @@ TEST(MIPS13) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.cvt_big_in = 0xFFFFFFFF;
@@ -1305,7 +1305,7 @@ TEST(MIPS14) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.round_up_in = 123.51;
@@ -1433,7 +1433,7 @@ TEST(MIPS16) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.ui = 0x44332211;
t.si = 0x99AABBCC;
@@ -1560,7 +1560,7 @@ TEST(seleqz_selnez) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
f.Call(&test, 0, 0, 0, 0);
@@ -1675,7 +1675,7 @@ TEST(min_max) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 4; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -1783,7 +1783,7 @@ TEST(rint_d) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
@@ -1830,7 +1830,7 @@ TEST(sel) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
const int test_size = 3;
@@ -1962,7 +1962,7 @@ TEST(rint_s) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
@@ -2047,7 +2047,7 @@ TEST(mina_maxa) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -2128,7 +2128,7 @@ TEST(trunc_l) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2208,7 +2208,7 @@ TEST(movz_movn) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2308,7 +2308,7 @@ TEST(movt_movd) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
f.Call(&test, 0, 0, 0, 0);
@@ -2394,7 +2394,7 @@ TEST(cvt_w_d) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
@@ -2461,7 +2461,7 @@ TEST(trunc_w) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2530,7 +2530,7 @@ TEST(round_w) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2600,7 +2600,7 @@ TEST(round_l) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2672,7 +2672,7 @@ TEST(sub) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
@@ -2744,7 +2744,7 @@ TEST(sqrt_rsqrt_recip) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
@@ -2822,7 +2822,7 @@ TEST(neg) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_S[i];
@@ -2880,7 +2880,7 @@ TEST(mul) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
@@ -2935,7 +2935,7 @@ TEST(mov) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3002,7 +3002,7 @@ TEST(floor_w) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3072,7 +3072,7 @@ TEST(floor_l) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3143,7 +3143,7 @@ TEST(ceil_w) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3213,7 +3213,7 @@ TEST(ceil_l) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3280,7 +3280,7 @@ TEST(jump_tables1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3345,7 +3345,7 @@ TEST(jump_tables2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3420,7 +3420,7 @@ TEST(jump_tables3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3493,7 +3493,7 @@ TEST(BITSWAP) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.r1 = 0x00102100781A15C3;
t.r2 = 0x001021008B71FCDE;
@@ -3635,7 +3635,7 @@ TEST(class_fmt) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
// Double test values.
@@ -3728,7 +3728,7 @@ TEST(ABS) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.a = -2.0;
test.b = -2.0;
@@ -3821,7 +3821,7 @@ TEST(ADD_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.a = 2.0;
test.b = 3.0;
@@ -3975,7 +3975,7 @@ TEST(C_COND_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.dOp1 = 2.0;
test.dOp2 = 3.0;
@@ -4175,7 +4175,7 @@ TEST(CMP_COND_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
uint64_t dTrue = 0xFFFFFFFFFFFFFFFF;
uint64_t dFalse = 0x0000000000000000;
@@ -4353,7 +4353,7 @@ TEST(CVT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.cvt_d_s_in = -0.51;
@@ -4524,7 +4524,7 @@ TEST(DIV_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
f.Call(&test, 0, 0, 0, 0);
@@ -4615,7 +4615,7 @@ uint64_t run_align(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F4>::FromCode(*code);
@@ -4670,7 +4670,7 @@ uint64_t run_dalign(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F4>::FromCode(*code);
uint64_t res =
@@ -4730,7 +4730,7 @@ uint64_t run_aluipc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
PC = (uint64_t)code->entry(); // Set the program counter.
@@ -4783,7 +4783,7 @@ uint64_t run_auipc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
PC = (uint64_t)code->entry(); // Set the program counter.
@@ -4837,7 +4837,7 @@ uint64_t run_aui(uint64_t rs, uint16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -4861,7 +4861,7 @@ uint64_t run_daui(uint64_t rs, uint16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -4885,7 +4885,7 @@ uint64_t run_dahi(uint64_t rs, uint16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -4909,7 +4909,7 @@ uint64_t run_dati(uint64_t rs, uint16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5012,7 +5012,7 @@ uint64_t run_li_macro(uint64_t imm, LiFlags mode, int32_t num_instr = 0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5224,7 +5224,7 @@ uint64_t run_lwpc(int offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5301,7 +5301,7 @@ uint64_t run_lwupc(int offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5381,7 +5381,7 @@ uint64_t run_jic(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5452,7 +5452,7 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5558,7 +5558,7 @@ void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5728,7 +5728,7 @@ uint64_t run_jialc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5779,7 +5779,7 @@ uint64_t run_addiupc(int32_t imm19) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
PC = (uint64_t)code->entry(); // Set the program counter.
@@ -5854,7 +5854,7 @@ uint64_t run_ldpc(int offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5942,7 +5942,7 @@ int64_t run_bc(int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -6023,7 +6023,7 @@ int64_t run_balc(int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -6072,7 +6072,7 @@ uint64_t run_dsll(uint64_t rt_value, uint16_t sa_value) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F4>::FromCode(*code);
@@ -6129,7 +6129,7 @@ uint64_t run_bal(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -6183,7 +6183,7 @@ TEST(Trampoline) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
int64_t res = reinterpret_cast<int64_t>(f.Call(42, 42, 0, 0, 0));
@@ -6310,7 +6310,7 @@ void helper_madd_msub_maddf_msubf(F func) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub<T>);
@@ -6393,7 +6393,7 @@ uint64_t run_Subu(uint64_t imm, int32_t num_instr) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6476,7 +6476,7 @@ uint64_t run_Dsubu(uint64_t imm, int32_t num_instr) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6572,7 +6572,7 @@ uint64_t run_Dins(uint64_t imm, uint64_t source, uint16_t pos, uint16_t size) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -6632,7 +6632,7 @@ uint64_t run_Ins(uint64_t imm, uint64_t source, uint16_t pos, uint16_t size) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -6700,7 +6700,7 @@ uint64_t run_Ext(uint64_t source, uint16_t pos, uint16_t size) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -6783,7 +6783,7 @@ TEST(MSA_fill_copy) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6846,7 +6846,7 @@ TEST(MSA_fill_copy_2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6899,7 +6899,7 @@ TEST(MSA_fill_copy_3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6948,7 +6948,7 @@ void run_msa_insert(int64_t rs_value, int n, msa_reg_t* w) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7058,7 +7058,7 @@ void run_msa_ctc_cfc(uint64_t value) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7107,7 +7107,7 @@ TEST(MSA_move_v) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7152,7 +7152,7 @@ void run_msa_sldi(OperFunc GenerateOperation,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7312,7 +7312,7 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7516,7 +7516,7 @@ void run_msa_i5(struct TestCaseMsaI5* input, bool i5_sign_ext,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7942,7 +7942,7 @@ void run_msa_2r(const struct TestCaseMsa2R* input,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -8992,7 +8992,7 @@ void run_msa_vector(struct TestCaseMsaVector* input,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -9080,7 +9080,7 @@ void run_msa_bit(struct TestCaseMsaBit* input, InstFunc GenerateInstructionFunc,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -9553,7 +9553,7 @@ void run_msa_i10(int32_t input, InstFunc GenerateVectorInstructionFunc,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -9631,7 +9631,7 @@ void run_msa_mi10(InstFunc GenerateVectorInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -9710,7 +9710,7 @@ void run_msa_3r(struct TestCaseMsa3R* input, InstFunc GenerateI5InstructionFunc,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -10715,7 +10715,7 @@ void run_msa_3rf(const struct TestCaseMsa3RF* input,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
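The mips64 file above is the 64-bit mirror of the same sweep: identical CodeBuilder boilerplate, with the generated code bound through F2, F3, or F4 signatures and results read back as 64-bit values, as in this shape taken from the run_Ext context:

    // 64-bit variant of the same pattern; only the CodeKind argument changed.
    auto f = GeneratedCode<F2>::FromCode(*code);
    uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));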
diff --git a/deps/v8/test/cctest/test-assembler-ppc.cc b/deps/v8/test/cctest/test-assembler-ppc.cc
index a6fea45d91..871df63cce 100644
--- a/deps/v8/test/cctest/test-assembler-ppc.cc
+++ b/deps/v8/test/cctest/test-assembler-ppc.cc
@@ -61,7 +61,7 @@ TEST(0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -97,7 +97,7 @@ TEST(1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -145,7 +145,7 @@ TEST(2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -214,7 +214,7 @@ TEST(3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -327,7 +327,7 @@ TEST(4) {
assm.GetCode(isolate, &desc);
Object code = isolate->heap()->CreateCode(
desc,
- CodeKind::STUB,
+ CodeKind::FOR_TESTING,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@@ -387,7 +387,7 @@ TEST(5) {
assm.GetCode(isolate, &desc);
Object code = isolate->heap()->CreateCode(
desc,
- CodeKind::STUB,
+ CodeKind::FOR_TESTING,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@@ -422,7 +422,7 @@ TEST(6) {
assm.GetCode(isolate, &desc);
Object code = isolate->heap()->CreateCode(
desc,
- CodeKind::STUB,
+ CodeKind::FOR_TESTING,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@@ -497,7 +497,7 @@ static void TestRoundingMode(VCVTTypes types,
assm.GetCode(isolate, &desc);
Object code = isolate->heap()->CreateCode(
desc,
- CodeKind::STUB,
+ CodeKind::FOR_TESTING,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@@ -684,7 +684,7 @@ TEST(8) {
assm.GetCode(isolate, &desc);
Object code = isolate->heap()->CreateCode(
desc,
- CodeKind::STUB,
+ CodeKind::FOR_TESTING,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@@ -799,7 +799,7 @@ TEST(9) {
assm.GetCode(isolate, &desc);
Object code = isolate->heap()->CreateCode(
desc,
- CodeKind::STUB,
+ CodeKind::FOR_TESTING,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@@ -910,7 +910,7 @@ TEST(10) {
assm.GetCode(isolate, &desc);
Object code = isolate->heap()->CreateCode(
desc,
- CodeKind::STUB,
+ CodeKind::FOR_TESTING,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
@@ -1007,7 +1007,7 @@ TEST(11) {
assm.GetCode(isolate, &desc);
Object code = isolate->heap()->CreateCode(
desc,
- CodeKind::STUB,
+ CodeKind::FOR_TESTING,
Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
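The PPC tests from TEST(4) onward, above, create code through an older path that predates Factory::CodeBuilder; there too only the CodeKind argument changes. The shape, taken from the TEST(4) context (desc again comes from assm.GetCode):

    // Older creation path kept by these PPC tests; the kind rename is the
    // only change, and the surrounding call shape stays as it was.
    Object code = isolate->heap()->CreateCode(
        desc,
        CodeKind::FOR_TESTING,
        Handle<Code>())->ToObjectChecked();
    CHECK(code->IsCode());

The s390 file below mixes the CodeBuilder form with a similar legacy variant, isolate->factory()->NewCode(desc, CodeKind::FOR_TESTING, Handle<Code>()), all updated identically.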
diff --git a/deps/v8/test/cctest/test-assembler-s390.cc b/deps/v8/test/cctest/test-assembler-s390.cc
index efaf5311dc..8e309c430b 100644
--- a/deps/v8/test/cctest/test-assembler-s390.cc
+++ b/deps/v8/test/cctest/test-assembler-s390.cc
@@ -64,7 +64,7 @@ TEST(0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -103,7 +103,7 @@ TEST(1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -154,7 +154,7 @@ TEST(2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -209,7 +209,7 @@ TEST(3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -250,7 +250,7 @@ TEST(4) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
- desc, CodeKind::STUB, Handle<Code>());
+ desc, CodeKind::FOR_TESTING, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -278,7 +278,7 @@ TEST(5) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
- desc, CodeKind::STUB, Handle<Code>());
+ desc, CodeKind::FOR_TESTING, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -312,7 +312,7 @@ TEST(6) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
- desc, CodeKind::STUB, Handle<Code>());
+ desc, CodeKind::FOR_TESTING, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -344,7 +344,7 @@ TEST(7) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
- desc, CodeKind::STUB, Handle<Code>());
+ desc, CodeKind::FOR_TESTING, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -375,7 +375,7 @@ TEST(8) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
- desc, CodeKind::STUB, Handle<Code>());
+ desc, CodeKind::FOR_TESTING, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -402,7 +402,7 @@ TEST(9) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code = isolate->factory()->NewCode(
- desc, CodeKind::STUB, Handle<Code>());
+ desc, CodeKind::FOR_TESTING, Handle<Code>());
#ifdef DEBUG
code->Print();
#endif
@@ -487,7 +487,7 @@ TEST(10) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -541,7 +541,7 @@ TEST(11) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -595,7 +595,7 @@ TEST(12) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -659,7 +659,7 @@ TEST(13) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -750,7 +750,7 @@ TEST(14) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -840,7 +840,7 @@ TEST(15) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -887,7 +887,7 @@ TEST(16) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -961,7 +961,7 @@ TEST(17) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -1053,7 +1053,7 @@ TEST(18) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
code->Print();
#endif
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index bfc40a1a1c..572ef353c8 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -744,7 +744,7 @@ TEST(AssemblerMultiByteNop) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F0>::FromCode(*code);
int res = f.Call();
@@ -801,7 +801,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F0>::FromCode(*code);
int res = f.Call();
@@ -866,7 +866,7 @@ TEST(AssemblerX64Extractps) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -903,7 +903,7 @@ TEST(AssemblerX64SSE) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -934,7 +934,7 @@ TEST(AssemblerX64SSE3) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1159,7 +1159,7 @@ TEST(AssemblerX64FMA_sd) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1385,7 +1385,7 @@ TEST(AssemblerX64FMA_ss) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1461,7 +1461,7 @@ TEST(AssemblerX64SSE_ss) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1547,7 +1547,7 @@ TEST(AssemblerX64AVX_ss) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1787,7 +1787,7 @@ TEST(AssemblerX64AVX_sd) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1979,7 +1979,7 @@ TEST(AssemblerX64BMI1) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -2039,7 +2039,7 @@ TEST(AssemblerX64LZCNT) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -2099,7 +2099,7 @@ TEST(AssemblerX64POPCNT) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -2362,7 +2362,7 @@ TEST(AssemblerX64BMI2) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -2406,7 +2406,7 @@ TEST(AssemblerX64JumpTables1) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2454,7 +2454,7 @@ TEST(AssemblerX64JumpTables2) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2511,7 +2511,7 @@ TEST(AssemblerX64vmovups) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
diff --git a/deps/v8/test/cctest/test-code-layout.cc b/deps/v8/test/cctest/test-code-layout.cc
index 056255f0e5..67d1e8b5ed 100644
--- a/deps/v8/test/cctest/test-code-layout.cc
+++ b/deps/v8/test/cctest/test-code-layout.cc
@@ -14,9 +14,9 @@ TEST(CodeLayoutWithoutUnwindingInfo) {
CcTest::InitializeVM();
HandleScope handle_scope(CcTest::i_isolate());
- // "Hello, World!" in ASCII.
- byte buffer_array[13] = {0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x2C, 0x20,
- 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x21};
+ // "Hello, World!" in ASCII, padded to kCodeAlignment.
+ byte buffer_array[16] = {0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x2C, 0x20, 0x57,
+ 0x6F, 0x72, 0x6C, 0x64, 0x21, 0xcc, 0xcc, 0xcc};
byte* buffer = &buffer_array[0];
int buffer_size = sizeof(buffer_array);
@@ -39,25 +39,25 @@ TEST(CodeLayoutWithoutUnwindingInfo) {
code_desc.unwinding_info_size = 0;
code_desc.origin = nullptr;
- Handle<Code> code =
- Factory::CodeBuilder(CcTest::i_isolate(), code_desc, CodeKind::STUB)
- .Build();
+ Handle<Code> code = Factory::CodeBuilder(CcTest::i_isolate(), code_desc,
+ CodeKind::FOR_TESTING)
+ .Build();
CHECK(!code->has_unwinding_info());
CHECK_EQ(code->raw_instruction_size(), buffer_size);
CHECK_EQ(0, memcmp(reinterpret_cast<void*>(code->raw_instruction_start()),
buffer, buffer_size));
- CHECK_EQ(code->raw_instruction_end() - code->address(),
- Code::kHeaderSize + buffer_size);
+ CHECK_EQ(code->raw_instruction_end() - code->raw_instruction_start(),
+ buffer_size);
}
TEST(CodeLayoutWithUnwindingInfo) {
CcTest::InitializeVM();
HandleScope handle_scope(CcTest::i_isolate());
- // "Hello, World!" in ASCII.
- byte buffer_array[13] = {0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x2C, 0x20,
- 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x21};
+ // "Hello, World!" in ASCII, padded to kCodeAlignment.
+ byte buffer_array[16] = {0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x2C, 0x20, 0x57,
+ 0x6F, 0x72, 0x6C, 0x64, 0x21, 0xcc, 0xcc, 0xcc};
// "JavaScript" in ASCII.
byte unwinding_info_array[10] = {0x4A, 0x61, 0x76, 0x61, 0x53,
@@ -86,23 +86,20 @@ TEST(CodeLayoutWithUnwindingInfo) {
code_desc.unwinding_info_size = unwinding_info_size;
code_desc.origin = nullptr;
- Handle<Code> code =
- Factory::CodeBuilder(CcTest::i_isolate(), code_desc, CodeKind::STUB)
- .Build();
+ Handle<Code> code = Factory::CodeBuilder(CcTest::i_isolate(), code_desc,
+ CodeKind::FOR_TESTING)
+ .Build();
CHECK(code->has_unwinding_info());
- CHECK_EQ(code->raw_instruction_size(), buffer_size);
+ CHECK_EQ(code->raw_body_size(), buffer_size + unwinding_info_size);
CHECK_EQ(0, memcmp(reinterpret_cast<void*>(code->raw_instruction_start()),
buffer, buffer_size));
- CHECK(IsAligned(code->GetUnwindingInfoSizeOffset(), 8));
CHECK_EQ(code->unwinding_info_size(), unwinding_info_size);
- CHECK(IsAligned(code->unwinding_info_start(), 8));
CHECK_EQ(memcmp(reinterpret_cast<void*>(code->unwinding_info_start()),
unwinding_info, unwinding_info_size),
0);
- CHECK_EQ(code->unwinding_info_end() - code->address(),
- Code::kHeaderSize + RoundUp(buffer_size, kInt64Size) + kInt64Size +
- unwinding_info_size);
+ CHECK_EQ(code->unwinding_info_end() - code->raw_instruction_start(),
+ buffer_size + unwinding_info_size);
}
} // namespace internal
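The reworked assertions in test-code-layout.cc follow the new Code layout: the instruction buffer is padded to kCodeAlignment by the test itself (13 bytes rounded up to 16 above), and sizes are measured from raw_instruction_start() instead of from the object address plus Code::kHeaderSize. An illustrative restatement of the invariant the updated checks encode, not part of the patch:

    // Body = instruction area + unwinding info, both measured from the start
    // of the instruction area; header-relative arithmetic is gone.
    CHECK_EQ(code->raw_body_size(), buffer_size + unwinding_info_size);
    CHECK_EQ(code->unwinding_info_end() - code->raw_instruction_start(),
             buffer_size + unwinding_info_size);
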
diff --git a/deps/v8/test/cctest/test-code-pages.cc b/deps/v8/test/cctest/test-code-pages.cc
index 1ed475f7a8..47609c5e1e 100644
--- a/deps/v8/test/cctest/test-code-pages.cc
+++ b/deps/v8/test/cctest/test-code-pages.cc
@@ -264,7 +264,7 @@ TEST(LargeCodeObject) {
// Create a big function that ends up in CODE_LO_SPACE.
const int instruction_size = Page::kPageSize + 1;
- STATIC_ASSERT(instruction_size > kMaxRegularHeapObjectSize);
+ CHECK_GT(instruction_size, MemoryChunkLayout::MaxRegularCodeObjectSize());
std::unique_ptr<byte[]> instructions(new byte[instruction_size]);
CodeDesc desc;
@@ -380,7 +380,7 @@ TEST(LargeCodeObjectWithSignalHandler) {
// Create a big function that ends up in CODE_LO_SPACE.
const int instruction_size = Page::kPageSize + 1;
- STATIC_ASSERT(instruction_size > kMaxRegularHeapObjectSize);
+ CHECK_GT(instruction_size, MemoryChunkLayout::MaxRegularCodeObjectSize());
std::unique_ptr<byte[]> instructions(new byte[instruction_size]);
CodeDesc desc;
@@ -456,7 +456,7 @@ TEST(Sorted) {
// Create a big function that ends up in CODE_LO_SPACE.
const int instruction_size = Page::kPageSize + 1;
- STATIC_ASSERT(instruction_size > kMaxRegularHeapObjectSize);
+ CHECK_GT(instruction_size, MemoryChunkLayout::MaxRegularCodeObjectSize());
std::unique_ptr<byte[]> instructions(new byte[instruction_size]);
CodeDesc desc;
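In test-code-pages.cc the compile-time STATIC_ASSERT becomes a runtime CHECK_GT: the relevant threshold is now MemoryChunkLayout::MaxRegularCodeObjectSize(), which is computed at runtime, unlike the constant kMaxRegularHeapObjectSize it replaces. A sketch of the pattern, assuming only the names used in the hunks above:

    // The function must be large enough to land in CODE_LO_SPACE; since the
    // threshold is a runtime value, the precondition is checked at runtime.
    const int instruction_size = Page::kPageSize + 1;
    CHECK_GT(instruction_size, MemoryChunkLayout::MaxRegularCodeObjectSize());
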
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 3b8ced8a4a..fe7aebe682 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -27,6 +27,7 @@
#include "src/objects/struct-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/strings/char-predicates.h"
+#include "test/cctest/cctest-utils.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
@@ -121,7 +122,7 @@ TEST(NumberToString) {
CodeStubAssembler m(asm_tester.state());
{
- TNode<Number> input = m.CAST(m.Parameter(1));
+ auto input = m.Parameter<Number>(1);
Label bailout(&m);
m.Return(m.NumberToString(input, &bailout));
@@ -201,8 +202,8 @@ TEST(ToUint32) {
CodeStubAssembler m(asm_tester.state());
const int kContextOffset = 3;
- Node* const context = m.Parameter(kNumParams + kContextOffset);
- Node* const input = m.Parameter(1);
+ auto context = m.Parameter<Context>(kNumParams + kContextOffset);
+ auto input = m.Parameter<Object>(1);
m.Return(m.ToUint32(context, input));
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -323,9 +324,9 @@ TEST(ConvertToRelativeIndex) {
enum Result { kFound, kNotFound };
{
- TNode<Number> index = m.CAST(m.Parameter(1));
- TNode<Number> length_number = m.CAST(m.Parameter(2));
- TNode<Number> expected_relative_index = m.CAST(m.Parameter(3));
+ auto index = m.Parameter<Number>(1);
+ auto length_number = m.Parameter<Number>(2);
+ auto expected_relative_index = m.Parameter<Number>(3);
TNode<UintPtrT> length = m.ChangeUintPtrNumberToUintPtr(length_number);
TNode<UintPtrT> expected =
@@ -470,8 +471,8 @@ TEST(JSFunction) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
CodeStubAssembler m(asm_tester.state());
- m.Return(m.SmiFromInt32(
- m.Int32Add(m.SmiToInt32(m.Parameter(1)), m.SmiToInt32(m.Parameter(2)))));
+ m.Return(m.SmiFromInt32(m.Int32Add(m.SmiToInt32(m.Parameter<Smi>(1)),
+ m.SmiToInt32(m.Parameter<Smi>(2)))));
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -487,7 +488,7 @@ TEST(ComputeIntegerHash) {
CodeStubAssembler m(asm_tester.state());
m.Return(m.SmiFromInt32(m.UncheckedCast<Int32T>(
- m.ComputeSeededHash(m.SmiUntag(m.Parameter(1))))));
+ m.ComputeSeededHash(m.SmiUntag(m.Parameter<Smi>(1))))));
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -510,8 +511,8 @@ TEST(ToString) {
const int kNumParams = 1;
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
CodeStubAssembler m(asm_tester.state());
- m.Return(m.ToStringImpl(m.CAST(m.Parameter(kNumParams + 3)),
- m.CAST(m.Parameter(1))));
+ m.Return(m.ToStringImpl(m.Parameter<Context>(kNumParams + 3),
+ m.Parameter<Object>(1)));
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -572,10 +573,9 @@ TEST(TryToName) {
enum Result { kKeyIsIndex, kKeyIsUnique, kBailout };
{
- Node* key = m.Parameter(1);
- TNode<MaybeObject> expected_result =
- m.UncheckedCast<MaybeObject>(m.Parameter(2));
- TNode<Object> expected_arg = m.CAST(m.Parameter(3));
+ auto key = m.Parameter<Object>(1);
+ auto expected_result = m.UncheckedParameter<MaybeObject>(2);
+ auto expected_arg = m.Parameter<Object>(3);
Label passed(&m), failed(&m);
Label if_keyisindex(&m), if_keyisunique(&m), if_bailout(&m);
@@ -794,7 +794,7 @@ void TestEntryToIndex() {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
CodeStubAssembler m(asm_tester.state());
{
- TNode<IntPtrT> entry = m.SmiUntag(m.Parameter(1));
+ TNode<IntPtrT> entry = m.SmiUntag(m.Parameter<Smi>(1));
TNode<IntPtrT> result = m.EntryToIndex<Dictionary>(entry);
m.Return(m.SmiTag(result));
}
@@ -828,10 +828,10 @@ void TestNameDictionaryLookup() {
enum Result { kFound, kNotFound };
{
- TNode<Dictionary> dictionary = m.CAST(m.Parameter(1));
- TNode<Name> unique_name = m.CAST(m.Parameter(2));
- TNode<Smi> expected_result = m.CAST(m.Parameter(3));
- TNode<Object> expected_arg = m.CAST(m.Parameter(4));
+ auto dictionary = m.Parameter<Dictionary>(1);
+ auto unique_name = m.Parameter<Name>(2);
+ auto expected_result = m.Parameter<Smi>(3);
+ auto expected_arg = m.Parameter<Object>(4);
Label passed(&m), failed(&m);
Label if_found(&m), if_not_found(&m);
@@ -931,10 +931,10 @@ TEST(NumberDictionaryLookup) {
enum Result { kFound, kNotFound };
{
- TNode<NumberDictionary> dictionary = m.CAST(m.Parameter(1));
- TNode<IntPtrT> key = m.SmiUntag(m.Parameter(2));
- TNode<Smi> expected_result = m.CAST(m.Parameter(3));
- TNode<Object> expected_arg = m.CAST(m.Parameter(4));
+ auto dictionary = m.Parameter<NumberDictionary>(1);
+ TNode<IntPtrT> key = m.SmiUntag(m.Parameter<Smi>(2));
+ auto expected_result = m.Parameter<Smi>(3);
+ auto expected_arg = m.Parameter<Object>(4);
Label passed(&m), failed(&m);
Label if_found(&m), if_not_found(&m);
@@ -1021,10 +1021,10 @@ TEST(TransitionLookup) {
: CodeStubAssembler(state) {}
void Generate() {
- TNode<TransitionArray> transitions = CAST(Parameter(1));
- TNode<Name> name = CAST(Parameter(2));
- TNode<Smi> expected_result = CAST(Parameter(3));
- TNode<Object> expected_arg = CAST(Parameter(4));
+ auto transitions = Parameter<TransitionArray>(1);
+ auto name = Parameter<Name>(2);
+ auto expected_result = Parameter<Smi>(3);
+ auto expected_arg = Parameter<Object>(4);
Label passed(this), failed(this);
Label if_found(this), if_not_found(this);
@@ -1195,10 +1195,9 @@ TEST(TryHasOwnProperty) {
enum Result { kFound, kNotFound, kBailout };
{
- TNode<HeapObject> object = m.CAST(m.Parameter(1));
- TNode<Name> unique_name = m.CAST(m.Parameter(2));
- TNode<MaybeObject> expected_result =
- m.UncheckedCast<MaybeObject>(m.Parameter(3));
+ auto object = m.Parameter<HeapObject>(1);
+ auto unique_name = m.Parameter<Name>(2);
+ TNode<MaybeObject> expected_result = m.UncheckedParameter<MaybeObject>(3);
Label passed(&m), failed(&m);
Label if_found(&m), if_not_found(&m), if_bailout(&m);
@@ -1389,9 +1388,9 @@ TEST(TryGetOwnProperty) {
Handle<Symbol> not_found_symbol = factory->NewSymbol();
Handle<Symbol> bailout_symbol = factory->NewSymbol();
{
- TNode<JSReceiver> object = m.CAST(m.Parameter(1));
- TNode<Name> unique_name = m.CAST(m.Parameter(2));
- TNode<Context> context = m.CAST(m.Parameter(kNumParams + 3));
+ auto object = m.Parameter<JSReceiver>(1);
+ auto unique_name = m.Parameter<Name>(2);
+ auto context = m.Parameter<Context>(kNumParams + 3);
TVariable<Object> var_value(&m);
Label if_found(&m), if_not_found(&m), if_bailout(&m);
@@ -1609,10 +1608,9 @@ TEST(TryLookupElement) {
enum Result { kFound, kAbsent, kNotFound, kBailout };
{
- TNode<HeapObject> object = m.CAST(m.Parameter(1));
- TNode<IntPtrT> index = m.SmiUntag(m.Parameter(2));
- TNode<MaybeObject> expected_result =
- m.UncheckedCast<MaybeObject>(m.Parameter(3));
+ auto object = m.Parameter<HeapObject>(1);
+ TNode<IntPtrT> index = m.SmiUntag(m.Parameter<Smi>(2));
+ TNode<MaybeObject> expected_result = m.UncheckedParameter<MaybeObject>(3);
Label passed(&m), failed(&m);
Label if_found(&m), if_not_found(&m), if_bailout(&m), if_absent(&m);
@@ -1839,9 +1837,9 @@ TEST(AllocateJSObjectFromMap) {
CodeStubAssembler m(asm_tester.state());
{
- TNode<Map> map = m.CAST(m.Parameter(1));
- TNode<HeapObject> properties = m.CAST(m.Parameter(2));
- TNode<FixedArray> elements = m.CAST(m.Parameter(3));
+ auto map = m.Parameter<Map>(1);
+ auto properties = m.Parameter<HeapObject>(2);
+ auto elements = m.Parameter<FixedArray>(3);
TNode<JSObject> result =
m.AllocateJSObjectFromMap(map, properties, elements);
@@ -1912,7 +1910,7 @@ TEST(AllocateNameDictionary) {
CodeStubAssembler m(asm_tester.state());
{
- Node* capacity = m.Parameter(1);
+ auto capacity = m.Parameter<Smi>(1);
TNode<NameDictionary> result =
m.AllocateNameDictionary(m.SmiUntag(capacity));
m.Return(result);
@@ -1933,65 +1931,188 @@ TEST(AllocateNameDictionary) {
}
}
-TEST(PopAndReturnConstant) {
+TEST(PopAndReturnFromJSBuiltinWithStackParameters) {
Isolate* isolate(CcTest::InitIsolateOnce());
- const int kNumParams = 4;
- const int kNumProgrammaticParams = 2;
- CodeAssemblerTester asm_tester(
- isolate,
- kNumParams - kNumProgrammaticParams + 1); // Include receiver.
- CodeStubAssembler m(asm_tester.state());
+ const int kNumStackParams = 1;
+ CodeAssemblerTester asm_tester(isolate, kNumStackParams);
+ {
+ CodeStubAssembler m(asm_tester.state());
+ m.PopAndReturn(m.SmiUntag(m.Parameter<Smi>(0)),
+ m.SmiConstant(Smi::FromInt(1234)));
+ }
- // Call a function that return |kNumProgramaticParams| parameters in addition
- // to those specified by the static descriptor. |kNumProgramaticParams| is
- // specified as a constant.
- CSA_CHECK(&m, m.SmiEqual(m.CAST(m.Parameter(2)), m.SmiConstant(5678)));
- m.PopAndReturn(m.Int32Constant(kNumProgrammaticParams),
- m.SmiConstant(Smi::FromInt(1234)));
+ // The attempt to generate code must trigger a CHECK failure in
+ // RawMachineAssembler: PopAndReturn is not allowed in builtins with JS
+ // linkage and declared stack parameters.
+ asm_tester.GenerateCode();
+}
- FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
- Handle<Object> result;
- for (int test_count = 0; test_count < 100; ++test_count) {
- result = ft.Call(isolate->factory()->undefined_value(),
- Handle<Smi>(Smi::FromInt(5678), isolate),
- isolate->factory()->undefined_value(),
- isolate->factory()->undefined_value())
- .ToHandleChecked();
- CHECK_EQ(1234, Handle<Smi>::cast(result)->value());
+TEST(PopAndReturnFromTFCBuiltinWithStackParameters) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ // Set up CSA for creating a TFC-style builtin with stack arguments.
+ // For testing purposes we need any interface descriptor that has at
+ // least one argument passed on the stack.
+ using Descriptor = FlatMapIntoArrayDescriptor;
+ Descriptor descriptor;
+ CHECK_LT(0, descriptor.GetStackParameterCount());
+
+ CodeAssemblerTester asm_tester(isolate, Descriptor());
+ {
+ CodeStubAssembler m(asm_tester.state());
+ m.PopAndReturn(m.SmiUntag(m.Parameter<Smi>(0)),
+ m.SmiConstant(Smi::FromInt(1234)));
}
+
+ // The attempt to generate code must trigger a CHECK failure in
+ // RawMachineAssembler: PopAndReturn is not allowed in builtins with JS
+ // linkage and declared stack parameters.
+ asm_tester.GenerateCode();
}
-TEST(PopAndReturnVariable) {
- Isolate* isolate(CcTest::InitIsolateOnce());
+namespace {
- const int kNumParams = 4;
- const int kNumProgrammaticParams = 2;
- CodeAssemblerTester asm_tester(
- isolate,
- kNumParams - kNumProgrammaticParams + 1); // Include receiver.
- CodeStubAssembler m(asm_tester.state());
+TNode<Object> MakeConstantNode(CodeStubAssembler& m, Handle<Object> value) {
+ if (value->IsSmi()) {
+ return m.SmiConstant(Smi::ToInt(*value));
+ }
+ return m.HeapConstant(Handle<HeapObject>::cast(value));
+}
- // Call a function that return |kNumProgramaticParams| parameters in addition
- // to those specified by the static descriptor. |kNumProgramaticParams| is
- // passed in as a parameter to the function so that it can't be recognized as
- // a constant.
- CSA_CHECK(&m, m.SmiEqual(m.CAST(m.Parameter(2)), m.SmiConstant(5678)));
- m.PopAndReturn(m.SmiUntag(m.CAST(m.Parameter(1))),
- m.SmiConstant(Smi::FromInt(1234)));
+// Builds a CSA function that calls the |target| function with the given
+// arguments |number_of_iterations| times and checks that the stack pointer
+// values before and after the calls are the same.
+// Then this new function is called multiple times.
+template <typename... Args>
+void CallFunctionWithStackPointerChecks(Isolate* isolate,
+ Handle<Object> expected_result,
+ Handle<Object> target,
+ Handle<Object> receiver, Args... args) {
+ // Set up CSA for creating a TFJ-style builtin.
+ using Descriptor = JSTrampolineDescriptor;
+ CodeAssemblerTester asm_tester(isolate, Descriptor());
+
+ {
+ CodeStubAssembler m(asm_tester.state());
+
+ TNode<Context> context = m.Parameter<Context>(Descriptor::kContext);
+
+#ifdef V8_CC_GNU
+ // GetStackPointer is available only when V8_CC_GNU is defined.
+ const TNode<ExternalReference> get_stack_ptr = m.ExternalConstant(
+ ExternalReference::Create(reinterpret_cast<Address>(GetStackPointer)));
+
+ // CSA doesn't have instructions for reading the current stack pointer
+ // value, so we use a C function that returns the address of one of its
+ // local variables. This is a good-enough approximation of the stack pointer.
+ MachineType type_intptr = MachineType::IntPtr();
+ TNode<WordT> stack_pointer0 =
+ m.UncheckedCast<WordT>(m.CallCFunction(get_stack_ptr, type_intptr));
+#endif
+
+ // CSA::CallCFunction() aligns the stack pointer before the call, so
+ // off-by-one errors would not be detected. To handle this we do the calls
+ // in a loop, exaggerating the effect of a potentially broken stack pointer
+ // so that the GetStackPointer function is able to notice it.
+ m.BuildFastLoop<IntPtrT>(
+ m.IntPtrConstant(0), m.IntPtrConstant(153),
+ [&](TNode<IntPtrT> index) {
+ TNode<Object> result = m.Call(context, MakeConstantNode(m, target),
+ MakeConstantNode(m, receiver),
+ MakeConstantNode(m, args)...);
+ CSA_CHECK(
+ &m, m.TaggedEqual(result, MakeConstantNode(m, expected_result)));
+ },
+ 1, CodeStubAssembler::IndexAdvanceMode::kPost);
+
+#ifdef V8_CC_GNU
+ TNode<WordT> stack_pointer1 =
+ m.UncheckedCast<WordT>(m.CallCFunction(get_stack_ptr, type_intptr));
+ CSA_CHECK(&m, m.WordEqual(stack_pointer0, stack_pointer1));
+#endif
+ m.Return(m.SmiConstant(42));
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 1); // Include receiver.
- FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
Handle<Object> result;
for (int test_count = 0; test_count < 100; ++test_count) {
- result = ft.Call(Handle<Smi>(Smi::FromInt(kNumProgrammaticParams), isolate),
- Handle<Smi>(Smi::FromInt(5678), isolate),
- isolate->factory()->undefined_value(),
- isolate->factory()->undefined_value())
- .ToHandleChecked();
- CHECK_EQ(1234, Handle<Smi>::cast(result)->value());
+ result = ft.Call().ToHandleChecked();
+ CHECK_EQ(Smi::FromInt(42), *result);
}
}
+} // namespace
+
+TEST(PopAndReturnConstant) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ // Set up CSA for creating a TFJ-style builtin.
+ using Descriptor = JSTrampolineDescriptor;
+ CodeAssemblerTester asm_tester(isolate, Descriptor());
+
+ const int kNumParams = 4; // Not including receiver.
+ {
+ CodeStubAssembler m(asm_tester.state());
+ TNode<Int32T> argc =
+ m.UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ CSA_CHECK(&m, m.Word32Equal(argc, m.Int32Constant(kNumParams)));
+
+ m.PopAndReturn(m.IntPtrConstant(kNumParams + 1), // Include receiver.
+ m.SmiConstant(1234));
+ }
+
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.function->shared().DontAdaptArguments();
+
+ // Now call this function multiple times, also checking that the stack
+ // pointer didn't change after the calls.
+ Handle<Object> receiver = isolate->factory()->undefined_value();
+ Handle<Smi> expected_result(Smi::FromInt(1234), isolate);
+ CallFunctionWithStackPointerChecks(isolate, expected_result, ft.function,
+ receiver,
+ // Pass kNumParams arguments.
+ Handle<Smi>(Smi::FromInt(1), isolate),
+ Handle<Smi>(Smi::FromInt(2), isolate),
+ Handle<Smi>(Smi::FromInt(3), isolate),
+ Handle<Smi>(Smi::FromInt(4), isolate));
+}
+
+TEST(PopAndReturnVariable) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ // Set up CSA for creating a TFJ-style builtin.
+ using Descriptor = JSTrampolineDescriptor;
+ CodeAssemblerTester asm_tester(isolate, Descriptor());
+
+ const int kNumParams = 4; // Not including receiver.
+ {
+ CodeStubAssembler m(asm_tester.state());
+ TNode<Int32T> argc =
+ m.UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ CSA_CHECK(&m, m.Word32Equal(argc, m.Int32Constant(kNumParams)));
+
+ TNode<Int32T> argc_with_receiver = m.Int32Add(argc, m.Int32Constant(1));
+ m.PopAndReturn(m.ChangeInt32ToIntPtr(argc_with_receiver),
+ m.SmiConstant(1234));
+ }
+
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.function->shared().DontAdaptArguments();
+
+ // Now call this function multiple times, also checking that the stack
+ // pointer didn't change after the calls.
+ Handle<Object> receiver = isolate->factory()->undefined_value();
+ Handle<Smi> expected_result(Smi::FromInt(1234), isolate);
+ CallFunctionWithStackPointerChecks(isolate, expected_result, ft.function,
+ receiver,
+ // Pass kNumParams arguments.
+ Handle<Smi>(Smi::FromInt(1), isolate),
+ Handle<Smi>(Smi::FromInt(2), isolate),
+ Handle<Smi>(Smi::FromInt(3), isolate),
+ Handle<Smi>(Smi::FromInt(4), isolate));
+}
+
TEST(OneToTwoByteStringCopy) {
Isolate* isolate(CcTest::InitIsolateOnce());
@@ -1999,7 +2120,7 @@ TEST(OneToTwoByteStringCopy) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
StringBuiltinsAssembler m(asm_tester.state());
- m.CopyStringCharacters<String>(m.CAST(m.Parameter(1)), m.CAST(m.Parameter(2)),
+ m.CopyStringCharacters<String>(m.Parameter<String>(1), m.Parameter<String>(2),
m.IntPtrConstant(0), m.IntPtrConstant(0),
m.IntPtrConstant(5), String::ONE_BYTE_ENCODING,
String::TWO_BYTE_ENCODING);
@@ -2032,7 +2153,7 @@ TEST(OneToOneByteStringCopy) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
StringBuiltinsAssembler m(asm_tester.state());
- m.CopyStringCharacters<String>(m.CAST(m.Parameter(1)), m.CAST(m.Parameter(2)),
+ m.CopyStringCharacters<String>(m.Parameter<String>(1), m.Parameter<String>(2),
m.IntPtrConstant(0), m.IntPtrConstant(0),
m.IntPtrConstant(5), String::ONE_BYTE_ENCODING,
String::ONE_BYTE_ENCODING);
@@ -2065,7 +2186,7 @@ TEST(OneToOneByteStringCopyNonZeroStart) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
StringBuiltinsAssembler m(asm_tester.state());
- m.CopyStringCharacters<String>(m.CAST(m.Parameter(1)), m.CAST(m.Parameter(2)),
+ m.CopyStringCharacters<String>(m.Parameter<String>(1), m.Parameter<String>(2),
m.IntPtrConstant(0), m.IntPtrConstant(3),
m.IntPtrConstant(2), String::ONE_BYTE_ENCODING,
String::ONE_BYTE_ENCODING);
@@ -2095,7 +2216,7 @@ TEST(TwoToTwoByteStringCopy) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
StringBuiltinsAssembler m(asm_tester.state());
- m.CopyStringCharacters<String>(m.CAST(m.Parameter(1)), m.CAST(m.Parameter(2)),
+ m.CopyStringCharacters<String>(m.Parameter<String>(1), m.Parameter<String>(2),
m.IntPtrConstant(0), m.IntPtrConstant(0),
m.IntPtrConstant(5), String::TWO_BYTE_ENCODING,
String::TWO_BYTE_ENCODING);
@@ -2127,54 +2248,107 @@ TEST(TwoToTwoByteStringCopy) {
TEST(Arguments) {
Isolate* isolate(CcTest::InitIsolateOnce());
- const int kNumParams = 3;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
- CodeStubAssembler m(asm_tester.state());
+ // Set up CSA for creating a TFJ-style builtin.
+ using Descriptor = JSTrampolineDescriptor;
+ CodeAssemblerTester asm_tester(isolate, Descriptor());
- CodeStubArguments arguments(&m, m.IntPtrConstant(3));
+ {
+ CodeStubAssembler m(asm_tester.state());
+ TNode<Int32T> argc =
+ m.UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ CodeStubArguments arguments(&m, argc);
- CSA_ASSERT(&m, m.TaggedEqual(arguments.AtIndex(0), m.SmiConstant(12)));
- CSA_ASSERT(&m, m.TaggedEqual(arguments.AtIndex(1), m.SmiConstant(13)));
- CSA_ASSERT(&m, m.TaggedEqual(arguments.AtIndex(2), m.SmiConstant(14)));
+ CSA_CHECK(&m, m.TaggedEqual(arguments.AtIndex(0), m.SmiConstant(12)));
+ CSA_CHECK(&m, m.TaggedEqual(arguments.AtIndex(1), m.SmiConstant(13)));
+ CSA_CHECK(&m, m.TaggedEqual(arguments.AtIndex(2), m.SmiConstant(14)));
- arguments.PopAndReturn(arguments.GetReceiver());
+ arguments.PopAndReturn(arguments.GetReceiver());
+ }
- FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
- Handle<Object> result = ft.Call(Handle<Smi>(Smi::FromInt(12), isolate),
- Handle<Smi>(Smi::FromInt(13), isolate),
- Handle<Smi>(Smi::FromInt(14), isolate))
- .ToHandleChecked();
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.function->shared().DontAdaptArguments();
+
+ Handle<Object> result;
+ result = ft.Call(Handle<Smi>(Smi::FromInt(12), isolate),
+ Handle<Smi>(Smi::FromInt(13), isolate),
+ Handle<Smi>(Smi::FromInt(14), isolate))
+ .ToHandleChecked();
// When calling with undefined object as the receiver, the CallFunction
// builtin swaps it to the global proxy object.
CHECK_EQ(*isolate->global_proxy(), *result);
+
+ result = ft.Call(Handle<Smi>(Smi::FromInt(12), isolate),
+ Handle<Smi>(Smi::FromInt(13), isolate),
+ Handle<Smi>(Smi::FromInt(14), isolate),
+ Handle<Smi>(Smi::FromInt(15), isolate))
+ .ToHandleChecked();
+ CHECK_EQ(*isolate->global_proxy(), *result);
+
+ result = ft.Call(Handle<Smi>(Smi::FromInt(12), isolate),
+ Handle<Smi>(Smi::FromInt(13), isolate),
+ Handle<Smi>(Smi::FromInt(14), isolate),
+ Handle<Smi>(Smi::FromInt(15), isolate),
+ Handle<Smi>(Smi::FromInt(16), isolate),
+ Handle<Smi>(Smi::FromInt(17), isolate),
+ Handle<Smi>(Smi::FromInt(18), isolate),
+ Handle<Smi>(Smi::FromInt(19), isolate))
+ .ToHandleChecked();
+ CHECK_EQ(*isolate->global_proxy(), *result);
}
TEST(ArgumentsForEach) {
Isolate* isolate(CcTest::InitIsolateOnce());
- const int kNumParams = 3;
- CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
- CodeStubAssembler m(asm_tester.state());
+ // Set up CSA for creating a TFJ-style builtin.
+ using Descriptor = JSTrampolineDescriptor;
+ CodeAssemblerTester asm_tester(isolate, Descriptor());
+
+ {
+ CodeStubAssembler m(asm_tester.state());
+
+ TNode<Int32T> argc =
+ m.UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ CodeStubArguments arguments(&m, argc);
- CodeStubArguments arguments(&m, m.IntPtrConstant(3));
+ TVariable<Smi> sum(&m);
+ CodeAssemblerVariableList list({&sum}, m.zone());
- TVariable<Smi> sum(&m);
- CodeAssemblerVariableList list({&sum}, m.zone());
+ sum = m.SmiConstant(0);
- sum = m.SmiConstant(0);
+ arguments.ForEach(list, [&](TNode<Object> arg) {
+ sum = m.SmiAdd(sum.value(), m.CAST(arg));
+ });
- arguments.ForEach(list, [&](TNode<Object> arg) {
- sum = m.SmiAdd(sum.value(), m.CAST(arg));
- });
+ arguments.PopAndReturn(sum.value());
+ }
- arguments.PopAndReturn(sum.value());
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.function->shared().DontAdaptArguments();
- FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
- Handle<Object> result = ft.Call(Handle<Smi>(Smi::FromInt(12), isolate),
- Handle<Smi>(Smi::FromInt(13), isolate),
- Handle<Smi>(Smi::FromInt(14), isolate))
- .ToHandleChecked();
+ Handle<Object> result;
+ result = ft.Call(Handle<Smi>(Smi::FromInt(12), isolate),
+ Handle<Smi>(Smi::FromInt(13), isolate),
+ Handle<Smi>(Smi::FromInt(14), isolate))
+ .ToHandleChecked();
CHECK_EQ(Smi::FromInt(12 + 13 + 14), *result);
+
+ result = ft.Call(Handle<Smi>(Smi::FromInt(12), isolate),
+ Handle<Smi>(Smi::FromInt(13), isolate),
+ Handle<Smi>(Smi::FromInt(14), isolate),
+ Handle<Smi>(Smi::FromInt(15), isolate))
+ .ToHandleChecked();
+ CHECK_EQ(Smi::FromInt(12 + 13 + 14 + 15), *result);
+
+ result = ft.Call(Handle<Smi>(Smi::FromInt(12), isolate),
+ Handle<Smi>(Smi::FromInt(13), isolate),
+ Handle<Smi>(Smi::FromInt(14), isolate),
+ Handle<Smi>(Smi::FromInt(15), isolate),
+ Handle<Smi>(Smi::FromInt(16), isolate),
+ Handle<Smi>(Smi::FromInt(17), isolate),
+ Handle<Smi>(Smi::FromInt(18), isolate),
+ Handle<Smi>(Smi::FromInt(19), isolate))
+ .ToHandleChecked();
+ CHECK_EQ(Smi::FromInt(12 + 13 + 14 + 15 + 16 + 17 + 18 + 19), *result);
}
TEST(IsDebugActive) {
@@ -2384,8 +2558,8 @@ TEST(NewJSPromise) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
PromiseBuiltinsAssembler m(asm_tester.state());
- Node* const context = m.Parameter(kNumParams + 2);
- const TNode<JSPromise> promise = m.NewJSPromise(m.CAST(context));
+ auto context = m.Parameter<Context>(kNumParams + 2);
+ const TNode<JSPromise> promise = m.NewJSPromise(context);
m.Return(promise);
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -2401,9 +2575,9 @@ TEST(NewJSPromise2) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
PromiseBuiltinsAssembler m(asm_tester.state());
- Node* const context = m.Parameter(kNumParams + 2);
+ auto context = m.Parameter<Context>(kNumParams + 2);
const TNode<JSPromise> promise =
- m.NewJSPromise(m.CAST(context), v8::Promise::kRejected, m.SmiConstant(1));
+ m.NewJSPromise(context, v8::Promise::kRejected, m.SmiConstant(1));
m.Return(promise);
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -2423,7 +2597,7 @@ TEST(IsSymbol) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
CodeStubAssembler m(asm_tester.state());
- TNode<HeapObject> const symbol = m.CAST(m.Parameter(1));
+ auto symbol = m.Parameter<HeapObject>(1);
m.Return(m.SelectBooleanConstant(m.IsSymbol(symbol)));
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -2442,7 +2616,7 @@ TEST(IsPrivateSymbol) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
CodeStubAssembler m(asm_tester.state());
- TNode<HeapObject> const symbol = m.CAST(m.Parameter(1));
+ auto symbol = m.Parameter<HeapObject>(1);
m.Return(m.SelectBooleanConstant(m.IsPrivateSymbol(symbol)));
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -2464,9 +2638,9 @@ TEST(PromiseHasHandler) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
PromiseBuiltinsAssembler m(asm_tester.state());
- Node* const context = m.Parameter(kNumParams + 2);
+ auto context = m.Parameter<Context>(kNumParams + 2);
const TNode<JSPromise> promise =
- m.NewJSPromise(m.CAST(context), m.UndefinedConstant());
+ m.NewJSPromise(context, m.UndefinedConstant());
m.Return(m.SelectBooleanConstant(m.PromiseHasHandler(promise)));
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -2482,7 +2656,7 @@ TEST(CreatePromiseResolvingFunctionsContext) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
PromiseBuiltinsAssembler m(asm_tester.state());
- const TNode<Context> context = m.CAST(m.Parameter(kNumParams + 3));
+ const auto context = m.Parameter<Context>(kNumParams + 3);
const TNode<NativeContext> native_context = m.LoadNativeContext(context);
const TNode<JSPromise> promise =
m.NewJSPromise(context, m.UndefinedConstant());
@@ -2509,12 +2683,12 @@ TEST(CreatePromiseResolvingFunctions) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
PromiseBuiltinsAssembler m(asm_tester.state());
- Node* const context = m.Parameter(kNumParams + 2);
+ auto context = m.Parameter<Context>(kNumParams + 2);
const TNode<NativeContext> native_context = m.LoadNativeContext(context);
const TNode<JSPromise> promise =
- m.NewJSPromise(m.CAST(context), m.UndefinedConstant());
+ m.NewJSPromise(context, m.UndefinedConstant());
PromiseResolvingFunctions funcs = m.CreatePromiseResolvingFunctions(
- m.CAST(context), promise, m.BooleanConstant(false), native_context);
+ context, promise, m.BooleanConstant(false), native_context);
Node *resolve = funcs.resolve, *reject = funcs.reject;
TNode<IntPtrT> const kSize = m.IntPtrConstant(2);
TNode<FixedArray> const arr =
@@ -2536,8 +2710,8 @@ TEST(NewElementsCapacity) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 2);
CodeStubAssembler m(asm_tester.state());
- m.Return(
- m.SmiTag(m.CalculateNewElementsCapacity(m.SmiUntag(m.Parameter(1)))));
+ m.Return(m.SmiTag(
+ m.CalculateNewElementsCapacity(m.SmiUntag(m.Parameter<Smi>(1)))));
FunctionTester ft(asm_tester.GenerateCode(), 1);
Handle<Smi> test_value = Handle<Smi>(Smi::FromInt(1), isolate);
@@ -2566,8 +2740,7 @@ TEST(NewElementsCapacitySmi) {
Isolate* isolate(CcTest::InitIsolateOnce());
CodeAssemblerTester asm_tester(isolate, 2);
CodeStubAssembler m(asm_tester.state());
- m.Return(
- m.CalculateNewElementsCapacity(m.UncheckedCast<Smi>(m.Parameter(1))));
+ m.Return(m.CalculateNewElementsCapacity(m.UncheckedParameter<Smi>(1)));
FunctionTester ft(asm_tester.GenerateCode(), 1);
Handle<Smi> test_value = Handle<Smi>(Smi::FromInt(0), isolate);
@@ -2599,7 +2772,7 @@ TEST(AllocateFunctionWithMapAndContext) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
PromiseBuiltinsAssembler m(asm_tester.state());
- const TNode<Context> context = m.CAST(m.Parameter(kNumParams + 2));
+ const auto context = m.Parameter<Context>(kNumParams + 2);
const TNode<NativeContext> native_context = m.LoadNativeContext(context);
const TNode<JSPromise> promise =
m.NewJSPromise(context, m.UndefinedConstant());
@@ -2638,7 +2811,7 @@ TEST(CreatePromiseGetCapabilitiesExecutorContext) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
PromiseBuiltinsAssembler m(asm_tester.state());
- TNode<Context> context = m.CAST(m.Parameter(kNumParams + 2));
+ auto context = m.Parameter<Context>(kNumParams + 2);
TNode<NativeContext> native_context = m.LoadNativeContext(context);
TNode<PromiseCapability> capability = m.CreatePromiseCapability(
@@ -2668,7 +2841,7 @@ TEST(NewPromiseCapability) {
kNumParams + 1); // Include receiver.
PromiseBuiltinsAssembler m(asm_tester.state());
- Node* const context = m.Parameter(kNumParams + 3);
+ auto context = m.Parameter<Context>(kNumParams + 3);
const TNode<NativeContext> native_context = m.LoadNativeContext(context);
const TNode<Object> promise_constructor =
m.LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
@@ -2716,9 +2889,9 @@ TEST(NewPromiseCapability) {
kNumParams + 1); // Include receiver.
PromiseBuiltinsAssembler m(asm_tester.state());
- Node* const context = m.Parameter(kNumParams + 3);
+ auto context = m.Parameter<Context>(kNumParams + 3);
- Node* const constructor = m.Parameter(1);
+ auto constructor = m.Parameter<Object>(1);
const TNode<Oddball> debug_event = m.TrueConstant();
const TNode<Object> capability = m.CallBuiltin(
Builtins::kNewPromiseCapability, context, constructor, debug_event);
@@ -2944,9 +3117,9 @@ TEST(LoadJSArrayElementsMap) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
{
CodeStubAssembler m(asm_tester.state());
- Node* context = m.Parameter(kNumParams + 3);
+ auto context = m.Parameter<Context>(kNumParams + 3);
TNode<NativeContext> native_context = m.LoadNativeContext(context);
- TNode<Int32T> kind = m.SmiToInt32(m.Parameter(1));
+ TNode<Int32T> kind = m.SmiToInt32(m.Parameter<Smi>(1));
m.Return(m.LoadJSArrayElementsMap(kind, native_context));
}
@@ -2962,21 +3135,20 @@ TEST(LoadJSArrayElementsMap) {
}
}
-TEST(GotoIfNotWhiteSpaceOrLineTerminator) {
+TEST(IsWhiteSpaceOrLineTerminator) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
- StringTrimAssembler m(asm_tester.state());
{ // Returns true if whitespace, false otherwise.
- Label if_not_whitespace(&m);
-
- m.GotoIfNotWhiteSpaceOrLineTerminator(m.SmiToInt32(m.Parameter(1)),
- &if_not_whitespace);
+ CodeStubAssembler m(asm_tester.state());
+ Label if_true(&m), if_false(&m);
+ m.Branch(m.IsWhiteSpaceOrLineTerminator(m.SmiToInt32(m.Parameter<Smi>(1))),
+ &if_true, &if_false);
+ m.BIND(&if_true);
m.Return(m.TrueConstant());
-
- m.BIND(&if_not_whitespace);
+ m.BIND(&if_false);
m.Return(m.FalseConstant());
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -2999,9 +3171,9 @@ TEST(BranchIfNumberRelationalComparison) {
{
CodeStubAssembler m(asm_tester.state());
Label return_true(&m), return_false(&m);
- m.BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual,
- m.Parameter(1), m.Parameter(2),
- &return_true, &return_false);
+ m.BranchIfNumberRelationalComparison(
+ Operation::kGreaterThanOrEqual, m.Parameter<Number>(1),
+ m.Parameter<Number>(2), &return_true, &return_false);
m.BIND(&return_true);
m.Return(m.BooleanConstant(true));
m.BIND(&return_false);
@@ -3028,7 +3200,7 @@ TEST(IsNumberArrayIndex) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
{
CodeStubAssembler m(asm_tester.state());
- TNode<Number> number = m.CAST(m.Parameter(1));
+ auto number = m.Parameter<Number>(1);
m.Return(
m.SmiFromInt32(m.UncheckedCast<Int32T>(m.IsNumberArrayIndex(number))));
}
@@ -3077,7 +3249,7 @@ TEST(NumberMinMax) {
kNumParams + 1); // Include receiver.
{
CodeStubAssembler m(asm_tester_min.state());
- m.Return(m.NumberMin(m.Parameter(1), m.Parameter(2)));
+ m.Return(m.NumberMin(m.Parameter<Number>(1), m.Parameter<Number>(2)));
}
FunctionTester ft_min(asm_tester_min.GenerateCode(), kNumParams);
@@ -3085,7 +3257,7 @@ TEST(NumberMinMax) {
kNumParams + 1); // Include receiver.
{
CodeStubAssembler m(asm_tester_max.state());
- m.Return(m.NumberMax(m.Parameter(1), m.Parameter(2)));
+ m.Return(m.NumberMax(m.Parameter<Number>(1), m.Parameter<Number>(2)));
}
FunctionTester ft_max(asm_tester_max.GenerateCode(), kNumParams);
@@ -3135,7 +3307,7 @@ TEST(NumberAddSub) {
kNumParams + 1); // Include receiver.
{
CodeStubAssembler m(asm_tester_add.state());
- m.Return(m.NumberAdd(m.Parameter(1), m.Parameter(2)));
+ m.Return(m.NumberAdd(m.Parameter<Number>(1), m.Parameter<Number>(2)));
}
FunctionTester ft_add(asm_tester_add.GenerateCode(), kNumParams);
@@ -3143,7 +3315,7 @@ TEST(NumberAddSub) {
kNumParams + 1); // Include receiver.
{
CodeStubAssembler m(asm_tester_sub.state());
- m.Return(m.NumberSub(m.Parameter(1), m.Parameter(2)));
+ m.Return(m.NumberSub(m.Parameter<Number>(1), m.Parameter<Number>(2)));
}
FunctionTester ft_sub(asm_tester_sub.GenerateCode(), kNumParams);
@@ -3180,7 +3352,7 @@ TEST(CloneEmptyFixedArray) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
{
CodeStubAssembler m(asm_tester.state());
- m.Return(m.CloneFixedArray(m.CAST(m.Parameter(1))));
+ m.Return(m.CloneFixedArray(m.Parameter<FixedArrayBase>(1)));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3197,7 +3369,7 @@ TEST(CloneFixedArray) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
{
CodeStubAssembler m(asm_tester.state());
- m.Return(m.CloneFixedArray(m.CAST(m.Parameter(1))));
+ m.Return(m.CloneFixedArray(m.Parameter<FixedArrayBase>(1)));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3219,7 +3391,7 @@ TEST(CloneFixedArrayCOW) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
{
CodeStubAssembler m(asm_tester.state());
- m.Return(m.CloneFixedArray(m.CAST(m.Parameter(1))));
+ m.Return(m.CloneFixedArray(m.Parameter<FixedArrayBase>(1)));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3240,7 +3412,7 @@ TEST(ExtractFixedArrayCOWForceCopy) {
CodeStubAssembler::ExtractFixedArrayFlags flags;
flags |= CodeStubAssembler::ExtractFixedArrayFlag::kAllFixedArrays;
base::Optional<TNode<Smi>> constant(m.SmiConstant(0));
- m.Return(m.ExtractFixedArray(m.CAST(m.Parameter(1)), constant,
+ m.Return(m.ExtractFixedArray(m.Parameter<FixedArrayBase>(1), constant,
base::Optional<TNode<Smi>>(base::nullopt),
base::Optional<TNode<Smi>>(base::nullopt),
flags));
@@ -3270,10 +3442,10 @@ TEST(ExtractFixedArraySimple) {
CodeStubAssembler::ExtractFixedArrayFlags flags;
flags |= CodeStubAssembler::ExtractFixedArrayFlag::kAllFixedArrays;
flags |= CodeStubAssembler::ExtractFixedArrayFlag::kDontCopyCOW;
- base::Optional<TNode<IntPtrT>> p1_untagged(m.SmiUntag(m.Parameter(2)));
- base::Optional<TNode<IntPtrT>> p2_untagged(m.SmiUntag(m.Parameter(3)));
+ base::Optional<TNode<IntPtrT>> p1_untagged(m.SmiUntag(m.Parameter<Smi>(2)));
+ base::Optional<TNode<IntPtrT>> p2_untagged(m.SmiUntag(m.Parameter<Smi>(3)));
m.Return(m.ExtractFixedArray(
- m.CAST(m.Parameter(1)), p1_untagged, p2_untagged,
+ m.Parameter<FixedArrayBase>(1), p1_untagged, p2_untagged,
base::Optional<TNode<IntPtrT>>(base::nullopt), flags));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3301,9 +3473,9 @@ TEST(ExtractFixedArraySimpleSmiConstant) {
flags |= CodeStubAssembler::ExtractFixedArrayFlag::kDontCopyCOW;
base::Optional<TNode<Smi>> constant_1(m.SmiConstant(1));
base::Optional<TNode<Smi>> constant_2(m.SmiConstant(2));
- m.Return(m.ExtractFixedArray(m.CAST(m.Parameter(1)), constant_1, constant_2,
- base::Optional<TNode<Smi>>(base::nullopt),
- flags));
+ m.Return(m.ExtractFixedArray(
+ m.Parameter<FixedArrayBase>(1), constant_1, constant_2,
+ base::Optional<TNode<Smi>>(base::nullopt), flags));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3327,9 +3499,9 @@ TEST(ExtractFixedArraySimpleIntPtrConstant) {
flags |= CodeStubAssembler::ExtractFixedArrayFlag::kDontCopyCOW;
base::Optional<TNode<IntPtrT>> constant_1(m.IntPtrConstant(1));
base::Optional<TNode<IntPtrT>> constant_2(m.IntPtrConstant(2));
- m.Return(m.ExtractFixedArray(m.CAST(m.Parameter(1)), constant_1, constant_2,
- base::Optional<TNode<IntPtrT>>(base::nullopt),
- flags));
+ m.Return(m.ExtractFixedArray(
+ m.Parameter<FixedArrayBase>(1), constant_1, constant_2,
+ base::Optional<TNode<IntPtrT>>(base::nullopt), flags));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3351,7 +3523,7 @@ TEST(ExtractFixedArraySimpleIntPtrConstantNoDoubles) {
base::Optional<TNode<IntPtrT>> constant_1(m.IntPtrConstant(1));
base::Optional<TNode<IntPtrT>> constant_2(m.IntPtrConstant(2));
m.Return(m.ExtractFixedArray(
- m.CAST(m.Parameter(1)), constant_1, constant_2,
+ m.Parameter<FixedArrayBase>(1), constant_1, constant_2,
base::Optional<TNode<IntPtrT>>(base::nullopt),
CodeStubAssembler::ExtractFixedArrayFlag::kFixedArrays));
}
@@ -3372,10 +3544,10 @@ TEST(ExtractFixedArraySimpleIntPtrParameters) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
{
CodeStubAssembler m(asm_tester.state());
- base::Optional<TNode<IntPtrT>> p1_untagged(m.SmiUntag(m.Parameter(2)));
- base::Optional<TNode<IntPtrT>> p2_untagged(m.SmiUntag(m.Parameter(3)));
- m.Return(
- m.ExtractFixedArray(m.CAST(m.Parameter(1)), p1_untagged, p2_untagged));
+ base::Optional<TNode<IntPtrT>> p1_untagged(m.SmiUntag(m.Parameter<Smi>(2)));
+ base::Optional<TNode<IntPtrT>> p2_untagged(m.SmiUntag(m.Parameter<Smi>(3)));
+ m.Return(m.ExtractFixedArray(m.Parameter<FixedArrayBase>(1), p1_untagged,
+ p2_untagged));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3417,14 +3589,14 @@ TEST(SingleInputPhiElimination) {
TVariable<Smi> temp2(&m);
Label temp_label(&m, {&temp1, &temp2});
Label end_label(&m, {&temp1, &temp2});
- temp1 = m.CAST(m.Parameter(1));
- temp2 = m.CAST(m.Parameter(1));
- m.Branch(m.TaggedEqual(m.UncheckedCast<Object>(m.Parameter(0)),
- m.UncheckedCast<Object>(m.Parameter(1))),
+ temp1 = m.Parameter<Smi>(1);
+ temp2 = m.Parameter<Smi>(1);
+ m.Branch(m.TaggedEqual(m.UncheckedParameter<Object>(0),
+ m.UncheckedParameter<Object>(1)),
&end_label, &temp_label);
m.BIND(&temp_label);
- temp1 = m.CAST(m.Parameter(2));
- temp2 = m.CAST(m.Parameter(2));
+ temp1 = m.Parameter<Smi>(2);
+ temp2 = m.Parameter<Smi>(2);
m.Goto(&end_label);
m.BIND(&end_label);
m.Return(m.UncheckedCast<Object>(temp1.value()));
@@ -3440,7 +3612,7 @@ TEST(SmallOrderedHashMapAllocate) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
{
CodeStubAssembler m(asm_tester.state());
- TNode<Smi> capacity = m.CAST(m.Parameter(1));
+ auto capacity = m.Parameter<Smi>(1);
m.Return(m.AllocateSmallOrderedHashMap(m.SmiToIntPtr(capacity)));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3478,7 +3650,7 @@ TEST(SmallOrderedHashSetAllocate) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
{
CodeStubAssembler m(asm_tester.state());
- TNode<Smi> capacity = m.CAST(m.Parameter(1));
+ auto capacity = m.Parameter<Smi>(1);
m.Return(m.AllocateSmallOrderedHashSet(m.SmiToIntPtr(capacity)));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3517,7 +3689,7 @@ TEST(IsDoubleElementsKind) {
{
CodeStubAssembler m(ft_tester.state());
m.Return(m.SmiFromInt32(m.UncheckedCast<Int32T>(
- m.IsDoubleElementsKind(m.SmiToInt32(m.Parameter(1))))));
+ m.IsDoubleElementsKind(m.SmiToInt32(m.Parameter<Smi>(1))))));
}
FunctionTester ft(ft_tester.GenerateCode(), kNumParams);
CHECK_EQ(
@@ -3566,8 +3738,8 @@ TEST(TestCallBuiltinInlineTrampoline) {
CodeStubAssembler m(asm_tester.state());
const int kContextOffset = 3;
- Node* str = m.Parameter(1);
- TNode<Context> context = m.CAST(m.Parameter(kNumParams + kContextOffset));
+ auto str = m.Parameter<String>(1);
+ auto context = m.Parameter<Context>(kNumParams + kContextOffset);
TNode<Smi> index = m.SmiConstant(2);
@@ -3592,8 +3764,8 @@ DISABLED_TEST(TestCallBuiltinIndirectLoad) {
CodeStubAssembler m(asm_tester.state());
const int kContextOffset = 2;
- Node* str = m.Parameter(0);
- TNode<Context> context = m.CAST(m.Parameter(kNumParams + kContextOffset));
+ auto str = m.Parameter<String>(0);
+ auto context = m.Parameter<Context>(kNumParams + kContextOffset);
TNode<Smi> index = m.SmiConstant(2);
@@ -3622,7 +3794,7 @@ TEST(InstructionSchedulingCallerSavedRegisters) {
CodeStubAssembler m(asm_tester.state());
{
- Node* x = m.SmiUntag(m.Parameter(1));
+ Node* x = m.SmiUntag(m.Parameter<Smi>(1));
Node* y = m.WordOr(m.WordShr(x, 1), m.IntPtrConstant(1));
TNode<ExternalReference> isolate_ptr =
m.ExternalConstant(ExternalReference::isolate_address(isolate));
@@ -3671,10 +3843,9 @@ TEST(WasmInt32ToHeapNumber) {
int32_t test_value = test_values[i];
CodeAssemblerTester asm_tester(isolate, kNumParams);
CodeStubAssembler m(asm_tester.state());
- Node* context = m.Parameter(kNumParams + 1);
const TNode<Int32T> arg = m.Int32Constant(test_value);
- const TNode<Object> call_result =
- m.CallBuiltin(Builtins::kWasmInt32ToHeapNumber, context, arg);
+ const TNode<Object> call_result = m.CallBuiltin(
+ Builtins::kWasmInt32ToHeapNumber, m.NoContextConstant(), arg);
m.Return(call_result);
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3716,8 +3887,8 @@ TEST(WasmTaggedNonSmiToInt32) {
const int kNumParams = 2;
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
CodeStubAssembler m(asm_tester.state());
- Node* context = m.Parameter(kNumParams + 3);
- const TNode<Object> arg = m.CAST(m.Parameter(1));
+ auto context = m.Parameter<Context>(kNumParams + 3);
+ const auto arg = m.Parameter<Object>(1);
int32_t result = 0;
Node* base = m.IntPtrConstant(reinterpret_cast<intptr_t>(&result));
Node* value = m.CallBuiltin(Builtins::kWasmTaggedNonSmiToInt32, context, arg);
@@ -3758,10 +3929,9 @@ TEST(WasmFloat32ToNumber) {
double test_value = test_values[i];
CodeAssemblerTester asm_tester(isolate, kNumParams);
CodeStubAssembler m(asm_tester.state());
- Node* context = m.Parameter(kNumParams + 1);
const TNode<Float32T> arg = m.Float32Constant(test_value);
- const TNode<Object> call_result =
- m.CallBuiltin(Builtins::kWasmFloat32ToNumber, context, arg);
+ const TNode<Object> call_result = m.CallBuiltin(
+ Builtins::kWasmFloat32ToNumber, m.NoContextConstant(), arg);
m.Return(call_result);
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3799,10 +3969,9 @@ TEST(WasmFloat64ToNumber) {
double test_value = test_values[i];
CodeAssemblerTester asm_tester(isolate, kNumParams);
CodeStubAssembler m(asm_tester.state());
- Node* context = m.Parameter(kNumParams + 1);
const TNode<Float64T> arg = m.Float64Constant(test_value);
- const TNode<Object> call_result =
- m.CallBuiltin(Builtins::kWasmFloat64ToNumber, context, arg);
+ const TNode<Object> call_result = m.CallBuiltin(
+ Builtins::kWasmFloat64ToNumber, m.NoContextConstant(), arg);
m.Return(call_result);
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -3854,8 +4023,8 @@ TEST(WasmTaggedToFloat64) {
const int kNumParams = 1;
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
CodeStubAssembler m(asm_tester.state());
- Node* context = m.Parameter(kNumParams + 3);
- const TNode<Object> arg = m.CAST(m.Parameter(1));
+ auto context = m.Parameter<Context>(kNumParams + 3);
+ const auto arg = m.Parameter<Object>(1);
double result = 0;
Node* base = m.IntPtrConstant(reinterpret_cast<intptr_t>(&result));
Node* value = m.CallBuiltin(Builtins::kWasmTaggedToFloat64, context, arg);
@@ -3883,8 +4052,7 @@ TEST(SmiUntagLeftShiftOptimization) {
CodeStubAssembler m(asm_tester.state());
{
- TNode<TaggedIndex> param =
- TNode<TaggedIndex>::UncheckedCast(m.Parameter(0));
+ TNode<TaggedIndex> param = m.UncheckedParameter<TaggedIndex>(0);
TNode<WordT> unoptimized =
m.IntPtrMul(m.TaggedIndexToIntPtr(param), m.IntPtrConstant(8));
TNode<WordT> optimized = m.WordShl(
@@ -3904,8 +4072,8 @@ TEST(SmiUntagComparisonOptimization) {
CodeStubAssembler m(asm_tester.state());
{
- TNode<Smi> a = TNode<Smi>::UncheckedCast(m.Parameter(0));
- TNode<Smi> b = TNode<Smi>::UncheckedCast(m.Parameter(1));
+ TNode<Smi> a = m.UncheckedParameter<Smi>(0);
+ TNode<Smi> b = m.UncheckedParameter<Smi>(1);
TNode<BoolT> unoptimized = m.UintPtrLessThan(m.SmiUntag(a), m.SmiUntag(b));
#ifdef V8_COMPRESS_POINTERS
TNode<BoolT> optimized = m.Uint32LessThan(
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index a2c73fb301..7ba7ce8978 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -808,30 +808,6 @@ TEST(InvocationCount) {
CHECK_EQ(4, foo->feedback_vector().invocation_count());
}
-TEST(SafeToSkipArgumentsAdaptor) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- CompileRun(
- "function a() { \"use strict\"; }; a();"
- "function b() { }; b();"
- "function c() { \"use strict\"; return arguments; }; c();"
- "function d(...args) { return args; }; d();"
- "function e() { \"use strict\"; return eval(\"\"); }; e();"
- "function f(x, y) { \"use strict\"; return x + y; }; f(1, 2);");
- Handle<JSFunction> a = Handle<JSFunction>::cast(GetGlobalProperty("a"));
- CHECK(a->shared().is_safe_to_skip_arguments_adaptor());
- Handle<JSFunction> b = Handle<JSFunction>::cast(GetGlobalProperty("b"));
- CHECK(!b->shared().is_safe_to_skip_arguments_adaptor());
- Handle<JSFunction> c = Handle<JSFunction>::cast(GetGlobalProperty("c"));
- CHECK(!c->shared().is_safe_to_skip_arguments_adaptor());
- Handle<JSFunction> d = Handle<JSFunction>::cast(GetGlobalProperty("d"));
- CHECK(!d->shared().is_safe_to_skip_arguments_adaptor());
- Handle<JSFunction> e = Handle<JSFunction>::cast(GetGlobalProperty("e"));
- CHECK(!e->shared().is_safe_to_skip_arguments_adaptor());
- Handle<JSFunction> f = Handle<JSFunction>::cast(GetGlobalProperty("f"));
- CHECK(f->shared().is_safe_to_skip_arguments_adaptor());
-}
-
TEST(ShallowEagerCompilation) {
i::FLAG_always_opt = false;
CcTest::InitializeVM();
@@ -981,6 +957,9 @@ TEST(DecideToPretenureDuringCompilation) {
FLAG_allow_natives_syntax = true;
FLAG_allocation_site_pretenuring = true;
FLAG_flush_bytecode = false;
+  // Turn on lazy feedback allocation so that we create exactly one allocation site.
+ // Without lazy feedback allocation, we create two allocation sites.
+ FLAG_lazy_feedback_allocation = true;
// We want to trigger exactly 1 optimization.
FLAG_use_osr = false;
@@ -1105,7 +1084,7 @@ TEST(ProfilerEnabledDuringBackgroundCompile) {
std::make_unique<DummySourceStream>(source),
v8::ScriptCompiler::StreamedSource::UTF8);
std::unique_ptr<v8::ScriptCompiler::ScriptStreamingTask> task(
- v8::ScriptCompiler::StartStreamingScript(isolate, &streamed_source));
+ v8::ScriptCompiler::StartStreaming(isolate, &streamed_source));
// Run the background compilation task on the main thread.
task->Run();
diff --git a/deps/v8/test/cctest/test-concurrent-descriptor-array.cc b/deps/v8/test/cctest/test-concurrent-descriptor-array.cc
index 97f3f5dc94..568cf3df27 100644
--- a/deps/v8/test/cctest/test-concurrent-descriptor-array.cc
+++ b/deps/v8/test/cctest/test-concurrent-descriptor-array.cc
@@ -33,7 +33,8 @@ class ConcurrentSearchThread final : public v8::base::Thread {
sema_started_(sema_started) {}
void Run() override {
- LocalHeap local_heap(heap_, std::move(ph_));
+ LocalHeap local_heap(heap_, ThreadKind::kBackground, std::move(ph_));
+ UnparkedScope unparked_scope(&local_heap);
LocalHandleScope scope(&local_heap);
for (int i = 0; i < kNumHandles; i++) {
@@ -48,7 +49,7 @@ class ConcurrentSearchThread final : public v8::base::Thread {
Handle<Map> map(handle->map(), &local_heap);
Handle<DescriptorArray> descriptors(
- map->synchronized_instance_descriptors(), &local_heap);
+ map->instance_descriptors(kAcquireLoad), &local_heap);
bool is_background_thread = true;
InternalIndex const number =
descriptors->Search(*name_, *map, is_background_thread);
@@ -56,11 +57,9 @@ class ConcurrentSearchThread final : public v8::base::Thread {
}
CHECK_EQ(handles_.size(), kNumHandles * 2);
-
- CHECK(!ph_);
- ph_ = local_heap.DetachPersistentHandles();
}
+ private:
Heap* heap_;
std::vector<Handle<JSObject>> handles_;
std::unique_ptr<PersistentHandles> ph_;
diff --git a/deps/v8/test/cctest/test-concurrent-feedback-vector.cc b/deps/v8/test/cctest/test-concurrent-feedback-vector.cc
new file mode 100644
index 0000000000..4cb843c97d
--- /dev/null
+++ b/deps/v8/test/cctest/test-concurrent-feedback-vector.cc
@@ -0,0 +1,268 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <atomic>
+#include <unordered_set>
+
+#include "src/api/api.h"
+#include "src/base/platform/semaphore.h"
+#include "src/handles/handles-inl.h"
+#include "src/handles/local-handles-inl.h"
+#include "src/handles/persistent-handles.h"
+#include "src/heap/heap.h"
+#include "src/heap/local-heap.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/heap/heap-utils.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// kCycles is large enough to ensure we see every state we are interested in.
+const int kCycles = 1000;
+static std::atomic<bool> all_states_seen{false};
+
+class FeedbackVectorExplorationThread final : public v8::base::Thread {
+ public:
+ FeedbackVectorExplorationThread(Heap* heap, base::Semaphore* sema_started,
+ base::Semaphore* vector_ready,
+ base::Semaphore* vector_consumed,
+ std::unique_ptr<PersistentHandles> ph,
+ Handle<FeedbackVector> feedback_vector)
+ : v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
+ heap_(heap),
+ feedback_vector_(feedback_vector),
+ ph_(std::move(ph)),
+ sema_started_(sema_started),
+ vector_ready_(vector_ready),
+ vector_consumed_(vector_consumed) {}
+
+ using InlineCacheSet = std::unordered_set<InlineCacheState, std::hash<int>>;
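+  // Returns true once {found} contains every IC state the main thread cycles
+  // through.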
+ bool AllRequiredStatesSeen(const InlineCacheSet& found) {
+ auto end = found.end();
+ return (found.find(UNINITIALIZED) != end &&
+ found.find(MONOMORPHIC) != end && found.find(POLYMORPHIC) != end &&
+ found.find(MEGAMORPHIC) != end);
+ }
+
+ void Run() override {
+ Isolate* isolate = heap_->isolate();
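+    // The background LocalHeap starts parked; unpark it to allow heap access.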
+ LocalHeap local_heap(heap_, ThreadKind::kBackground, std::move(ph_));
+ UnparkedScope scope(&local_heap);
+
+    // Set up background-thread access to the feedback vector.
+ NexusConfig nexus_config =
+ NexusConfig::FromBackgroundThread(isolate, &local_heap);
+ FeedbackSlot slot(0);
+
+    // FeedbackVectorExplorationThread signals that it is beginning its loop.
+ sema_started_->Signal();
+
+ InlineCacheSet found_states;
+ for (int i = 0; i < kCycles; i++) {
+ FeedbackNexus nexus(feedback_vector_, slot, nexus_config);
+ auto state = nexus.ic_state();
+ if (state == MONOMORPHIC || state == POLYMORPHIC) {
+ MapHandles maps;
+ nexus.ExtractMaps(&maps);
+ for (unsigned int i = 0; i < maps.size(); i++) {
+ CHECK(maps[i]->IsMap());
+ }
+ }
+
+ if (found_states.find(state) == found_states.end()) {
+ found_states.insert(state);
+ if (AllRequiredStatesSeen(found_states)) {
+ // We are finished.
+ break;
+ }
+ }
+ }
+
+ if (!AllRequiredStatesSeen(found_states)) {
+      // Repeat the exercise with an explicit handshaking protocol. This
+      // guarantees coverage of the necessary code paths even though it avoids
+      // actual concurrency. In test runs there are always one or two bots
+      // whose thread interleaving doesn't allow all states to be seen; this
+      // fallback handles that situation.
+ vector_ready_->Wait();
+ fprintf(stderr, "Worker beginning to check for uninitialized\n");
+ {
+ FeedbackNexus nexus(feedback_vector_, slot, nexus_config);
+ auto state = nexus.ic_state();
+ CHECK_EQ(state, UNINITIALIZED);
+ }
+ vector_consumed_->Signal();
+ vector_ready_->Wait();
+ fprintf(stderr, "Worker beginning to check for monomorphic\n");
+ {
+ FeedbackNexus nexus(feedback_vector_, slot, nexus_config);
+ auto state = nexus.ic_state();
+ CHECK_EQ(state, MONOMORPHIC);
+ MapHandles maps;
+ nexus.ExtractMaps(&maps);
+ CHECK(maps[0]->IsMap());
+ }
+ vector_consumed_->Signal();
+ vector_ready_->Wait();
+ fprintf(stderr, "Worker beginning to check for polymorphic\n");
+ {
+ FeedbackNexus nexus(feedback_vector_, slot, nexus_config);
+ auto state = nexus.ic_state();
+ CHECK_EQ(state, POLYMORPHIC);
+ MapHandles maps;
+ nexus.ExtractMaps(&maps);
+ for (unsigned int i = 0; i < maps.size(); i++) {
+ CHECK(maps[i]->IsMap());
+ }
+ }
+ vector_consumed_->Signal();
+ vector_ready_->Wait();
+ fprintf(stderr, "Worker beginning to check for megamorphic\n");
+ {
+ FeedbackNexus nexus(feedback_vector_, slot, nexus_config);
+ auto state = nexus.ic_state();
+ CHECK_EQ(state, MEGAMORPHIC);
+ }
+ }
+
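+    // Publish success so the main thread can stop cycling the IC.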
+ all_states_seen.store(true, std::memory_order_release);
+ vector_consumed_->Signal();
+
+ CHECK(!ph_);
+ ph_ = local_heap.DetachPersistentHandles();
+ }
+
+ Heap* heap_;
+ Handle<FeedbackVector> feedback_vector_;
+ std::unique_ptr<PersistentHandles> ph_;
+ base::Semaphore* sema_started_;
+
+ // These two semaphores control the explicit handshaking mode in case we
+ // didn't see all states within kCycles loops.
+ base::Semaphore* vector_ready_;
+ base::Semaphore* vector_consumed_;
+};
+
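+// Waits on {semaphore} in 1 ms slices, giving up early once the worker has
+// reported that all states were seen.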
+static void CheckedWait(base::Semaphore& semaphore) {
+ while (!all_states_seen.load(std::memory_order_acquire)) {
+ if (semaphore.WaitFor(base::TimeDelta::FromMilliseconds(1))) break;
+ }
+}
+
+// Verify that a LoadIC can be cycled through different states and safely
+// read on a background thread.
+TEST(CheckLoadICStates) {
+ CcTest::InitializeVM();
+ FLAG_local_heaps = true;
+ FLAG_lazy_feedback_allocation = false;
+ Isolate* isolate = CcTest::i_isolate();
+
+ std::unique_ptr<PersistentHandles> ph = isolate->NewPersistentHandles();
+ HandleScope handle_scope(isolate);
+
+ Handle<HeapObject> o1 = Handle<HeapObject>::cast(
+ Utils::OpenHandle(*CompileRun("o1 = { bar: {} };")));
+ Handle<HeapObject> o2 = Handle<HeapObject>::cast(
+ Utils::OpenHandle(*CompileRun("o2 = { baz: 3, bar: 3 };")));
+ Handle<HeapObject> o3 = Handle<HeapObject>::cast(
+ Utils::OpenHandle(*CompileRun("o3 = { blu: 3, baz: 3, bar: 3 };")));
+ Handle<HeapObject> o4 = Handle<HeapObject>::cast(Utils::OpenHandle(
+ *CompileRun("o4 = { ble: 3, blu: 3, baz: 3, bar: 3 };")));
+ auto result = CompileRun(
+ "function foo(o) {"
+ " let a = o.bar;"
+ " return a;"
+ "}"
+ "foo(o1);"
+ "foo;");
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(Utils::OpenHandle(*result));
+ Handle<FeedbackVector> vector(function->feedback_vector(), isolate);
+ FeedbackSlot slot(0);
+ FeedbackNexus nexus(vector, slot);
+ CHECK(IsLoadICKind(nexus.kind()));
+ CHECK_EQ(MONOMORPHIC, nexus.ic_state());
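+  // Reset the IC to UNINITIALIZED so the worker can observe the full cycle.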
+ nexus.ConfigureUninitialized();
+
+ // Now the basic environment is set up. Start the worker thread.
+ base::Semaphore sema_started(0);
+ base::Semaphore vector_ready(0);
+ base::Semaphore vector_consumed(0);
+ Handle<FeedbackVector> persistent_vector =
+ Handle<FeedbackVector>::cast(ph->NewHandle(vector));
+ std::unique_ptr<FeedbackVectorExplorationThread> thread(
+ new FeedbackVectorExplorationThread(isolate->heap(), &sema_started,
+ &vector_ready, &vector_consumed,
+ std::move(ph), persistent_vector));
+ CHECK(thread->Start());
+ sema_started.Wait();
+
+ // Cycle the IC through all states repeatedly.
+
+ // {dummy_handler} is just an arbitrary value to associate with a map in order
+ // to fill in the feedback vector slots in a minimally acceptable way.
+ MaybeObjectHandle dummy_handler(Smi::FromInt(10), isolate);
+ for (int i = 0; i < kCycles; i++) {
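+    // Stop cycling once the worker has seen every state.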
+ if (all_states_seen.load(std::memory_order_acquire)) break;
+
+ CHECK_EQ(UNINITIALIZED, nexus.ic_state());
+ if (i == (kCycles - 1)) {
+ // If we haven't seen all states by the last attempt, enter an explicit
+ // handshaking mode.
+ vector_ready.Signal();
+ CheckedWait(vector_consumed);
+ fprintf(stderr, "Main thread configuring monomorphic\n");
+ }
+ nexus.ConfigureMonomorphic(Handle<Name>(), Handle<Map>(o1->map(), isolate),
+ dummy_handler);
+ CHECK_EQ(MONOMORPHIC, nexus.ic_state());
+
+ if (i == (kCycles - 1)) {
+ vector_ready.Signal();
+ CheckedWait(vector_consumed);
+ fprintf(stderr, "Main thread configuring polymorphic\n");
+ }
+
+ // Go polymorphic.
+ std::vector<MapAndHandler> map_and_handlers;
+ map_and_handlers.push_back(
+ MapAndHandler(Handle<Map>(o1->map(), isolate), dummy_handler));
+ map_and_handlers.push_back(
+ MapAndHandler(Handle<Map>(o2->map(), isolate), dummy_handler));
+ map_and_handlers.push_back(
+ MapAndHandler(Handle<Map>(o3->map(), isolate), dummy_handler));
+ map_and_handlers.push_back(
+ MapAndHandler(Handle<Map>(o4->map(), isolate), dummy_handler));
+ nexus.ConfigurePolymorphic(Handle<Name>(), map_and_handlers);
+ CHECK_EQ(POLYMORPHIC, nexus.ic_state());
+
+ if (i == (kCycles - 1)) {
+ vector_ready.Signal();
+ CheckedWait(vector_consumed);
+ fprintf(stderr, "Main thread configuring megamorphic\n");
+ }
+
+    // Go megamorphic.
+ nexus.ConfigureMegamorphic();
+ CHECK_EQ(MEGAMORPHIC, nexus.ic_state());
+
+ if (i == (kCycles - 1)) {
+ vector_ready.Signal();
+ CheckedWait(vector_consumed);
+ fprintf(stderr, "Main thread finishing\n");
+ }
+
+ nexus.ConfigureUninitialized();
+ }
+
+ CHECK(all_states_seen.load(std::memory_order_acquire));
+ thread->Join();
+}
+
+} // anonymous namespace
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-concurrent-prototype.cc b/deps/v8/test/cctest/test-concurrent-prototype.cc
index 65029add71..7484d4d22e 100644
--- a/deps/v8/test/cctest/test-concurrent-prototype.cc
+++ b/deps/v8/test/cctest/test-concurrent-prototype.cc
@@ -32,7 +32,8 @@ class ConcurrentSearchThread final : public v8::base::Thread {
sema_started_(sema_started) {}
void Run() override {
- LocalHeap local_heap(heap_, std::move(ph_));
+ LocalHeap local_heap(heap_, ThreadKind::kBackground, std::move(ph_));
+ UnparkedScope unparked_scope(&local_heap);
LocalHandleScope scope(&local_heap);
for (int i = 0; i < kNumHandles; i++) {
@@ -55,11 +56,9 @@ class ConcurrentSearchThread final : public v8::base::Thread {
}
CHECK_EQ(handles_.size(), kNumHandles * 2);
-
- CHECK(!ph_);
- ph_ = local_heap.DetachPersistentHandles();
}
+ private:
Heap* heap_;
std::vector<Handle<JSObject>> handles_;
std::unique_ptr<PersistentHandles> ph_;
diff --git a/deps/v8/test/cctest/test-concurrent-script-context-table.cc b/deps/v8/test/cctest/test-concurrent-script-context-table.cc
index 600aa2b08c..1ea90c7902 100644
--- a/deps/v8/test/cctest/test-concurrent-script-context-table.cc
+++ b/deps/v8/test/cctest/test-concurrent-script-context-table.cc
@@ -28,14 +28,14 @@ class ScriptContextTableAccessUsedThread final : public v8::base::Thread {
Handle<ScriptContextTable> script_context_table)
: v8::base::Thread(
base::Thread::Options("ScriptContextTableAccessUsedThread")),
- isolate_(isolate),
heap_(heap),
sema_started_(sema_started),
ph_(std::move(ph)),
script_context_table_(script_context_table) {}
void Run() override {
- LocalHeap local_heap(heap_, std::move(ph_));
+ LocalHeap local_heap(heap_, ThreadKind::kBackground, std::move(ph_));
+ UnparkedScope unparked_scope(&local_heap);
LocalHandleScope scope(&local_heap);
sema_started_->Signal();
@@ -44,12 +44,9 @@ class ScriptContextTableAccessUsedThread final : public v8::base::Thread {
Context context = script_context_table_->get_context(i);
CHECK(context.IsScriptContext());
}
-
- CHECK(!ph_);
- ph_ = local_heap.DetachPersistentHandles();
}
- Isolate* isolate_;
+ private:
Heap* heap_;
base::Semaphore* sema_started_;
std::unique_ptr<PersistentHandles> ph_;
@@ -64,14 +61,14 @@ class AccessScriptContextTableThread final : public v8::base::Thread {
Handle<NativeContext> native_context)
: v8::base::Thread(
base::Thread::Options("AccessScriptContextTableThread")),
- isolate_(isolate),
heap_(heap),
sema_started_(sema_started),
ph_(std::move(ph)),
native_context_(native_context) {}
void Run() override {
- LocalHeap local_heap(heap_, std::move(ph_));
+ LocalHeap local_heap(heap_, ThreadKind::kBackground, std::move(ph_));
+ UnparkedScope unparked_scope(&local_heap);
LocalHandleScope scope(&local_heap);
sema_started_->Signal();
@@ -87,12 +84,9 @@ class AccessScriptContextTableThread final : public v8::base::Thread {
&local_heap);
CHECK(!context.is_null());
}
-
- CHECK(!ph_);
- ph_ = local_heap.DetachPersistentHandles();
}
- Isolate* isolate_;
+ private:
Heap* heap_;
base::Semaphore* sema_started_;
std::unique_ptr<PersistentHandles> ph_;
diff --git a/deps/v8/test/cctest/test-concurrent-transition-array.cc b/deps/v8/test/cctest/test-concurrent-transition-array.cc
index 95c5cb2c3a..3836104b57 100644
--- a/deps/v8/test/cctest/test-concurrent-transition-array.cc
+++ b/deps/v8/test/cctest/test-concurrent-transition-array.cc
@@ -34,18 +34,18 @@ class ConcurrentSearchThread : public v8::base::Thread {
result_map_(result_map) {}
void Run() override {
- LocalHeap local_heap(heap_, std::move(ph_));
+ LocalHeap local_heap(heap_, ThreadKind::kBackground, std::move(ph_));
+ UnparkedScope scope(&local_heap);
background_thread_started_->Signal();
CHECK_EQ(TransitionsAccessor(CcTest::i_isolate(), map_, true)
.SearchTransition(*name_, kData, NONE),
result_map_ ? **result_map_ : Map());
-
- CHECK(!ph_);
- ph_ = local_heap.DetachPersistentHandles();
}
+  // Protected rather than private because a subclass accesses these fields.
+ protected:
Heap* heap_;
base::Semaphore* background_thread_started_;
std::unique_ptr<PersistentHandles> ph_;
@@ -70,7 +70,8 @@ class ConcurrentSearchOnOutdatedAccessorThread final
main_thread_finished_(main_thread_finished) {}
void Run() override {
- LocalHeap local_heap(heap_, std::move(ph_));
+ LocalHeap local_heap(heap_, ThreadKind::kBackground, std::move(ph_));
+ UnparkedScope scope(&local_heap);
TransitionsAccessor accessor(CcTest::i_isolate(), map_, true);
background_thread_started_->Signal();
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 84c4ee08fe..1db0a7b426 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Tests of profiles generator and utilities.
+// Tests of the CPU profiler and utilities.
#include <limits>
#include <memory>
@@ -45,6 +45,7 @@
#include "src/objects/objects-inl.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/profiler/profiler-listener.h"
+#include "src/profiler/symbolizer.h"
#include "src/profiler/tracing-cpu-profiler.h"
#include "src/tracing/trace-event.h"
#include "src/utils/utils.h"
@@ -86,11 +87,11 @@ TEST(StartStop) {
i::Isolate* isolate = CcTest::i_isolate();
CpuProfilesCollection profiles(isolate);
ProfilerCodeObserver code_observer(isolate);
- ProfileGenerator generator(&profiles, code_observer.code_map());
+ Symbolizer symbolizer(code_observer.code_map());
std::unique_ptr<ProfilerEventsProcessor> processor(
- new SamplingEventsProcessor(isolate, &generator, &code_observer,
- v8::base::TimeDelta::FromMicroseconds(100),
- true));
+ new SamplingEventsProcessor(
+ isolate, &symbolizer, &code_observer, &profiles,
+ v8::base::TimeDelta::FromMicroseconds(100), true));
CHECK(processor->Start());
processor->StopSynchronously();
}
@@ -169,10 +170,9 @@ TEST(CodeEvents) {
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfilerCodeObserver code_observer(isolate);
- ProfileGenerator* generator =
- new ProfileGenerator(profiles, code_observer.code_map());
+ Symbolizer* symbolizer = new Symbolizer(code_observer.code_map());
ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
- isolate, generator, &code_observer,
+ isolate, symbolizer, &code_observer, profiles,
v8::base::TimeDelta::FromMicroseconds(100), true);
CHECK(processor->Start());
ProfilerListener profiler_listener(isolate, processor);
@@ -195,21 +195,21 @@ TEST(CodeEvents) {
isolate->logger()->RemoveCodeEventListener(&profiler_listener);
processor->StopSynchronously();
- // Check the state of profile generator.
+ // Check the state of the symbolizer.
CodeEntry* aaa =
- generator->code_map()->FindEntry(aaa_code->InstructionStart());
+ symbolizer->code_map()->FindEntry(aaa_code->InstructionStart());
CHECK(aaa);
CHECK_EQ(0, strcmp(aaa_str, aaa->name()));
CodeEntry* comment =
- generator->code_map()->FindEntry(comment_code->InstructionStart());
+ symbolizer->code_map()->FindEntry(comment_code->InstructionStart());
CHECK(comment);
CHECK_EQ(0, strcmp("comment", comment->name()));
- CHECK(!generator->code_map()->FindEntry(comment2_code->InstructionStart()));
+ CHECK(!symbolizer->code_map()->FindEntry(comment2_code->InstructionStart()));
CodeEntry* comment2 =
- generator->code_map()->FindEntry(moved_code->InstructionStart());
+ symbolizer->code_map()->FindEntry(moved_code->InstructionStart());
CHECK(comment2);
CHECK_EQ(0, strcmp("comment2", comment2->name()));
}
@@ -231,13 +231,12 @@ TEST(TickEvents) {
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfilerCodeObserver code_observer(isolate);
- ProfileGenerator* generator =
- new ProfileGenerator(profiles, code_observer.code_map());
+ Symbolizer* symbolizer = new Symbolizer(code_observer.code_map());
ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
- CcTest::i_isolate(), generator, &code_observer,
+ CcTest::i_isolate(), symbolizer, &code_observer, profiles,
v8::base::TimeDelta::FromMicroseconds(100), true);
- CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles, generator,
- processor);
+ CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles,
+ symbolizer, processor);
profiles->StartProfiling("");
CHECK(processor->Start());
ProfilerListener profiler_listener(isolate, processor);
@@ -248,10 +247,11 @@ TEST(TickEvents) {
profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, frame3_code, "ddd");
EnqueueTickSampleEvent(processor, frame1_code->raw_instruction_start());
- EnqueueTickSampleEvent(
- processor,
- frame2_code->raw_instruction_start() + frame2_code->ExecutableSize() / 2,
- frame1_code->raw_instruction_start() + frame1_code->ExecutableSize() / 2);
+ EnqueueTickSampleEvent(processor,
+ frame2_code->raw_instruction_start() +
+ frame2_code->raw_instruction_size() / 2,
+ frame1_code->raw_instruction_start() +
+ frame1_code->raw_instruction_size() / 2);
EnqueueTickSampleEvent(processor, frame3_code->raw_instruction_end() - 1,
frame2_code->raw_instruction_end() - 1,
frame1_code->raw_instruction_end() - 1);
@@ -279,6 +279,94 @@ TEST(TickEvents) {
CHECK(top_down_ddd_children->empty());
}
+TEST(CodeMapClearedBetweenProfilesWithLazyLogging) {
+ TestSetup test_setup;
+ LocalContext env;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+
+ // This gets logged when the profiler starts up and scans the heap.
+ i::Handle<i::AbstractCode> code1(CreateCode(&env), isolate);
+
+ CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging);
+ profiler.StartProfiling("");
+
+ CpuProfile* profile = profiler.StopProfiling("");
+ CHECK(profile);
+
+ // Check that our code is still in the code map.
+ CodeMap* code_map = profiler.code_map_for_test();
+ CodeEntry* code1_entry = code_map->FindEntry(code1->InstructionStart());
+ CHECK(code1_entry);
+ CHECK_EQ(0, strcmp("function_1", code1_entry->name()));
+
+ profiler.DeleteProfile(profile);
+
+ // Check that the code map is emptied once the last profile is deleted.
+ CHECK(!code_map->FindEntry(code1->InstructionStart()));
+
+ // Create code between profiles. This should not be logged yet.
+ i::Handle<i::AbstractCode> code2(CreateCode(&env), isolate);
+
+ CHECK(!code_map->FindEntry(code2->InstructionStart()));
+}
+
+TEST(CodeMapNotClearedBetweenProfilesWithEagerLogging) {
+ TestSetup test_setup;
+ LocalContext env;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+
+ // This gets logged when the profiler starts up and scans the heap.
+ i::Handle<i::AbstractCode> code1(CreateCode(&env), isolate);
+
+ CpuProfiler profiler(isolate, kDebugNaming, kEagerLogging);
+ profiler.StartProfiling("");
+
+ CpuProfile* profile = profiler.StopProfiling("");
+ CHECK(profile);
+
+ // Check that our code is still in the code map.
+ CodeMap* code_map = profiler.code_map_for_test();
+ CodeEntry* code1_entry = code_map->FindEntry(code1->InstructionStart());
+ CHECK(code1_entry);
+ CHECK_EQ(0, strcmp("function_1", code1_entry->name()));
+
+ profiler.DeleteProfile(profile);
+
+ // We should still have an entry in kEagerLogging mode.
+ code1_entry = code_map->FindEntry(code1->InstructionStart());
+ CHECK(code1_entry);
+ CHECK_EQ(0, strcmp("function_1", code1_entry->name()));
+
+ // Create code between profiles. This should be logged too.
+ i::Handle<i::AbstractCode> code2(CreateCode(&env), isolate);
+ CHECK(code_map->FindEntry(code2->InstructionStart()));
+
+ profiler.StartProfiling("");
+ CpuProfile* profile2 = profiler.StopProfiling("");
+ CHECK(profile2);
+
+ // Check that we still have code map entries for both code objects.
+ code1_entry = code_map->FindEntry(code1->InstructionStart());
+ CHECK(code1_entry);
+ CHECK_EQ(0, strcmp("function_1", code1_entry->name()));
+ CodeEntry* code2_entry = code_map->FindEntry(code2->InstructionStart());
+ CHECK(code2_entry);
+ CHECK_EQ(0, strcmp("function_2", code2_entry->name()));
+
+ profiler.DeleteProfile(profile2);
+
+ // Check that we still have code map entries for both code objects, even after
+ // the last profile is deleted.
+ code1_entry = code_map->FindEntry(code1->InstructionStart());
+ CHECK(code1_entry);
+ CHECK_EQ(0, strcmp("function_1", code1_entry->name()));
+ code2_entry = code_map->FindEntry(code2->InstructionStart());
+ CHECK(code2_entry);
+ CHECK_EQ(0, strcmp("function_2", code2_entry->name()));
+}
+
// http://crbug/51594
// This test must not crash.
TEST(CrashIfStoppingLastNonExistentProfile) {
@@ -303,13 +391,12 @@ TEST(Issue1398) {
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfilerCodeObserver code_observer(isolate);
- ProfileGenerator* generator =
- new ProfileGenerator(profiles, code_observer.code_map());
+ Symbolizer* symbolizer = new Symbolizer(code_observer.code_map());
ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
- CcTest::i_isolate(), generator, &code_observer,
+ CcTest::i_isolate(), symbolizer, &code_observer, profiles,
v8::base::TimeDelta::FromMicroseconds(100), true);
- CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles, generator,
- processor);
+ CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles,
+ symbolizer, processor);
profiles->StartProfiling("");
CHECK(processor->Start());
ProfilerListener profiler_listener(isolate, processor);
@@ -1157,18 +1244,17 @@ static void TickLines(bool optimize) {
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfilerCodeObserver code_observer(isolate);
- ProfileGenerator* generator =
- new ProfileGenerator(profiles, code_observer.code_map());
+ Symbolizer* symbolizer = new Symbolizer(code_observer.code_map());
ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
- CcTest::i_isolate(), generator, &code_observer,
+ CcTest::i_isolate(), symbolizer, &code_observer, profiles,
v8::base::TimeDelta::FromMicroseconds(100), true);
- CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles, generator,
- processor);
+ CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles,
+ symbolizer, processor);
profiles->StartProfiling("");
// TODO(delphick): Stop using the CpuProfiler internals here: This forces
// LogCompiledFunctions so that source positions are collected everywhere.
// This would normally happen automatically with CpuProfiler::StartProfiling
- // but doesn't because it's constructed with a generator and a processor.
+ // but doesn't because it's constructed with a symbolizer and a processor.
isolate->logger()->LogCompiledFunctions();
CHECK(processor->Start());
ProfilerListener profiler_listener(isolate, processor);
@@ -1189,8 +1275,8 @@ static void TickLines(bool optimize) {
CpuProfile* profile = profiles->StopProfiling("");
CHECK(profile);
- // Check the state of profile generator.
- CodeEntry* func_entry = generator->code_map()->FindEntry(code_address);
+ // Check the state of the symbolizer.
+ CodeEntry* func_entry = symbolizer->code_map()->FindEntry(code_address);
CHECK(func_entry);
CHECK_EQ(0, strcmp(func_name, func_entry->name()));
const i::SourcePositionTable* line_info = func_entry->line_info();
@@ -2213,24 +2299,24 @@ TEST(FunctionDetailsInlining) {
}
static const char* pre_profiling_osr_script = R"(
+ const kMinIterationDurationMs = 1;
function whenPass(pass, optDuration) {
if (pass == 5) startProfiling();
}
function hot(optDuration, deoptDuration) {
- const startTime = Date.now();
%PrepareFunctionForOptimization(hot);
for (let pass = 0; pass <= optDuration + deoptDuration; pass++) {
+ const startTime = Date.now();
    // Let a few passes go by to ensure we have enough feedback info
if (pass == 3) %OptimizeOsr();
// Force deoptimization. %DeoptimizeNow and %DeoptimizeFunction don't
    // deoptimize OSRs.
if (pass == optDuration) whenPass = () => {};
whenPass(pass, optDuration);
- for (let i = 0; i < 1e5; i++) {
+ while (Date.now() - startTime < kMinIterationDurationMs) {
for (let j = 0; j < 1000; j++) {
x = Math.random() * j;
}
- if ((Date.now() - startTime) > pass) break;
}
}
}
@@ -2268,10 +2354,11 @@ TEST(StartProfilingAfterOsr) {
v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
ProfilerHelper helper(env);
+ helper.profiler()->SetSamplingInterval(100);
CompileRun(pre_profiling_osr_script);
v8::Local<v8::Function> function = GetFunction(env, "notHot");
- int32_t profiling_optimized_ms = 80;
+ int32_t profiling_optimized_ms = 120;
int32_t profiling_deoptimized_ms = 40;
v8::Local<v8::Value> args[] = {
v8::Integer::New(env->GetIsolate(), profiling_optimized_ms),
@@ -3279,15 +3366,63 @@ TEST(FastStopProfiling) {
CHECK_LT(duration, kWaitThreshold.InMillisecondsF());
}
+// Tests that once current_profiles->size() has reached the maximum allowable
+// number of concurrent profiles (100), we don't allow a new profile to be
+// started.
+TEST(MaxSimultaneousProfiles) {
+ LocalContext env;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+
+ v8::CpuProfiler* profiler = v8::CpuProfiler::New(env->GetIsolate());
+
+  // Spin up the first profiler. Verify that the status is kStarted.
+ CpuProfilingStatus firstStatus = profiler->StartProfiling(
+ v8_str("1us"), {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit, 1});
+
+ CHECK_EQ(firstStatus, CpuProfilingStatus::kStarted);
+
+  // Spin up a profiler with the same title. Verify that the status is
+  // kAlreadyStarted.
+ CpuProfilingStatus startedStatus = profiler->StartProfiling(
+ v8_str("1us"), {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit, 1});
+
+ CHECK_EQ(startedStatus, CpuProfilingStatus::kAlreadyStarted);
+
+  // Spin up 99 more profilers, maxing out CpuProfilesCollection.
+  // Check that they all return a status of kStarted.
+ for (int i = 2; i <= CpuProfilesCollection::kMaxSimultaneousProfiles; i++) {
+ CpuProfilingStatus status =
+ profiler->StartProfiling(v8_str((std::to_string(i) + "us").c_str()),
+ {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit, i});
+ CHECK_EQ(status, CpuProfilingStatus::kStarted);
+ }
+
+  // Spin up a 101st profiler. Verify that the status is
+  // kErrorTooManyProfilers.
+ CpuProfilingStatus errorStatus = profiler->StartProfiling(
+ v8_str("101us"), {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit, 2});
+
+ CHECK_EQ(errorStatus, CpuProfilingStatus::kErrorTooManyProfilers);
+
+  // Clean up; otherwise the test crashes.
+ for (int i = 1; i <= CpuProfilesCollection::kMaxSimultaneousProfiles + 1;
+ i++) {
+ profiler->StopProfiling(v8_str((std::to_string(i) + "us").c_str()));
+ }
+}
+
TEST(LowPrecisionSamplingStartStopInternal) {
i::Isolate* isolate = CcTest::i_isolate();
CpuProfilesCollection profiles(isolate);
ProfilerCodeObserver code_observer(isolate);
- ProfileGenerator generator(&profiles, code_observer.code_map());
+ Symbolizer symbolizer(code_observer.code_map());
std::unique_ptr<ProfilerEventsProcessor> processor(
- new SamplingEventsProcessor(isolate, &generator, &code_observer,
- v8::base::TimeDelta::FromMicroseconds(100),
- false));
+ new SamplingEventsProcessor(
+ isolate, &symbolizer, &code_observer, &profiles,
+ v8::base::TimeDelta::FromMicroseconds(100), false));
CHECK(processor->Start());
processor->StopSynchronously();
}
@@ -3409,14 +3544,13 @@ TEST(ProflilerSubsampling) {
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfilerCodeObserver code_observer(isolate);
- ProfileGenerator* generator =
- new ProfileGenerator(profiles, code_observer.code_map());
+ Symbolizer* symbolizer = new Symbolizer(code_observer.code_map());
ProfilerEventsProcessor* processor =
- new SamplingEventsProcessor(isolate, generator, &code_observer,
+ new SamplingEventsProcessor(isolate, symbolizer, &code_observer, profiles,
v8::base::TimeDelta::FromMicroseconds(1),
/* use_precise_sampling */ true);
- CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles, generator,
- processor);
+ CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles,
+ symbolizer, processor);
// Create a new CpuProfile that wants samples at 8us.
CpuProfile profile(&profiler, "",
@@ -3454,14 +3588,13 @@ TEST(DynamicResampling) {
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfilerCodeObserver code_observer(isolate);
- ProfileGenerator* generator =
- new ProfileGenerator(profiles, code_observer.code_map());
+ Symbolizer* symbolizer = new Symbolizer(code_observer.code_map());
ProfilerEventsProcessor* processor =
- new SamplingEventsProcessor(isolate, generator, &code_observer,
+ new SamplingEventsProcessor(isolate, symbolizer, &code_observer, profiles,
v8::base::TimeDelta::FromMicroseconds(1),
/* use_precise_sampling */ true);
- CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles, generator,
- processor);
+ CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles,
+ symbolizer, processor);
// Set a 1us base sampling rate, dividing all possible intervals.
profiler.set_sampling_interval(base::TimeDelta::FromMicroseconds(1));
@@ -3516,14 +3649,13 @@ TEST(DynamicResamplingWithBaseInterval) {
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
ProfilerCodeObserver code_observer(isolate);
- ProfileGenerator* generator =
- new ProfileGenerator(profiles, code_observer.code_map());
+ Symbolizer* symbolizer = new Symbolizer(code_observer.code_map());
ProfilerEventsProcessor* processor =
- new SamplingEventsProcessor(isolate, generator, &code_observer,
+ new SamplingEventsProcessor(isolate, symbolizer, &code_observer, profiles,
v8::base::TimeDelta::FromMicroseconds(1),
/* use_precise_sampling */ true);
- CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles, generator,
- processor);
+ CpuProfiler profiler(isolate, kDebugNaming, kLazyLogging, profiles,
+ symbolizer, processor);
profiler.set_sampling_interval(base::TimeDelta::FromMicroseconds(7));
diff --git a/deps/v8/test/cctest/test-descriptor-array.cc b/deps/v8/test/cctest/test-descriptor-array.cc
index 7db189d812..edb63e8db0 100644
--- a/deps/v8/test/cctest/test-descriptor-array.cc
+++ b/deps/v8/test/cctest/test-descriptor-array.cc
@@ -56,7 +56,7 @@ void CheckDescriptorArrayLookups(Isolate* isolate, Handle<Map> map,
// Test C++ implementation.
{
DisallowHeapAllocation no_gc;
- DescriptorArray descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map->instance_descriptors(kRelaxedLoad);
DCHECK(descriptors.IsSortedNoDuplicates());
int nof_descriptors = descriptors.number_of_descriptors();
@@ -91,8 +91,8 @@ void CheckTransitionArrayLookups(Isolate* isolate,
for (size_t i = 0; i < maps.size(); ++i) {
Map expected_map = *maps[i];
- Name name =
- expected_map.instance_descriptors().GetKey(expected_map.LastAdded());
+ Name name = expected_map.instance_descriptors(kRelaxedLoad)
+ .GetKey(expected_map.LastAdded());
Map map = transitions->SearchAndGetTargetForTesting(PropertyKind::kData,
name, NONE);
@@ -105,8 +105,8 @@ void CheckTransitionArrayLookups(Isolate* isolate,
if (!FLAG_jitless) {
for (size_t i = 0; i < maps.size(); ++i) {
Handle<Map> expected_map = maps[i];
- Handle<Name> name(expected_map->instance_descriptors().GetKey(
- expected_map->LastAdded()),
+ Handle<Name> name(expected_map->instance_descriptors(kRelaxedLoad)
+ .GetKey(expected_map->LastAdded()),
isolate);
Handle<Object> transition_map =
@@ -131,12 +131,12 @@ Handle<JSFunction> CreateCsaDescriptorArrayLookup(Isolate* isolate) {
compiler::CodeAssemblerTester asm_tester(
isolate, kNumParams + 1, // +1 to include receiver.
- CodeKind::STUB);
+ CodeKind::FOR_TESTING);
{
CodeStubAssembler m(asm_tester.state());
- TNode<Map> map = m.CAST(m.Parameter(1));
- TNode<Name> unique_name = m.CAST(m.Parameter(2));
+ auto map = m.Parameter<Map>(1);
+ auto unique_name = m.Parameter<Name>(2);
Label passed(&m), failed(&m);
Label if_found(&m), if_not_found(&m);
@@ -176,12 +176,12 @@ Handle<JSFunction> CreateCsaTransitionArrayLookup(Isolate* isolate) {
const int kNumParams = 2;
compiler::CodeAssemblerTester asm_tester(
isolate, kNumParams + 1, // +1 to include receiver.
- CodeKind::STUB);
+ CodeKind::FOR_TESTING);
{
CodeStubAssembler m(asm_tester.state());
- TNode<TransitionArray> transitions = m.CAST(m.Parameter(1));
- TNode<Name> unique_name = m.CAST(m.Parameter(2));
+ auto transitions = m.Parameter<TransitionArray>(1);
+ auto unique_name = m.Parameter<Name>(2);
Label passed(&m), failed(&m);
Label if_found(&m), if_not_found(&m);
@@ -260,7 +260,7 @@ TEST(DescriptorArrayHashCollisionMassive) {
CheckDescriptorArrayLookups(isolate, map, names, csa_lookup);
// Sort descriptor array and check it again.
- map->instance_descriptors().Sort();
+ map->instance_descriptors(kRelaxedLoad).Sort();
CheckDescriptorArrayLookups(isolate, map, names, csa_lookup);
}
@@ -309,7 +309,7 @@ TEST(DescriptorArrayHashCollision) {
CheckDescriptorArrayLookups(isolate, map, names, csa_lookup);
// Sort descriptor array and check it again.
- map->instance_descriptors().Sort();
+ map->instance_descriptors(kRelaxedLoad).Sort();
CheckDescriptorArrayLookups(isolate, map, names, csa_lookup);
}
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index 23cc4973d1..d20cd061d0 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -996,6 +996,14 @@ TEST(Neon) {
CpuFeatureScope scope(&assm, NEON);
COMPARE(vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(r1)),
"f421420f vld1.8 {d4, d5, d6, d7}, [r1]");
+ COMPARE(vld1s(Neon32, NeonListOperand(d4, 1), 0, NeonMemOperand(r1)),
+ "f4a1480f vld1.32 {d4[0]}, [r1]");
+ COMPARE(vld1s(Neon16, NeonListOperand(d4, 1), 3, NeonMemOperand(r1)),
+ "f4a144cf vld1.16 {d4[3]}, [r1]");
+ COMPARE(vld1r(Neon8, NeonListOperand(d4, 1), NeonMemOperand(r1)),
+ "f4a14c0f vld1.8 {d4}, [r1]");
+ COMPARE(vld1r(Neon16, NeonListOperand(d4, 2), NeonMemOperand(r1)),
+ "f4a14c6f vld1.16 {d4, d5}, [r1]");
COMPARE(vst1(Neon16, NeonListOperand(d17, 4), NeonMemOperand(r9)),
"f449124f vst1.16 {d17, d18, d19, d20}, [r9]");
COMPARE(vmovl(NeonU8, q3, d1), "f3886a11 vmovl.u8 q3, d1");
@@ -1055,6 +1063,8 @@ TEST(Neon) {
"f2812052 vmov.i32 q1, 18");
COMPARE(vmov(q0, 0xffffffffffffffff),
"f3870e5f vmov.i8 q0, 255");
+ COMPARE(vmov(d0, 0xffffffffffffffff),
+ "f3870e1f vmov.i8 q0, 255");
COMPARE(vmvn(q0, q15),
"f3b005ee vmvn q0, q15");
COMPARE(vmvn(q8, q9),
@@ -1275,6 +1285,10 @@ TEST(Neon) {
"f3b6f100 vuzp.16 d15, d0");
COMPARE(vuzp(Neon16, q15, q0),
"f3f6e140 vuzp.16 q15, q0");
+ COMPARE(vrev16(Neon8, q15, q0),
+ "f3f0e140 vrev16.8 q15, q0");
+ COMPARE(vrev32(Neon8, q15, q0),
+ "f3f0e0c0 vrev32.8 q15, q0");
COMPARE(vrev64(Neon8, q15, q0),
"f3f0e040 vrev64.8 q15, q0");
COMPARE(vtrn(Neon16, d15, d0),
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index fae6bf072f..1bce800d37 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -989,7 +989,7 @@ TEST(DisasmIa320) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
USE(code);
#ifdef OBJECT_PRINT
StdoutStream os;
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 0eb3d27556..6c4df1a630 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -400,6 +400,11 @@ TEST(DisasmX64) {
__ movdqa(Operand(rsp, 12), xmm0);
__ movdqu(xmm0, Operand(rsp, 12));
__ movdqu(Operand(rsp, 12), xmm0);
+ __ movdqu(xmm1, xmm0);
+ __ movlps(xmm8, Operand(rbx, rcx, times_4, 10000));
+ __ movlps(Operand(rbx, rcx, times_4, 10000), xmm9);
+ __ movhps(xmm8, Operand(rbx, rcx, times_4, 10000));
+ __ movhps(Operand(rbx, rcx, times_4, 10000), xmm9);
__ shufps(xmm0, xmm9, 0x0);
__ ucomiss(xmm0, xmm1);
@@ -585,6 +590,10 @@ TEST(DisasmX64) {
__ cvtps2dq(xmm5, Operand(rdx, 4));
__ cvtdq2ps(xmm5, xmm1);
__ cvtdq2ps(xmm5, Operand(rdx, 4));
+
+ __ pblendvb(xmm5, xmm1);
+ __ blendvps(xmm5, xmm1);
+ __ blendvps(xmm5, Operand(rdx, 4));
__ blendvpd(xmm5, xmm1);
__ blendvpd(xmm5, Operand(rdx, 4));
@@ -650,6 +659,11 @@ TEST(DisasmX64) {
__ vmovdqu(xmm9, Operand(rbx, rcx, times_4, 10000));
__ vmovdqu(Operand(rbx, rcx, times_4, 10000), xmm0);
+ __ vmovlps(xmm8, xmm9, Operand(rbx, rcx, times_4, 10000));
+ __ vmovlps(Operand(rbx, rcx, times_4, 10000), xmm9);
+ __ vmovhps(xmm8, xmm9, Operand(rbx, rcx, times_4, 10000));
+ __ vmovhps(Operand(rbx, rcx, times_4, 10000), xmm12);
+
__ vroundps(xmm9, xmm2, kRoundUp);
__ vroundpd(xmm9, xmm2, kRoundToNearest);
__ vroundss(xmm9, xmm1, xmm2, kRoundDown);
@@ -819,6 +833,8 @@ TEST(DisasmX64) {
__ vpalignr(xmm1, xmm2, xmm3, 4);
__ vpalignr(xmm1, xmm2, Operand(rbx, rcx, times_4, 10000), 4);
+ __ vpblendvb(xmm1, xmm2, xmm3, xmm4);
+ __ vblendvps(xmm1, xmm2, xmm3, xmm4);
__ vblendvpd(xmm1, xmm2, xmm3, xmm4);
__ vmovddup(xmm1, xmm2);
@@ -984,7 +1000,7 @@ TEST(DisasmX64) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
USE(code);
#ifdef OBJECT_PRINT
StdoutStream os;
diff --git a/deps/v8/test/cctest/test-factory.cc b/deps/v8/test/cctest/test-factory.cc
index ace7c48679..8f261ced4b 100644
--- a/deps/v8/test/cctest/test-factory.cc
+++ b/deps/v8/test/cctest/test-factory.cc
@@ -60,7 +60,8 @@ TEST(Factory_CodeBuilder) {
HandleScope scope(isolate);
// Create a big function that ends up in CODE_LO_SPACE.
- const int instruction_size = kMaxRegularHeapObjectSize + 1;
+ const int instruction_size =
+ MemoryChunkLayout::MaxRegularCodeObjectSize() + 1;
std::unique_ptr<byte[]> instructions(new byte[instruction_size]);
CodeDesc desc;
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index a15eeff133..a454d96dd1 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -89,7 +89,7 @@ TEST(VectorStructure) {
{
FeedbackVectorSpec spec(&zone);
spec.AddForInSlot();
- spec.AddFeedbackCellForCreateClosure();
+ spec.AddCreateClosureSlot();
spec.AddForInSlot();
vector = NewFeedbackVector(isolate, &spec);
FeedbackVectorHelper helper(vector);
@@ -131,7 +131,7 @@ TEST(VectorICMetadata) {
// Meanwhile set some feedback values and type feedback values to
// verify the data structure remains intact.
- vector->Set(FeedbackSlot(0), MaybeObject::FromObject(*vector));
+ vector->SynchronizedSet(FeedbackSlot(0), MaybeObject::FromObject(*vector));
// Verify the metadata is correctly set up from the spec.
for (int i = 0; i < 40; i++) {
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index 2f59d7bff8..0666a7f380 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -274,7 +274,7 @@ class Expectations {
CHECK_EQ(expected_nof, map.NumberOfOwnDescriptors());
CHECK(!map.is_dictionary_map());
- DescriptorArray descriptors = map.instance_descriptors();
+ DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
CHECK(expected_nof <= number_of_properties_);
for (InternalIndex i : InternalIndex::Range(expected_nof)) {
if (!Check(descriptors, i)) {
@@ -443,8 +443,8 @@ class Expectations {
Handle<Object> getter(pair->getter(), isolate);
Handle<Object> setter(pair->setter(), isolate);
- InternalIndex descriptor =
- map->instance_descriptors().SearchWithCache(isolate, *name, *map);
+ InternalIndex descriptor = map->instance_descriptors(kRelaxedLoad)
+ .SearchWithCache(isolate, *name, *map);
map = Map::TransitionToAccessorProperty(isolate, map, name, descriptor,
getter, setter, attributes);
CHECK(!map->is_deprecated());
@@ -551,8 +551,10 @@ TEST(ReconfigureAccessorToNonExistingDataFieldHeavy) {
CHECK_EQ(1, obj->map().NumberOfOwnDescriptors());
InternalIndex first(0);
- CHECK(
- obj->map().instance_descriptors().GetStrongValue(first).IsAccessorPair());
+ CHECK(obj->map()
+ .instance_descriptors(kRelaxedLoad)
+ .GetStrongValue(first)
+ .IsAccessorPair());
Handle<Object> value(Smi::FromInt(42), isolate);
JSObject::SetOwnPropertyIgnoreAttributes(obj, foo_str, value, NONE).Check();
@@ -585,7 +587,7 @@ Handle<Code> CreateDummyOptimizedCode(Isolate* isolate) {
desc.buffer = buffer;
desc.buffer_size = arraysize(buffer);
desc.instr_size = arraysize(buffer);
- return Factory::CodeBuilder(isolate, desc, CodeKind::OPTIMIZED_FUNCTION)
+ return Factory::CodeBuilder(isolate, desc, CodeKind::TURBOFAN)
.set_is_turbofanned()
.Build();
}
@@ -2878,10 +2880,13 @@ void TestStoreToConstantField(const char* store_func_source,
CHECK(!map->is_deprecated());
CHECK_EQ(1, map->NumberOfOwnDescriptors());
InternalIndex first(0);
- CHECK(map->instance_descriptors().GetDetails(first).representation().Equals(
- expected_rep));
- CHECK_EQ(PropertyConstness::kConst,
- map->instance_descriptors().GetDetails(first).constness());
+ CHECK(map->instance_descriptors(kRelaxedLoad)
+ .GetDetails(first)
+ .representation()
+ .Equals(expected_rep));
+ CHECK_EQ(
+ PropertyConstness::kConst,
+ map->instance_descriptors(kRelaxedLoad).GetDetails(first).constness());
// Store value2 to obj2 and check that it got same map and property details
// did not change.
@@ -2893,10 +2898,13 @@ void TestStoreToConstantField(const char* store_func_source,
CHECK(!map->is_deprecated());
CHECK_EQ(1, map->NumberOfOwnDescriptors());
- CHECK(map->instance_descriptors().GetDetails(first).representation().Equals(
- expected_rep));
- CHECK_EQ(PropertyConstness::kConst,
- map->instance_descriptors().GetDetails(first).constness());
+ CHECK(map->instance_descriptors(kRelaxedLoad)
+ .GetDetails(first)
+ .representation()
+ .Equals(expected_rep));
+ CHECK_EQ(
+ PropertyConstness::kConst,
+ map->instance_descriptors(kRelaxedLoad).GetDetails(first).constness());
// Store value2 to obj1 and check that property became mutable.
Call(isolate, store_func, obj1, value2).Check();
@@ -2906,10 +2914,13 @@ void TestStoreToConstantField(const char* store_func_source,
CHECK(!map->is_deprecated());
CHECK_EQ(1, map->NumberOfOwnDescriptors());
- CHECK(map->instance_descriptors().GetDetails(first).representation().Equals(
- expected_rep));
- CHECK_EQ(expected_constness,
- map->instance_descriptors().GetDetails(first).constness());
+ CHECK(map->instance_descriptors(kRelaxedLoad)
+ .GetDetails(first)
+ .representation()
+ .Equals(expected_rep));
+ CHECK_EQ(
+ expected_constness,
+ map->instance_descriptors(kRelaxedLoad).GetDetails(first).constness());
}
void TestStoreToConstantField_PlusMinusZero(const char* store_func_source,
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index 558df39202..acb5de444f 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -158,13 +158,13 @@ void TracedGlobalTest(v8::Isolate* isolate,
NonRootingEmbedderHeapTracer tracer;
heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
- TracedGlobalWrapper fp;
- construct_function(isolate, context, &fp);
- CHECK(heap::InCorrectGeneration(isolate, fp.handle));
- modifier_function(&fp);
+ auto fp = std::make_unique<TracedGlobalWrapper>();
+ construct_function(isolate, context, fp.get());
+ CHECK(heap::InCorrectGeneration(isolate, fp->handle));
+ modifier_function(fp.get());
gc_function();
- CHECK_IMPLIES(survives == SurvivalMode::kSurvives, !fp.handle.IsEmpty());
- CHECK_IMPLIES(survives == SurvivalMode::kDies, fp.handle.IsEmpty());
+ CHECK_IMPLIES(survives == SurvivalMode::kSurvives, !fp->handle.IsEmpty());
+ CHECK_IMPLIES(survives == SurvivalMode::kDies, fp->handle.IsEmpty());
}
void ResurrectingFinalizer(
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index f6197e9287..233e4962a4 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -2600,8 +2600,8 @@ TEST(ManyLocalsInSharedContext) {
}
}
-
TEST(AllocationSitesAreVisible) {
+ i::FLAG_lazy_feedback_allocation = false;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
@@ -2623,12 +2623,12 @@ TEST(AllocationSitesAreVisible) {
CHECK(feedback_cell);
const v8::HeapGraphNode* vector = GetProperty(
env->GetIsolate(), feedback_cell, v8::HeapGraphEdge::kInternal, "value");
- CHECK_EQ(v8::HeapGraphNode::kArray, vector->GetType());
- CHECK_EQ(3, vector->GetChildrenCount());
+ CHECK_EQ(v8::HeapGraphNode::kHidden, vector->GetType());
+ CHECK_EQ(4, vector->GetChildrenCount());
- // The first value in the feedback vector should be the boilerplate,
- // after an AllocationSite.
- const v8::HeapGraphEdge* prop = vector->GetChild(2);
+ // The last value in the feedback vector should be the boilerplate,
+ // found in AllocationSite.transition_info.
+ const v8::HeapGraphEdge* prop = vector->GetChild(3);
const v8::HeapGraphNode* allocation_site = prop->GetToNode();
v8::String::Utf8Value name(env->GetIsolate(), allocation_site->GetName());
CHECK_EQ(0, strcmp("system / AllocationSite", *name));
@@ -4068,10 +4068,15 @@ TEST(WeakReference) {
i::CodeDesc desc;
assm.GetCode(i_isolate, &desc);
i::Handle<i::Code> code =
- i::Factory::CodeBuilder(i_isolate, desc, i::CodeKind::STUB).Build();
+ i::Factory::CodeBuilder(i_isolate, desc, i::CodeKind::FOR_TESTING)
+ .Build();
CHECK(code->IsCode());
- fv->set_optimized_code_weak_or_smi(i::HeapObjectReference::Weak(*code));
+ fv->set_maybe_optimized_code(i::HeapObjectReference::Weak(*code));
+ fv->set_flags(i::FeedbackVector::OptimizationTierBits::encode(
+ i::OptimizationTier::kTopTier) |
+ i::FeedbackVector::OptimizationMarkerBits::encode(
+ i::OptimizationMarker::kNone));
v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
diff --git a/deps/v8/test/cctest/test-identity-map.cc b/deps/v8/test/cctest/test-identity-map.cc
index 23cc8a71bf..7207c86452 100644
--- a/deps/v8/test/cctest/test-identity-map.cc
+++ b/deps/v8/test/cctest/test-identity-map.cc
@@ -26,45 +26,48 @@ class IdentityMapTester : public HandleAndZoneScope {
Heap* heap() { return isolate()->heap(); }
Isolate* isolate() { return main_isolate(); }
- void TestGetFind(Handle<Object> key1, void* val1, Handle<Object> key2,
- void* val2) {
+ void TestInsertFind(Handle<Object> key1, void* val1, Handle<Object> key2,
+ void* val2) {
CHECK_NULL(map.Find(key1));
CHECK_NULL(map.Find(key2));
// Set {key1} the first time.
- void** entry = map.Get(key1);
- CHECK_NOT_NULL(entry);
- *entry = val1;
+ auto find_result = map.FindOrInsert(key1);
+ CHECK_NOT_NULL(find_result.entry);
+ CHECK(!find_result.already_exists);
+ *find_result.entry = val1;
for (int i = 0; i < 3; i++) { // Get and find {key1} K times.
{
- void** nentry = map.Get(key1);
- CHECK_EQ(entry, nentry);
- CHECK_EQ(val1, *nentry);
+ auto new_find_result = map.FindOrInsert(key1);
+ CHECK(new_find_result.already_exists);
+ CHECK_EQ(find_result.entry, new_find_result.entry);
+ CHECK_EQ(val1, *new_find_result.entry);
CHECK_NULL(map.Find(key2));
}
{
void** nentry = map.Find(key1);
- CHECK_EQ(entry, nentry);
+ CHECK_EQ(find_result.entry, nentry);
CHECK_EQ(val1, *nentry);
CHECK_NULL(map.Find(key2));
}
}
// Set {key2} the first time.
- void** entry2 = map.Get(key2);
- CHECK_NOT_NULL(entry2);
- *entry2 = val2;
+ auto find_result2 = map.FindOrInsert(key2);
+ CHECK_NOT_NULL(find_result2.entry);
+ CHECK(!find_result2.already_exists);
+ *find_result2.entry = val2;
for (int i = 0; i < 3; i++) { // Get and find {key1} and {key2} K times.
{
- void** nentry = map.Get(key2);
- CHECK_EQ(entry2, nentry);
- CHECK_EQ(val2, *nentry);
+ auto new_find_result = map.FindOrInsert(key2);
+ CHECK_EQ(find_result2.entry, new_find_result.entry);
+ CHECK_EQ(val2, *new_find_result.entry);
}
{
void** nentry = map.Find(key2);
- CHECK_EQ(entry2, nentry);
+ CHECK_EQ(find_result2.entry, nentry);
CHECK_EQ(val2, *nentry);
}
{
@@ -80,12 +83,14 @@ class IdentityMapTester : public HandleAndZoneScope {
CHECK_NULL(map.Find(key2));
// Set {key1} and {key2} for the first time.
- void** entry1 = map.Get(key1);
- CHECK_NOT_NULL(entry1);
- *entry1 = val1;
- void** entry2 = map.Get(key2);
- CHECK_NOT_NULL(entry2);
- *entry2 = val2;
+ auto find_result1 = map.FindOrInsert(key1);
+ CHECK(!find_result1.already_exists);
+ CHECK_NOT_NULL(find_result1.entry);
+ *find_result1.entry = val1;
+ auto find_result2 = map.FindOrInsert(key2);
+ CHECK(!find_result1.already_exists);
+ CHECK_NOT_NULL(find_result2.entry);
+ *find_result2.entry = val2;
for (int i = 0; i < 3; i++) { // Find {key1} and {key2} 3 times.
{
@@ -157,10 +162,11 @@ class IdentityMapTester : public HandleAndZoneScope {
CHECK_EQ(value, *entry);
}
- void CheckGet(Handle<Object> key, void* value) {
- void** entry = map.Get(key);
- CHECK_NOT_NULL(entry);
- CHECK_EQ(value, *entry);
+ void CheckFindOrInsert(Handle<Object> key, void* value) {
+ auto find_result = map.FindOrInsert(key);
+ CHECK(find_result.already_exists);
+ CHECK_NOT_NULL(find_result.entry);
+ CHECK_EQ(value, *find_result.entry);
}
void CheckDelete(Handle<Object> key, void* value) {
@@ -219,17 +225,17 @@ TEST(Delete_num_not_found) {
TEST(GetFind_smi_0) {
IdentityMapTester t;
- t.TestGetFind(t.smi(0), t.isolate(), t.smi(1), t.heap());
+ t.TestInsertFind(t.smi(0), t.isolate(), t.smi(1), t.heap());
}
TEST(GetFind_smi_13) {
IdentityMapTester t;
- t.TestGetFind(t.smi(13), t.isolate(), t.smi(17), t.heap());
+ t.TestInsertFind(t.smi(13), t.isolate(), t.smi(17), t.heap());
}
TEST(GetFind_num_13) {
IdentityMapTester t;
- t.TestGetFind(t.num(13.1), t.isolate(), t.num(17.1), t.heap());
+ t.TestInsertFind(t.num(13.1), t.isolate(), t.num(17.1), t.heap());
}
TEST(Delete_smi_13) {
@@ -250,7 +256,7 @@ TEST(GetFind_smi_17m) {
IdentityMapTester t;
for (int i = 1; i < 100; i += kInterval) {
- t.map.Set(t.smi(i), reinterpret_cast<void*>(i + kShift));
+ t.map.Insert(t.smi(i), reinterpret_cast<void*>(i + kShift));
}
for (int i = 1; i < 100; i += kInterval) {
@@ -258,7 +264,7 @@ TEST(GetFind_smi_17m) {
}
for (int i = 1; i < 100; i += kInterval) {
- t.CheckGet(t.smi(i), reinterpret_cast<void*>(i + kShift));
+ t.CheckFindOrInsert(t.smi(i), reinterpret_cast<void*>(i + kShift));
}
for (int i = 1; i < 100; i++) {
@@ -278,7 +284,7 @@ TEST(Delete_smi_17m) {
IdentityMapTester t;
for (int i = 1; i < 100; i += kInterval) {
- t.map.Set(t.smi(i), reinterpret_cast<void*>(i + kShift));
+ t.map.Insert(t.smi(i), reinterpret_cast<void*>(i + kShift));
}
for (int i = 1; i < 100; i += kInterval) {
@@ -288,7 +294,7 @@ TEST(Delete_smi_17m) {
for (int i = 1; i < 100; i += kInterval) {
t.CheckDelete(t.smi(i), reinterpret_cast<void*>(i + kShift));
for (int j = 1; j < 100; j += kInterval) {
- void** entry = t.map.Find(t.smi(j));
+ auto entry = t.map.Find(t.smi(j));
if (j <= i) {
CHECK_NULL(entry);
} else {
@@ -306,7 +312,7 @@ TEST(GetFind_num_1000) {
int val2;
for (int i = 0; i < 1000; i++) {
- t.TestGetFind(t.smi(i * kPrime), &val1, t.smi(i * kPrime + 1), &val2);
+ t.TestInsertFind(t.smi(i * kPrime), &val1, t.smi(i * kPrime + 1), &val2);
}
}
@@ -315,7 +321,7 @@ TEST(Delete_num_1000) {
IdentityMapTester t;
for (int i = 0; i < 1000; i++) {
- t.map.Set(t.smi(i * kPrime), reinterpret_cast<void*>(i * kPrime));
+ t.map.Insert(t.smi(i * kPrime), reinterpret_cast<void*>(i * kPrime));
}
// Delete every second value in reverse.
@@ -326,7 +332,7 @@ TEST(Delete_num_1000) {
}
for (int i = 0; i < 1000; i++) {
- void** entry = t.map.Find(t.smi(i * kPrime));
+ auto entry = t.map.Find(t.smi(i * kPrime));
if (i % 2) {
CHECK_NULL(entry);
} else {
@@ -343,7 +349,7 @@ TEST(Delete_num_1000) {
}
for (int i = 0; i < 1000; i++) {
- void** entry = t.map.Find(t.smi(i * kPrime));
+ auto entry = t.map.Find(t.smi(i * kPrime));
CHECK_NULL(entry);
}
}
@@ -353,10 +359,10 @@ TEST(GetFind_smi_gc) {
const int kShift = 1211;
IdentityMapTester t;
- t.map.Set(t.smi(kKey), &t);
+ t.map.Insert(t.smi(kKey), &t);
t.SimulateGCByIncrementingSmisBy(kShift);
t.CheckFind(t.smi(kKey + kShift), &t);
- t.CheckGet(t.smi(kKey + kShift), &t);
+ t.CheckFindOrInsert(t.smi(kKey + kShift), &t);
}
TEST(Delete_smi_gc) {
@@ -364,7 +370,7 @@ TEST(Delete_smi_gc) {
const int kShift = 1211;
IdentityMapTester t;
- t.map.Set(t.smi(kKey), &t);
+ t.map.Insert(t.smi(kKey), &t);
t.SimulateGCByIncrementingSmisBy(kShift);
t.CheckDelete(t.smi(kKey + kShift), &t);
}
@@ -375,13 +381,13 @@ TEST(GetFind_smi_gc2) {
const int kShift = 1211;
IdentityMapTester t;
- t.map.Set(t.smi(kKey1), &kKey1);
- t.map.Set(t.smi(kKey2), &kKey2);
+ t.map.Insert(t.smi(kKey1), &kKey1);
+ t.map.Insert(t.smi(kKey2), &kKey2);
t.SimulateGCByIncrementingSmisBy(kShift);
t.CheckFind(t.smi(kKey1 + kShift), &kKey1);
- t.CheckGet(t.smi(kKey1 + kShift), &kKey1);
+ t.CheckFindOrInsert(t.smi(kKey1 + kShift), &kKey1);
t.CheckFind(t.smi(kKey2 + kShift), &kKey2);
- t.CheckGet(t.smi(kKey2 + kShift), &kKey2);
+ t.CheckFindOrInsert(t.smi(kKey2 + kShift), &kKey2);
}
TEST(Delete_smi_gc2) {
@@ -390,8 +396,8 @@ TEST(Delete_smi_gc2) {
const int kShift = 1211;
IdentityMapTester t;
- t.map.Set(t.smi(kKey1), &kKey1);
- t.map.Set(t.smi(kKey2), &kKey2);
+ t.map.Insert(t.smi(kKey1), &kKey1);
+ t.map.Insert(t.smi(kKey2), &kKey2);
t.SimulateGCByIncrementingSmisBy(kShift);
t.CheckDelete(t.smi(kKey1 + kShift), &kKey1);
t.CheckDelete(t.smi(kKey2 + kShift), &kKey2);
@@ -404,7 +410,8 @@ TEST(GetFind_smi_gc_n) {
1 + 32, 2 + 32, 7 + 32, 8 + 32, 15 + 32, 23 + 32};
// Initialize the map first.
for (size_t i = 0; i < arraysize(keys); i += 2) {
- t.TestGetFind(t.smi(keys[i]), &keys[i], t.smi(keys[i + 1]), &keys[i + 1]);
+ t.TestInsertFind(t.smi(keys[i]), &keys[i], t.smi(keys[i + 1]),
+ &keys[i + 1]);
}
// Check the above initialization.
for (size_t i = 0; i < arraysize(keys); i++) {
@@ -418,7 +425,7 @@ TEST(GetFind_smi_gc_n) {
}
// Check that searching for the incremented smis gets the same values.
for (size_t i = 0; i < arraysize(keys); i++) {
- t.CheckGet(t.smi(keys[i] + kShift), &keys[i]);
+ t.CheckFindOrInsert(t.smi(keys[i] + kShift), &keys[i]);
}
}
@@ -429,7 +436,7 @@ TEST(Delete_smi_gc_n) {
1 + 32, 2 + 32, 7 + 32, 8 + 32, 15 + 32, 23 + 32};
// Initialize the map first.
for (size_t i = 0; i < arraysize(keys); i++) {
- t.map.Set(t.smi(keys[i]), &keys[i]);
+ t.map.Insert(t.smi(keys[i]), &keys[i]);
}
// Simulate a GC by "moving" the smis in the internal keys array.
t.SimulateGCByIncrementingSmisBy(kShift);
@@ -448,10 +455,10 @@ TEST(GetFind_smi_num_gc_n) {
t.num(9.9), t.num(10.1)};
// Initialize the map first.
for (size_t i = 0; i < arraysize(smi_keys); i++) {
- t.map.Set(t.smi(smi_keys[i]), &smi_keys[i]);
+ t.map.Insert(t.smi(smi_keys[i]), &smi_keys[i]);
}
for (size_t i = 0; i < arraysize(num_keys); i++) {
- t.map.Set(num_keys[i], &num_keys[i]);
+ t.map.Insert(num_keys[i], &num_keys[i]);
}
// Check the above initialization.
for (size_t i = 0; i < arraysize(smi_keys); i++) {
@@ -468,13 +475,13 @@ TEST(GetFind_smi_num_gc_n) {
// Check that searching for the incremented smis finds the same values.
for (size_t i = 0; i < arraysize(smi_keys); i++) {
t.CheckFind(t.smi(smi_keys[i] + kShift), &smi_keys[i]);
- t.CheckGet(t.smi(smi_keys[i] + kShift), &smi_keys[i]);
+ t.CheckFindOrInsert(t.smi(smi_keys[i] + kShift), &smi_keys[i]);
}
// Check that searching for the numbers finds the same values.
for (size_t i = 0; i < arraysize(num_keys); i++) {
t.CheckFind(num_keys[i], &num_keys[i]);
- t.CheckGet(num_keys[i], &num_keys[i]);
+ t.CheckFindOrInsert(num_keys[i], &num_keys[i]);
}
}
@@ -487,10 +494,10 @@ TEST(Delete_smi_num_gc_n) {
t.num(9.9), t.num(10.1)};
// Initialize the map first.
for (size_t i = 0; i < arraysize(smi_keys); i++) {
- t.map.Set(t.smi(smi_keys[i]), &smi_keys[i]);
+ t.map.Insert(t.smi(smi_keys[i]), &smi_keys[i]);
}
for (size_t i = 0; i < arraysize(num_keys); i++) {
- t.map.Set(num_keys[i], &num_keys[i]);
+ t.map.Insert(num_keys[i], &num_keys[i]);
}
// Simulate a GC by moving SMIs.
@@ -514,14 +521,14 @@ TEST(Delete_smi_resizes) {
IdentityMapTester t;
// Insert one element to initialize map.
- t.map.Set(t.smi(0), reinterpret_cast<void*>(kValueOffset));
+ t.map.Insert(t.smi(0), reinterpret_cast<void*>(kValueOffset));
int initial_capacity = t.map.capacity();
CHECK_LT(initial_capacity, kKeyCount);
// Insert another kKeyCount - 1 keys.
for (int i = 1; i < kKeyCount; i++) {
- t.map.Set(t.smi(i), reinterpret_cast<void*>(i + kValueOffset));
+ t.map.Insert(t.smi(i), reinterpret_cast<void*>(i + kValueOffset));
}
// Check capacity increased.
@@ -545,10 +552,10 @@ TEST(Iterator_smi_num) {
t.num(9.9), t.num(10.1)};
// Initialize the map.
for (size_t i = 0; i < arraysize(smi_keys); i++) {
- t.map.Set(t.smi(smi_keys[i]), reinterpret_cast<void*>(i));
+ t.map.Insert(t.smi(smi_keys[i]), reinterpret_cast<void*>(i));
}
for (size_t i = 0; i < arraysize(num_keys); i++) {
- t.map.Set(num_keys[i], reinterpret_cast<void*>(i + 5));
+ t.map.Insert(num_keys[i], reinterpret_cast<void*>(i + 5));
}
// Check iterator sees all values once.
@@ -574,10 +581,10 @@ TEST(Iterator_smi_num_gc) {
t.num(9.9), t.num(10.1)};
// Initialize the map.
for (size_t i = 0; i < arraysize(smi_keys); i++) {
- t.map.Set(t.smi(smi_keys[i]), reinterpret_cast<void*>(i));
+ t.map.Insert(t.smi(smi_keys[i]), reinterpret_cast<void*>(i));
}
for (size_t i = 0; i < arraysize(num_keys); i++) {
- t.map.Set(num_keys[i], reinterpret_cast<void*>(i + 5));
+ t.map.Insert(num_keys[i], reinterpret_cast<void*>(i + 5));
}
// Simulate GC by moving the SMIs.
@@ -605,7 +612,7 @@ void IterateCollisionTest(int stride) {
HandleScope scope(t.isolate());
int next = 1;
for (int i = 0; i < load; i++) {
- t.map.Set(t.smi(next), reinterpret_cast<void*>(next));
+ t.map.Insert(t.smi(next), reinterpret_cast<void*>(next));
t.CheckFind(t.smi(next), reinterpret_cast<void*>(next));
next = next + stride;
}
@@ -627,7 +634,7 @@ void IterateCollisionTest(int stride) {
for (int i = 0; i < load; i++) {
CHECK(seen.find(next) != seen.end());
t.CheckFind(t.smi(next), reinterpret_cast<void*>(next));
- t.CheckGet(t.smi(next), reinterpret_cast<void*>(next));
+ t.CheckFindOrInsert(t.smi(next), reinterpret_cast<void*>(next));
next = next + stride;
}
}
@@ -648,7 +655,7 @@ void CollisionTest(int stride, bool rehash = false, bool resize = false) {
HandleScope scope(t.isolate());
int next = 1;
for (int i = 0; i < load; i++) {
- t.map.Set(t.smi(next), reinterpret_cast<void*>(next));
+ t.map.Insert(t.smi(next), reinterpret_cast<void*>(next));
t.CheckFind(t.smi(next), reinterpret_cast<void*>(next));
next = next + stride;
}
@@ -660,7 +667,7 @@ void CollisionTest(int stride, bool rehash = false, bool resize = false) {
int next = 1;
for (int i = 0; i < load; i++) {
t.CheckFind(t.smi(next), reinterpret_cast<void*>(next));
- t.CheckGet(t.smi(next), reinterpret_cast<void*>(next));
+ t.CheckFindOrInsert(t.smi(next), reinterpret_cast<void*>(next));
next = next + stride;
}
}
@@ -683,7 +690,7 @@ TEST(ExplicitGC) {
// Insert some objects that should be in new space.
for (size_t i = 0; i < arraysize(num_keys); i++) {
- t.map.Set(num_keys[i], &num_keys[i]);
+ t.map.Insert(num_keys[i], &num_keys[i]);
}
// Do an explicit, real GC.
@@ -692,7 +699,7 @@ TEST(ExplicitGC) {
// Check that searching for the numbers finds the same values.
for (size_t i = 0; i < arraysize(num_keys); i++) {
t.CheckFind(num_keys[i], &num_keys[i]);
- t.CheckGet(num_keys[i], &num_keys[i]);
+ t.CheckFindOrInsert(num_keys[i], &num_keys[i]);
}
}
@@ -790,7 +797,7 @@ TEST(GCShortCutting) {
// greater to capacity_ if not corrected by IdentityMap
// (see crbug.com/704132).
for (int j = 0; j < i; j++) {
- t.map.Set(t.smi(j), reinterpret_cast<void*>(kDummyValue));
+ t.map.Insert(t.smi(j), reinterpret_cast<void*>(kDummyValue));
}
Handle<String> thin_string =
@@ -801,8 +808,8 @@ TEST(GCShortCutting) {
DCHECK_NE(*thin_string, *internalized_string);
// Insert both keys into the map.
- t.map.Set(thin_string, &thin_string);
- t.map.Set(internalized_string, &internalized_string);
+ t.map.Insert(thin_string, &thin_string);
+ t.map.Insert(internalized_string, &internalized_string);
// Do an explicit, real GC, this should short-cut the thin string to point
// to the internalized string.
@@ -812,16 +819,16 @@ TEST(GCShortCutting) {
*thin_string == *internalized_string);
// Check that getting the object points to one of the handles.
- void** thin_string_entry = t.map.Get(thin_string);
+ void** thin_string_entry = t.map.Find(thin_string);
CHECK(*thin_string_entry == &thin_string ||
*thin_string_entry == &internalized_string);
- void** internalized_string_entry = t.map.Get(internalized_string);
+ void** internalized_string_entry = t.map.Find(internalized_string);
CHECK(*internalized_string_entry == &thin_string ||
*internalized_string_entry == &internalized_string);
// Trigger resize.
for (int j = 0; j < 16; j++) {
- t.map.Set(t.smi(j + 16), reinterpret_cast<void*>(kDummyValue));
+ t.map.Insert(t.smi(j + 16), reinterpret_cast<void*>(kDummyValue));
}
t.map.Clear();
}
diff --git a/deps/v8/test/cctest/test-local-handles.cc b/deps/v8/test/cctest/test-local-handles.cc
index 8d1d08fa93..bcc2ce7d51 100644
--- a/deps/v8/test/cctest/test-local-handles.cc
+++ b/deps/v8/test/cctest/test-local-handles.cc
@@ -35,7 +35,8 @@ class LocalHandlesThread final : public v8::base::Thread {
sema_gc_finished_(sema_gc_finished) {}
void Run() override {
- LocalHeap local_heap(heap_);
+ LocalHeap local_heap(heap_, ThreadKind::kBackground);
+ UnparkedScope unparked_scope(&local_heap);
LocalHandleScope scope(&local_heap);
static constexpr int kNumHandles =
@@ -102,7 +103,8 @@ TEST(CreateLocalHandlesWithoutLocalHandleScope) {
Isolate* isolate = CcTest::i_isolate();
{
- LocalHeap local_heap(isolate->heap());
+ LocalHeap local_heap(isolate->heap(), ThreadKind::kMain);
+ UnparkedScope scope(&local_heap);
handle(Smi::FromInt(17), &local_heap);
}
}
@@ -122,7 +124,8 @@ TEST(DereferenceLocalHandle) {
ph = phs->NewHandle(number);
}
{
- LocalHeap local_heap(isolate->heap(), std::move(phs));
+ LocalHeap local_heap(isolate->heap(), ThreadKind::kMain, std::move(phs));
+ UnparkedScope unparked_scope(&local_heap);
LocalHandleScope scope(&local_heap);
Handle<HeapNumber> local_number = handle(*ph, &local_heap);
CHECK_EQ(42, local_number->value());
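LocalHeap's constructor now takes the ThreadKind it serves, and the updated tests wrap all heap access in an UnparkedScope, which suggests a LocalHeap starts out parked in this version. A minimal sketch of the background-thread setup used above:
  LocalHeap local_heap(isolate->heap(), ThreadKind::kBackground);
  UnparkedScope unparked_scope(&local_heap);  // unpark before creating handles
  LocalHandleScope scope(&local_heap);
  Handle<Smi> value = handle(Smi::FromInt(17), &local_heap);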
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index 4ed00e0a11..c31dfd164c 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -363,6 +363,8 @@ class JoinableThread {
}
virtual ~JoinableThread() = default;
+ JoinableThread(const JoinableThread&) = delete;
+ JoinableThread& operator=(const JoinableThread&) = delete;
void Start() { CHECK(thread_.Start()); }
@@ -394,8 +396,6 @@ class JoinableThread {
ThreadWithSemaphore thread_;
friend class ThreadWithSemaphore;
-
- DISALLOW_COPY_AND_ASSIGN(JoinableThread);
};
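This change (repeated in test-log.cc below) is part of retiring the DISALLOW_COPY_AND_ASSIGN macro in favour of explicitly deleted special members, the idiomatic modern-C++ spelling sketched here on a generic class:
  class NonCopyable {
   public:
    NonCopyable() = default;
    // Replaces DISALLOW_COPY_AND_ASSIGN(NonCopyable): copying now fails at
    // the call site with a clear diagnostic instead of via an inaccessible
    // private declaration.
    NonCopyable(const NonCopyable&) = delete;
    NonCopyable& operator=(const NonCopyable&) = delete;
  };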
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index d39b6d7331..5909ffaf96 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -83,6 +83,9 @@ class ScopedLoggerInitializer {
if (log_file != nullptr) fclose(log_file);
}
+ ScopedLoggerInitializer(const ScopedLoggerInitializer&) = delete;
+ ScopedLoggerInitializer& operator=(const ScopedLoggerInitializer&) = delete;
+
v8::Local<v8::Context>& env() { return env_; }
v8::Isolate* isolate() { return isolate_; }
@@ -212,8 +215,6 @@ class ScopedLoggerInitializer {
std::string raw_log_;
std::vector<std::string> log_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedLoggerInitializer);
};
class TestCodeEventHandler : public v8::CodeEventHandler {
@@ -1182,12 +1183,12 @@ UNINITIALIZED_TEST(BuiltinsNotLoggedAsLazyCompile) {
i::SNPrintF(buffer, ",0x%" V8PRIxPTR ",%d,BooleanConstructor",
builtin->InstructionStart(), builtin->InstructionSize());
CHECK(logger.ContainsLine(
- {"code-creation,Builtin,3,", std::string(buffer.begin())}));
+ {"code-creation,Builtin,2,", std::string(buffer.begin())}));
i::SNPrintF(buffer, ",0x%" V8PRIxPTR ",%d,", builtin->InstructionStart(),
builtin->InstructionSize());
CHECK(!logger.ContainsLine(
- {"code-creation,LazyCompile,3,", std::string(buffer.begin())}));
+ {"code-creation,LazyCompile,2,", std::string(buffer.begin())}));
}
isolate->Dispose();
}
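The expected tag in these log lines drops from 3 to 2 because the logger writes each code object's numeric CodeKind into the code-creation event, and this update removes CodeKind::STUB (replaced by CodeKind::FOR_TESTING in the macro-assembler tests below), shifting BUILTIN down by one. A sketch under that assumption; the exact enumerator list is not part of this patch:
  // Assumed CodeKind layout around the removed entry:
  //   before: ..., STUB = 2, BUILTIN = 3, ...
  //   after:  ...,           BUILTIN = 2, ...
  // The event then reads "code-creation,Builtin,2,<timestamp>,<start>,<size>,<name>".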
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index 83f7a38a73..be7345d3e2 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -29,6 +29,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/simulator.h"
#include "src/init/v8.h"
#include "src/objects/objects-inl.h"
@@ -147,7 +148,7 @@ TEST(ExtractLane) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -278,7 +279,7 @@ TEST(ReplaceLane) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -309,6 +310,29 @@ TEST(ReplaceLane) {
}
}
+TEST(DeoptExitSizeIsFixed) {
+ CHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
+
+ STATIC_ASSERT(static_cast<int>(kFirstDeoptimizeKind) == 0);
+ for (int i = 0; i < kDeoptimizeKindCount; i++) {
+ DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
+ Builtins::Name target = Deoptimizer::GetDeoptimizationEntry(isolate, kind);
+ Label before_exit;
+ masm.bind(&before_exit);
+ masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit);
+ CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
+ kind == DeoptimizeKind::kLazy
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
+ }
+}
+
#undef __
} // namespace test_macro_assembler_arm
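The new DeoptExitSizeIsFixed test, added here for arm and below for arm64 and x64, pins down the invariant behind Deoptimizer::kSupportsFixedDeoptExitSizes: every exit emitted by CallForDeoptimization must occupy exactly kLazyDeoptExitSize or kNonLazyDeoptExitSize bytes. A hypothetical sketch of why fixed sizes matter (names here are illustrative, not from the patch): with exits laid out back to back, an exit's index can be recovered from a return address by plain arithmetic:
  // Hypothetical helper, not part of V8: maps a return address back to the
  // deopt exit it belongs to, assuming uniformly sized, contiguous exits.
  int DeoptExitIndex(Address ret, Address first_exit, int exit_size) {
    return static_cast<int>(ret - first_exit) / exit_size;
  }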
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm64.cc b/deps/v8/test/cctest/test-macro-assembler-arm64.cc
index f693230b9b..0c231ae0a0 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm64.cc
@@ -27,13 +27,13 @@
#include <stdlib.h>
-#include "src/init/v8.h"
-
#include "src/base/platform/platform.h"
#include "src/codegen/arm64/assembler-arm64-inl.h"
-#include "src/codegen/macro-assembler.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/simulator.h"
#include "src/heap/factory.h"
+#include "src/init/v8.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/utils/ostreams.h"
@@ -66,7 +66,7 @@ TEST(EmbeddedObj) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -91,6 +91,35 @@ TEST(EmbeddedObj) {
#endif // V8_COMPRESS_POINTERS
}
+TEST(DeoptExitSizeIsFixed) {
+ CHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
+
+ STATIC_ASSERT(static_cast<int>(kFirstDeoptimizeKind) == 0);
+ for (int i = 0; i < kDeoptimizeKindCount; i++) {
+ DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
+ Builtins::Name target = Deoptimizer::GetDeoptimizationEntry(isolate, kind);
+ Label before_exit;
+ // Mirroring logic in code-generator.cc.
+ if (kind == DeoptimizeKind::kLazy) {
+ // CFI emits an extra instruction here.
+ masm.BindExceptionHandler(&before_exit);
+ } else {
+ masm.bind(&before_exit);
+ }
+ masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit);
+ CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
+ kind == DeoptimizeKind::kLazy
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
+ }
+}
+
#undef __
} // namespace test_macro_assembler_arm64
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index 48c4e6248e..2af0312978 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -89,7 +89,7 @@ TEST(BYTESWAP) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (size_t i = 0; i < arraysize(test_values); i++) {
@@ -199,7 +199,7 @@ TEST(jump_tables4) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -262,7 +262,7 @@ TEST(jump_tables5) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -350,7 +350,7 @@ TEST(jump_tables6) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -375,7 +375,7 @@ static uint32_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
CodeDesc desc;
assembler.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F1>::FromCode(*code);
@@ -503,7 +503,7 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
@@ -615,7 +615,7 @@ TEST(OverflowInstructions) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.lhs = ii;
t.rhs = jj;
@@ -738,7 +738,7 @@ TEST(min_max_nan) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -773,7 +773,7 @@ bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
@@ -1020,7 +1020,7 @@ bool run_Sltu(uint32_t rs, uint32_t rd, Func GenerateSltuInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
int32_t res = reinterpret_cast<int32_t>(f.Call(rs, rd, 0, 0, 0));
@@ -1114,7 +1114,8 @@ static GeneratedCode<F4> GenerateMacroFloat32MinMax(MacroAssembler* masm) {
CodeDesc desc;
masm->GetCode(masm->isolate(), &desc);
Handle<Code> code =
- Factory::CodeBuilder(masm->isolate(), desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(masm->isolate(), desc, CodeKind::FOR_TESTING)
+ .Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -1255,7 +1256,8 @@ static GeneratedCode<F4> GenerateMacroFloat64MinMax(MacroAssembler* masm) {
CodeDesc desc;
masm->GetCode(masm->isolate(), &desc);
Handle<Code> code =
- Factory::CodeBuilder(masm->isolate(), desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(masm->isolate(), desc, CodeKind::FOR_TESTING)
+ .Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index b982e6c12c..b10952dd7a 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -109,7 +109,7 @@ TEST(BYTESWAP) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (size_t i = 0; i < arraysize(test_values); i++) {
@@ -164,7 +164,7 @@ TEST(LoadConstants) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<FV>::FromCode(*code);
(void)f.Call(reinterpret_cast<int64_t>(result), 0, 0, 0, 0);
@@ -207,7 +207,7 @@ TEST(LoadAddress) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<FV>::FromCode(*code);
(void)f.Call(0, 0, 0, 0, 0);
@@ -264,7 +264,7 @@ TEST(jump_tables4) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -334,7 +334,7 @@ TEST(jump_tables5) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -423,7 +423,7 @@ TEST(jump_tables6) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -448,7 +448,7 @@ static uint64_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
CodeDesc desc;
assembler.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F1>::FromCode(*code);
@@ -528,7 +528,7 @@ static uint64_t run_dlsa(uint64_t rt, uint64_t rs, int8_t sa) {
CodeDesc desc;
assembler.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<FV>::FromCode(*code);
@@ -678,7 +678,7 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
@@ -853,7 +853,7 @@ TEST(OverflowInstructions) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.lhs = ii;
t.rhs = jj;
@@ -976,7 +976,7 @@ TEST(min_max_nan) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -1011,7 +1011,7 @@ bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
@@ -1375,7 +1375,7 @@ bool run_Sltu(uint64_t rs, uint64_t rd, Func GenerateSltuInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
int64_t res = reinterpret_cast<int64_t>(f.Call(rs, rd, 0, 0, 0));
@@ -1469,7 +1469,8 @@ static GeneratedCode<F4> GenerateMacroFloat32MinMax(MacroAssembler* masm) {
CodeDesc desc;
masm->GetCode(masm->isolate(), &desc);
Handle<Code> code =
- Factory::CodeBuilder(masm->isolate(), desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(masm->isolate(), desc, CodeKind::FOR_TESTING)
+ .Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -1610,7 +1611,8 @@ static GeneratedCode<F4> GenerateMacroFloat64MinMax(MacroAssembler* masm) {
CodeDesc desc;
masm->GetCode(masm->isolate(), &desc);
Handle<Code> code =
- Factory::CodeBuilder(masm->isolate(), desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(masm->isolate(), desc, CodeKind::FOR_TESTING)
+ .Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 86f81c39f1..c5001f5b91 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -27,13 +27,13 @@
#include <stdlib.h>
-#include "src/init/v8.h"
-
#include "src/base/platform/platform.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/x64/assembler-x64-inl.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/simulator.h"
#include "src/heap/factory.h"
+#include "src/init/v8.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/utils/ostreams.h"
@@ -450,7 +450,7 @@ TEST(EmbeddedObj) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1033,6 +1033,29 @@ TEST(AreAliased) {
DCHECK(AreAliased(rax, no_reg, rbx, no_reg, rcx, no_reg, rdx, rax, no_reg));
}
+TEST(DeoptExitSizeIsFixed) {
+ CHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
+
+ STATIC_ASSERT(static_cast<int>(kFirstDeoptimizeKind) == 0);
+ for (int i = 0; i < kDeoptimizeKindCount; i++) {
+ DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
+ Builtins::Name target = Deoptimizer::GetDeoptimizationEntry(isolate, kind);
+ Label before_exit;
+ masm.bind(&before_exit);
+ masm.CallForDeoptimization(target, 42, &before_exit, kind, nullptr);
+ CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
+ kind == DeoptimizeKind::kLazy
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
+ }
+}
+
#undef __
} // namespace test_macro_assembler_x64
diff --git a/deps/v8/test/cctest/test-modules.cc b/deps/v8/test/cctest/test-modules.cc
index 13c6a3b7a8..65abf2ab4f 100644
--- a/deps/v8/test/cctest/test-modules.cc
+++ b/deps/v8/test/cctest/test-modules.cc
@@ -808,9 +808,7 @@ v8::MaybeLocal<v8::Promise> HostImportModuleDynamicallyCallbackReject(
TEST(ModuleEvaluationTopLevelAwaitDynamicImport) {
bool previous_top_level_await_flag_value = i::FLAG_harmony_top_level_await;
- bool previous_dynamic_import_flag_value = i::FLAG_harmony_dynamic_import;
i::FLAG_harmony_top_level_await = true;
- i::FLAG_harmony_dynamic_import = true;
Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kExplicit);
@@ -847,14 +845,11 @@ TEST(ModuleEvaluationTopLevelAwaitDynamicImport) {
CHECK_EQ(promise->State(), v8::Promise::kFulfilled);
}
i::FLAG_harmony_top_level_await = previous_top_level_await_flag_value;
- i::FLAG_harmony_dynamic_import = previous_dynamic_import_flag_value;
}
TEST(ModuleEvaluationTopLevelAwaitDynamicImportError) {
bool previous_top_level_await_flag_value = i::FLAG_harmony_top_level_await;
- bool previous_dynamic_import_flag_value = i::FLAG_harmony_dynamic_import;
i::FLAG_harmony_top_level_await = true;
- i::FLAG_harmony_dynamic_import = true;
Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kExplicit);
@@ -895,7 +890,6 @@ TEST(ModuleEvaluationTopLevelAwaitDynamicImportError) {
CHECK(!try_catch.HasCaught());
}
i::FLAG_harmony_top_level_await = previous_top_level_await_flag_value;
- i::FLAG_harmony_dynamic_import = previous_dynamic_import_flag_value;
}
TEST(TerminateExecutionTopLevelAwaitSync) {
@@ -993,4 +987,131 @@ TEST(TerminateExecutionTopLevelAwaitAsync) {
i::FLAG_harmony_top_level_await = previous_top_level_await_flag_value;
}
+static Local<Module> async_leaf_module;
+static Local<Module> sync_leaf_module;
+static Local<Module> cycle_self_module;
+static Local<Module> cycle_one_module;
+static Local<Module> cycle_two_module;
+MaybeLocal<Module> ResolveCallbackForIsGraphAsyncTopLevelAwait(
+ Local<Context> context, Local<String> specifier, Local<Module> referrer) {
+ if (specifier->StrictEquals(v8_str("./async_leaf.js"))) {
+ return async_leaf_module;
+ } else if (specifier->StrictEquals(v8_str("./sync_leaf.js"))) {
+ return sync_leaf_module;
+ } else if (specifier->StrictEquals(v8_str("./cycle_self.js"))) {
+ return cycle_self_module;
+ } else if (specifier->StrictEquals(v8_str("./cycle_one.js"))) {
+ return cycle_one_module;
+ } else {
+ CHECK(specifier->StrictEquals(v8_str("./cycle_two.js")));
+ return cycle_two_module;
+ }
+}
+
+TEST(IsGraphAsyncTopLevelAwait) {
+ bool previous_top_level_await_flag_value = i::FLAG_harmony_top_level_await;
+ i::FLAG_harmony_top_level_await = true;
+
+ Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
+ LocalContext env;
+
+ {
+ Local<String> source_text = v8_str("await notExecuted();");
+ ScriptOrigin origin =
+ ModuleOrigin(v8_str("async_leaf.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ async_leaf_module =
+ ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ CHECK(async_leaf_module
+ ->InstantiateModule(env.local(),
+ ResolveCallbackForIsGraphAsyncTopLevelAwait)
+ .FromJust());
+ CHECK(async_leaf_module->IsGraphAsync());
+ }
+
+ {
+ Local<String> source_text = v8_str("notExecuted();");
+ ScriptOrigin origin =
+ ModuleOrigin(v8_str("sync_leaf.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ sync_leaf_module =
+ ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ CHECK(sync_leaf_module
+ ->InstantiateModule(env.local(),
+ ResolveCallbackForIsGraphAsyncTopLevelAwait)
+ .FromJust());
+ CHECK(!sync_leaf_module->IsGraphAsync());
+ }
+
+ {
+ Local<String> source_text = v8_str("import './async_leaf.js'");
+ ScriptOrigin origin =
+ ModuleOrigin(v8_str("import_async.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ Local<Module> module =
+ ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ CHECK(module
+ ->InstantiateModule(env.local(),
+ ResolveCallbackForIsGraphAsyncTopLevelAwait)
+ .FromJust());
+ CHECK(module->IsGraphAsync());
+ }
+
+ {
+ Local<String> source_text = v8_str("import './sync_leaf.js'");
+ ScriptOrigin origin =
+ ModuleOrigin(v8_str("import_sync.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ Local<Module> module =
+ ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ CHECK(module
+ ->InstantiateModule(env.local(),
+ ResolveCallbackForIsGraphAsyncTopLevelAwait)
+ .FromJust());
+ CHECK(!module->IsGraphAsync());
+ }
+
+ {
+ Local<String> source_text = v8_str(
+ "import './cycle_self.js'\n"
+ "import './async_leaf.js'");
+ ScriptOrigin origin =
+ ModuleOrigin(v8_str("cycle_self.js"), CcTest::isolate());
+ ScriptCompiler::Source source(source_text, origin);
+ cycle_self_module =
+ ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ CHECK(cycle_self_module
+ ->InstantiateModule(env.local(),
+ ResolveCallbackForIsGraphAsyncTopLevelAwait)
+ .FromJust());
+ CHECK(cycle_self_module->IsGraphAsync());
+ }
+
+ {
+ Local<String> source_text1 = v8_str("import './cycle_two.js'");
+ ScriptOrigin origin1 =
+ ModuleOrigin(v8_str("cycle_one.js"), CcTest::isolate());
+ ScriptCompiler::Source source1(source_text1, origin1);
+ cycle_one_module =
+ ScriptCompiler::CompileModule(isolate, &source1).ToLocalChecked();
+ Local<String> source_text2 = v8_str(
+ "import './cycle_one.js'\n"
+ "import './async_leaf.js'");
+ ScriptOrigin origin2 =
+ ModuleOrigin(v8_str("cycle_two.js"), CcTest::isolate());
+ ScriptCompiler::Source source2(source_text2, origin2);
+ cycle_two_module =
+ ScriptCompiler::CompileModule(isolate, &source2).ToLocalChecked();
+ CHECK(cycle_one_module
+ ->InstantiateModule(env.local(),
+ ResolveCallbackForIsGraphAsyncTopLevelAwait)
+ .FromJust());
+ CHECK(cycle_one_module->IsGraphAsync());
+ CHECK(cycle_two_module->IsGraphAsync());
+ }
+
+ i::FLAG_harmony_top_level_await = previous_top_level_await_flag_value;
+}
+
} // anonymous namespace
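Module::IsGraphAsync(), exercised above, reports whether a module or anything reachable through its (possibly cyclic) import graph contains top-level await, telling an embedder up front that evaluation will complete asynchronously. A minimal usage sketch along the lines of these tests:
  Local<Module> module =
      ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
  CHECK(module->InstantiateModule(context, resolve_callback).FromJust());
  if (module->IsGraphAsync()) {
    // Evaluate() will yield a promise that settles asynchronously.
  }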
diff --git a/deps/v8/test/cctest/test-object.cc b/deps/v8/test/cctest/test-object.cc
index 04e47eb7bd..fbb4a2b30c 100644
--- a/deps/v8/test/cctest/test-object.cc
+++ b/deps/v8/test/cctest/test-object.cc
@@ -115,10 +115,14 @@ TEST(EnumCache) {
*env->Global()->Get(env.local(), v8_str("cc")).ToLocalChecked()));
// Check the transition tree.
- CHECK_EQ(a->map().instance_descriptors(), b->map().instance_descriptors());
- CHECK_EQ(b->map().instance_descriptors(), c->map().instance_descriptors());
- CHECK_NE(c->map().instance_descriptors(), cc->map().instance_descriptors());
- CHECK_NE(b->map().instance_descriptors(), cc->map().instance_descriptors());
+ CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad),
+ b->map().instance_descriptors(kRelaxedLoad));
+ CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad),
+ c->map().instance_descriptors(kRelaxedLoad));
+ CHECK_NE(c->map().instance_descriptors(kRelaxedLoad),
+ cc->map().instance_descriptors(kRelaxedLoad));
+ CHECK_NE(b->map().instance_descriptors(kRelaxedLoad),
+ cc->map().instance_descriptors(kRelaxedLoad));
// Check that the EnumLength is unset.
CHECK_EQ(a->map().EnumLength(), kInvalidEnumCacheSentinel);
@@ -127,13 +131,13 @@ TEST(EnumCache) {
CHECK_EQ(cc->map().EnumLength(), kInvalidEnumCacheSentinel);
// Check that the EnumCache is empty.
- CHECK_EQ(a->map().instance_descriptors().enum_cache(),
+ CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(b->map().instance_descriptors().enum_cache(),
+ CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad).enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(c->map().instance_descriptors().enum_cache(),
+ CHECK_EQ(c->map().instance_descriptors(kRelaxedLoad).enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(cc->map().instance_descriptors().enum_cache(),
+ CHECK_EQ(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
*factory->empty_enum_cache());
// The EnumCache is shared on the DescriptorArray, creating it on {cc} has no
@@ -145,14 +149,15 @@ TEST(EnumCache) {
CHECK_EQ(c->map().EnumLength(), kInvalidEnumCacheSentinel);
CHECK_EQ(cc->map().EnumLength(), 3);
- CHECK_EQ(a->map().instance_descriptors().enum_cache(),
+ CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(b->map().instance_descriptors().enum_cache(),
+ CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad).enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(c->map().instance_descriptors().enum_cache(),
+ CHECK_EQ(c->map().instance_descriptors(kRelaxedLoad).enum_cache(),
*factory->empty_enum_cache());
- EnumCache enum_cache = cc->map().instance_descriptors().enum_cache();
+ EnumCache enum_cache =
+ cc->map().instance_descriptors(kRelaxedLoad).enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
CHECK_EQ(enum_cache.keys().length(), 3);
CHECK_EQ(enum_cache.indices().length(), 3);
@@ -169,14 +174,19 @@ TEST(EnumCache) {
// The enum cache is shared on the descriptor array of maps {a}, {b} and
// {c} only.
- EnumCache enum_cache = a->map().instance_descriptors().enum_cache();
+ EnumCache enum_cache =
+ a->map().instance_descriptors(kRelaxedLoad).enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
- CHECK_NE(cc->map().instance_descriptors().enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
*factory->empty_enum_cache());
- CHECK_NE(cc->map().instance_descriptors().enum_cache(), enum_cache);
- CHECK_EQ(a->map().instance_descriptors().enum_cache(), enum_cache);
- CHECK_EQ(b->map().instance_descriptors().enum_cache(), enum_cache);
- CHECK_EQ(c->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ enum_cache);
+ CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ enum_cache);
+ CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ enum_cache);
+ CHECK_EQ(c->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ enum_cache);
CHECK_EQ(enum_cache.keys().length(), 1);
CHECK_EQ(enum_cache.indices().length(), 1);
@@ -185,7 +195,8 @@ TEST(EnumCache) {
// Creating the EnumCache for {c} will create a new EnumCache on the shared
// DescriptorArray.
Handle<EnumCache> previous_enum_cache(
- a->map().instance_descriptors().enum_cache(), a->GetIsolate());
+ a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ a->GetIsolate());
Handle<FixedArray> previous_keys(previous_enum_cache->keys(),
a->GetIsolate());
Handle<FixedArray> previous_indices(previous_enum_cache->indices(),
@@ -197,7 +208,8 @@ TEST(EnumCache) {
CHECK_EQ(c->map().EnumLength(), 3);
CHECK_EQ(cc->map().EnumLength(), 3);
- EnumCache enum_cache = c->map().instance_descriptors().enum_cache();
+ EnumCache enum_cache =
+ c->map().instance_descriptors(kRelaxedLoad).enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
// The keys and indices caches are updated.
CHECK_EQ(enum_cache, *previous_enum_cache);
@@ -210,20 +222,25 @@ TEST(EnumCache) {
// The enum cache is shared on the descriptor array of maps {a}, {b} and
// {c} only.
- CHECK_NE(cc->map().instance_descriptors().enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
*factory->empty_enum_cache());
- CHECK_NE(cc->map().instance_descriptors().enum_cache(), enum_cache);
- CHECK_NE(cc->map().instance_descriptors().enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ enum_cache);
+ CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
*previous_enum_cache);
- CHECK_EQ(a->map().instance_descriptors().enum_cache(), enum_cache);
- CHECK_EQ(b->map().instance_descriptors().enum_cache(), enum_cache);
- CHECK_EQ(c->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ enum_cache);
+ CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ enum_cache);
+ CHECK_EQ(c->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ enum_cache);
}
// {b} can reuse the existing EnumCache, hence we only need to set the correct
// EnumLength on the map without modifying the cache itself.
previous_enum_cache =
- handle(a->map().instance_descriptors().enum_cache(), a->GetIsolate());
+ handle(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ a->GetIsolate());
previous_keys = handle(previous_enum_cache->keys(), a->GetIsolate());
previous_indices = handle(previous_enum_cache->indices(), a->GetIsolate());
CompileRun("var s = 0; for (let key in b) { s += b[key] };");
@@ -233,7 +250,8 @@ TEST(EnumCache) {
CHECK_EQ(c->map().EnumLength(), 3);
CHECK_EQ(cc->map().EnumLength(), 3);
- EnumCache enum_cache = c->map().instance_descriptors().enum_cache();
+ EnumCache enum_cache =
+ c->map().instance_descriptors(kRelaxedLoad).enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
// The keys and indices caches are not updated.
CHECK_EQ(enum_cache, *previous_enum_cache);
@@ -244,14 +262,18 @@ TEST(EnumCache) {
// The enum cache is shared on the descriptor array of maps {a}, {b} and
// {c} only.
- CHECK_NE(cc->map().instance_descriptors().enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
*factory->empty_enum_cache());
- CHECK_NE(cc->map().instance_descriptors().enum_cache(), enum_cache);
- CHECK_NE(cc->map().instance_descriptors().enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ enum_cache);
+ CHECK_NE(cc->map().instance_descriptors(kRelaxedLoad).enum_cache(),
*previous_enum_cache);
- CHECK_EQ(a->map().instance_descriptors().enum_cache(), enum_cache);
- CHECK_EQ(b->map().instance_descriptors().enum_cache(), enum_cache);
- CHECK_EQ(c->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(a->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ enum_cache);
+ CHECK_EQ(b->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ enum_cache);
+ CHECK_EQ(c->map().instance_descriptors(kRelaxedLoad).enum_cache(),
+ enum_cache);
}
}
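instance_descriptors() now takes an explicit load tag; every call site here passes kRelaxedLoad, making the memory ordering of descriptor-array reads visible at each use, presumably to accommodate concurrent readers on background threads. The updated access pattern, with types as used in this test:
  DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
  EnumCache enum_cache = descriptors.enum_cache();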
diff --git a/deps/v8/test/cctest/test-orderedhashtable.cc b/deps/v8/test/cctest/test-orderedhashtable.cc
index f3887bdacd..e7cddab549 100644
--- a/deps/v8/test/cctest/test-orderedhashtable.cc
+++ b/deps/v8/test/cctest/test-orderedhashtable.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <utility>
-#include "src/init/v8.h"
+#include "src/init/v8.h"
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "test/cctest/cctest.h"
@@ -27,6 +27,68 @@ void Verify(Isolate* isolate, Handle<HeapObject> obj) {
#endif
}
+// Helpers to abstract over differences in the interfaces of the different
+// ordered data structures.
+
+template <typename T>
+Handle<T> Add(Isolate* isolate, Handle<T> table, Handle<String> key1,
+ Handle<String> value1, PropertyDetails details);
+
+template <>
+Handle<OrderedHashMap> Add(Isolate* isolate, Handle<OrderedHashMap> table,
+ Handle<String> key, Handle<String> value,
+ PropertyDetails details) {
+ return OrderedHashMap::Add(isolate, table, key, value).ToHandleChecked();
+}
+
+template <>
+Handle<OrderedHashSet> Add(Isolate* isolate, Handle<OrderedHashSet> table,
+ Handle<String> key, Handle<String> value,
+ PropertyDetails details) {
+ return OrderedHashSet::Add(isolate, table, key).ToHandleChecked();
+}
+
+template <>
+Handle<OrderedNameDictionary> Add(Isolate* isolate,
+ Handle<OrderedNameDictionary> table,
+ Handle<String> key, Handle<String> value,
+ PropertyDetails details) {
+ return OrderedNameDictionary::Add(isolate, table, key, value, details)
+ .ToHandleChecked();
+}
+
+// Version for OrderedHashMap and OrderedHashSet.
+template <typename T>
+bool HasKey(Isolate* isolate, Handle<T> table, Object key) {
+ return T::HasKey(isolate, *table, key);
+}
+
+template <>
+bool HasKey(Isolate* isolate, Handle<OrderedNameDictionary> table, Object key) {
+ return table->FindEntry(isolate, key).is_found();
+}
+
+// Version for OrderedHashMap and OrderedHashSet.
+template <typename T>
+Handle<T> Delete(Isolate* isolate, Handle<T> table, Object key) {
+ T::Delete(isolate, *table, key);
+ return table;
+}
+
+template <>
+Handle<OrderedNameDictionary> Delete(Isolate* isolate,
+ Handle<OrderedNameDictionary> table,
+ Object key) {
+ // OrderedNameDictionary has no Delete, only DeleteEntry, which requires
+ // the key being deleted to be present.
+ InternalIndex entry = table->FindEntry(isolate, key);
+ if (entry.is_not_found()) return table;
+
+ return OrderedNameDictionary::DeleteEntry(isolate, table, entry);
+}
+
TEST(SmallOrderedHashSetInsertion) {
LocalContext context;
Isolate* isolate = GetIsolateFrom(&context);
@@ -1367,7 +1429,7 @@ TEST(OrderedNameDictionaryInsertion) {
Handle<String> key1 = isolate->factory()->InternalizeUtf8String("foo");
Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
- CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key1));
+ CHECK(dict->FindEntry(isolate, *key1).is_not_found());
PropertyDetails details = PropertyDetails::Empty();
dict = OrderedNameDictionary::Add(isolate, dict, key1, value, details)
.ToHandleChecked();
@@ -1375,17 +1437,17 @@ TEST(OrderedNameDictionaryInsertion) {
CHECK_EQ(2, dict->NumberOfBuckets());
CHECK_EQ(1, dict->NumberOfElements());
- CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+ CHECK_EQ(InternalIndex(0), dict->FindEntry(isolate, *key1));
Handle<Symbol> key2 = factory->NewSymbol();
- CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key2));
+ CHECK(dict->FindEntry(isolate, *key2).is_not_found());
dict = OrderedNameDictionary::Add(isolate, dict, key2, value, details)
.ToHandleChecked();
Verify(isolate, dict);
CHECK_EQ(2, dict->NumberOfBuckets());
CHECK_EQ(2, dict->NumberOfElements());
- CHECK_EQ(0, dict->FindEntry(isolate, *key1));
- CHECK_EQ(1, dict->FindEntry(isolate, *key2));
+ CHECK_EQ(InternalIndex(0), dict->FindEntry(isolate, *key1));
+ CHECK_EQ(InternalIndex(1), dict->FindEntry(isolate, *key2));
}
TEST(OrderedNameDictionaryFindEntry) {
@@ -1408,9 +1470,9 @@ TEST(OrderedNameDictionaryFindEntry) {
CHECK_EQ(2, dict->NumberOfBuckets());
CHECK_EQ(1, dict->NumberOfElements());
- int entry = dict->FindEntry(isolate, *key1);
- CHECK_EQ(entry, 0);
- CHECK_NE(entry, OrderedNameDictionary::kNotFound);
+ InternalIndex entry = dict->FindEntry(isolate, *key1);
+ CHECK_EQ(entry, InternalIndex(0));
+ CHECK(entry.is_found());
Handle<Symbol> key2 = factory->NewSymbol();
dict = OrderedNameDictionary::Add(isolate, dict, key2, value, details)
@@ -1420,12 +1482,12 @@ TEST(OrderedNameDictionaryFindEntry) {
CHECK_EQ(2, dict->NumberOfElements());
entry = dict->FindEntry(isolate, *key1);
- CHECK_NE(entry, OrderedNameDictionary::kNotFound);
- CHECK_EQ(entry, 0);
+ CHECK(entry.is_found());
+ CHECK_EQ(entry, InternalIndex(0));
entry = dict->FindEntry(isolate, *key2);
- CHECK_NE(entry, OrderedNameDictionary::kNotFound);
- CHECK_EQ(entry, 1);
+ CHECK(entry.is_found());
+ CHECK_EQ(entry, InternalIndex(1));
}
TEST(OrderedNameDictionaryValueAtAndValueAtPut) {
@@ -1441,16 +1503,16 @@ TEST(OrderedNameDictionaryValueAtAndValueAtPut) {
Handle<String> key1 = isolate->factory()->InternalizeUtf8String("foo");
Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
- CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key1));
+ CHECK(dict->FindEntry(isolate, *key1).is_not_found());
PropertyDetails details = PropertyDetails::Empty();
dict = OrderedNameDictionary::Add(isolate, dict, key1, value, details)
.ToHandleChecked();
Verify(isolate, dict);
CHECK_EQ(2, dict->NumberOfBuckets());
CHECK_EQ(1, dict->NumberOfElements());
- CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+ CHECK_EQ(InternalIndex(0), dict->FindEntry(isolate, *key1));
- int entry = dict->FindEntry(isolate, *key1);
+ InternalIndex entry = dict->FindEntry(isolate, *key1);
Handle<Object> found = handle(dict->ValueAt(entry), isolate);
CHECK_EQ(*found, *value);
@@ -1463,14 +1525,14 @@ TEST(OrderedNameDictionaryValueAtAndValueAtPut) {
CHECK_EQ(*found, *other_value);
Handle<Symbol> key2 = factory->NewSymbol();
- CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key2));
+ CHECK(dict->FindEntry(isolate, *key2).is_not_found());
dict = OrderedNameDictionary::Add(isolate, dict, key2, value, details)
.ToHandleChecked();
Verify(isolate, dict);
CHECK_EQ(2, dict->NumberOfBuckets());
CHECK_EQ(2, dict->NumberOfElements());
- CHECK_EQ(0, dict->FindEntry(isolate, *key1));
- CHECK_EQ(1, dict->FindEntry(isolate, *key2));
+ CHECK_EQ(InternalIndex(0), dict->FindEntry(isolate, *key1));
+ CHECK_EQ(InternalIndex(1), dict->FindEntry(isolate, *key2));
entry = dict->FindEntry(isolate, *key1);
found = handle(dict->ValueAt(entry), isolate);
@@ -1501,16 +1563,16 @@ TEST(OrderedNameDictionaryDetailsAtAndDetailsAtPut) {
Handle<String> key1 = isolate->factory()->InternalizeUtf8String("foo");
Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
- CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key1));
+ CHECK(dict->FindEntry(isolate, *key1).is_not_found());
PropertyDetails details = PropertyDetails::Empty();
dict = OrderedNameDictionary::Add(isolate, dict, key1, value, details)
.ToHandleChecked();
Verify(isolate, dict);
CHECK_EQ(2, dict->NumberOfBuckets());
CHECK_EQ(1, dict->NumberOfElements());
- CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+ CHECK_EQ(InternalIndex(0), dict->FindEntry(isolate, *key1));
- int entry = dict->FindEntry(isolate, *key1);
+ InternalIndex entry = dict->FindEntry(isolate, *key1);
PropertyDetails found = dict->DetailsAt(entry);
CHECK_EQ(PropertyDetails::Empty().AsSmi(), found.AsSmi());
@@ -1523,14 +1585,14 @@ TEST(OrderedNameDictionaryDetailsAtAndDetailsAtPut) {
CHECK_EQ(other.AsSmi(), found.AsSmi());
Handle<Symbol> key2 = factory->NewSymbol();
- CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key2));
+ CHECK(dict->FindEntry(isolate, *key2).is_not_found());
dict = OrderedNameDictionary::Add(isolate, dict, key2, value, details)
.ToHandleChecked();
Verify(isolate, dict);
CHECK_EQ(2, dict->NumberOfBuckets());
CHECK_EQ(2, dict->NumberOfElements());
- CHECK_EQ(0, dict->FindEntry(isolate, *key1));
- CHECK_EQ(1, dict->FindEntry(isolate, *key2));
+ CHECK_EQ(InternalIndex(0), dict->FindEntry(isolate, *key1));
+ CHECK_EQ(InternalIndex(1), dict->FindEntry(isolate, *key2));
entry = dict->FindEntry(isolate, *key1);
found = dict->DetailsAt(entry);
@@ -1559,26 +1621,24 @@ TEST(SmallOrderedNameDictionaryInsertion) {
Handle<String> key1 = isolate->factory()->InternalizeUtf8String("foo");
Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
- CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
- dict->FindEntry(isolate, *key1));
+ CHECK(dict->FindEntry(isolate, *key1).is_not_found());
PropertyDetails details = PropertyDetails::Empty();
dict = SmallOrderedNameDictionary::Add(isolate, dict, key1, value, details)
.ToHandleChecked();
Verify(isolate, dict);
CHECK_EQ(2, dict->NumberOfBuckets());
CHECK_EQ(1, dict->NumberOfElements());
- CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+ CHECK_EQ(InternalIndex(0), dict->FindEntry(isolate, *key1));
Handle<Symbol> key2 = factory->NewSymbol();
- CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
- dict->FindEntry(isolate, *key2));
+ CHECK(dict->FindEntry(isolate, *key2).is_not_found());
dict = SmallOrderedNameDictionary::Add(isolate, dict, key2, value, details)
.ToHandleChecked();
Verify(isolate, dict);
CHECK_EQ(2, dict->NumberOfBuckets());
CHECK_EQ(2, dict->NumberOfElements());
- CHECK_EQ(0, dict->FindEntry(isolate, *key1));
- CHECK_EQ(1, dict->FindEntry(isolate, *key2));
+ CHECK_EQ(InternalIndex(0), dict->FindEntry(isolate, *key1));
+ CHECK_EQ(InternalIndex(1), dict->FindEntry(isolate, *key2));
}
TEST(SmallOrderedNameDictionaryInsertionMax) {
@@ -1624,8 +1684,7 @@ TEST(SmallOrderedNameDictionaryFindEntry) {
Handle<String> key1 = isolate->factory()->InternalizeUtf8String("foo");
Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
- CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
- dict->FindEntry(isolate, *key1));
+ CHECK(dict->FindEntry(isolate, *key1).is_not_found());
PropertyDetails details = PropertyDetails::Empty();
dict = SmallOrderedNameDictionary::Add(isolate, dict, key1, value, details)
@@ -1633,22 +1692,21 @@ TEST(SmallOrderedNameDictionaryFindEntry) {
Verify(isolate, dict);
CHECK_EQ(2, dict->NumberOfBuckets());
CHECK_EQ(1, dict->NumberOfElements());
- CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+ CHECK_EQ(InternalIndex(0), dict->FindEntry(isolate, *key1));
- int entry = dict->FindEntry(isolate, *key1);
- CHECK_NE(entry, OrderedNameDictionary::kNotFound);
+ InternalIndex entry = dict->FindEntry(isolate, *key1);
+ CHECK(entry.is_found());
Handle<Symbol> key2 = factory->NewSymbol();
- CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
- dict->FindEntry(isolate, *key2));
+ CHECK(dict->FindEntry(isolate, *key2).is_not_found());
dict = SmallOrderedNameDictionary::Add(isolate, dict, key2, value, details)
.ToHandleChecked();
Verify(isolate, dict);
CHECK_EQ(2, dict->NumberOfBuckets());
CHECK_EQ(2, dict->NumberOfElements());
- CHECK_EQ(0, dict->FindEntry(isolate, *key1));
- CHECK_EQ(1, dict->FindEntry(isolate, *key2));
+ CHECK_EQ(InternalIndex(0), dict->FindEntry(isolate, *key1));
+ CHECK_EQ(InternalIndex(1), dict->FindEntry(isolate, *key2));
}
TEST(SmallOrderedNameDictionaryValueAtAndValueAtPut) {
@@ -1665,17 +1723,16 @@ TEST(SmallOrderedNameDictionaryValueAtAndValueAtPut) {
Handle<String> key1 = isolate->factory()->InternalizeUtf8String("foo");
Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
- CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
- dict->FindEntry(isolate, *key1));
+ CHECK(dict->FindEntry(isolate, *key1).is_not_found());
PropertyDetails details = PropertyDetails::Empty();
dict = SmallOrderedNameDictionary::Add(isolate, dict, key1, value, details)
.ToHandleChecked();
Verify(isolate, dict);
CHECK_EQ(2, dict->NumberOfBuckets());
CHECK_EQ(1, dict->NumberOfElements());
- CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+ CHECK_EQ(InternalIndex(0), dict->FindEntry(isolate, *key1));
- int entry = dict->FindEntry(isolate, *key1);
+ InternalIndex entry = dict->FindEntry(isolate, *key1);
Handle<Object> found = handle(dict->ValueAt(entry), isolate);
CHECK_EQ(*found, *value);
@@ -1688,15 +1745,14 @@ TEST(SmallOrderedNameDictionaryValueAtAndValueAtPut) {
CHECK_EQ(*found, *other_value);
Handle<Symbol> key2 = factory->NewSymbol();
- CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
- dict->FindEntry(isolate, *key2));
+ CHECK(dict->FindEntry(isolate, *key2).is_not_found());
dict = SmallOrderedNameDictionary::Add(isolate, dict, key2, value, details)
.ToHandleChecked();
Verify(isolate, dict);
CHECK_EQ(2, dict->NumberOfBuckets());
CHECK_EQ(2, dict->NumberOfElements());
- CHECK_EQ(0, dict->FindEntry(isolate, *key1));
- CHECK_EQ(1, dict->FindEntry(isolate, *key2));
+ CHECK_EQ(InternalIndex(0), dict->FindEntry(isolate, *key1));
+ CHECK_EQ(InternalIndex(1), dict->FindEntry(isolate, *key2));
entry = dict->FindEntry(isolate, *key1);
found = handle(dict->ValueAt(entry), isolate);
@@ -1728,17 +1784,16 @@ TEST(SmallOrderedNameDictionaryDetailsAtAndDetailsAtPut) {
Handle<String> key1 = isolate->factory()->InternalizeUtf8String("foo");
Handle<String> value = isolate->factory()->InternalizeUtf8String("bar");
- CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
- dict->FindEntry(isolate, *key1));
+ CHECK(dict->FindEntry(isolate, *key1).is_not_found());
PropertyDetails details = PropertyDetails::Empty();
dict = SmallOrderedNameDictionary::Add(isolate, dict, key1, value, details)
.ToHandleChecked();
Verify(isolate, dict);
CHECK_EQ(2, dict->NumberOfBuckets());
CHECK_EQ(1, dict->NumberOfElements());
- CHECK_EQ(0, dict->FindEntry(isolate, *key1));
+ CHECK_EQ(InternalIndex(0), dict->FindEntry(isolate, *key1));
- int entry = dict->FindEntry(isolate, *key1);
+ InternalIndex entry = dict->FindEntry(isolate, *key1);
PropertyDetails found = dict->DetailsAt(entry);
CHECK_EQ(PropertyDetails::Empty().AsSmi(), found.AsSmi());
@@ -1751,15 +1806,14 @@ TEST(SmallOrderedNameDictionaryDetailsAtAndDetailsAtPut) {
CHECK_EQ(other.AsSmi(), found.AsSmi());
Handle<Symbol> key2 = factory->NewSymbol();
- CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
- dict->FindEntry(isolate, *key2));
+ CHECK(dict->FindEntry(isolate, *key2).is_not_found());
dict = SmallOrderedNameDictionary::Add(isolate, dict, key2, value, details)
.ToHandleChecked();
Verify(isolate, dict);
CHECK_EQ(2, dict->NumberOfBuckets());
CHECK_EQ(2, dict->NumberOfElements());
- CHECK_EQ(0, dict->FindEntry(isolate, *key1));
- CHECK_EQ(1, dict->FindEntry(isolate, *key2));
+ CHECK_EQ(InternalIndex(0), dict->FindEntry(isolate, *key1));
+ CHECK_EQ(InternalIndex(1), dict->FindEntry(isolate, *key2));
entry = dict->FindEntry(isolate, *key1);
found = dict->DetailsAt(entry);
@@ -1843,8 +1897,8 @@ TEST(OrderedNameDictionaryHandlerInsertion) {
DCHECK(key->IsUniqueName());
Verify(isolate, table);
CHECK(table->IsSmallOrderedNameDictionary());
- CHECK_NE(OrderedNameDictionaryHandler::kNotFound,
- OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key));
+ CHECK(OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key)
+ .is_found());
char buf[10];
for (int i = 0; i < 1024; i++) {
@@ -1859,17 +1913,15 @@ TEST(OrderedNameDictionaryHandlerInsertion) {
for (int j = 0; j <= i; j++) {
CHECK_LT(0, snprintf(buf, sizeof(buf), "foo%d", j));
Handle<Name> key_j = isolate->factory()->InternalizeUtf8String(buf);
- CHECK_NE(
- OrderedNameDictionaryHandler::kNotFound,
- OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key_j));
+ CHECK(OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key_j)
+ .is_found());
}
for (int j = i + 1; j < 1024; j++) {
CHECK_LT(0, snprintf(buf, sizeof(buf), "foo%d", j));
Handle<Name> key_j = isolate->factory()->InternalizeUtf8String(buf);
- CHECK_EQ(
- OrderedNameDictionaryHandler::kNotFound,
- OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key_j));
+ CHECK(OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key_j)
+ .is_not_found());
}
}
@@ -1897,24 +1949,24 @@ TEST(OrderedNameDictionaryHandlerDeletion) {
DCHECK(key->IsUniqueName());
Verify(isolate, table);
CHECK(table->IsSmallOrderedNameDictionary());
- CHECK_NE(OrderedNameDictionaryHandler::kNotFound,
- OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key));
+ CHECK(OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key)
+ .is_found());
// Remove a non-existent key.
OrderedNameDictionaryHandler::Delete(isolate, table, key2);
Verify(isolate, table);
CHECK(table->IsSmallOrderedNameDictionary());
- CHECK_EQ(OrderedNameDictionaryHandler::kNotFound,
- OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key2));
- CHECK_NE(OrderedNameDictionaryHandler::kNotFound,
- OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key));
+ CHECK(OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key2)
+ .is_not_found());
+ CHECK(OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key)
+ .is_found());
// Remove an existing key.
OrderedNameDictionaryHandler::Delete(isolate, table, key);
Verify(isolate, table);
CHECK(table->IsSmallOrderedNameDictionary());
- CHECK_EQ(OrderedNameDictionaryHandler::kNotFound,
- OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key));
+ CHECK(OrderedNameDictionaryHandler::FindEntry(isolate, *table, *key)
+ .is_not_found());
CHECK(table->IsSmallOrderedNameDictionary());
}
@@ -1933,7 +1985,7 @@ TEST(OrderedNameDictionarySetEntry) {
Handle<String> key = factory->InternalizeUtf8String("foo");
Handle<String> value = factory->InternalizeUtf8String("bar");
- CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key));
+ CHECK(dict->FindEntry(isolate, *key).is_not_found());
PropertyDetails details = PropertyDetails::Empty();
dict = OrderedNameDictionary::Add(isolate, dict, key, value, details)
.ToHandleChecked();
@@ -1941,8 +1993,8 @@ TEST(OrderedNameDictionarySetEntry) {
CHECK_EQ(2, dict->NumberOfBuckets());
CHECK_EQ(1, dict->NumberOfElements());
- int entry = dict->FindEntry(isolate, *key);
- CHECK_EQ(0, entry);
+ InternalIndex entry = dict->FindEntry(isolate, *key);
+ CHECK_EQ(InternalIndex(0), entry);
Handle<Object> found = handle(dict->ValueAt(entry), isolate);
CHECK_EQ(*found, *value);
@@ -1953,7 +2005,7 @@ TEST(OrderedNameDictionarySetEntry) {
dict->SetEntry(entry, *key, *other_value, other_details);
entry = dict->FindEntry(isolate, *key);
- CHECK_EQ(0, entry);
+ CHECK_EQ(InternalIndex(0), entry);
found = handle(dict->ValueAt(entry), isolate);
CHECK_EQ(*found, *other_value);
found = handle(dict->KeyAt(entry), isolate);
@@ -1976,8 +2028,7 @@ TEST(SmallOrderedNameDictionarySetEntry) {
Handle<String> key = factory->InternalizeUtf8String("foo");
Handle<String> value = factory->InternalizeUtf8String("bar");
- CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
- dict->FindEntry(isolate, *key));
+ CHECK(dict->FindEntry(isolate, *key).is_not_found());
PropertyDetails details = PropertyDetails::Empty();
dict = SmallOrderedNameDictionary::Add(isolate, dict, key, value, details)
.ToHandleChecked();
@@ -1986,8 +2037,8 @@ TEST(SmallOrderedNameDictionarySetEntry) {
CHECK_EQ(1, dict->NumberOfElements());
CHECK_EQ(0, dict->NumberOfDeletedElements());
- int entry = dict->FindEntry(isolate, *key);
- CHECK_EQ(0, entry);
+ InternalIndex entry = dict->FindEntry(isolate, *key);
+ CHECK_EQ(InternalIndex(0), entry);
Handle<Object> found = handle(dict->ValueAt(entry), isolate);
CHECK_EQ(*found, *value);
@@ -1998,7 +2049,7 @@ TEST(SmallOrderedNameDictionarySetEntry) {
dict->SetEntry(entry, *key, *other_value, other_details);
entry = dict->FindEntry(isolate, *key);
- CHECK_EQ(0, entry);
+ CHECK_EQ(InternalIndex(0), entry);
found = handle(dict->ValueAt(entry), isolate);
CHECK_EQ(*found, *other_value);
found = handle(dict->KeyAt(entry), isolate);
@@ -2020,7 +2071,7 @@ TEST(OrderedNameDictionaryDeleteEntry) {
Handle<String> key = factory->InternalizeUtf8String("foo");
Handle<String> value = factory->InternalizeUtf8String("bar");
- CHECK_EQ(OrderedNameDictionary::kNotFound, dict->FindEntry(isolate, *key));
+ CHECK(dict->FindEntry(isolate, *key).is_not_found());
PropertyDetails details = PropertyDetails::Empty();
dict = OrderedNameDictionary::Add(isolate, dict, key, value, details)
.ToHandleChecked();
@@ -2029,11 +2080,11 @@ TEST(OrderedNameDictionaryDeleteEntry) {
CHECK_EQ(1, dict->NumberOfElements());
CHECK_EQ(0, dict->NumberOfDeletedElements());
- int entry = dict->FindEntry(isolate, *key);
- CHECK_EQ(0, entry);
+ InternalIndex entry = dict->FindEntry(isolate, *key);
+ CHECK_EQ(InternalIndex(0), entry);
dict = OrderedNameDictionary::DeleteEntry(isolate, dict, entry);
entry = dict->FindEntry(isolate, *key);
- CHECK_EQ(OrderedNameDictionary::kNotFound, entry);
+ CHECK(entry.is_not_found());
CHECK_EQ(0, dict->NumberOfElements());
char buf[10];
@@ -2061,7 +2112,7 @@ TEST(OrderedNameDictionaryDeleteEntry) {
Verify(isolate, dict);
entry = dict->FindEntry(isolate, *key);
- CHECK_EQ(OrderedNameDictionary::kNotFound, entry);
+ CHECK(entry.is_not_found());
}
CHECK_EQ(0, dict->NumberOfElements());
// Dictionary shrunk again.
@@ -2082,8 +2133,7 @@ TEST(SmallOrderedNameDictionaryDeleteEntry) {
Handle<String> key = factory->InternalizeUtf8String("foo");
Handle<String> value = factory->InternalizeUtf8String("bar");
- CHECK_EQ(SmallOrderedNameDictionary::kNotFound,
- dict->FindEntry(isolate, *key));
+ CHECK(dict->FindEntry(isolate, *key).is_not_found());
PropertyDetails details = PropertyDetails::Empty();
dict = SmallOrderedNameDictionary::Add(isolate, dict, key, value, details)
.ToHandleChecked();
@@ -2092,11 +2142,11 @@ TEST(SmallOrderedNameDictionaryDeleteEntry) {
CHECK_EQ(1, dict->NumberOfElements());
CHECK_EQ(0, dict->NumberOfDeletedElements());
- int entry = dict->FindEntry(isolate, *key);
- CHECK_EQ(0, entry);
+ InternalIndex entry = dict->FindEntry(isolate, *key);
+ CHECK_EQ(InternalIndex(0), entry);
dict = SmallOrderedNameDictionary::DeleteEntry(isolate, dict, entry);
entry = dict->FindEntry(isolate, *key);
- CHECK_EQ(SmallOrderedNameDictionary::kNotFound, entry);
+ CHECK(entry.is_not_found());
char buf[10];
// Make sure we grow at least once.
@@ -2124,7 +2174,7 @@ TEST(SmallOrderedNameDictionaryDeleteEntry) {
Verify(isolate, dict);
entry = dict->FindEntry(isolate, *key);
- CHECK_EQ(SmallOrderedNameDictionary::kNotFound, entry);
+ CHECK(entry.is_not_found());
}
CHECK_EQ(0, dict->NumberOfElements());
@@ -2132,6 +2182,265 @@ TEST(SmallOrderedNameDictionaryDeleteEntry) {
CHECK_EQ(0, dict->NumberOfDeletedElements());
}
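+// Shared driver for the ZeroSize* tests below: it pushes a (formerly empty)
+// table through add/lookup/delete and verifies the bookkeeping. It relies on
+// file-local Add/HasKey/Delete overloads, which are assumed to dispatch to
+// the static methods of each table type, roughly (sketch only):
+//
+//   Handle<OrderedHashMap> Add(Isolate* isolate, Handle<OrderedHashMap> t,
+//                              Handle<String> key, Handle<String> value,
+//                              PropertyDetails) {
+//     return OrderedHashMap::Add(isolate, t, key, value).ToHandleChecked();
+//   }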
+template <typename T>
+void TestEmptyOrderedHashTable(Isolate* isolate, Factory* factory,
+ Handle<T> table) {
+ CHECK_EQ(0, table->NumberOfElements());
+
+ PropertyDetails details = PropertyDetails::Empty();
+
+ Handle<String> key1 = factory->InternalizeUtf8String("key1");
+ Handle<String> value1 = factory->InternalizeUtf8String("value1");
+ table = Add(isolate, table, key1, value1, details);
+ Verify(isolate, table);
+ CHECK_EQ(1, table->NumberOfElements());
+ CHECK(HasKey(isolate, table, *key1));
+
+ Handle<String> key2 = factory->InternalizeUtf8String("key2");
+ Handle<String> value2 = factory->InternalizeUtf8String("value2");
+ CHECK(!HasKey(isolate, table, *key2));
+ table = Add(isolate, table, key2, value2, details);
+ Verify(isolate, table);
+ CHECK_EQ(2, table->NumberOfElements());
+ CHECK(HasKey(isolate, table, *key1));
+ CHECK(HasKey(isolate, table, *key2));
+
+ Handle<String> key3 = factory->InternalizeUtf8String("key3");
+ Handle<String> value3 = factory->InternalizeUtf8String("value3");
+ CHECK(!HasKey(isolate, table, *key3));
+ table = Add(isolate, table, key3, value3, details);
+ Verify(isolate, table);
+ CHECK_EQ(3, table->NumberOfElements());
+ CHECK(HasKey(isolate, table, *key1));
+ CHECK(HasKey(isolate, table, *key2));
+ CHECK(HasKey(isolate, table, *key3));
+
+ Handle<String> key4 = factory->InternalizeUtf8String("key4");
+ Handle<String> value4 = factory->InternalizeUtf8String("value4");
+ CHECK(!HasKey(isolate, table, *key4));
+ table = Delete(isolate, table, *key4);
+ Verify(isolate, table);
+ CHECK_EQ(3, table->NumberOfElements());
+ CHECK_EQ(0, table->NumberOfDeletedElements());
+ CHECK(!HasKey(isolate, table, *key4));
+
+ table = Add(isolate, table, key4, value4, details);
+ Verify(isolate, table);
+ CHECK_EQ(4, table->NumberOfElements());
+ CHECK_EQ(0, table->NumberOfDeletedElements());
+ CHECK(HasKey(isolate, table, *key4));
+
+ CHECK(HasKey(isolate, table, *key4));
+ table = Delete(isolate, table, *key4);
+ Verify(isolate, table);
+ CHECK_EQ(3, table->NumberOfElements());
+ CHECK_EQ(1, table->NumberOfDeletedElements());
+ CHECK(!HasKey(isolate, table, *key4));
+}
+
+TEST(ZeroSizeOrderedHashMap) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+ ReadOnlyRoots ro_roots(isolate);
+
+ Handle<Smi> key1(Smi::FromInt(1), isolate);
+ Handle<Smi> value1(Smi::FromInt(1), isolate);
+
+ Handle<OrderedHashMap> empty =
+ Handle<OrderedHashMap>(ro_roots.empty_ordered_hash_map(), isolate);
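+ // The empty map is a shared read-only root, so every operation exercised
+ // below must return a fresh table and leave the root itself untouched.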
+ {
+ Handle<OrderedHashMap> map = empty;
+
+ CHECK_EQ(0, map->NumberOfBuckets());
+ CHECK_EQ(0, map->NumberOfElements());
+ CHECK(!OrderedHashMap::HasKey(isolate, *map, *key1));
+
+ TestEmptyOrderedHashTable(isolate, factory, map);
+ }
+ {
+ Handle<OrderedHashMap> map = empty;
+
+ map = OrderedHashMap::EnsureGrowable(isolate, map).ToHandleChecked();
+
+ CHECK_LT(0, map->NumberOfBuckets());
+ CHECK_EQ(0, map->NumberOfElements());
+ }
+ {
+ Handle<OrderedHashMap> map = empty;
+
+ CHECK(map->FindEntry(isolate, *key1).is_not_found());
+
+ TestEmptyOrderedHashTable(isolate, factory, map);
+ }
+ {
+ Handle<OrderedHashMap> map = empty;
+
+ map = OrderedHashMap::Add(isolate, map, key1, value1).ToHandleChecked();
+
+ CHECK_EQ(1, map->NumberOfElements());
+ CHECK(OrderedHashMap::HasKey(isolate, *map, *key1));
+ }
+ {
+ Handle<OrderedHashMap> map = empty;
+
+ map = OrderedHashMap::Clear(isolate, map);
+
+ TestEmptyOrderedHashTable(isolate, factory, map);
+ }
+ {
+ Handle<OrderedHashMap> map = empty;
+
+ map = OrderedHashMap::Rehash(isolate, map).ToHandleChecked();
+
+ TestEmptyOrderedHashTable(isolate, factory, map);
+ }
+ {
+ Handle<OrderedHashMap> map = empty;
+
+ map = OrderedHashMap::Shrink(isolate, map);
+
+ TestEmptyOrderedHashTable(isolate, factory, map);
+ }
+ {
+ Handle<OrderedHashMap> map = empty;
+
+ OrderedHashMap::Delete(isolate, *map, *key1);
+
+ TestEmptyOrderedHashTable(isolate, factory, map);
+ }
+}
+
+TEST(ZeroSizeOrderedHashSet) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+ ReadOnlyRoots ro_roots(isolate);
+
+ Handle<Smi> key1(Smi::FromInt(1), isolate);
+ Handle<Smi> value1(Smi::FromInt(1), isolate);
+
+ Handle<OrderedHashSet> empty =
+ Handle<OrderedHashSet>(ro_roots.empty_ordered_hash_set(), isolate);
+
+ {
+ Handle<OrderedHashSet> set = empty;
+
+ CHECK_EQ(0, set->NumberOfBuckets());
+ CHECK_EQ(0, set->NumberOfElements());
+ CHECK(!OrderedHashSet::HasKey(isolate, *set, *key1));
+
+ TestEmptyOrderedHashTable(isolate, factory, set);
+ }
+ {
+ Handle<OrderedHashSet> set = empty;
+
+ set = OrderedHashSet::EnsureGrowable(isolate, set).ToHandleChecked();
+
+ CHECK_LT(0, set->NumberOfBuckets());
+ CHECK_EQ(0, set->NumberOfElements());
+ }
+ {
+ Handle<OrderedHashSet> set = empty;
+
+ CHECK(set->FindEntry(isolate, *key1).is_not_found());
+
+ TestEmptyOrderedHashTable(isolate, factory, set);
+ }
+ {
+ Handle<OrderedHashSet> set = empty;
+
+ set = OrderedHashSet::Add(isolate, set, key1).ToHandleChecked();
+
+ CHECK_EQ(1, set->NumberOfElements());
+ CHECK(OrderedHashSet::HasKey(isolate, *set, *key1));
+ }
+ {
+ Handle<OrderedHashSet> set = empty;
+
+ set = OrderedHashSet::Clear(isolate, set);
+
+ TestEmptyOrderedHashTable(isolate, factory, set);
+ }
+ {
+ Handle<OrderedHashSet> set = empty;
+
+ set = OrderedHashSet::Rehash(isolate, set).ToHandleChecked();
+
+ TestEmptyOrderedHashTable(isolate, factory, set);
+ }
+ {
+ Handle<OrderedHashSet> set = empty;
+
+ set = OrderedHashSet::Shrink(isolate, set);
+
+ TestEmptyOrderedHashTable(isolate, factory, set);
+ }
+ {
+ Handle<OrderedHashSet> set = empty;
+
+ OrderedHashSet::Delete(isolate, *set, *key1);
+
+ TestEmptyOrderedHashTable(isolate, factory, set);
+ }
+}
+
+TEST(ZeroSizeOrderedNameDictionary) {
+ LocalContext context;
+ Isolate* isolate = GetIsolateFrom(&context);
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+ ReadOnlyRoots ro_roots(isolate);
+
+ Handle<String> key1 = factory->InternalizeUtf8String("key1");
+ Handle<String> value1 = factory->InternalizeUtf8String("value1");
+ PropertyDetails details = PropertyDetails::Empty();
+
+ Handle<OrderedNameDictionary> empty = Handle<OrderedNameDictionary>(
+ ro_roots.empty_ordered_property_dictionary(), isolate);
+
+ {
+ Handle<OrderedNameDictionary> dict = empty;
+
+ CHECK_EQ(0, dict->NumberOfBuckets());
+ CHECK_EQ(0, dict->NumberOfElements());
+ CHECK(!HasKey(isolate, dict, *key1));
+
+ TestEmptyOrderedHashTable(isolate, factory, dict);
+ }
+ {
+ Handle<OrderedNameDictionary> dict = empty;
+
+ CHECK(dict->FindEntry(isolate, *key1).is_not_found());
+
+ TestEmptyOrderedHashTable(isolate, factory, dict);
+ }
+ {
+ Handle<OrderedNameDictionary> dict = empty;
+
+ dict = OrderedNameDictionary::Add(isolate, dict, key1, value1, details)
+ .ToHandleChecked();
+ CHECK_EQ(1, dict->NumberOfElements());
+ CHECK(HasKey(isolate, dict, *key1));
+ }
+ {
+ Handle<OrderedNameDictionary> dict = empty;
+
+ dict = OrderedNameDictionary::Rehash(isolate, dict, 0).ToHandleChecked();
+
+ TestEmptyOrderedHashTable(isolate, factory, dict);
+ }
+ {
+ Handle<OrderedNameDictionary> dict = empty;
+
+ dict = OrderedNameDictionary::Shrink(isolate, dict);
+
+ TestEmptyOrderedHashTable(isolate, factory, dict);
+ }
+}
+
} // namespace test_orderedhashtable
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 6d4246ce5b..1076a67214 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -1620,8 +1620,6 @@ enum ParserFlag {
kAllowLazy,
kAllowNatives,
kAllowHarmonyPrivateMethods,
- kAllowHarmonyDynamicImport,
- kAllowHarmonyImportMeta,
kAllowHarmonyLogicalAssignment,
};
@@ -1634,8 +1632,6 @@ enum ParserSyncTestResult {
void SetGlobalFlags(base::EnumSet<ParserFlag> flags) {
i::FLAG_allow_natives_syntax = flags.contains(kAllowNatives);
i::FLAG_harmony_private_methods = flags.contains(kAllowHarmonyPrivateMethods);
- i::FLAG_harmony_dynamic_import = flags.contains(kAllowHarmonyDynamicImport);
- i::FLAG_harmony_import_meta = flags.contains(kAllowHarmonyImportMeta);
i::FLAG_harmony_logical_assignment =
flags.contains(kAllowHarmonyLogicalAssignment);
}
@@ -1645,10 +1641,6 @@ void SetParserFlags(i::UnoptimizedCompileFlags* compile_flags,
compile_flags->set_allow_natives_syntax(flags.contains(kAllowNatives));
compile_flags->set_allow_harmony_private_methods(
flags.contains(kAllowHarmonyPrivateMethods));
- compile_flags->set_allow_harmony_dynamic_import(
- flags.contains(kAllowHarmonyDynamicImport));
- compile_flags->set_allow_harmony_import_meta(
- flags.contains(kAllowHarmonyImportMeta));
compile_flags->set_allow_harmony_logical_assignment(
flags.contains(kAllowHarmonyLogicalAssignment));
}
@@ -4862,23 +4854,8 @@ TEST(ImportExpressionSuccess) {
// clang-format on
- // We ignore test error messages because the error message from the
- // parser/preparser is different for the same data depending on the
- // context.
- // For example, a top level "import(" is parsed as an
- // import declaration. The parser parses the import token correctly
- // and then shows an "Unexpected token '('" error message. The
- // preparser does not understand the import keyword (this test is
- // run without kAllowHarmonyDynamicImport flag), so this results in
- // an "Unexpected token 'import'" error.
- RunParserSyncTest(context_data, data, kError);
- RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0,
- nullptr, 0, true, true);
- static const ParserFlag flags[] = {kAllowHarmonyDynamicImport};
- RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, flags,
- arraysize(flags));
- RunModuleParserSyncTest(context_data, data, kSuccess, nullptr, 0, flags,
- arraysize(flags));
+ RunParserSyncTest(context_data, data, kSuccess);
+ RunModuleParserSyncTest(context_data, data, kSuccess);
}
TEST(ImportExpressionErrors) {
@@ -4924,13 +4901,6 @@ TEST(ImportExpressionErrors) {
// clang-format on
RunParserSyncTest(context_data, data, kError);
- // We ignore the error messages for the reason explained in the
- // ImportExpressionSuccess test.
- RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0,
- nullptr, 0, true, true);
- static const ParserFlag flags[] = {kAllowHarmonyDynamicImport};
- RunParserSyncTest(context_data, data, kError, nullptr, 0, flags,
- arraysize(flags));
// We ignore test error messages because the error message from
// the parser/preparser is different for the same data depending
@@ -4939,8 +4909,8 @@ TEST(ImportExpressionErrors) {
// correctly and then shows an "Unexpected end of input" error
// message because of the '{'. The preparser shows an "Unexpected
// token '{'" because it's not a valid token in a CallExpression.
- RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, flags,
- arraysize(flags), nullptr, 0, true, true);
+ RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0,
+ nullptr, 0, true, true);
}
{
@@ -4960,11 +4930,8 @@ TEST(ImportExpressionErrors) {
RunParserSyncTest(context_data, data, kError);
RunModuleParserSyncTest(context_data, data, kError);
- static const ParserFlag flags[] = {kAllowHarmonyDynamicImport};
- RunParserSyncTest(context_data, data, kError, nullptr, 0, flags,
- arraysize(flags));
- RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, flags,
- arraysize(flags));
+ RunParserSyncTest(context_data, data, kError);
+ RunModuleParserSyncTest(context_data, data, kError);
}
// Import statements as arrow function params and destructuring targets.
@@ -4993,11 +4960,131 @@ TEST(ImportExpressionErrors) {
RunParserSyncTest(context_data, data, kError);
RunModuleParserSyncTest(context_data, data, kError);
- static const ParserFlag flags[] = {kAllowHarmonyDynamicImport};
- RunParserSyncTest(context_data, data, kError, nullptr, 0, flags,
- arraysize(flags));
- RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, flags,
- arraysize(flags));
+ RunParserSyncTest(context_data, data, kError);
+ RunModuleParserSyncTest(context_data, data, kError);
+ }
+}
+
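+// Import assertions ("import n from 'n.js' assert { a: 'b' };") are gated by
+// i::FLAG_harmony_import_assertions. The sources below must parse as modules
+// but fail to parse as classic scripts.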
+TEST(BasicImportAssertionParsing) {
+ // clang-format off
+ const char* kSources[] = {
+ "import { a as b } from 'm.js' assert { };",
+ "import n from 'n.js' assert { };",
+ "export { a as b } from 'm.js' assert { };",
+ "export * from 'm.js' assert { };",
+ "import 'm.js' assert { };",
+ "import * as foo from 'bar.js' assert { };",
+
+ "import { a as b } from 'm.js' assert { a: 'b' };",
+ "import { a as b } from 'm.js' assert { c: 'd' };",
+ "import { a as b } from 'm.js' assert { 'c': 'd' };",
+ "import { a as b } from 'm.js' assert { a: 'b', 'c': 'd', e: 'f' };",
+ "import { a as b } from 'm.js' assert { 'c': 'd', };",
+ "import n from 'n.js' assert { 'c': 'd' };",
+ "export { a as b } from 'm.js' assert { 'c': 'd' };",
+ "export * from 'm.js' assert { 'c': 'd' };",
+ "import 'm.js' assert { 'c': 'd' };",
+ "import * as foo from 'bar.js' assert { 'c': 'd' };",
+
+ "import { a as b } from 'm.js' assert { \nc: 'd'};",
+ "import { a as b } from 'm.js' assert { c:\n 'd'};",
+ "import { a as b } from 'm.js' assert { c:'d'\n};",
+ };
+ // clang-format on
+
+ i::FLAG_harmony_import_assertions = true;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::Factory* factory = isolate->factory();
+
+ v8::HandleScope handles(CcTest::isolate());
+ v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+ v8::Context::Scope context_scope(context);
+
+ isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+ 128 * 1024);
+
+ for (unsigned i = 0; i < arraysize(kSources); ++i) {
+ i::Handle<i::String> source =
+ factory->NewStringFromAsciiChecked(kSources[i]);
+
+ // Show that parsing as a module works.
+ {
+ i::Handle<i::Script> script = factory->NewScript(source);
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ flags.set_is_module(true);
+ i::ParseInfo info(isolate, flags, &compile_state);
+ CHECK_PARSE_PROGRAM(&info, script, isolate);
+ }
+
+ // And that parsing a script does not.
+ {
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::Handle<i::Script> script = factory->NewScript(source);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ i::ParseInfo info(isolate, flags, &compile_state);
+ CHECK(!i::parsing::ParseProgram(&info, script, isolate,
+ parsing::ReportStatisticsMode::kYes));
+ CHECK(info.pending_error_handler()->has_pending_error());
+ }
+ }
+}
+
+TEST(ImportAssertionParsingErrors) {
+ // clang-format off
+ const char* kErrorSources[] = {
+ "import { a } from 'm.js' assert {;",
+ "import { a } from 'm.js' assert };",
+ "import { a } from 'm.js' , assert { };",
+ "import { a } from 'm.js' assert , { };",
+ "import { a } from 'm.js' assert { , };",
+ "import { a } from 'm.js' assert { b };",
+ "import { a } from 'm.js' assert { 'b' };",
+ "import { a } from 'm.js' assert { for };",
+ "import { a } from 'm.js' assert { assert };",
+ "export { a } assert { };",
+ "export * assert { };",
+
+ "import 'm.js'\n assert { };",
+ "import 'm.js' \nassert { };",
+ "import { a } from 'm.js'\n assert { };",
+ "export * from 'm.js'\n assert { };",
+
+ "import { a } from 'm.js' assert { 1: 2 };",
+ "import { a } from 'm.js' assert { b: c };",
+ "import { a } from 'm.js' assert { 'b': c };",
+ "import { a } from 'm.js' assert { , b: c };",
+ "import { a } from 'm.js' assert { a: 'b', a: 'c' };",
+ "import { a } from 'm.js' assert { a: 'b', 'a': 'c' };",
+ };
+ // clang-format on
+
+ i::FLAG_harmony_import_assertions = true;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::Factory* factory = isolate->factory();
+
+ v8::HandleScope handles(CcTest::isolate());
+ v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+ v8::Context::Scope context_scope(context);
+
+ isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+ 128 * 1024);
+
+ for (unsigned i = 0; i < arraysize(kErrorSources); ++i) {
+ i::Handle<i::String> source =
+ factory->NewStringFromAsciiChecked(kErrorSources[i]);
+
+ i::Handle<i::Script> script = factory->NewScript(source);
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ flags.set_is_module(true);
+ i::ParseInfo info(isolate, flags, &compile_state);
+ CHECK(!i::parsing::ParseProgram(&info, script, isolate,
+ parsing::ReportStatisticsMode::kYes));
+ CHECK(info.pending_error_handler()->has_pending_error());
}
}
@@ -7539,7 +7626,6 @@ TEST(NamespaceExportParsing) {
};
// clang-format on
- i::FLAG_harmony_namespace_exports = true;
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
@@ -7996,19 +8082,19 @@ TEST(ModuleParsingInternals) {
CHECK_EQ(5u, descriptor->module_requests().size());
for (const auto& elem : descriptor->module_requests()) {
- if (elem.first->IsOneByteEqualTo("m.js")) {
+ if (elem.first->specifier()->IsOneByteEqualTo("m.js")) {
CHECK_EQ(0, elem.second.index);
CHECK_EQ(51, elem.second.position);
- } else if (elem.first->IsOneByteEqualTo("n.js")) {
+ } else if (elem.first->specifier()->IsOneByteEqualTo("n.js")) {
CHECK_EQ(1, elem.second.index);
CHECK_EQ(72, elem.second.position);
- } else if (elem.first->IsOneByteEqualTo("p.js")) {
+ } else if (elem.first->specifier()->IsOneByteEqualTo("p.js")) {
CHECK_EQ(2, elem.second.index);
CHECK_EQ(123, elem.second.position);
- } else if (elem.first->IsOneByteEqualTo("q.js")) {
+ } else if (elem.first->specifier()->IsOneByteEqualTo("q.js")) {
CHECK_EQ(3, elem.second.index);
CHECK_EQ(249, elem.second.position);
- } else if (elem.first->IsOneByteEqualTo("bar.js")) {
+ } else if (elem.first->specifier()->IsOneByteEqualTo("bar.js")) {
CHECK_EQ(4, elem.second.index);
CHECK_EQ(370, elem.second.position);
} else {
@@ -8086,6 +8172,353 @@ TEST(ModuleParsingInternals) {
CheckEntry(entry, nullptr, "aa", "aa", 0);
}
+TEST(ModuleParsingInternalsWithImportAssertions) {
+ i::FLAG_harmony_import_assertions = true;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::Factory* factory = isolate->factory();
+ v8::HandleScope handles(CcTest::isolate());
+ v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+ v8::Context::Scope context_scope(context);
+ isolate->stack_guard()->SetStackLimit(base::Stack::GetCurrentStackPosition() -
+ 128 * 1024);
+
+ static const char kSource[] =
+ "import { q as z } from 'm.js';"
+ "import { q as z2 } from 'm.js' assert { foo: 'bar'};"
+ "import { q as z3 } from 'm.js' assert { foo2: 'bar'};"
+ "import { q as z4 } from 'm.js' assert { foo: 'bar2'};"
+ "import { q as z5 } from 'm.js' assert { foo: 'bar', foo2: 'bar'};"
+ "import { q as z6 } from 'n.js' assert { foo: 'bar'};"
+ "import 'm.js' assert { foo: 'bar'};"
+ "export * from 'm.js' assert { foo: 'bar', foo2: 'bar'};";
+ i::Handle<i::String> source = factory->NewStringFromAsciiChecked(kSource);
+ i::Handle<i::Script> script = factory->NewScript(source);
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ flags.set_is_module(true);
+ i::ParseInfo info(isolate, flags, &compile_state);
+ CHECK_PARSE_PROGRAM(&info, script, isolate);
+
+ i::FunctionLiteral* func = info.literal();
+ i::ModuleScope* module_scope = func->scope()->AsModuleScope();
+ CHECK(module_scope->is_module_scope());
+
+ i::SourceTextModuleDescriptor* descriptor = module_scope->module();
+ CHECK_NOT_NULL(descriptor);
+
+ const i::AstRawString* foo_string =
+ info.ast_value_factory()->GetOneByteString("foo");
+ const i::AstRawString* foo2_string =
+ info.ast_value_factory()->GetOneByteString("foo2");
+ CHECK_EQ(6u, descriptor->module_requests().size());
+ for (const auto& elem : descriptor->module_requests()) {
+ if (elem.second.index == 0) {
+ CHECK(elem.first->specifier()->IsOneByteEqualTo("m.js"));
+ CHECK_EQ(0, elem.first->import_assertions()->size());
+ CHECK_EQ(23, elem.second.position);
+ } else if (elem.second.index == 1) {
+ CHECK(elem.first->specifier()->IsOneByteEqualTo("m.js"));
+ CHECK_EQ(1, elem.first->import_assertions()->size());
+ CHECK_EQ(54, elem.second.position);
+ CHECK(elem.first->import_assertions()
+ ->at(foo_string)
+ .first->IsOneByteEqualTo("bar"));
+ CHECK_EQ(70,
+ elem.first->import_assertions()->at(foo_string).second.beg_pos);
+ } else if (elem.second.index == 2) {
+ CHECK(elem.first->specifier()->IsOneByteEqualTo("m.js"));
+ CHECK_EQ(1, elem.first->import_assertions()->size());
+ CHECK_EQ(106, elem.second.position);
+ CHECK(elem.first->import_assertions()
+ ->at(foo2_string)
+ .first->IsOneByteEqualTo("bar"));
+ CHECK_EQ(122,
+ elem.first->import_assertions()->at(foo2_string).second.beg_pos);
+ } else if (elem.second.index == 3) {
+ CHECK(elem.first->specifier()->IsOneByteEqualTo("m.js"));
+ CHECK_EQ(1, elem.first->import_assertions()->size());
+ CHECK_EQ(159, elem.second.position);
+ CHECK(elem.first->import_assertions()
+ ->at(foo_string)
+ .first->IsOneByteEqualTo("bar2"));
+ CHECK_EQ(175,
+ elem.first->import_assertions()->at(foo_string).second.beg_pos);
+ } else if (elem.second.index == 4) {
+ CHECK(elem.first->specifier()->IsOneByteEqualTo("m.js"));
+ CHECK_EQ(2, elem.first->import_assertions()->size());
+ CHECK_EQ(212, elem.second.position);
+ CHECK(elem.first->import_assertions()
+ ->at(foo_string)
+ .first->IsOneByteEqualTo("bar"));
+ CHECK_EQ(228,
+ elem.first->import_assertions()->at(foo_string).second.beg_pos);
+ CHECK(elem.first->import_assertions()
+ ->at(foo2_string)
+ .first->IsOneByteEqualTo("bar"));
+ CHECK_EQ(240,
+ elem.first->import_assertions()->at(foo2_string).second.beg_pos);
+ } else if (elem.second.index == 5) {
+ CHECK(elem.first->specifier()->IsOneByteEqualTo("n.js"));
+ CHECK_EQ(1, elem.first->import_assertions()->size());
+ CHECK_EQ(277, elem.second.position);
+ CHECK(elem.first->import_assertions()
+ ->at(foo_string)
+ .first->IsOneByteEqualTo("bar"));
+ CHECK_EQ(293,
+ elem.first->import_assertions()->at(foo_string).second.beg_pos);
+ } else {
+ UNREACHABLE();
+ }
+ }
+}
+
+TEST(ModuleParsingImportAssertionOrdering) {
+ i::FLAG_harmony_import_assertions = true;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::Factory* factory = isolate->factory();
+ v8::HandleScope handles(CcTest::isolate());
+ v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+ v8::Context::Scope context_scope(context);
+ isolate->stack_guard()->SetStackLimit(base::Stack::GetCurrentStackPosition() -
+ 128 * 1024);
+
+ static const char kSource[] =
+ "import 'foo' assert { };"
+ "import 'baaaaaar' assert { };"
+ "import 'aa' assert { };"
+ "import 'a' assert { a: 'b' };"
+ "import 'b' assert { };"
+ "import 'd' assert { a: 'b' };"
+ "import 'c' assert { };"
+ "import 'f' assert { };"
+ "import 'f' assert { a: 'b'};"
+ "import 'g' assert { a: 'b' };"
+ "import 'g' assert { };"
+ "import 'h' assert { a: 'd' };"
+ "import 'h' assert { b: 'c' };"
+ "import 'i' assert { b: 'c' };"
+ "import 'i' assert { a: 'd' };"
+ "import 'j' assert { a: 'b' };"
+ "import 'j' assert { a: 'c' };"
+ "import 'k' assert { a: 'c' };"
+ "import 'k' assert { a: 'b' };"
+ "import 'l' assert { a: 'b', e: 'f' };"
+ "import 'l' assert { a: 'c', d: 'g' };"
+ "import 'm' assert { a: 'c', d: 'g' };"
+ "import 'm' assert { a: 'b', e: 'f' };"
+ "import 'n' assert { 'd': '' };"
+ "import 'n' assert { 'a': 'b' };"
+ "import 'o' assert { 'a': 'b' };"
+ "import 'o' assert { 'd': '' };"
+ "import 'p' assert { 'z': 'c' };"
+ "import 'p' assert { 'a': 'c', 'b': 'c' };";
+ i::Handle<i::String> source = factory->NewStringFromAsciiChecked(kSource);
+ i::Handle<i::Script> script = factory->NewScript(source);
+ i::UnoptimizedCompileState compile_state(isolate);
+ i::UnoptimizedCompileFlags flags =
+ i::UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
+ flags.set_is_module(true);
+ i::ParseInfo info(isolate, flags, &compile_state);
+ CHECK_PARSE_PROGRAM(&info, script, isolate);
+
+ i::FunctionLiteral* func = info.literal();
+ i::ModuleScope* module_scope = func->scope()->AsModuleScope();
+ CHECK(module_scope->is_module_scope());
+
+ i::SourceTextModuleDescriptor* descriptor = module_scope->module();
+ CHECK_NOT_NULL(descriptor);
+
+ const i::AstRawString* a_string =
+ info.ast_value_factory()->GetOneByteString("a");
+ const i::AstRawString* b_string =
+ info.ast_value_factory()->GetOneByteString("b");
+ const i::AstRawString* d_string =
+ info.ast_value_factory()->GetOneByteString("d");
+ const i::AstRawString* e_string =
+ info.ast_value_factory()->GetOneByteString("e");
+ const i::AstRawString* z_string =
+ info.ast_value_factory()->GetOneByteString("z");
+ CHECK_EQ(29u, descriptor->module_requests().size());
+ auto request_iterator = descriptor->module_requests().cbegin();
+
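+ // The checks below document the iteration order of module_requests():
+ // requests appear sorted by specifier (shorter strings first, then
+ // byte-wise), and requests with the same specifier are ordered by their
+ // assertion sets (fewer assertions first, then by key/value content).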
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("a"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("b"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("c"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("d"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("f"));
+ CHECK_EQ(0, request_iterator->first->import_assertions()->size());
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("f"));
+ CHECK_EQ(1, request_iterator->first->import_assertions()->size());
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("g"));
+ CHECK_EQ(0, request_iterator->first->import_assertions()->size());
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("g"));
+ CHECK_EQ(1, request_iterator->first->import_assertions()->size());
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("h"));
+ CHECK_EQ(1, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(a_string)
+ .first->IsOneByteEqualTo("d"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("h"));
+ CHECK_EQ(1, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(b_string)
+ .first->IsOneByteEqualTo("c"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("i"));
+ CHECK_EQ(1, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(a_string)
+ .first->IsOneByteEqualTo("d"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("i"));
+ CHECK_EQ(1, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(b_string)
+ .first->IsOneByteEqualTo("c"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("j"));
+ CHECK_EQ(1, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(a_string)
+ .first->IsOneByteEqualTo("b"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("j"));
+ CHECK_EQ(1, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(a_string)
+ .first->IsOneByteEqualTo("c"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("k"));
+ CHECK_EQ(1, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(a_string)
+ .first->IsOneByteEqualTo("b"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("k"));
+ CHECK_EQ(1, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(a_string)
+ .first->IsOneByteEqualTo("c"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("l"));
+ CHECK_EQ(2, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(a_string)
+ .first->IsOneByteEqualTo("b"));
+ CHECK(request_iterator->first->import_assertions()
+ ->at(e_string)
+ .first->IsOneByteEqualTo("f"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("l"));
+ CHECK_EQ(2, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(a_string)
+ .first->IsOneByteEqualTo("c"));
+ CHECK(request_iterator->first->import_assertions()
+ ->at(d_string)
+ .first->IsOneByteEqualTo("g"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("m"));
+ CHECK_EQ(2, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(a_string)
+ .first->IsOneByteEqualTo("b"));
+ CHECK(request_iterator->first->import_assertions()
+ ->at(e_string)
+ .first->IsOneByteEqualTo("f"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("m"));
+ CHECK_EQ(2, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(a_string)
+ .first->IsOneByteEqualTo("c"));
+ CHECK(request_iterator->first->import_assertions()
+ ->at(d_string)
+ .first->IsOneByteEqualTo("g"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("n"));
+ CHECK_EQ(1, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(a_string)
+ .first->IsOneByteEqualTo("b"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("n"));
+ CHECK_EQ(1, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(d_string)
+ .first->IsOneByteEqualTo(""));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("o"));
+ CHECK_EQ(1, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(a_string)
+ .first->IsOneByteEqualTo("b"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("o"));
+ CHECK_EQ(1, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(d_string)
+ .first->IsOneByteEqualTo(""));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("p"));
+ CHECK_EQ(1, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(z_string)
+ .first->IsOneByteEqualTo("c"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("p"));
+ CHECK_EQ(2, request_iterator->first->import_assertions()->size());
+ CHECK(request_iterator->first->import_assertions()
+ ->at(a_string)
+ .first->IsOneByteEqualTo("c"));
+ CHECK(request_iterator->first->import_assertions()
+ ->at(b_string)
+ .first->IsOneByteEqualTo("c"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("aa"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("foo"));
+ ++request_iterator;
+
+ CHECK(request_iterator->first->specifier()->IsOneByteEqualTo("baaaaaar"));
+}
TEST(DuplicateProtoError) {
const char* context_data[][2] = {
@@ -9448,23 +9881,12 @@ TEST(ImportMetaSuccess) {
// clang-format on
- // Making sure the same *wouldn't* parse without the flags
- RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0,
- nullptr, 0, true, true);
-
- static const ParserFlag flags[] = {
- kAllowHarmonyImportMeta, kAllowHarmonyDynamicImport,
- };
// 2.1.1 Static Semantics: Early Errors
// ImportMeta
// * It is an early Syntax Error if Module is not the syntactic goal symbol.
- RunParserSyncTest(context_data, data, kError, nullptr, 0, flags,
- arraysize(flags));
- // Making sure the same wouldn't parse without the flags either
RunParserSyncTest(context_data, data, kError);
- RunModuleParserSyncTest(context_data, data, kSuccess, nullptr, 0, flags,
- arraysize(flags));
+ RunModuleParserSyncTest(context_data, data, kSuccess);
}
TEST(ImportMetaFailure) {
@@ -9490,18 +9912,8 @@ TEST(ImportMetaFailure) {
// clang-format on
- static const ParserFlag flags[] = {
- kAllowHarmonyImportMeta, kAllowHarmonyDynamicImport,
- };
-
- RunParserSyncTest(context_data, data, kError, nullptr, 0, flags,
- arraysize(flags));
- RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, flags,
- arraysize(flags));
-
- RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0,
- nullptr, 0, true, true);
RunParserSyncTest(context_data, data, kError);
+ RunModuleParserSyncTest(context_data, data, kError);
}
TEST(ConstSloppy) {
diff --git a/deps/v8/test/cctest/test-persistent-handles.cc b/deps/v8/test/cctest/test-persistent-handles.cc
index 8805f9307a..3c440bff67 100644
--- a/deps/v8/test/cctest/test-persistent-handles.cc
+++ b/deps/v8/test/cctest/test-persistent-handles.cc
@@ -41,7 +41,8 @@ class PersistentHandlesThread final : public v8::base::Thread {
sema_gc_finished_(sema_gc_finished) {}
void Run() override {
- LocalHeap local_heap(heap_, std::move(ph_));
+ LocalHeap local_heap(heap_, ThreadKind::kBackground, std::move(ph_));
+ UnparkedScope unparked_scope(&local_heap);
LocalHandleScope scope(&local_heap);
for (int i = 0; i < kNumHandles; i++) {
@@ -65,6 +66,12 @@ class PersistentHandlesThread final : public v8::base::Thread {
ph_ = local_heap.DetachPersistentHandles();
}
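+ // Returns ownership of the persistent handles to the caller; meant to be
+ // called from the main thread after Run() has finished.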
+ std::unique_ptr<PersistentHandles> DetachPersistentHandles() {
+ CHECK(ph_);
+ return std::move(ph_);
+ }
+
+ private:
Heap* heap_;
std::vector<Handle<HeapNumber>> handles_;
std::unique_ptr<PersistentHandles> ph_;
@@ -105,7 +112,7 @@ TEST(CreatePersistentHandles) {
thread->Join();
// Get the persistent handles back to the main thread.
- ph = std::move(thread->ph_);
+ ph = thread->DetachPersistentHandles();
ph->NewHandle(number);
}
@@ -122,7 +129,8 @@ TEST(DereferencePersistentHandle) {
ph = phs->NewHandle(number);
}
{
- LocalHeap local_heap(isolate->heap(), std::move(phs));
+ LocalHeap local_heap(isolate->heap(), ThreadKind::kMain, std::move(phs));
+ UnparkedScope scope(&local_heap);
CHECK_EQ(42, ph->value());
DisallowHandleDereference disallow_scope;
CHECK_EQ(42, ph->value());
@@ -133,8 +141,7 @@ TEST(NewPersistentHandleFailsWhenParked) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
- LocalHeap local_heap(isolate->heap());
- ParkedScope scope(&local_heap);
+ LocalHeap local_heap(isolate->heap(), ThreadKind::kMain);
// Fail here in debug mode: persistent handles can't be created while the
// local heap is parked.
local_heap.NewPersistentHandle(Smi::FromInt(1));
@@ -144,8 +151,8 @@ TEST(NewPersistentHandleFailsWhenParkedExplicit) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
- LocalHeap local_heap(isolate->heap(), isolate->NewPersistentHandles());
- ParkedScope scope(&local_heap);
+ LocalHeap local_heap(isolate->heap(), ThreadKind::kMain,
+ isolate->NewPersistentHandles());
// Fail here in debug mode: persistent handles can't be created while the
// local heap is parked.
local_heap.NewPersistentHandle(Smi::FromInt(1));
diff --git a/deps/v8/test/cctest/test-platform.cc b/deps/v8/test/cctest/test-platform.cc
index 0298e73dc9..b5c65a2c63 100644
--- a/deps/v8/test/cctest/test-platform.cc
+++ b/deps/v8/test/cctest/test-platform.cc
@@ -3,8 +3,10 @@
// found in the LICENSE file.
#include <stdint.h>
+
#include "src/base/build_config.h"
#include "src/base/platform/platform.h"
+#include "test/cctest/cctest-utils.h"
#include "test/cctest/cctest.h"
using OS = v8::base::OS;
@@ -13,33 +15,11 @@ namespace v8 {
namespace internal {
#ifdef V8_CC_GNU
-static uintptr_t sp_addr = 0;
-void GetStackPointer(const v8::FunctionCallbackInfo<v8::Value>& args) {
-#if V8_HOST_ARCH_X64
- __asm__ __volatile__("mov %%rsp, %0" : "=g"(sp_addr));
-#elif V8_HOST_ARCH_IA32
- __asm__ __volatile__("mov %%esp, %0" : "=g"(sp_addr));
-#elif V8_HOST_ARCH_ARM
- __asm__ __volatile__("str sp, %0" : "=g"(sp_addr));
-#elif V8_HOST_ARCH_ARM64
- __asm__ __volatile__("mov x16, sp; str x16, %0" : "=g"(sp_addr));
-#elif V8_HOST_ARCH_MIPS
- __asm__ __volatile__("sw $sp, %0" : "=g"(sp_addr));
-#elif V8_HOST_ARCH_MIPS64
- __asm__ __volatile__("sd $sp, %0" : "=g"(sp_addr));
-#elif defined(__s390x__) || defined(_ARCH_S390X)
- __asm__ __volatile__("stg %%r15, %0" : "=m"(sp_addr));
-#elif defined(__s390__) || defined(_ARCH_S390)
- __asm__ __volatile__("st 15, %0" : "=m"(sp_addr));
-#elif defined(__PPC64__) || defined(_ARCH_PPC64)
- __asm__ __volatile__("std 1, %0" : "=g"(sp_addr));
-#elif defined(__PPC__) || defined(_ARCH_PPC)
- __asm__ __volatile__("stw 1, %0" : "=g"(sp_addr));
-#else
-#error Host architecture was not detected as supported by v8
-#endif
+static uintptr_t sp_addr = 0;
+void GetStackPointerCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
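+ // GET_STACK_POINTER_TO (from cctest-utils.h) expands to the
+ // per-architecture inline assembly that previously lived in this file.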
+ GET_STACK_POINTER_TO(sp_addr);
args.GetReturnValue().Set(v8::Integer::NewFromUnsigned(
args.GetIsolate(), static_cast<uint32_t>(sp_addr)));
}
@@ -49,8 +29,9 @@ TEST(StackAlignment) {
v8::HandleScope handle_scope(isolate);
v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
- global_template->Set(isolate, "get_stack_pointer",
- v8::FunctionTemplate::New(isolate, GetStackPointer));
+ global_template->Set(
+ isolate, "get_stack_pointer",
+ v8::FunctionTemplate::New(isolate, GetStackPointerCallback));
LocalContext env(nullptr, global_template);
CompileRun(
diff --git a/deps/v8/test/cctest/test-pointer-auth-arm64.cc b/deps/v8/test/cctest/test-pointer-auth-arm64.cc
index 11632be808..d55349ff2c 100644
--- a/deps/v8/test/cctest/test-pointer-auth-arm64.cc
+++ b/deps/v8/test/cctest/test-pointer-auth-arm64.cc
@@ -30,6 +30,9 @@ TEST(compute_pac) {
}
TEST(add_and_auth_pac) {
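+ // The simulator aborts on a failed pointer authentication in debug builds;
+ // turn that off so the test itself can observe the failure.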
+#ifdef DEBUG
+ i::FLAG_sim_abort_on_bad_auth = false;
+#endif
Decoder<DispatchingDecoderVisitor>* decoder =
new Decoder<DispatchingDecoderVisitor>();
Simulator simulator(decoder);
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 2144bf8e98..02e65e76ed 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -34,6 +34,7 @@
#include "src/objects/objects-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
+#include "src/profiler/symbolizer.h"
#include "test/cctest/cctest.h"
#include "test/cctest/profiler-extension.h"
@@ -200,7 +201,7 @@ TEST(ProfileTreeAddPathFromEndWithLineNumbers) {
ProfileTree tree(CcTest::i_isolate());
ProfileTreeTestHelper helper(&tree);
- ProfileStackTrace path = {{{&c, 5}}, {{&b, 3}}, {{&a, 1}}};
+ ProfileStackTrace path = {{&c, 5}, {&b, 3}, {&a, 1}};
tree.AddPathFromEnd(path, v8::CpuProfileNode::kNoLineNumberInfo, true,
v8::CpuProfilingMode::kCallerLineNumbers);
@@ -355,6 +356,21 @@ TEST(CodeMapMoveAndDeleteCode) {
CHECK_EQ(entry3, code_map.FindEntry(ToAddress(0x1750)));
}
+TEST(CodeMapClear) {
+ CodeMap code_map;
+ CodeEntry* entry1 = new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "aaa");
+ CodeEntry* entry2 = new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "bbb");
+ code_map.AddCode(ToAddress(0x1500), entry1, 0x200);
+ code_map.AddCode(ToAddress(0x1700), entry2, 0x100);
+
+ code_map.Clear();
+ CHECK(!code_map.FindEntry(ToAddress(0x1500)));
+ CHECK(!code_map.FindEntry(ToAddress(0x1700)));
+
+ // Check that Clear() doesn't cause issues if called twice.
+ code_map.Clear();
+}
+
namespace {
class TestSetup {
@@ -376,19 +392,14 @@ class TestSetup {
TEST(SymbolizeTickSample) {
TestSetup test_setup;
- i::Isolate* isolate = CcTest::i_isolate();
- CpuProfilesCollection profiles(isolate);
- CpuProfiler profiler(isolate);
- profiles.set_cpu_profiler(&profiler);
- profiles.StartProfiling("");
CodeMap code_map;
- ProfileGenerator generator(&profiles, &code_map);
+ Symbolizer symbolizer(&code_map);
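+ // The Symbolizer only resolves raw frame addresses to CodeEntry pointers;
+ // unlike the old ProfileGenerator it does not record samples into a profile.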
CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
CodeEntry* entry2 = new CodeEntry(i::Logger::FUNCTION_TAG, "bbb");
CodeEntry* entry3 = new CodeEntry(i::Logger::FUNCTION_TAG, "ccc");
- generator.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
- generator.code_map()->AddCode(ToAddress(0x1700), entry2, 0x100);
- generator.code_map()->AddCode(ToAddress(0x1900), entry3, 0x50);
+ symbolizer.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
+ symbolizer.code_map()->AddCode(ToAddress(0x1700), entry2, 0x100);
+ symbolizer.code_map()->AddCode(ToAddress(0x1900), entry3, 0x50);
// We are building the following call tree:
// -> aaa - sample1
@@ -399,7 +410,13 @@ TEST(SymbolizeTickSample) {
sample1.tos = ToPointer(0x1500);
sample1.stack[0] = ToPointer(0x1510);
sample1.frames_count = 1;
- generator.SymbolizeTickSample(sample1);
+ Symbolizer::SymbolizedSample symbolized =
+ symbolizer.SymbolizeTickSample(sample1);
+ ProfileStackTrace& stack_trace = symbolized.stack_trace;
+ CHECK_EQ(2, stack_trace.size());
+ CHECK_EQ(entry1, stack_trace[0].code_entry);
+ CHECK_EQ(entry1, stack_trace[1].code_entry);
+
TickSample sample2;
sample2.pc = ToPointer(0x1925);
sample2.tos = ToPointer(0x1900);
@@ -407,32 +424,26 @@ TEST(SymbolizeTickSample) {
sample2.stack[1] = ToPointer(0x10000); // non-existent.
sample2.stack[2] = ToPointer(0x1620);
sample2.frames_count = 3;
- generator.SymbolizeTickSample(sample2);
+ symbolized = symbolizer.SymbolizeTickSample(sample2);
+ stack_trace = symbolized.stack_trace;
+ CHECK_EQ(4, stack_trace.size());
+ CHECK_EQ(entry3, stack_trace[0].code_entry);
+ CHECK_EQ(entry2, stack_trace[1].code_entry);
+ CHECK_EQ(nullptr, stack_trace[2].code_entry);
+ CHECK_EQ(entry1, stack_trace[3].code_entry);
+
TickSample sample3;
sample3.pc = ToPointer(0x1510);
sample3.tos = ToPointer(0x1500);
sample3.stack[0] = ToPointer(0x1910);
sample3.stack[1] = ToPointer(0x1610);
sample3.frames_count = 2;
- generator.SymbolizeTickSample(sample3);
-
- CpuProfile* profile = profiles.StopProfiling("");
- CHECK(profile);
- ProfileTreeTestHelper top_down_test_helper(profile->top_down());
- CHECK(!top_down_test_helper.Walk(entry2));
- CHECK(!top_down_test_helper.Walk(entry3));
- ProfileNode* node1 = top_down_test_helper.Walk(entry1);
- CHECK(node1);
- CHECK_EQ(entry1, node1->entry());
- ProfileNode* node2 = top_down_test_helper.Walk(entry1, entry1);
- CHECK(node2);
- CHECK_EQ(entry1, node2->entry());
- ProfileNode* node3 = top_down_test_helper.Walk(entry1, entry2, entry3);
- CHECK(node3);
- CHECK_EQ(entry3, node3->entry());
- ProfileNode* node4 = top_down_test_helper.Walk(entry1, entry3, entry1);
- CHECK(node4);
- CHECK_EQ(entry1, node4->entry());
+ symbolized = symbolizer.SymbolizeTickSample(sample3);
+ stack_trace = symbolized.stack_trace;
+ CHECK_EQ(3, stack_trace.size());
+ CHECK_EQ(entry1, stack_trace[0].code_entry);
+ CHECK_EQ(entry3, stack_trace[1].code_entry);
+ CHECK_EQ(entry1, stack_trace[2].code_entry);
}
static void CheckNodeIds(const ProfileNode* node, unsigned* expectedId) {
@@ -442,7 +453,6 @@ static void CheckNodeIds(const ProfileNode* node, unsigned* expectedId) {
}
}
-
TEST(SampleIds) {
TestSetup test_setup;
i::Isolate* isolate = CcTest::i_isolate();
@@ -451,13 +461,13 @@ TEST(SampleIds) {
profiles.set_cpu_profiler(&profiler);
profiles.StartProfiling("", {CpuProfilingMode::kLeafNodeLineNumbers});
CodeMap code_map;
- ProfileGenerator generator(&profiles, &code_map);
+ Symbolizer symbolizer(&code_map);
CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
CodeEntry* entry2 = new CodeEntry(i::Logger::FUNCTION_TAG, "bbb");
CodeEntry* entry3 = new CodeEntry(i::Logger::FUNCTION_TAG, "ccc");
- generator.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
- generator.code_map()->AddCode(ToAddress(0x1700), entry2, 0x100);
- generator.code_map()->AddCode(ToAddress(0x1900), entry3, 0x50);
+ symbolizer.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
+ symbolizer.code_map()->AddCode(ToAddress(0x1700), entry2, 0x100);
+ symbolizer.code_map()->AddCode(ToAddress(0x1900), entry3, 0x50);
// We are building the following call tree:
// -> aaa #3 - sample1
@@ -468,7 +478,11 @@ TEST(SampleIds) {
sample1.pc = ToPointer(0x1600);
sample1.stack[0] = ToPointer(0x1510);
sample1.frames_count = 1;
- generator.SymbolizeTickSample(sample1);
+ auto symbolized = symbolizer.SymbolizeTickSample(sample1);
+ profiles.AddPathToCurrentProfiles(sample1.timestamp, symbolized.stack_trace,
+ symbolized.src_line, true,
+ base::TimeDelta());
+
TickSample sample2;
sample2.timestamp = v8::base::TimeTicks::HighResolutionNow();
sample2.pc = ToPointer(0x1925);
@@ -476,14 +490,21 @@ TEST(SampleIds) {
sample2.stack[1] = ToPointer(0x10000); // non-existent.
sample2.stack[2] = ToPointer(0x1620);
sample2.frames_count = 3;
- generator.SymbolizeTickSample(sample2);
+ symbolized = symbolizer.SymbolizeTickSample(sample2);
+ profiles.AddPathToCurrentProfiles(sample2.timestamp, symbolized.stack_trace,
+ symbolized.src_line, true,
+ base::TimeDelta());
+
TickSample sample3;
sample3.timestamp = v8::base::TimeTicks::HighResolutionNow();
sample3.pc = ToPointer(0x1510);
sample3.stack[0] = ToPointer(0x1910);
sample3.stack[1] = ToPointer(0x1610);
sample3.frames_count = 2;
- generator.SymbolizeTickSample(sample3);
+ symbolized = symbolizer.SymbolizeTickSample(sample3);
+ profiles.AddPathToCurrentProfiles(sample3.timestamp, symbolized.stack_trace,
+ symbolized.src_line, true,
+ base::TimeDelta());
CpuProfile* profile = profiles.StopProfiling("");
unsigned nodeId = 1;
@@ -497,7 +518,6 @@ TEST(SampleIds) {
}
}
-
TEST(NoSamples) {
TestSetup test_setup;
i::Isolate* isolate = CcTest::i_isolate();
@@ -506,9 +526,9 @@ TEST(NoSamples) {
profiles.set_cpu_profiler(&profiler);
profiles.StartProfiling("");
CodeMap code_map;
- ProfileGenerator generator(&profiles, &code_map);
+ Symbolizer symbolizer(&code_map);
CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
- generator.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
+ symbolizer.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
// We are building the following call tree:
// (root)#1 -> aaa #2 -> aaa #3 - sample1
@@ -516,17 +536,19 @@ TEST(NoSamples) {
sample1.pc = ToPointer(0x1600);
sample1.stack[0] = ToPointer(0x1510);
sample1.frames_count = 1;
- generator.SymbolizeTickSample(sample1);
+ auto symbolized = symbolizer.SymbolizeTickSample(sample1);
+ profiles.AddPathToCurrentProfiles(v8::base::TimeTicks::HighResolutionNow(),
+ symbolized.stack_trace, symbolized.src_line,
+ true, base::TimeDelta());
CpuProfile* profile = profiles.StopProfiling("");
unsigned nodeId = 1;
CheckNodeIds(profile->top_down()->root(), &nodeId);
CHECK_EQ(3u, nodeId - 1);
- CHECK_EQ(0, profile->samples_count());
+ CHECK_EQ(1, profile->samples_count());
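+ // One sample is now counted because the symbolized path was explicitly
+ // added to the current profiles above.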
}
-
static const ProfileNode* PickChild(const ProfileNode* parent,
const char* name) {
for (const ProfileNode* child : *parent->children()) {
@@ -593,10 +615,12 @@ TEST(Issue51919) {
for (int i = 0; i < CpuProfilesCollection::kMaxSimultaneousProfiles; ++i) {
i::Vector<char> title = i::Vector<char>::New(16);
i::SNPrintF(title, "%d", i);
- CHECK(collection.StartProfiling(title.begin()));
+ CHECK_EQ(CpuProfilingStatus::kStarted,
+ collection.StartProfiling(title.begin()));
titles[i] = title.begin();
}
- CHECK(!collection.StartProfiling("maximum"));
+ CHECK_EQ(CpuProfilingStatus::kErrorTooManyProfilers,
+ collection.StartProfiling("maximum"));
for (int i = 0; i < CpuProfilesCollection::kMaxSimultaneousProfiles; ++i)
i::DeleteArray(titles[i]);
}
@@ -674,7 +698,7 @@ static const char* line_number_test_source_profile_time_functions =
int GetFunctionLineNumber(CpuProfiler* profiler, LocalContext* env,
const char* name) {
- CodeMap* code_map = profiler->generator()->code_map();
+ CodeMap* code_map = profiler->symbolizer()->code_map();
i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
(*env)->Global()->Get(env->local(), v8_str(name)).ToLocalChecked())));
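For orientation, the refactor in this file splits what ProfileGenerator used to do into two explicit steps: a Symbolizer resolves raw tick addresses, and the caller records the result. A minimal sketch of the new flow, reusing only names visible in the hunks above (the /*update_stats=*/ parameter name is a guess for the bare `true` argument):

CodeMap code_map;
Symbolizer symbolizer(&code_map);

TickSample sample;
// ... fill in sample.pc, sample.stack and sample.frames_count ...

// Step 1: resolve raw addresses into a symbolized stack trace.
auto symbolized = symbolizer.SymbolizeTickSample(sample);

// Step 2: recording the path is now the caller's job, not the symbolizer's.
profiles.AddPathToCurrentProfiles(sample.timestamp, symbolized.stack_trace,
                                  symbolized.src_line, /*update_stats=*/true,
                                  base::TimeDelta());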
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 16301fd609..c7c1f07265 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -50,32 +50,38 @@
#include "src/utils/ostreams.h"
#include "src/zone/zone-list-inl.h"
#include "test/cctest/cctest.h"
-#include "test/common/wasm/flag-utils.h"
+#include "test/common/flag-utils.h"
namespace v8 {
namespace internal {
namespace test_regexp {
static bool CheckParse(const char* input) {
+ Isolate* isolate = CcTest::i_isolate();
+
v8::HandleScope scope(CcTest::isolate());
- Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
- FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(input);
+ FlatStringReader reader(isolate, str);
RegExpCompileData result;
- return v8::internal::RegExpParser::ParseRegExp(
- CcTest::i_isolate(), &zone, &reader, JSRegExp::kNone, &result);
+ return v8::internal::RegExpParser::ParseRegExp(isolate, &zone, &reader,
+ JSRegExp::kNone, &result);
}
static void CheckParseEq(const char* input, const char* expected,
bool unicode = false) {
+ Isolate* isolate = CcTest::i_isolate();
+
v8::HandleScope scope(CcTest::isolate());
- Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
- FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(input);
+ FlatStringReader reader(isolate, str);
RegExpCompileData result;
JSRegExp::Flags flags = JSRegExp::kNone;
if (unicode) flags |= JSRegExp::kUnicode;
- CHECK(v8::internal::RegExpParser::ParseRegExp(CcTest::i_isolate(), &zone,
- &reader, flags, &result));
+ CHECK(v8::internal::RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
+ &result));
CHECK_NOT_NULL(result.tree);
CHECK(result.error == RegExpError::kNone);
std::ostringstream os;
@@ -88,12 +94,15 @@ static void CheckParseEq(const char* input, const char* expected,
static bool CheckSimple(const char* input) {
+ Isolate* isolate = CcTest::i_isolate();
+
v8::HandleScope scope(CcTest::isolate());
- Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
- FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(input);
+ FlatStringReader reader(isolate, str);
RegExpCompileData result;
- CHECK(v8::internal::RegExpParser::ParseRegExp(
- CcTest::i_isolate(), &zone, &reader, JSRegExp::kNone, &result));
+ CHECK(v8::internal::RegExpParser::ParseRegExp(isolate, &zone, &reader,
+ JSRegExp::kNone, &result));
CHECK_NOT_NULL(result.tree);
CHECK(result.error == RegExpError::kNone);
return result.simple;
@@ -106,12 +115,15 @@ struct MinMaxPair {
static MinMaxPair CheckMinMaxMatch(const char* input) {
+ Isolate* isolate = CcTest::i_isolate();
+
v8::HandleScope scope(CcTest::isolate());
- Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
- FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(input);
+ FlatStringReader reader(isolate, str);
RegExpCompileData result;
- CHECK(v8::internal::RegExpParser::ParseRegExp(
- CcTest::i_isolate(), &zone, &reader, JSRegExp::kNone, &result));
+ CHECK(v8::internal::RegExpParser::ParseRegExp(isolate, &zone, &reader,
+ JSRegExp::kNone, &result));
CHECK_NOT_NULL(result.tree);
CHECK(result.error == RegExpError::kNone);
int min_match = result.tree->min_match();
@@ -422,7 +434,8 @@ static void ExpectError(const char* input, const char* expected,
v8::HandleScope scope(CcTest::isolate());
Zone zone(isolate->allocator(), ZONE_NAME);
- FlatStringReader reader(isolate, CStrVector(input));
+ Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(input);
+ FlatStringReader reader(isolate, str);
RegExpCompileData result;
JSRegExp::Flags flags = JSRegExp::kNone;
if (unicode) flags |= JSRegExp::kUnicode;
@@ -524,14 +537,15 @@ TEST(CharacterClassEscapes) {
static RegExpNode* Compile(const char* input, bool multiline, bool unicode,
bool is_one_byte, Zone* zone) {
Isolate* isolate = CcTest::i_isolate();
- FlatStringReader reader(isolate, CStrVector(input));
+ Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(input);
+ FlatStringReader reader(isolate, str);
RegExpCompileData compile_data;
compile_data.compilation_target = RegExpCompilationTarget::kNative;
JSRegExp::Flags flags = JSRegExp::kNone;
if (multiline) flags = JSRegExp::kMultiline;
if (unicode) flags = JSRegExp::kUnicode;
- if (!v8::internal::RegExpParser::ParseRegExp(CcTest::i_isolate(), zone,
- &reader, flags, &compile_data))
+ if (!v8::internal::RegExpParser::ParseRegExp(isolate, zone, &reader, flags,
+ &compile_data))
return nullptr;
Handle<String> pattern = isolate->factory()
->NewStringFromUtf8(CStrVector(input))
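Every parser test above migrates the same way: FlatStringReader no longer wraps a raw CStrVector but takes a heap-allocated String. A sketch of the shared pattern, using only calls shown in the hunks:

Isolate* isolate = CcTest::i_isolate();
Zone zone(isolate->allocator(), ZONE_NAME);

// The reader is now built from a heap String rather than a C-string view.
Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(input);
FlatStringReader reader(isolate, str);

RegExpCompileData result;
bool ok = RegExpParser::ParseRegExp(isolate, &zone, &reader, JSRegExp::kNone,
                                    &result);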
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 52c71befad..6ec2ea649e 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -94,21 +94,19 @@ class TestSerializer {
v8::Isolate* v8_isolate = NewIsolate(kEnableSerializer, kGenerateHeap);
v8::Isolate::Scope isolate_scope(v8_isolate);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- isolate->Init(nullptr, nullptr);
+ isolate->Init(nullptr, nullptr, false);
return v8_isolate;
}
static v8::Isolate* NewIsolateFromBlob(const StartupBlobs& blobs) {
SnapshotData startup_snapshot(blobs.startup);
SnapshotData read_only_snapshot(blobs.read_only);
- ReadOnlyDeserializer read_only_deserializer(&read_only_snapshot);
- StartupDeserializer startup_deserializer(&startup_snapshot);
const bool kEnableSerializer = false;
const bool kGenerateHeap = false;
v8::Isolate* v8_isolate = NewIsolate(kEnableSerializer, kGenerateHeap);
v8::Isolate::Scope isolate_scope(v8_isolate);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- isolate->Init(&read_only_deserializer, &startup_deserializer);
+ isolate->Init(&startup_snapshot, &read_only_snapshot, false);
return v8_isolate;
}
@@ -172,6 +170,7 @@ static StartupBlobs Serialize(v8::Isolate* isolate) {
i::GarbageCollectionReason::kTesting);
SafepointScope safepoint(internal_isolate->heap());
+ HandleScope scope(internal_isolate);
DisallowGarbageCollection no_gc;
ReadOnlySerializer read_only_serializer(internal_isolate,
@@ -246,36 +245,6 @@ UNINITIALIZED_TEST(StartupSerializerOnce) {
TestStartupSerializerOnceImpl();
}
-UNINITIALIZED_TEST(StartupSerializerOnce1) {
- DisableAlwaysOpt();
- FLAG_serialization_chunk_size = 1;
- TestStartupSerializerOnceImpl();
-}
-
-UNINITIALIZED_TEST(StartupSerializerOnce32) {
- DisableAlwaysOpt();
- FLAG_serialization_chunk_size = 32;
- TestStartupSerializerOnceImpl();
-}
-
-UNINITIALIZED_TEST(StartupSerializerOnce1K) {
- DisableAlwaysOpt();
- FLAG_serialization_chunk_size = 1 * KB;
- TestStartupSerializerOnceImpl();
-}
-
-UNINITIALIZED_TEST(StartupSerializerOnce4K) {
- DisableAlwaysOpt();
- FLAG_serialization_chunk_size = 4 * KB;
- TestStartupSerializerOnceImpl();
-}
-
-UNINITIALIZED_TEST(StartupSerializerOnce32K) {
- DisableAlwaysOpt();
- FLAG_serialization_chunk_size = 32 * KB;
- TestStartupSerializerOnceImpl();
-}
-
UNINITIALIZED_TEST(StartupSerializerTwice) {
DisableAlwaysOpt();
v8::Isolate* isolate = TestSerializer::NewIsolateInitialized();
@@ -308,7 +277,6 @@ UNINITIALIZED_TEST(StartupSerializerOnceRunScript) {
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
-
v8::Local<v8::Context> env = v8::Context::New(isolate);
env->Enter();
@@ -380,6 +348,7 @@ static void SerializeContext(Vector<const byte>* startup_blob_out,
v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
}
+ HandleScope scope(isolate);
i::Context raw_context = i::Context::cast(*v8::Utils::OpenPersistent(env));
env.Reset();
@@ -532,6 +501,7 @@ static void SerializeCustomContext(Vector<const byte>* startup_blob_out,
v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
}
+ HandleScope scope(isolate);
i::Context raw_context = i::Context::cast(*v8::Utils::OpenPersistent(env));
env.Reset();
@@ -1010,14 +980,17 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobArrayBufferWithOffset) {
"var x = new Int32Array([12, 24, 48, 96]);"
"var y = new Int32Array(x.buffer, 4, 2)";
Int32Expectations expectations = {
- std::make_tuple("x[1]", 24), std::make_tuple("x[2]", 48),
- std::make_tuple("y[0]", 24), std::make_tuple("y[1]", 48),
+ std::make_tuple("x[1]", 24),
+ std::make_tuple("x[2]", 48),
+ std::make_tuple("y[0]", 24),
+ std::make_tuple("y[1]", 48),
};
// Verify that the typed arrays use the same buffer (not independent copies).
const char* code_to_run_after_restore = "x[2] = 57; y[0] = 42;";
Int32Expectations after_restore_expectations = {
- std::make_tuple("x[1]", 42), std::make_tuple("y[1]", 57),
+ std::make_tuple("x[1]", 42),
+ std::make_tuple("y[1]", 57),
};
TypedArrayTestHelper(code, expectations, code_to_run_after_restore,
@@ -1167,6 +1140,10 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobOnOrOffHeapTypedArray) {
CompileRun(code);
TestInt32Expectations(expectations);
+ i::Handle<i::JSArrayBuffer> buffer =
+ GetBufferFromTypedArray(CompileRun("x"));
+ // The resulting buffer should be on-heap.
+ CHECK_NULL(buffer->backing_store());
creator.SetDefaultContext(
context, v8::SerializeInternalFieldsCallback(
SerializeInternalFields, reinterpret_cast<void*>(2016)));
@@ -1570,16 +1547,13 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobImmortalImmovableRoots) {
FreeCurrentEmbeddedBlob();
}
-TEST(TestThatAlwaysSucceeds) {
-}
-
+TEST(TestThatAlwaysSucceeds) {}
TEST(TestThatAlwaysFails) {
bool ArtificialFailure = false;
CHECK(ArtificialFailure);
}
-
int CountBuiltins() {
// Check that we have not deserialized any additional builtin.
HeapObjectIterator iterator(CcTest::heap());
@@ -1734,21 +1708,6 @@ TEST(CodeSerializerOnePlusOneWithDebugger) {
TestCodeSerializerOnePlusOneImpl();
}
-TEST(CodeSerializerOnePlusOne1) {
- FLAG_serialization_chunk_size = 1;
- TestCodeSerializerOnePlusOneImpl();
-}
-
-TEST(CodeSerializerOnePlusOne32) {
- FLAG_serialization_chunk_size = 32;
- TestCodeSerializerOnePlusOneImpl();
-}
-
-TEST(CodeSerializerOnePlusOne4K) {
- FLAG_serialization_chunk_size = 4 * KB;
- TestCodeSerializerOnePlusOneImpl();
-}
-
TEST(CodeSerializerPromotedToCompilationCache) {
LocalContext context;
Isolate* isolate = CcTest::i_isolate();
@@ -2059,8 +2018,9 @@ TEST(CodeSerializerThreeBigStrings) {
Handle<String> source_str =
f->NewConsString(
- f->NewConsString(source_a_str, source_b_str).ToHandleChecked(),
- source_c_str).ToHandleChecked();
+ f->NewConsString(source_a_str, source_b_str).ToHandleChecked(),
+ source_c_str)
+ .ToHandleChecked();
Handle<JSObject> global(isolate->context().global_object(), isolate);
ScriptData* cache = nullptr;
@@ -2112,7 +2072,6 @@ TEST(CodeSerializerThreeBigStrings) {
source_c.Dispose();
}
-
class SerializerOneByteResource
: public v8::String::ExternalOneByteStringResource {
public:
@@ -2129,7 +2088,6 @@ class SerializerOneByteResource
int dispose_count_;
};
-
class SerializerTwoByteResource : public v8::String::ExternalStringResource {
public:
SerializerTwoByteResource(const char* data, size_t length)
@@ -2240,10 +2198,11 @@ TEST(CodeSerializerLargeExternalString) {
// Create the source, which is "var <literal> = 42; <literal>".
Handle<String> source_str =
f->NewConsString(
- f->NewConsString(f->NewStringFromAsciiChecked("var "), name)
- .ToHandleChecked(),
- f->NewConsString(f->NewStringFromAsciiChecked(" = 42; "), name)
- .ToHandleChecked()).ToHandleChecked();
+ f->NewConsString(f->NewStringFromAsciiChecked("var "), name)
+ .ToHandleChecked(),
+ f->NewConsString(f->NewStringFromAsciiChecked(" = 42; "), name)
+ .ToHandleChecked())
+ .ToHandleChecked();
Handle<JSObject> global(isolate->context().global_object(), isolate);
ScriptData* cache = nullptr;
@@ -2327,10 +2286,8 @@ TEST(CodeSerializerExternalScriptName) {
delete cache;
}
-
static bool toplevel_test_code_event_found = false;
-
static void SerializerCodeEventListener(const v8::JitCodeEvent* event) {
if (event->type == v8::JitCodeEvent::CODE_ADDED &&
(memcmp(event->name.str, "Script:~ test", 13) == 0 ||
@@ -2564,7 +2521,7 @@ TEST(CodeSerializerBitFlip) {
v8::ScriptCompiler::CachedData* cache = CompileRunAndProduceCache(source);
// Arbitrary bit flip.
- int arbitrary_spot = 337;
+ int arbitrary_spot = 237;
CHECK_LT(arbitrary_spot, cache->length);
const_cast<uint8_t*>(cache->data)[arbitrary_spot] ^= 0x40;
@@ -2779,7 +2736,6 @@ static void AccessorForSerialization(
info.GetReturnValue().Set(v8_num(2017));
}
-
static SerializerOneByteResource serializable_one_byte_resource("one_byte", 8);
static SerializerTwoByteResource serializable_two_byte_resource("two_byte", 8);
@@ -3434,11 +3390,11 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
v8::Private::ForApi(isolate, v8_str("private_symbol"));
v8::Local<v8::Signature> signature =
- v8::Signature::New(isolate, v8::FunctionTemplate::New(isolate));
+ v8::Signature::New(isolate, v8::FunctionTemplate::New(isolate));
v8::Local<v8::AccessorSignature> accessor_signature =
- v8::AccessorSignature::New(isolate,
- v8::FunctionTemplate::New(isolate));
+ v8::AccessorSignature::New(isolate,
+ v8::FunctionTemplate::New(isolate));
CHECK_EQ(0u, creator.AddData(context, object));
CHECK_EQ(1u, creator.AddData(context, v8_str("context-dependent")));
@@ -3525,8 +3481,7 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
isolate->GetDataFromSnapshotOnce<v8::FunctionTemplate>(3).IsEmpty());
isolate->GetDataFromSnapshotOnce<v8::Private>(4).ToLocalChecked();
- CHECK(
- isolate->GetDataFromSnapshotOnce<v8::Private>(4).IsEmpty());
+ CHECK(isolate->GetDataFromSnapshotOnce<v8::Private>(4).IsEmpty());
isolate->GetDataFromSnapshotOnce<v8::Signature>(5).ToLocalChecked();
CHECK(isolate->GetDataFromSnapshotOnce<v8::Signature>(5).IsEmpty());
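The Isolate::Init change above is worth spelling out: callers no longer construct the deserializers themselves. A before/after sketch assembled from the hunks (the meaning of the new trailing bool is not visible in this diff; these tests pass false):

// Before: deserializers were built at the call site.
ReadOnlyDeserializer read_only_deserializer(&read_only_snapshot);
StartupDeserializer startup_deserializer(&startup_snapshot);
isolate->Init(&read_only_deserializer, &startup_deserializer);

// After: Init takes the raw snapshot data and deserializes internally.
isolate->Init(&startup_snapshot, &read_only_snapshot, false);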
diff --git a/deps/v8/test/cctest/test-stack-unwinding-win64.cc b/deps/v8/test/cctest/test-stack-unwinding-win64.cc
index 84f1318a29..cd0243723b 100644
--- a/deps/v8/test/cctest/test-stack-unwinding-win64.cc
+++ b/deps/v8/test/cctest/test-stack-unwinding-win64.cc
@@ -101,7 +101,7 @@ UNINITIALIZED_TEST(StackUnwindingWin64) {
v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
env->Global()->Get(env.local(), v8_str("start")).ToLocalChecked());
- CompileRun("%OptimizeFunctionOnNextCall(start);");
+ CompileRun("start(1); %OptimizeFunctionOnNextCall(start);");
int32_t repeat_count = 100;
v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index d6ae62adfc..837b4c169a 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -222,6 +222,8 @@ static void InitializeBuildingBlocks(Handle<String>* building_blocks,
class ConsStringStats {
public:
ConsStringStats() { Reset(); }
+ ConsStringStats(const ConsStringStats&) = delete;
+ ConsStringStats& operator=(const ConsStringStats&) = delete;
void Reset();
void VerifyEqual(const ConsStringStats& that) const;
int leaves_;
@@ -231,7 +233,6 @@ class ConsStringStats {
int right_traversals_;
private:
- DISALLOW_COPY_AND_ASSIGN(ConsStringStats);
};
void ConsStringStats::Reset() {
@@ -254,6 +255,8 @@ class ConsStringGenerationData {
public:
static const int kNumberOfBuildingBlocks = 256;
explicit ConsStringGenerationData(bool long_blocks);
+ ConsStringGenerationData(const ConsStringGenerationData&) = delete;
+ ConsStringGenerationData& operator=(const ConsStringGenerationData&) = delete;
void Reset();
inline Handle<String> block(int offset);
inline Handle<String> block(uint32_t offset);
@@ -270,9 +273,6 @@ class ConsStringGenerationData {
// Stats.
ConsStringStats stats_;
int early_terminations_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ConsStringGenerationData);
};
ConsStringGenerationData::ConsStringGenerationData(bool long_blocks) {
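The DISALLOW_COPY_AND_ASSIGN removals here (and in test-trace-event.cc below) all follow the same modern-C++ idiom; a minimal self-contained sketch:

// Before: copyability suppressed via a macro in the private section.
class Old {
 private:
  DISALLOW_COPY_AND_ASSIGN(Old);
};

// After: the copy operations are deleted explicitly, next to the
// constructors, so the intent is visible without expanding a macro.
class New {
 public:
  New() = default;
  New(const New&) = delete;
  New& operator=(const New&) = delete;
};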
diff --git a/deps/v8/test/cctest/test-sync-primitives-arm64.cc b/deps/v8/test/cctest/test-sync-primitives-arm64.cc
index 423ba05bca..c2b46fc8b3 100644
--- a/deps/v8/test/cctest/test-sync-primitives-arm64.cc
+++ b/deps/v8/test/cctest/test-sync-primitives-arm64.cc
@@ -205,7 +205,7 @@ void TestInvalidateExclusiveAccess(TestData initial_data, MemoryAccess access1,
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
TestData t = initial_data;
Simulator::current(isolate)->Call<void>(code->entry(), &t);
@@ -277,7 +277,7 @@ int ExecuteMemoryAccess(Isolate* isolate, TestData* test_data,
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
- Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
Simulator::current(isolate)->Call<void>(code->entry(), test_data);
return Simulator::current(isolate)->wreg(0);
}
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index b371cd8d3c..dabd7b0dfe 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -872,6 +872,12 @@ class TerminatorSleeperThread : public v8::base::Thread {
TEST(TerminateRegExp) {
i::FLAG_allow_natives_syntax = true;
+ // We want to get stuck in regexp execution, so don't fall back to the
+ // linear-time engine.
+ // TODO(mbid,v8:10765): Find a way to test interrupt support of the
+ // experimental engine.
+ i::FLAG_enable_experimental_regexp_engine_on_excessive_backtracks = false;
+
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> global = CreateGlobalTemplate(
diff --git a/deps/v8/test/cctest/test-trace-event.cc b/deps/v8/test/cctest/test-trace-event.cc
index 0f4a699d8a..43a477fae0 100644
--- a/deps/v8/test/cctest/test-trace-event.cc
+++ b/deps/v8/test/cctest/test-trace-event.cc
@@ -34,6 +34,8 @@ struct MockTraceObject {
class MockTracingController : public v8::TracingController {
public:
MockTracingController() = default;
+ MockTracingController(const MockTracingController&) = delete;
+ MockTracingController& operator=(const MockTracingController&) = delete;
uint64_t AddTraceEvent(
char phase, const uint8_t* category_enabled_flag, const char* name,
@@ -80,8 +82,6 @@ class MockTracingController : public v8::TracingController {
private:
std::vector<std::unique_ptr<MockTraceObject>> trace_objects_;
-
- DISALLOW_COPY_AND_ASSIGN(MockTracingController);
};
class MockTracingPlatform : public TestPlatform {
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index 525a73a788..d42df09c53 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -65,7 +65,7 @@ static double GetDoubleFieldValue(JSObject obj, FieldIndex field_index) {
}
void WriteToField(JSObject object, int index, Object value) {
- DescriptorArray descriptors = object.map().instance_descriptors();
+ DescriptorArray descriptors = object.map().instance_descriptors(kRelaxedLoad);
InternalIndex descriptor(index);
PropertyDetails details = descriptors.GetDetails(descriptor);
object.WriteToField(descriptor, details, value);
@@ -673,7 +673,8 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppend(
}
map->InitializeDescriptors(isolate, *descriptors, *layout_descriptor);
}
- Handle<LayoutDescriptor> layout_descriptor(map->layout_descriptor(), isolate);
+ Handle<LayoutDescriptor> layout_descriptor(
+ map->layout_descriptor(kAcquireLoad), isolate);
CHECK(layout_descriptor->IsConsistentWithMap(*map, true));
return layout_descriptor;
}
@@ -800,7 +801,7 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppendIfFastOrUseFull(
for (int i = 0; i < number_of_descriptors; i++) {
PropertyDetails details = descriptors->GetDetails(InternalIndex(i));
map = maps[i];
- LayoutDescriptor layout_desc = map->layout_descriptor();
+ LayoutDescriptor layout_desc = map->layout_descriptor(kAcquireLoad);
if (layout_desc.IsSlowLayout()) {
switched_to_slow_mode = true;
@@ -820,7 +821,7 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppendIfFastOrUseFull(
CHECK(layout_desc.IsTagged(field_index + field_width_in_words));
}
}
- CHECK(map->layout_descriptor().IsConsistentWithMap(*map));
+ CHECK(map->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*map));
}
Handle<LayoutDescriptor> layout_descriptor(map->GetLayoutDescriptor(),
@@ -991,10 +992,10 @@ TEST(DescriptorArrayTrimming) {
NONE, PropertyConstness::kMutable,
Representation::Double(), INSERT_TRANSITION)
.ToHandleChecked();
- CHECK(map->layout_descriptor().IsConsistentWithMap(*map, true));
- CHECK(map->layout_descriptor().IsSlowLayout());
+ CHECK(map->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*map, true));
+ CHECK(map->layout_descriptor(kAcquireLoad).IsSlowLayout());
CHECK(map->owns_descriptors());
- CHECK_EQ(8, map->layout_descriptor().length());
+ CHECK_EQ(8, map->layout_descriptor(kAcquireLoad).length());
{
// Add transitions to double fields.
@@ -1006,35 +1007,38 @@ TEST(DescriptorArrayTrimming) {
any_type, NONE, PropertyConstness::kMutable,
Representation::Double(), INSERT_TRANSITION)
.ToHandleChecked();
- CHECK(tmp_map->layout_descriptor().IsConsistentWithMap(*tmp_map, true));
+ CHECK(tmp_map->layout_descriptor(kAcquireLoad)
+ .IsConsistentWithMap(*tmp_map, true));
}
// Check that descriptors are shared.
CHECK(tmp_map->owns_descriptors());
- CHECK_EQ(map->instance_descriptors(), tmp_map->instance_descriptors());
- CHECK_EQ(map->layout_descriptor(), tmp_map->layout_descriptor());
+ CHECK_EQ(map->instance_descriptors(kRelaxedLoad),
+ tmp_map->instance_descriptors(kRelaxedLoad));
+ CHECK_EQ(map->layout_descriptor(kAcquireLoad),
+ tmp_map->layout_descriptor(kAcquireLoad));
}
- CHECK(map->layout_descriptor().IsSlowLayout());
- CHECK_EQ(16, map->layout_descriptor().length());
+ CHECK(map->layout_descriptor(kAcquireLoad).IsSlowLayout());
+ CHECK_EQ(16, map->layout_descriptor(kAcquireLoad).length());
// The unused tail of the layout descriptor is now "dirty" because of sharing.
- CHECK(map->layout_descriptor().IsConsistentWithMap(*map));
+ CHECK(map->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*map));
for (int i = kSplitFieldIndex + 1; i < kTrimmedLayoutDescriptorLength; i++) {
- CHECK(!map->layout_descriptor().IsTagged(i));
+ CHECK(!map->layout_descriptor(kAcquireLoad).IsTagged(i));
}
CHECK_LT(map->NumberOfOwnDescriptors(),
- map->instance_descriptors().number_of_descriptors());
+ map->instance_descriptors(kRelaxedLoad).number_of_descriptors());
// Call GC that should trim both |map|'s descriptor array and layout
// descriptor.
CcTest::CollectAllGarbage();
// The unused tail of the layout descriptor is now "clean" again.
- CHECK(map->layout_descriptor().IsConsistentWithMap(*map, true));
+ CHECK(map->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*map, true));
CHECK(map->owns_descriptors());
CHECK_EQ(map->NumberOfOwnDescriptors(),
- map->instance_descriptors().number_of_descriptors());
- CHECK(map->layout_descriptor().IsSlowLayout());
- CHECK_EQ(8, map->layout_descriptor().length());
+ map->instance_descriptors(kRelaxedLoad).number_of_descriptors());
+ CHECK(map->layout_descriptor(kAcquireLoad).IsSlowLayout());
+ CHECK_EQ(8, map->layout_descriptor(kAcquireLoad).length());
{
// Add transitions to tagged fields.
@@ -1047,18 +1051,21 @@ TEST(DescriptorArrayTrimming) {
any_type, NONE, PropertyConstness::kMutable,
Representation::Tagged(), INSERT_TRANSITION)
.ToHandleChecked();
- CHECK(tmp_map->layout_descriptor().IsConsistentWithMap(*tmp_map, true));
+ CHECK(tmp_map->layout_descriptor(kAcquireLoad)
+ .IsConsistentWithMap(*tmp_map, true));
}
tmp_map = Map::CopyWithField(isolate, tmp_map, CcTest::MakeString("dbl"),
any_type, NONE, PropertyConstness::kMutable,
Representation::Double(), INSERT_TRANSITION)
.ToHandleChecked();
- CHECK(tmp_map->layout_descriptor().IsConsistentWithMap(*tmp_map, true));
+ CHECK(tmp_map->layout_descriptor(kAcquireLoad)
+ .IsConsistentWithMap(*tmp_map, true));
// Check that descriptors are shared.
CHECK(tmp_map->owns_descriptors());
- CHECK_EQ(map->instance_descriptors(), tmp_map->instance_descriptors());
+ CHECK_EQ(map->instance_descriptors(kRelaxedLoad),
+ tmp_map->instance_descriptors(kRelaxedLoad));
}
- CHECK(map->layout_descriptor().IsSlowLayout());
+ CHECK(map->layout_descriptor(kAcquireLoad).IsSlowLayout());
}
@@ -1390,7 +1397,7 @@ TEST(LayoutDescriptorSharing) {
.ToHandleChecked();
}
Handle<LayoutDescriptor> split_layout_descriptor(
- split_map->layout_descriptor(), isolate);
+ split_map->layout_descriptor(kAcquireLoad), isolate);
CHECK(split_layout_descriptor->IsConsistentWithMap(*split_map, true));
CHECK(split_layout_descriptor->IsSlowLayout());
CHECK(split_map->owns_descriptors());
@@ -1401,12 +1408,13 @@ TEST(LayoutDescriptorSharing) {
Representation::Double(), INSERT_TRANSITION)
.ToHandleChecked();
CHECK(!split_map->owns_descriptors());
- CHECK_EQ(*split_layout_descriptor, split_map->layout_descriptor());
+ CHECK_EQ(*split_layout_descriptor,
+ split_map->layout_descriptor(kAcquireLoad));
// Layout descriptors should be shared with |split_map|.
CHECK(map1->owns_descriptors());
- CHECK_EQ(*split_layout_descriptor, map1->layout_descriptor());
- CHECK(map1->layout_descriptor().IsConsistentWithMap(*map1, true));
+ CHECK_EQ(*split_layout_descriptor, map1->layout_descriptor(kAcquireLoad));
+ CHECK(map1->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*map1, true));
Handle<Map> map2 =
Map::CopyWithField(isolate, split_map, CcTest::MakeString("bar"),
@@ -1416,8 +1424,8 @@ TEST(LayoutDescriptorSharing) {
// Layout descriptors should not be shared with |split_map|.
CHECK(map2->owns_descriptors());
- CHECK_NE(*split_layout_descriptor, map2->layout_descriptor());
- CHECK(map2->layout_descriptor().IsConsistentWithMap(*map2, true));
+ CHECK_NE(*split_layout_descriptor, map2->layout_descriptor(kAcquireLoad));
+ CHECK(map2->layout_descriptor(kAcquireLoad).IsConsistentWithMap(*map2, true));
}
static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
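The churn in this file is one mechanical substitution: Map accessors now take an explicit synchronization tag. A before/after sketch using only the tags that appear in the hunks:

// Before: the memory ordering of the load was implicit.
DescriptorArray descriptors = map->instance_descriptors();
LayoutDescriptor layout = map->layout_descriptor();

// After: each call site states the ordering it needs.
DescriptorArray descriptors = map->instance_descriptors(kRelaxedLoad);
LayoutDescriptor layout = map->layout_descriptor(kAcquireLoad);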
diff --git a/deps/v8/test/cctest/test-unwinder-code-pages.cc b/deps/v8/test/cctest/test-unwinder-code-pages.cc
index fec8bc0163..18c8658b3e 100644
--- a/deps/v8/test/cctest/test-unwinder-code-pages.cc
+++ b/deps/v8/test/cctest/test-unwinder-code-pages.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-unwinder-state.h"
#include "include/v8.h"
-
#include "src/api/api-inl.h"
#include "src/builtins/builtins.h"
#include "src/execution/isolate.h"
@@ -15,10 +15,115 @@ namespace v8 {
namespace internal {
namespace test_unwinder_code_pages {
-static const void* fake_stack_base = nullptr;
+namespace {
+
+#define CHECK_EQ_VALUE_REGISTER(uintptr_value, register_value) \
+ CHECK_EQ(reinterpret_cast<void*>(uintptr_value), register_value)
+
+#ifdef V8_TARGET_ARCH_X64
+// Number of stack slots the JSEntry frame occupies.
+constexpr int kJSEntryFrameSpace = 3;
+
+// Offsets of the saved FP, PC and SP from the start of the JSEntry frame.
+constexpr int kFPOffset = 0;
+constexpr int kPCOffset = 1;
+constexpr int kSPOffset = 2;
+
+// Builds the stack from {stack} as x64 expects it.
+// TODO(solanes): Build the JSEntry stack in the way the builtin builds it.
+void BuildJSEntryStack(uintptr_t* stack) {
+ stack[0] = reinterpret_cast<uintptr_t>(stack + 0); // saved FP.
+ stack[1] = 100; // Return address into C++ code.
+ stack[2] = reinterpret_cast<uintptr_t>(stack + 2); // saved SP.
+}
+
+// Dummy method since we don't save callee-saved registers on x64.
+void CheckCalleeSavedRegisters(const RegisterState& register_state) {}
+
+#elif V8_TARGET_ARCH_ARM
+// Number of stack slots the JSEntry frame occupies.
+constexpr int kJSEntryFrameSpace = 27;
+
+// Offsets of the saved FP, PC and SP from the start of the JSEntry frame.
+constexpr int kFPOffset = 24;
+constexpr int kPCOffset = 25;
+constexpr int kSPOffset = 26;
+
+// Builds the stack from {stack} as described in frame-constants-arm.h.
+void BuildJSEntryStack(uintptr_t* stack) {
+ stack[0] = -1; // the bad frame pointer (0xF..F)
+ // Set d8 = 150, d9 = 151, ..., d15 = 157.
+ for (int i = 0; i < 8; ++i) {
+ // Double registers occupy two slots. Therefore, upper bits are zeroed.
+ stack[1 + i * 2] = 0;
+ stack[1 + i * 2 + 1] = 150 + i;
+ }
+ // Set r4 = 160, ..., r10 = 166.
+ for (int i = 0; i < 7; ++i) {
+ stack[17 + i] = 160 + i;
+ }
+ stack[24] = reinterpret_cast<uintptr_t>(stack + 24); // saved FP.
+ stack[25] = 100; // Return address into C++ code (i.e. lr/pc).
+ stack[26] = reinterpret_cast<uintptr_t>(stack + 26); // saved SP.
+}
+
+// Checks that the values in the callee-saved registers are the same as the
+// ones we saved in BuildJSEntryStack.
+void CheckCalleeSavedRegisters(const RegisterState& register_state) {
+ CHECK_EQ_VALUE_REGISTER(160, register_state.callee_saved->arm_r4);
+ CHECK_EQ_VALUE_REGISTER(161, register_state.callee_saved->arm_r5);
+ CHECK_EQ_VALUE_REGISTER(162, register_state.callee_saved->arm_r6);
+ CHECK_EQ_VALUE_REGISTER(163, register_state.callee_saved->arm_r7);
+ CHECK_EQ_VALUE_REGISTER(164, register_state.callee_saved->arm_r8);
+ CHECK_EQ_VALUE_REGISTER(165, register_state.callee_saved->arm_r9);
+ CHECK_EQ_VALUE_REGISTER(166, register_state.callee_saved->arm_r10);
+}
+
+#elif V8_TARGET_ARCH_ARM64
+// Number of stack slots the JSEntry frame occupies.
+constexpr int kJSEntryFrameSpace = 22;
+
+// Offsets of the saved FP, PC and SP from the start of the JSEntry frame.
+constexpr int kFPOffset = 11;
+constexpr int kPCOffset = 12;
+constexpr int kSPOffset = 21;
+
+// Builds the stack from {stack} as described in frame-constants-arm64.h.
+void BuildJSEntryStack(uintptr_t* stack) {
+ stack[0] = -1; // the bad frame pointer (0xF..F)
+ // Set x19 = 150, ..., x28 = 159.
+ for (int i = 0; i < 10; ++i) {
+ stack[1 + i] = 150 + i;
+ }
+ stack[11] = reinterpret_cast<uintptr_t>(stack + 11); // saved FP.
+ stack[12] = 100; // Return address into C++ code (i.e. lr/pc).
+ // Set d8 = 160, ..., d15 = 167.
+ for (int i = 0; i < 8; ++i) {
+ stack[13 + i] = 160 + i;
+ }
+ stack[21] = reinterpret_cast<uintptr_t>(stack + 21); // saved SP.
+}
+
+// Dummy method since we don't save callee-saved registers on arm64.
+void CheckCalleeSavedRegisters(const RegisterState& register_state) {}
-#define CHECK_EQ_STACK_REGISTER(stack_value, register_value) \
- CHECK_EQ(reinterpret_cast<void*>(stack_value), register_value)
+#else
+// Dummy constants for the remaining architectures, which are not supported.
+constexpr int kJSEntryFrameSpace = 1;
+constexpr int kFPOffset = 0;
+constexpr int kPCOffset = 0;
+constexpr int kSPOffset = 0;
+
+// Dummy methods so the file still compiles on those architectures.
+void BuildJSEntryStack(uintptr_t* stack) { UNREACHABLE(); }
+void CheckCalleeSavedRegisters(const RegisterState& register_state) {
+ UNREACHABLE();
+}
+#endif // V8_TARGET_ARCH_X64
+
+} // namespace
+
+static const void* fake_stack_base = nullptr;
TEST(Unwind_BadState_Fail_CodePagesAPI) {
JSEntryStubs entry_stubs; // Fields are initialized to nullptr.
@@ -72,9 +177,9 @@ TEST(Unwind_BuiltinPCInMiddle_Success_CodePagesAPI) {
bool unwound = v8::Unwinder::TryUnwindV8Frames(
entry_stubs, pages_length, code_pages, &register_state, stack_base);
CHECK(unwound);
- CHECK_EQ_STACK_REGISTER(stack[topmost_fp_index], register_state.fp);
- CHECK_EQ_STACK_REGISTER(stack[topmost_fp_index + 1], register_state.pc);
- CHECK_EQ_STACK_REGISTER(stack[topmost_fp_index + 2], register_state.sp);
+ CHECK_EQ_VALUE_REGISTER(stack[topmost_fp_index], register_state.fp);
+ CHECK_EQ_VALUE_REGISTER(stack[topmost_fp_index + 1], register_state.pc);
+ CHECK_EQ_VALUE_REGISTER(stack[topmost_fp_index + 2], register_state.sp);
}
// The unwinder should be able to unwind even if we haven't properly set up the
@@ -127,9 +232,9 @@ TEST(Unwind_BuiltinPCAtStart_Success_CodePagesAPI) {
entry_stubs, pages_length, code_pages, &register_state, stack_base);
CHECK(unwound);
- CHECK_EQ_STACK_REGISTER(stack[topmost_fp_index], register_state.fp);
- CHECK_EQ_STACK_REGISTER(stack[topmost_fp_index + 1], register_state.pc);
- CHECK_EQ_STACK_REGISTER(stack[topmost_fp_index + 2], register_state.sp);
+ CHECK_EQ_VALUE_REGISTER(stack[topmost_fp_index], register_state.fp);
+ CHECK_EQ_VALUE_REGISTER(stack[topmost_fp_index + 1], register_state.pc);
+ CHECK_EQ_VALUE_REGISTER(stack[topmost_fp_index + 2], register_state.sp);
}
const char* foo_source = R"(
@@ -215,9 +320,9 @@ TEST(Unwind_CodeObjectPCInMiddle_Success_CodePagesAPI) {
bool unwound = v8::Unwinder::TryUnwindV8Frames(
entry_stubs, pages_length, code_pages, &register_state, stack_base);
CHECK(unwound);
- CHECK_EQ_STACK_REGISTER(stack[topmost_fp_index], register_state.fp);
- CHECK_EQ_STACK_REGISTER(stack[topmost_fp_index + 1], register_state.pc);
- CHECK_EQ_STACK_REGISTER(stack[topmost_fp_index + 2], register_state.sp);
+ CHECK_EQ_VALUE_REGISTER(stack[topmost_fp_index], register_state.fp);
+ CHECK_EQ_VALUE_REGISTER(stack[topmost_fp_index + 1], register_state.pc);
+ CHECK_EQ_VALUE_REGISTER(stack[topmost_fp_index + 2], register_state.sp);
}
// If the PC is within JSEntry but we haven't set up the frame yet, then we
@@ -264,8 +369,8 @@ TEST(Unwind_JSEntryBeforeFrame_Fail_CodePagesAPI) {
entry_stubs, pages_length, code_pages, &register_state, stack_base);
CHECK(!unwound);
// The register state should not change when unwinding fails.
- CHECK_EQ_STACK_REGISTER(&stack[9], register_state.fp);
- CHECK_EQ_STACK_REGISTER(&stack[5], register_state.sp);
+ CHECK_EQ_VALUE_REGISTER(&stack[9], register_state.fp);
+ CHECK_EQ_VALUE_REGISTER(&stack[5], register_state.sp);
CHECK_EQ(jsentry_pc_value, register_state.pc);
// Change the PC to a few instructions later, after the frame is set up.
@@ -277,59 +382,11 @@ TEST(Unwind_JSEntryBeforeFrame_Fail_CodePagesAPI) {
// than just assuming the frame is unreadable.
CHECK(!unwound);
// The register state should not change when unwinding fails.
- CHECK_EQ_STACK_REGISTER(&stack[9], register_state.fp);
- CHECK_EQ_STACK_REGISTER(&stack[5], register_state.sp);
+ CHECK_EQ_VALUE_REGISTER(&stack[9], register_state.fp);
+ CHECK_EQ_VALUE_REGISTER(&stack[5], register_state.sp);
CHECK_EQ(jsentry_pc_value, register_state.pc);
}
-TEST(Unwind_OneJSFrame_Success_CodePagesAPI) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
-
- JSEntryStubs entry_stubs = isolate->GetJSEntryStubs();
- MemoryRange code_pages[1];
- size_t pages_length = 1;
- RegisterState register_state;
-
- // Use a fake code range so that we can initialize it to 0s.
- const size_t code_length = 40;
- uintptr_t code[code_length] = {0};
- code_pages[0].start = code;
- code_pages[0].length_in_bytes = code_length * sizeof(uintptr_t);
-
- // Our fake stack has two frames - one C++ frame and one JS frame (on top).
- // The stack grows from high addresses to low addresses.
- uintptr_t stack[10];
- void* stack_base = stack + arraysize(stack);
- stack[0] = 101;
- stack[1] = 111;
- stack[2] = 121;
- stack[3] = 131;
- stack[4] = 141;
- // Index on the stack for the topmost fp (i.e the one right before the C++
- // frame).
- const int topmost_fp_index = 5;
- stack[5] = reinterpret_cast<uintptr_t>(stack + 9); // saved FP.
- stack[6] = 100; // Return address into C++ code.
- stack[7] = reinterpret_cast<uintptr_t>(stack + 7); // saved SP.
- stack[8] = 404;
- stack[9] = 505;
-
- register_state.sp = stack;
- register_state.fp = stack + 5;
-
- // Put the current PC inside of the code range so it looks valid.
- register_state.pc = code + 30;
-
- bool unwound = v8::Unwinder::TryUnwindV8Frames(
- entry_stubs, pages_length, code_pages, &register_state, stack_base);
-
- CHECK(unwound);
- CHECK_EQ_STACK_REGISTER(stack[topmost_fp_index], register_state.fp);
- CHECK_EQ_STACK_REGISTER(stack[topmost_fp_index + 1], register_state.pc);
- CHECK_EQ_STACK_REGISTER(stack[topmost_fp_index + 2], register_state.sp);
-}
-
// Creates a fake stack with two JS frames on top of a C++ frame and checks that
// the unwinder correctly unwinds past the JS frames and returns the C++ frame's
// details.
@@ -350,22 +407,17 @@ TEST(Unwind_TwoJSFrames_Success_CodePagesAPI) {
// Our fake stack has three frames - one C++ frame and two JS frames (on top).
// The stack grows from high addresses to low addresses.
- uintptr_t stack[10];
+ uintptr_t stack[5 + kJSEntryFrameSpace];
void* stack_base = stack + arraysize(stack);
stack[0] = 101;
stack[1] = 111;
stack[2] = reinterpret_cast<uintptr_t>(stack + 5); // saved FP.
// The fake return address is in the JS code range.
- stack[3] = reinterpret_cast<uintptr_t>(code + 10);
+ const void* jsentry_pc = code + 10;
+ stack[3] = reinterpret_cast<uintptr_t>(jsentry_pc);
stack[4] = 141;
- // Index on the stack for the topmost fp (i.e the one right before the C++
- // frame).
- const int topmost_fp_index = 5;
- stack[5] = reinterpret_cast<uintptr_t>(stack + 9); // saved FP.
- stack[6] = 100; // Return address into C++ code.
- stack[7] = reinterpret_cast<uintptr_t>(stack + 7); // saved SP.
- stack[8] = 404;
- stack[9] = 505;
+ const int top_of_js_entry = 5;
+ BuildJSEntryStack(&stack[top_of_js_entry]);
register_state.sp = stack;
register_state.fp = stack + 2;
@@ -373,13 +425,21 @@ TEST(Unwind_TwoJSFrames_Success_CodePagesAPI) {
// Put the current PC inside of the code range so it looks valid.
register_state.pc = code + 30;
+ // Put the PC in the JSEntryRange.
+ entry_stubs.js_entry_stub.code.start = jsentry_pc;
+ entry_stubs.js_entry_stub.code.length_in_bytes = sizeof(uintptr_t);
+
bool unwound = v8::Unwinder::TryUnwindV8Frames(
entry_stubs, pages_length, code_pages, &register_state, stack_base);
CHECK(unwound);
- CHECK_EQ_STACK_REGISTER(stack[topmost_fp_index], register_state.fp);
- CHECK_EQ_STACK_REGISTER(stack[topmost_fp_index + 1], register_state.pc);
- CHECK_EQ_STACK_REGISTER(stack[topmost_fp_index + 2], register_state.sp);
+ CHECK_EQ_VALUE_REGISTER(stack[top_of_js_entry + kFPOffset],
+ register_state.fp);
+ CHECK_EQ_VALUE_REGISTER(stack[top_of_js_entry + kPCOffset],
+ register_state.pc);
+ CHECK_EQ_VALUE_REGISTER(stack[top_of_js_entry + kSPOffset],
+ register_state.sp);
+ CheckCalleeSavedRegisters(register_state);
}
// If the PC is in JSEntry then the frame might not be set up correctly, meaning
@@ -409,6 +469,8 @@ TEST(Unwind_JSEntry_Fail_CodePagesAPI) {
CHECK_EQ(start + 10, register_state.pc);
}
+// Tries to unwind a middle frame (i.e. not a JSEntry frame), first with a
+// wrong stack base and then with the correct one.
TEST(Unwind_StackBounds_Basic_CodePagesAPI) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -425,7 +487,7 @@ TEST(Unwind_StackBounds_Basic_CodePagesAPI) {
uintptr_t stack[3];
stack[0] = reinterpret_cast<uintptr_t>(stack + 2); // saved FP.
- stack[1] = 202; // Return address into C++ code.
+ stack[1] = 202; // saved PC.
stack[2] = 303; // saved SP.
register_state.sp = stack;
@@ -463,7 +525,7 @@ TEST(Unwind_StackBounds_WithUnwinding_CodePagesAPI) {
// Our fake stack has two frames - one C++ frame and one JS frame (on top).
// The stack grows from high addresses to low addresses.
- uintptr_t stack[11];
+ uintptr_t stack[9 + kJSEntryFrameSpace];
void* stack_base = stack + arraysize(stack);
stack[0] = 101;
stack[1] = 111;
@@ -471,12 +533,18 @@ TEST(Unwind_StackBounds_WithUnwinding_CodePagesAPI) {
stack[3] = 131;
stack[4] = 141;
stack[5] = reinterpret_cast<uintptr_t>(stack + 9); // saved FP.
- stack[6] = reinterpret_cast<uintptr_t>(code + 20); // JS code.
- stack[7] = 303; // saved SP.
+ const void* jsentry_pc = code + 20;
+ stack[6] = reinterpret_cast<uintptr_t>(jsentry_pc); // JS code.
+ stack[7] = 303; // saved SP.
stack[8] = 404;
- stack[9] = reinterpret_cast<uintptr_t>(stack) +
- (12 * sizeof(uintptr_t)); // saved FP (OOB).
- stack[10] = reinterpret_cast<uintptr_t>(code + 20); // JS code.
+ const int top_of_js_entry = 9;
+ BuildJSEntryStack(&stack[top_of_js_entry]);
+ // Override FP and PC
+ stack[top_of_js_entry + kFPOffset] =
+ reinterpret_cast<uintptr_t>(stack) +
+ (9 + kJSEntryFrameSpace + 1) * sizeof(uintptr_t); // saved FP (OOB).
+ stack[top_of_js_entry + kPCOffset] =
+ reinterpret_cast<uintptr_t>(code + 20); // JS code.
register_state.sp = stack;
register_state.fp = stack + 5;
@@ -484,18 +552,23 @@ TEST(Unwind_StackBounds_WithUnwinding_CodePagesAPI) {
// Put the current PC inside of the code range so it looks valid.
register_state.pc = code + 30;
+ // Put the PC in the JSEntryRange.
+ entry_stubs.js_entry_stub.code.start = jsentry_pc;
+ entry_stubs.js_entry_stub.code.length_in_bytes = sizeof(uintptr_t);
+
// Unwind will fail because stack[9] FP points outside of the stack.
bool unwound = v8::Unwinder::TryUnwindV8Frames(
entry_stubs, pages_length, code_pages, &register_state, stack_base);
CHECK(!unwound);
// Change the return address so that it is not in range. We will not range
- // check the stack[9] FP value because we have finished unwinding and the
+ // check the stack's FP value because we have finished unwinding and the
// contents of rbp does not necessarily have to be the FP in this case.
- stack[10] = 202;
+ stack[top_of_js_entry + kPCOffset] = 202;
unwound = v8::Unwinder::TryUnwindV8Frames(
entry_stubs, pages_length, code_pages, &register_state, stack_base);
CHECK(unwound);
+ CheckCalleeSavedRegisters(register_state);
}
TEST(PCIsInV8_BadState_Fail_CodePagesAPI) {
@@ -588,7 +661,7 @@ TEST(PCIsInV8_LargeCodeObject_CodePagesAPI) {
// Create a big function that ends up in CODE_LO_SPACE.
const int instruction_size = Page::kPageSize + 1;
- STATIC_ASSERT(instruction_size > kMaxRegularHeapObjectSize);
+ CHECK_GT(instruction_size, MemoryChunkLayout::MaxRegularCodeObjectSize());
std::unique_ptr<byte[]> instructions(new byte[instruction_size]);
CodeDesc desc;
@@ -690,7 +763,7 @@ TEST(Unwind_TwoNestedFunctions_CodePagesAPI) {
}
#endif
-#undef CHECK_EQ_STACK_REGISTER
+#undef CHECK_EQ_VALUE_REGISTER
} // namespace test_unwinder_code_pages
} // namespace internal
} // namespace v8
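Conceptually, every rewritten unwinder test now follows one recipe: build a fake JSEntry frame with the architecture-specific helper, then check that unwinding lands on the values saved at the architecture-specific offsets. A condensed sketch, reusing the helpers defined above:

uintptr_t stack[5 + kJSEntryFrameSpace];  // C++ frame, then JSEntry frame.
const int top_of_js_entry = 5;
BuildJSEntryStack(&stack[top_of_js_entry]);  // arch-specific layout

// After a successful unwind, the register state must hold the saved values.
CHECK_EQ_VALUE_REGISTER(stack[top_of_js_entry + kFPOffset], register_state.fp);
CHECK_EQ_VALUE_REGISTER(stack[top_of_js_entry + kPCOffset], register_state.pc);
CHECK_EQ_VALUE_REGISTER(stack[top_of_js_entry + kSPOffset], register_state.sp);
CheckCalleeSavedRegisters(register_state);  // a no-op on x64 and arm64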
diff --git a/deps/v8/test/cctest/test-verifiers.cc b/deps/v8/test/cctest/test-verifiers.cc
new file mode 100644
index 0000000000..7a980b2ef9
--- /dev/null
+++ b/deps/v8/test/cctest/test-verifiers.cc
@@ -0,0 +1,187 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// These tests check that Torque-generated verifier functions crash the process
+// when encountering data that doesn't fit the Torque type definitions.
+
+#include "src/api/api-inl.h"
+#include "src/objects/descriptor-array.h"
+#include "src/objects/map-inl.h"
+#include "test/cctest/cctest.h"
+#include "torque-generated/class-verifiers.h"
+
+namespace v8 {
+namespace internal {
+
+// Defines a pair of tests with similar code. The goal is to test that a
+// specific action causes a failure, but that everything else in the test case
+// succeeds. The general pattern should be:
+//
+// TEST_PAIR(Something) {
+// do_setup_steps_that_always_succeed();
+// if (should_fail) {
+// do_the_step_that_fails();
+// }
+// do_teardown_steps_that_always_succeed();
+// }
+//
+// A corresponding entry in cctest.status specifies that all Fail* tests in this
+// file must fail.
+#define TEST_PAIR(Name) \
+ static void Name(bool should_fail); \
+ TEST(Pass##Name) { Name(false); } \
+ TEST(Fail##Name) { Name(true); } \
+ static void Name(bool should_fail)
+
+#ifdef VERIFY_HEAP
+
+TEST_PAIR(TestWrongTypeInNormalField) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Value> v = CompileRun("({a: 3, b: 4})");
+ Handle<JSObject> o = Handle<JSObject>::cast(v8::Utils::OpenHandle(*v));
+ Handle<Object> original_elements(
+ TaggedField<Object>::load(*o, JSObject::kElementsOffset), i_isolate);
+ CHECK(original_elements->IsFixedArrayBase());
+
+ // There must be no GC (and therefore no verifiers running) until we can
+ // restore the modified data.
+ DisallowHeapAllocation no_gc;
+
+ // Elements must be FixedArrayBase according to the Torque definition, so a
+ // JSObject should cause a failure.
+ TaggedField<Object>::store(*o, JSObject::kElementsOffset, *o);
+ if (should_fail) {
+ TorqueGeneratedClassVerifiers::JSObjectVerify(*o, i_isolate);
+ }
+
+ // Put back the original value in case verifiers run on test shutdown.
+ TaggedField<Object>::store(*o, JSObject::kElementsOffset, *original_elements);
+}
+
+TEST_PAIR(TestWrongStrongTypeInIndexedStructField) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Value> v = CompileRun("({a: 3, b: 4})");
+ Handle<Object> o = v8::Utils::OpenHandle(*v);
+ Handle<Map> map(Handle<HeapObject>::cast(o)->map(), i_isolate);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ i_isolate);
+ int offset = DescriptorArray::OffsetOfDescriptorAt(1) +
+ DescriptorArray::kEntryKeyOffset;
+ Handle<Object> original_key(TaggedField<Object>::load(*descriptors, offset),
+ i_isolate);
+ CHECK(original_key->IsString());
+
+ // There must be no GC (and therefore no verifiers running) until we can
+ // restore the modified data.
+ DisallowHeapAllocation no_gc;
+
+ // Key must be Name|Undefined according to the Torque definition, so a
+ // JSObject should cause a failure.
+ TaggedField<Object>::store(*descriptors, offset, *o);
+ if (should_fail) {
+ TorqueGeneratedClassVerifiers::DescriptorArrayVerify(*descriptors,
+ i_isolate);
+ }
+
+ // Put back the original value in case verifiers run on test shutdown.
+ TaggedField<Object>::store(*descriptors, offset, *original_key);
+}
+
+TEST_PAIR(TestWrongWeakTypeInIndexedStructField) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Value> v = CompileRun("({a: 3, b: 4})");
+ Handle<Object> o = v8::Utils::OpenHandle(*v);
+ Handle<Map> map(Handle<HeapObject>::cast(o)->map(), i_isolate);
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(kRelaxedLoad),
+ i_isolate);
+ int offset = DescriptorArray::OffsetOfDescriptorAt(0) +
+ DescriptorArray::kEntryValueOffset;
+ Handle<Object> original_value(TaggedField<Object>::load(*descriptors, offset),
+ i_isolate);
+
+ // There must be no GC (and therefore no verifiers running) until we can
+ // restore the modified data.
+ DisallowHeapAllocation no_gc;
+
+ // Value can be JSAny, which includes JSObject, and it can be Weak<Map>, but
+ // it can't be Weak<JSObject>.
+ TaggedField<Object>::store(*descriptors, offset, *o);
+ TorqueGeneratedClassVerifiers::DescriptorArrayVerify(*descriptors, i_isolate);
+ MaybeObject weak = MaybeObject::MakeWeak(MaybeObject::FromObject(*o));
+ TaggedField<MaybeObject>::store(*descriptors, offset, weak);
+ if (should_fail) {
+ TorqueGeneratedClassVerifiers::DescriptorArrayVerify(*descriptors,
+ i_isolate);
+ }
+
+ // Put back the original value in case verifiers run on test shutdown.
+ TaggedField<Object>::store(*descriptors, offset, *original_value);
+}
+
+TEST_PAIR(TestWrongOddball) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Value> v = CompileRun("new Date()");
+ Handle<JSDate> date = Handle<JSDate>::cast(v8::Utils::OpenHandle(*v));
+ Handle<Object> original_hour(
+ TaggedField<Object>::load(*date, JSDate::kHourOffset), i_isolate);
+
+ // There must be no GC (and therefore no verifiers running) until we can
+ // restore the modified data.
+ DisallowHeapAllocation no_gc;
+
+ // Hour is Undefined|Smi|NaN. Other oddballs like null should cause a failure.
+ TaggedField<Object>::store(*date, JSDate::kHourOffset,
+ *i_isolate->factory()->null_value());
+ if (should_fail) {
+ TorqueGeneratedClassVerifiers::JSDateVerify(*date, i_isolate);
+ }
+
+ // Put back the original value in case verifiers run on test shutdown.
+ TaggedField<Object>::store(*date, JSDate::kHourOffset, *original_hour);
+}
+
+TEST_PAIR(TestWrongNumber) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Value> v = CompileRun("new Date()");
+ Handle<JSDate> date = Handle<JSDate>::cast(v8::Utils::OpenHandle(*v));
+ Handle<Object> original_hour(
+ TaggedField<Object>::load(*date, JSDate::kHourOffset), i_isolate);
+ v8::Local<v8::Value> v2 = CompileRun("1.1");
+ Handle<Object> float_val = v8::Utils::OpenHandle(*v2);
+
+ // There must be no GC (and therefore no verifiers running) until we can
+ // restore the modified data.
+ DisallowHeapAllocation no_gc;
+
+ // Hour is Undefined|Smi|NaN. Other doubles like 1.1 should cause a failure.
+ TaggedField<Object>::store(*date, JSDate::kHourOffset, *float_val);
+ if (should_fail) {
+ TorqueGeneratedClassVerifiers::JSDateVerify(*date, i_isolate);
+ }
+
+ // Put back the original value in case verifiers run on test shutdown.
+ TaggedField<Object>::store(*date, JSDate::kHourOffset, *original_hour);
+}
+
+#endif // VERIFY_HEAP
+
+#undef TEST_PAIR
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/torque/test-torque.cc b/deps/v8/test/cctest/torque/test-torque.cc
index aa836289b5..22ee780ae2 100644
--- a/deps/v8/test/cctest/torque/test-torque.cc
+++ b/deps/v8/test/cctest/torque/test-torque.cc
@@ -16,10 +16,10 @@
#include "src/objects/elements-kind.h"
#include "src/objects/objects-inl.h"
#include "src/objects/promise-inl.h"
+#include "src/objects/torque-defined-classes-inl.h"
#include "src/strings/char-predicates.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
-#include "torque-generated/exported-class-definitions-inl.h"
namespace v8 {
namespace internal {
@@ -137,8 +137,7 @@ TEST(TestFunctionPointers) {
CodeAssemblerTester asm_tester(isolate, kNumParams);
TestTorqueAssembler m(asm_tester.state());
{
- TNode<Context> context =
- m.UncheckedCast<Context>(m.Parameter(kNumParams + 2));
+ TNode<Context> context = m.UncheckedParameter<Context>(kNumParams + 2);
m.Return(m.TestFunctionPointers(context));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -151,7 +150,7 @@ TEST(TestTernaryOperator) {
CodeAssemblerTester asm_tester(isolate, kNumParams + 1); // Include receiver.
TestTorqueAssembler m(asm_tester.state());
{
- TNode<Smi> arg = m.UncheckedCast<Smi>(m.Parameter(1));
+ TNode<Smi> arg = m.UncheckedParameter<Smi>(1);
m.Return(m.TestTernaryOperator(arg));
}
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -622,7 +621,7 @@ TEST(TestBranchOnBoolOptimization) {
{
m.TestBranchOnBoolOptimization(
m.UncheckedCast<Context>(m.HeapConstant(context)),
- m.UncheckedCast<Smi>(m.Parameter(0)));
+ m.UncheckedParameter<Smi>(0));
m.Return(m.UndefinedConstant());
}
asm_tester.GenerateCode();
@@ -638,15 +637,15 @@ TEST(TestBitFieldLoad) {
{
// Untag all of the parameters to get plain integer values.
TNode<Uint8T> val =
- m.UncheckedCast<Uint8T>(m.Unsigned(m.SmiToInt32(m.Parameter(1))));
+ m.UncheckedCast<Uint8T>(m.Unsigned(m.SmiToInt32(m.Parameter<Smi>(1))));
TNode<BoolT> expected_a =
- m.UncheckedCast<BoolT>(m.Unsigned(m.SmiToInt32(m.Parameter(2))));
+ m.UncheckedCast<BoolT>(m.Unsigned(m.SmiToInt32(m.Parameter<Smi>(2))));
TNode<Uint16T> expected_b =
- m.UncheckedCast<Uint16T>(m.Unsigned(m.SmiToInt32(m.Parameter(3))));
+ m.UncheckedCast<Uint16T>(m.Unsigned(m.SmiToInt32(m.Parameter<Smi>(3))));
TNode<Uint32T> expected_c =
- m.UncheckedCast<Uint32T>(m.Unsigned(m.SmiToInt32(m.Parameter(4))));
+ m.UncheckedCast<Uint32T>(m.Unsigned(m.SmiToInt32(m.Parameter<Smi>(4))));
TNode<BoolT> expected_d =
- m.UncheckedCast<BoolT>(m.Unsigned(m.SmiToInt32(m.Parameter(5))));
+ m.UncheckedCast<BoolT>(m.Unsigned(m.SmiToInt32(m.Parameter<Smi>(5))));
// Call the Torque-defined macro, which verifies that reading each bitfield
// out of val yields the correct result.
@@ -677,8 +676,8 @@ TEST(TestBitFieldStore) {
TestTorqueAssembler m(asm_tester.state());
{
// Untag the parameters to get a plain integer value.
- TNode<Uint8T> val = m.UncheckedCast<Uint8T>(
- m.Unsigned(m.SmiToInt32(m.CAST(m.Parameter(1)))));
+ TNode<Uint8T> val =
+ m.UncheckedCast<Uint8T>(m.Unsigned(m.SmiToInt32(m.Parameter<Smi>(1))));
m.TestBitFieldStore(val);
m.Return(m.UndefinedConstant());
@@ -701,13 +700,13 @@ TEST(TestBitFieldInit) {
{
// Untag all of the parameters to get plain integer values.
TNode<BoolT> a =
- m.UncheckedCast<BoolT>(m.Unsigned(m.SmiToInt32(m.Parameter(1))));
+ m.UncheckedCast<BoolT>(m.Unsigned(m.SmiToInt32(m.Parameter<Smi>(1))));
TNode<Uint16T> b =
- m.UncheckedCast<Uint16T>(m.Unsigned(m.SmiToInt32(m.Parameter(2))));
+ m.UncheckedCast<Uint16T>(m.Unsigned(m.SmiToInt32(m.Parameter<Smi>(2))));
TNode<Uint32T> c =
- m.UncheckedCast<Uint32T>(m.Unsigned(m.SmiToInt32(m.Parameter(3))));
+ m.UncheckedCast<Uint32T>(m.Unsigned(m.SmiToInt32(m.Parameter<Smi>(3))));
TNode<BoolT> d =
- m.UncheckedCast<BoolT>(m.Unsigned(m.SmiToInt32(m.Parameter(4))));
+ m.UncheckedCast<BoolT>(m.Unsigned(m.SmiToInt32(m.Parameter<Smi>(4))));
// Call the Torque-defined macro, which verifies that reading each bitfield
// out of val yields the correct result.
@@ -738,9 +737,9 @@ TEST(TestBitFieldUintptrOps) {
{
// Untag the parameters to get a plain integer value.
TNode<Uint32T> val2 =
- m.UncheckedCast<Uint32T>(m.Unsigned(m.SmiToInt32(m.Parameter(1))));
+ m.UncheckedCast<Uint32T>(m.Unsigned(m.SmiToInt32(m.Parameter<Smi>(1))));
TNode<UintPtrT> val3 = m.UncheckedCast<UintPtrT>(
- m.ChangeUint32ToWord(m.Unsigned(m.SmiToInt32(m.Parameter(2)))));
+ m.ChangeUint32ToWord(m.Unsigned(m.SmiToInt32(m.Parameter<Smi>(2)))));
m.TestBitFieldUintptrOps(val2, val3);
m.Return(m.UndefinedConstant());
@@ -763,10 +762,10 @@ TEST(TestBitFieldMultipleFlags) {
TestTorqueAssembler m(asm_tester.state());
{
TNode<BoolT> a =
- m.UncheckedCast<BoolT>(m.Unsigned(m.SmiToInt32(m.Parameter(0))));
- TNode<Int32T> b = m.SmiToInt32(m.Parameter(1));
+ m.UncheckedCast<BoolT>(m.Unsigned(m.SmiToInt32(m.Parameter<Smi>(0))));
+ TNode<Int32T> b = m.SmiToInt32(m.Parameter<Smi>(1));
TNode<BoolT> c =
- m.UncheckedCast<BoolT>(m.Unsigned(m.SmiToInt32(m.Parameter(2))));
+ m.UncheckedCast<BoolT>(m.Unsigned(m.SmiToInt32(m.Parameter<Smi>(2))));
m.TestBitFieldMultipleFlags(a, b, c);
m.Return(m.UndefinedConstant());
}
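The test-torque changes are a single API migration: untyped CodeStubAssembler parameter fetches become typed accessors. A sketch of the pattern, using only forms that appear in the hunks:

// Before: fetch an untyped node, then cast it.
TNode<Smi> arg = m.UncheckedCast<Smi>(m.Parameter(1));

// After: the expected type is part of the accessor.
TNode<Smi> checked = m.Parameter<Smi>(1);
TNode<Context> ctx = m.UncheckedParameter<Context>(kNumParams + 2);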
diff --git a/deps/v8/test/cctest/wasm/DIR_METADATA b/deps/v8/test/cctest/wasm/DIR_METADATA
new file mode 100644
index 0000000000..3b428d9660
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>WebAssembly"
+}
\ No newline at end of file
diff --git a/deps/v8/test/cctest/wasm/OWNERS b/deps/v8/test/cctest/wasm/OWNERS
index 0b1c176e04..a89e5f1056 100644
--- a/deps/v8/test/cctest/wasm/OWNERS
+++ b/deps/v8/test/cctest/wasm/OWNERS
@@ -1,3 +1 @@
file:../../../src/wasm/OWNERS
-
-# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
index 3ee0830001..9ce5dec4fb 100644
--- a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
@@ -5,6 +5,7 @@
#include <cstdint>
#include "src/base/overflowing-math.h"
+#include "src/base/safe_conversions.h"
#include "src/codegen/assembler-inl.h"
#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-arguments.h"
@@ -115,7 +116,11 @@ TEST(TestCWasmEntryArgPassing_int64_double) {
WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(0))},
[](double d) { return static_cast<int64_t>(d); });
- FOR_INT64_INPUTS(i) { tester.CheckCall(i); }
+ FOR_FLOAT64_INPUTS(d) {
+ if (base::IsValueInRangeForNumericType<int64_t>(d)) {
+ tester.CheckCall(d);
+ }
+ }
}
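
// Why the range check above matters (a sketch; InInt64Range is a
// hypothetical stand-in for base::IsValueInRangeForNumericType): int64_t's
// maximum is not exactly representable as a double, so a naive d <= max
// comparison would wrongly accept 2^63. NaN fails both comparisons and is
// rejected as well.
bool InInt64Range(double d) {
  return d >= -9223372036854775808.0 /* -2^63, exactly representable */ &&
         d < 9223372036854775808.0 /* 2^63 */;
}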
// Pass float, return double.
diff --git a/deps/v8/test/cctest/wasm/test-gc.cc b/deps/v8/test/cctest/wasm/test-gc.cc
index 8454e8bb35..3366fe04f2 100644
--- a/deps/v8/test/cctest/wasm/test-gc.cc
+++ b/deps/v8/test/cctest/wasm/test-gc.cc
@@ -88,6 +88,10 @@ class WasmGCTester {
byte DefineSignature(FunctionSig* sig) { return builder_.AddSignature(sig); }
+ byte DefineTable(ValueType type, uint32_t min_size, uint32_t max_size) {
+ return builder_.AddTable(type, min_size, max_size);
+ }
+
void CompileModule() {
ZoneBuffer buffer(&zone);
builder_.WriteTo(&buffer);
@@ -1048,6 +1052,21 @@ TEST(GlobalInitReferencingGlobal) {
tester.CheckResult(func, 42);
}
+TEST(IndirectNullSetManually) {
+ WasmGCTester tester;
+ byte sig_index = tester.DefineSignature(tester.sigs.i_i());
+ tester.DefineTable(ValueType::Ref(sig_index, kNullable), 1, 1);
+ byte func_index = tester.DefineFunction(
+ tester.sigs.i_i(), {},
+ {WASM_TABLE_SET(0, WASM_I32V(0), WASM_REF_NULL(sig_index)),
+ WASM_CALL_INDIRECT(sig_index, WASM_I32V(0), WASM_GET_LOCAL(0)),
+ kExprEnd});
+
+ tester.CompileModule();
+
+ tester.CheckHasThrown(func_index, 42);
+}
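+
+// Roughly, in wasm text format (a hand-written sketch, not generated
+// output): the function nulls out the only table slot and then calls
+// through it, which must trap on the null entry.
+//   (table 1 1 (ref null $sig))
+//   (func (param i32) (result i32)
+//     (table.set 0 (i32.const 0) (ref.null $sig))
+//     (call_indirect (type $sig) (local.get 0) (i32.const 0)))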
+
TEST(JsAccess) {
WasmGCTester tester;
const byte type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
diff --git a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
index 31662ee24d..6b888511d9 100644
--- a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
+++ b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
@@ -33,13 +33,9 @@ constexpr int kJumpTableSlotCount = 128;
constexpr uint32_t kJumpTableSize =
JumpTableAssembler::SizeForNumberOfSlots(kJumpTableSlotCount);
-// Must be a safe commit page size.
-#if V8_OS_MACOSX && V8_HOST_ARCH_ARM64
-// See kAppleArmPageSize in platform-posix.cc.
-constexpr size_t kThunkBufferSize = 1 << 14;
-#else
-constexpr size_t kThunkBufferSize = 4 * KB;
-#endif
+// This must be a safe commit page size, so we pick the largest OS page size
+// that V8 is known to support. Arm64 Linux can use up to 64k pages at runtime.
+constexpr size_t kThunkBufferSize = 64 * KB;
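+// A quick sanity check on that choice (an illustrative assumption, not part
+// of the upstream change): commits in kThunkBufferSize chunks stay
+// page-aligned for every supported page size up to 64 KiB.
+static_assert(kThunkBufferSize % (64 * KB) == 0,
+              "thunk buffer must span whole pages on 64k-page systems");
+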
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
// We need the branches (from CompileJumpTableThunk) to be within near-call
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index 06d59620fb..5a0d0918a3 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -8,9 +8,9 @@
#include "src/base/bits.h"
#include "src/base/overflowing-math.h"
+#include "src/base/safe_conversions.h"
#include "src/codegen/assembler-inl.h"
#include "src/objects/objects-inl.h"
-
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
@@ -250,6 +250,8 @@ WASM_EXEC_TEST(I64RemS) {
FOR_INT64_INPUTS(j) {
if (j == 0) {
CHECK_TRAP64(r.Call(i, j));
+ } else if (j == -1) {
+ CHECK_EQ(0, r.Call(i, j));
} else {
CHECK_EQ(i % j, r.Call(i, j));
}
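
// A scalar reference for wasm rem_s (a sketch; WasmRemSRef is a hypothetical
// helper name): remainder by -1 is defined to be 0, which also sidesteps
// C++'s undefined behavior for INT64_MIN % -1.
int64_t WasmRemSRef(int64_t lhs, int64_t rhs) {
  // rhs == 0 traps; callers check that case before getting here.
  if (rhs == -1) return 0;
  return lhs % rhs;
}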
@@ -725,8 +727,7 @@ WASM_EXEC_TEST(I64SConvertF32) {
BUILD(r, WASM_I64_SCONVERT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
- if (i < static_cast<float>(std::numeric_limits<int64_t>::max()) &&
- i >= static_cast<float>(std::numeric_limits<int64_t>::min())) {
+ if (base::IsValueInRangeForNumericType<int64_t>(i)) {
CHECK_EQ(static_cast<int64_t>(i), r.Call(i));
} else {
CHECK_TRAP64(r.Call(i));
@@ -739,8 +740,7 @@ WASM_EXEC_TEST(I64SConvertSatF32) {
BUILD(r, WASM_I64_SCONVERT_SAT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
int64_t expected;
- if (i < static_cast<float>(std::numeric_limits<int64_t>::max()) &&
- i >= static_cast<float>(std::numeric_limits<int64_t>::min())) {
+ if (base::IsValueInRangeForNumericType<int64_t>(i)) {
expected = static_cast<int64_t>(i);
} else if (std::isnan(i)) {
expected = static_cast<int64_t>(0);
@@ -759,8 +759,7 @@ WASM_EXEC_TEST(I64SConvertF64) {
BUILD(r, WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
- if (i < static_cast<double>(std::numeric_limits<int64_t>::max()) &&
- i >= static_cast<double>(std::numeric_limits<int64_t>::min())) {
+ if (base::IsValueInRangeForNumericType<int64_t>(i)) {
CHECK_EQ(static_cast<int64_t>(i), r.Call(i));
} else {
CHECK_TRAP64(r.Call(i));
@@ -773,8 +772,7 @@ WASM_EXEC_TEST(I64SConvertSatF64) {
BUILD(r, WASM_I64_SCONVERT_SAT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
int64_t expected;
- if (i < static_cast<double>(std::numeric_limits<int64_t>::max()) &&
- i >= static_cast<double>(std::numeric_limits<int64_t>::min())) {
+ if (base::IsValueInRangeForNumericType<int64_t>(i)) {
expected = static_cast<int64_t>(i);
} else if (std::isnan(i)) {
expected = static_cast<int64_t>(0);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc
index 013ca93a1b..e1d16b02eb 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd-scalar-lowering.cc
@@ -273,6 +273,59 @@ WASM_SIMD_TEST(V128_I64_PARAMS) {
CHECK_EQ(0, r.Call(0));
}
+WASM_SIMD_TEST(I8x16WidenS_I16x8NarrowU) {
+  // Test any_true lowering with splats of different shapes.
+ {
+ WasmRunner<int32_t, int16_t> r(execution_tier, lower_simd);
+
+ BUILD(r, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(0)),
+ WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(0)),
+ WASM_SIMD_OP(kExprI8x16UConvertI16x8),
+ WASM_SIMD_OP(kExprI16x8SConvertI8x16Low),
+ WASM_SIMD_OP(kExprI32x4ExtractLane), TO_BYTE(0));
+
+ CHECK_EQ(bit_cast<int32_t>(0xffffffff), r.Call(0x7fff));
+ }
+}
+
+WASM_SIMD_TEST(S128SelectWithF32x4) {
+ WasmRunner<float, int32_t, float, int32_t> r(execution_tier, lower_simd);
+ BUILD(r, WASM_GET_LOCAL(0), WASM_SIMD_OP(kExprI32x4Splat), WASM_GET_LOCAL(1),
+ WASM_SIMD_OP(kExprF32x4Splat), WASM_GET_LOCAL(2),
+ WASM_SIMD_OP(kExprI32x4Splat), WASM_SIMD_OP(kExprS128Select),
+ WASM_SIMD_OP(kExprF32x4ExtractLane), 0);
+ // Selection mask is all 0, so always select 2.0.
+ CHECK_EQ(2.0, r.Call(1, 2.0, 0));
+}
+
+WASM_SIMD_TEST(S128AndNotWithF32x4) {
+ WasmRunner<float, int32_t, float> r(execution_tier, lower_simd);
+ BUILD(r, WASM_GET_LOCAL(0), WASM_SIMD_OP(kExprI32x4Splat), WASM_GET_LOCAL(1),
+ WASM_SIMD_OP(kExprF32x4Splat), WASM_SIMD_OP(kExprS128AndNot),
+ WASM_SIMD_OP(kExprF32x4ExtractLane), 0);
+  // 0x00700000 & ~0x40800000 = 0x00700000
+ CHECK_EQ(bit_cast<float>(0x700000),
+ r.Call(0x00700000, bit_cast<float>(0x40800000)));
+}
+
+WASM_SIMD_TEST(FunctionCallWithExtractLaneOutputAsArgument) {
+ // This uses the result of an extract lane as an argument to a function call
+  // to exercise lowering for kCall and make sure the extract lane is
+ // correctly replaced with a scalar.
+ TestSignatures sigs;
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+ WasmFunctionCompiler& fn = r.NewFunction(sigs.f_f());
+
+ BUILD(fn, WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), kExprF32Add);
+
+ BUILD(r, WASM_GET_LOCAL(0), WASM_SIMD_OP(kExprI32x4Splat),
+ WASM_SIMD_OP(kExprF32x4ExtractLane), 0, kExprCallFunction,
+ fn.function_index(), WASM_SIMD_OP(kExprF32x4Splat), WASM_GET_LOCAL(0),
+ WASM_SIMD_OP(kExprI32x4Splat), WASM_SIMD_OP(kExprI32x4Add),
+ WASM_SIMD_OP(kExprI32x4ExtractLane), 0);
+ CHECK_EQ(15, r.Call(5));
+}
+
} // namespace test_run_wasm_simd
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index 8fdcabf44e..8f2131d32c 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -2,15 +2,38 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
#include <limits>
+#include <tuple>
#include <type_traits>
+#include <vector>
#include "src/base/bits.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/base/memory.h"
#include "src/base/overflowing-math.h"
+#include "src/base/utils/random-number-generator.h"
#include "src/codegen/assembler-inl.h"
+#include "src/codegen/cpu-features.h"
+#include "src/codegen/machine-type.h"
+#include "src/common/globals.h"
+#include "src/flags/flags.h"
+#include "src/utils/utils.h"
+#include "src/utils/vector.h"
+#include "src/wasm/compilation-environment.h"
+#include "src/wasm/value-type.h"
+#include "src/wasm/wasm-constants.h"
+#include "src/wasm/wasm-opcodes.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/flag-utils.h"
+#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
namespace v8 {
@@ -92,27 +115,14 @@ T Mul(T a, T b) {
return a * b;
}
-template <typename T, typename = typename std::enable_if<
- std::is_floating_point<T>::value>::type>
-T Div(T a, T b) {
- // Workaround C++ undefined behavior when b is 0.
- return base::Divide(a, b);
-}
-
template <typename T>
T Minimum(T a, T b) {
- // Follow one of the possible implementation given in
- // https://en.cppreference.com/w/cpp/algorithm/min so that it works the same
- // way for floats (when given NaNs/Infs).
- return (b < a) ? b : a;
+ return std::min(a, b);
}
template <typename T>
T Maximum(T a, T b) {
- // Follow one of the possible implementation given in
- // https://en.cppreference.com/w/cpp/algorithm/max so that it works the same
- // way for floats (when given NaNs/Infs).
- return (a < b) ? b : a;
+ return std::max(a, b);
}
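
// The (b < a) ? b : a form used by std::min (and mirrored by std::max) is
// exactly wasm's pseudo-min/max rule: comparisons with NaN are false, so the
// first operand wins. Worked examples (inferred from the tests, not a spec
// quote):
//   Minimum(1.0f, NaN) == 1.0f  // NaN < 1.0f is false -> first operand
//   Minimum(NaN, 1.0f) == NaN   // 1.0f < NaN is false -> first operand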
template <typename T>
@@ -213,90 +223,6 @@ T ArithmeticShiftRight(T a, int shift) {
}
template <typename T>
-T Clamp(int64_t value) {
- static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
- int64_t min = static_cast<int64_t>(std::numeric_limits<T>::min());
- int64_t max = static_cast<int64_t>(std::numeric_limits<T>::max());
- int64_t clamped = std::max(min, std::min(max, value));
- return static_cast<T>(clamped);
-}
-
-template <typename T>
-int64_t Widen(T value) {
- static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
- return static_cast<int64_t>(value);
-}
-
-template <typename T>
-int64_t UnsignedWiden(T value) {
- static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
- using UnsignedT = typename std::make_unsigned<T>::type;
- return static_cast<int64_t>(static_cast<UnsignedT>(value));
-}
-
-template <typename T>
-T Narrow(int64_t value) {
- return Clamp<T>(value);
-}
-
-template <typename T>
-T AddSaturate(T a, T b) {
- return Clamp<T>(Widen(a) + Widen(b));
-}
-
-template <typename T>
-T SubSaturate(T a, T b) {
- return Clamp<T>(Widen(a) - Widen(b));
-}
-
-template <typename T>
-T UnsignedAddSaturate(T a, T b) {
- using UnsignedT = typename std::make_unsigned<T>::type;
- return Clamp<UnsignedT>(UnsignedWiden(a) + UnsignedWiden(b));
-}
-
-template <typename T>
-T UnsignedSubSaturate(T a, T b) {
- using UnsignedT = typename std::make_unsigned<T>::type;
- return Clamp<UnsignedT>(UnsignedWiden(a) - UnsignedWiden(b));
-}
-
-template <typename T>
-T And(T a, T b) {
- return a & b;
-}
-
-template <typename T>
-T Or(T a, T b) {
- return a | b;
-}
-
-template <typename T>
-T Xor(T a, T b) {
- return a ^ b;
-}
-
-template <typename T>
-T Not(T a) {
- return ~a;
-}
-
-template <typename T>
-T LogicalNot(T a) {
- return a == 0 ? -1 : 0;
-}
-
-template <typename T>
-T Sqrt(T a) {
- return std::sqrt(a);
-}
-
-template <typename T>
-T AndNot(T a, T b) {
- return a & ~b;
-}
-
-template <typename T>
T Abs(T a) {
return std::abs(a);
}
@@ -461,6 +387,14 @@ bool IsExtreme(float x) {
(abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
}
+#if V8_OS_AIX
+template <typename T>
+bool MightReverseSign(T float_op) {
+ return float_op == static_cast<T>(Negate) ||
+ float_op == static_cast<T>(std::abs);
+}
+#endif
+
WASM_SIMD_TEST(S128Globals) {
WasmRunner<int32_t> r(execution_tier, lower_simd);
// Set up a global to hold input and output vectors.
@@ -633,6 +567,10 @@ void RunF32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
// Extreme values have larger errors so skip them for approximation tests.
if (!exact && IsExtreme(x)) continue;
float expected = expected_op(x);
+#if V8_OS_AIX
+ if (!MightReverseSign<FloatUnOp>(expected_op))
+ expected = FpOpWorkaround<float>(x, expected);
+#endif
if (!PlatformCanRepresent(expected)) continue;
r.Call(x);
for (int i = 0; i < 4; i++) {
@@ -665,7 +603,7 @@ WASM_SIMD_TEST(F32x4Neg) {
}
WASM_SIMD_TEST(F32x4Sqrt) {
- RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Sqrt, Sqrt);
+ RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Sqrt, std::sqrt);
}
WASM_SIMD_TEST(F32x4RecipApprox) {
@@ -680,19 +618,19 @@ WASM_SIMD_TEST(F32x4RecipSqrtApprox) {
base::RecipSqrt, false /* !exact */);
}
-WASM_SIMD_TEST_NO_LOWERING(F32x4Ceil) {
+WASM_SIMD_TEST(F32x4Ceil) {
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Ceil, ceilf, true);
}
-WASM_SIMD_TEST_NO_LOWERING(F32x4Floor) {
+WASM_SIMD_TEST(F32x4Floor) {
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Floor, floorf, true);
}
-WASM_SIMD_TEST_NO_LOWERING(F32x4Trunc) {
+WASM_SIMD_TEST(F32x4Trunc) {
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Trunc, truncf, true);
}
-WASM_SIMD_TEST_NO_LOWERING(F32x4NearestInt) {
+WASM_SIMD_TEST(F32x4NearestInt) {
RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4NearestInt, nearbyintf,
true);
}
@@ -755,7 +693,7 @@ WASM_SIMD_TEST(F32x4Mul) {
RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Mul, Mul);
}
WASM_SIMD_TEST(F32x4Div) {
- RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Div, Div);
+ RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Div, base::Divide);
}
WASM_SIMD_TEST(F32x4Min) {
RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Min, JSMin);
@@ -764,11 +702,11 @@ WASM_SIMD_TEST(F32x4Max) {
RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Max, JSMax);
}
-WASM_SIMD_TEST_NO_LOWERING(F32x4Pmin) {
+WASM_SIMD_TEST(F32x4Pmin) {
RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Pmin, Minimum);
}
-WASM_SIMD_TEST_NO_LOWERING(F32x4Pmax) {
+WASM_SIMD_TEST(F32x4Pmax) {
RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Pmax, Maximum);
}
@@ -827,6 +765,65 @@ WASM_SIMD_TEST(F32x4Le) {
RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Le, LessEqual);
}
+#if V8_TARGET_ARCH_X64
+// TODO(v8:10983) Prototyping sign select.
+template <typename T>
+void RunSignSelect(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode signselect, WasmOpcode splat,
+ std::array<int8_t, kSimd128Size> mask) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
+ WasmRunner<int32_t, T, T> r(execution_tier, lower_simd);
+ T* output = r.builder().template AddGlobal<T>(kWasmS128);
+
+ // Splat 2 constant values, then use a mask that selects alternate lanes.
+ BUILD(r, WASM_GET_LOCAL(0), WASM_SIMD_OP(splat), WASM_GET_LOCAL(1),
+ WASM_SIMD_OP(splat), WASM_SIMD_CONSTANT(mask), WASM_SIMD_OP(signselect),
+ kExprGlobalSet, 0, WASM_ONE);
+
+ r.Call(1, 2);
+
+ constexpr int lanes = kSimd128Size / sizeof(T);
+ for (int i = 0; i < lanes; i += 2) {
+ CHECK_EQ(1, ReadLittleEndianValue<T>(&output[i]));
+ }
+ for (int i = 1; i < lanes; i += 2) {
+ CHECK_EQ(2, ReadLittleEndianValue<T>(&output[i]));
+ }
+}
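+
+// A scalar model of the selection checked above (SignSelectLaneRef is a
+// hypothetical name; the rule is inferred from the expected results): a
+// lane is taken from the first operand when its mask lane is negative.
+template <typename T>
+T SignSelectLaneRef(T mask_lane, T from_first, T from_second) {
+  return mask_lane < 0 ? from_first : from_second;
+}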
+
+WASM_SIMD_TEST_NO_LOWERING(I8x16SignSelect) {
+ std::array<int8_t, kSimd128Size> mask = {0x80, 0, -1, 0, 0x80, 0, -1, 0,
+ 0x80, 0, -1, 0, 0x80, 0, -1, 0};
+ RunSignSelect<int8_t>(execution_tier, lower_simd, kExprI8x16SignSelect,
+ kExprI8x16Splat, mask);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I16x8SignSelect) {
+ std::array<int16_t, kSimd128Size / 2> selection = {0x8000, 0, -1, 0,
+ 0x8000, 0, -1, 0};
+ std::array<int8_t, kSimd128Size> mask;
+ memcpy(mask.data(), selection.data(), kSimd128Size);
+ RunSignSelect<int16_t>(execution_tier, lower_simd, kExprI16x8SignSelect,
+ kExprI16x8Splat, mask);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I32x4SignSelect) {
+ std::array<int32_t, kSimd128Size / 4> selection = {0x80000000, 0, -1, 0};
+ std::array<int8_t, kSimd128Size> mask;
+ memcpy(mask.data(), selection.data(), kSimd128Size);
+ RunSignSelect<int32_t>(execution_tier, lower_simd, kExprI32x4SignSelect,
+ kExprI32x4Splat, mask);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2SignSelect) {
+ std::array<int64_t, kSimd128Size / 8> selection = {0x8000000000000000, 0};
+ std::array<int8_t, kSimd128Size> mask;
+ memcpy(mask.data(), selection.data(), kSimd128Size);
+ RunSignSelect<int64_t>(execution_tier, lower_simd, kExprI64x2SignSelect,
+ kExprI64x2Splat, mask);
+}
+#endif // V8_TARGET_ARCH_X64
+
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
WASM_SIMD_TEST_NO_LOWERING(F32x4Qfma) {
FLAG_SCOPE(wasm_simd_post_mvp);
@@ -1043,44 +1040,6 @@ WASM_SIMD_TEST(I64x2Sub) {
WASM_SIMD_TEST_NO_LOWERING(I64x2Eq) {
RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Eq, Equal);
}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2Ne) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Ne, NotEqual);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2LtS) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LtS, Less);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2LeS) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LeS, LessEqual);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2GtS) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GtS, Greater);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2GeS) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeS, GreaterEqual);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2LtU) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LtU, UnsignedLess);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2LeU) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LeU,
- UnsignedLessEqual);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2GtU) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GtU, UnsignedGreater);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2GeU) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeU,
- UnsignedGreaterEqual);
-}
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
WASM_SIMD_TEST(F64x2Splat) {
@@ -1261,6 +1220,10 @@ void RunF64x2UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
// Extreme values have larger errors so skip them for approximation tests.
if (!exact && IsExtreme(x)) continue;
double expected = expected_op(x);
+#if V8_OS_AIX
+ if (!MightReverseSign<DoubleUnOp>(expected_op))
+ expected = FpOpWorkaround<double>(x, expected);
+#endif
if (!PlatformCanRepresent(expected)) continue;
r.Call(x);
for (int i = 0; i < 2; i++) {
@@ -1293,22 +1256,22 @@ WASM_SIMD_TEST(F64x2Neg) {
}
WASM_SIMD_TEST(F64x2Sqrt) {
- RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Sqrt, Sqrt);
+ RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Sqrt, std::sqrt);
}
-WASM_SIMD_TEST_NO_LOWERING(F64x2Ceil) {
+WASM_SIMD_TEST(F64x2Ceil) {
RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Ceil, ceil, true);
}
-WASM_SIMD_TEST_NO_LOWERING(F64x2Floor) {
+WASM_SIMD_TEST(F64x2Floor) {
RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Floor, floor, true);
}
-WASM_SIMD_TEST_NO_LOWERING(F64x2Trunc) {
+WASM_SIMD_TEST(F64x2Trunc) {
RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Trunc, trunc, true);
}
-WASM_SIMD_TEST_NO_LOWERING(F64x2NearestInt) {
+WASM_SIMD_TEST(F64x2NearestInt) {
RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2NearestInt, nearbyint,
true);
}
@@ -1373,14 +1336,14 @@ WASM_SIMD_TEST(F64x2Mul) {
}
WASM_SIMD_TEST(F64x2Div) {
- RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Div, Div);
+ RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Div, base::Divide);
}
-WASM_SIMD_TEST_NO_LOWERING(F64x2Pmin) {
+WASM_SIMD_TEST(F64x2Pmin) {
RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Pmin, Minimum);
}
-WASM_SIMD_TEST_NO_LOWERING(F64x2Pmax) {
+WASM_SIMD_TEST(F64x2Pmax) {
RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Pmax, Maximum);
}
@@ -1457,26 +1420,6 @@ WASM_SIMD_TEST(I64x2Mul) {
base::MulWithWraparound);
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X
-WASM_SIMD_TEST_NO_LOWERING(I64x2MinS) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2MinS, Minimum);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2MaxS) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2MaxS, Maximum);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2MinU) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2MinU,
- UnsignedMinimum);
-}
-
-WASM_SIMD_TEST_NO_LOWERING(I64x2MaxU) {
- RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2MaxU,
- UnsignedMaximum);
-}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X
-
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
WASM_SIMD_TEST_NO_LOWERING(F64x2Qfma) {
FLAG_SCOPE(wasm_simd_post_mvp);
@@ -1686,6 +1629,27 @@ WASM_SIMD_TEST(I32x4BitMask) {
}
}
+// TODO(v8:10997) Prototyping i64x2.bitmask.
+#if V8_TARGET_ARCH_X64
+WASM_SIMD_TEST_NO_LOWERING(I64x2BitMask) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
+ WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
+ byte value1 = r.AllocateLocal(kWasmS128);
+
+ BUILD(r, WASM_SET_LOCAL(value1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(0))),
+ WASM_SET_LOCAL(value1, WASM_SIMD_I64x2_REPLACE_LANE(
+ 0, WASM_GET_LOCAL(value1), WASM_I64V_1(0))),
+ WASM_SIMD_UNOP(kExprI64x2BitMask, WASM_GET_LOCAL(value1)));
+
+ for (int64_t x : compiler::ValueHelper::GetVector<int64_t>()) {
+ int32_t actual = r.Call(x);
+ // Lane 0 is always 0 (positive).
+ int32_t expected = std::signbit(static_cast<double>(x)) ? 0x2 : 0x0;
+ CHECK_EQ(actual, expected);
+ }
+}
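+
+// Scalar model of the expectation above (I64x2BitMaskRef is a hypothetical
+// name): one sign bit per lane, packed into the low bits of the result.
+int32_t I64x2BitMaskRef(int64_t lane0, int64_t lane1) {
+  return static_cast<int32_t>((static_cast<uint64_t>(lane0) >> 63) |
+                              ((static_cast<uint64_t>(lane1) >> 63) << 1));
+}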
+#endif // V8_TARGET_ARCH_X64
+
WASM_SIMD_TEST(I8x16Splat) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
// Set up a global to hold output vector.
@@ -1827,8 +1791,8 @@ WASM_SIMD_TEST(I32x4ConvertI16x8) {
FOR_INT16_INPUTS(x) {
r.Call(x);
- int32_t expected_signed = static_cast<int32_t>(Widen<int16_t>(x));
- int32_t expected_unsigned = static_cast<int32_t>(UnsignedWiden<int16_t>(x));
+ int32_t expected_signed = static_cast<int32_t>(x);
+ int32_t expected_unsigned = static_cast<int32_t>(static_cast<uint16_t>(x));
for (int i = 0; i < 4; i++) {
CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g0[i]));
CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g1[i]));
@@ -1838,6 +1802,45 @@ WASM_SIMD_TEST(I32x4ConvertI16x8) {
}
}
+// TODO(v8:10972) Prototyping i64x2 convert from i32x4.
+// Tests both signed and unsigned conversion from I32x4 (unpacking).
+#if V8_TARGET_ARCH_ARM64
+WASM_SIMD_TEST_NO_LOWERING(I64x2ConvertI32x4) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+ // Create four output vectors to hold signed and unsigned results.
+ int64_t* g0 = r.builder().AddGlobal<int64_t>(kWasmS128);
+ int64_t* g1 = r.builder().AddGlobal<int64_t>(kWasmS128);
+ int64_t* g2 = r.builder().AddGlobal<int64_t>(kWasmS128);
+ int64_t* g3 = r.builder().AddGlobal<int64_t>(kWasmS128);
+ // Build fn to splat test value, perform conversions, and write the results.
+ byte value = 0;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value))),
+ WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(kExprI64x2SConvertI32x4High,
+ WASM_GET_LOCAL(temp1))),
+ WASM_SET_GLOBAL(1, WASM_SIMD_UNOP(kExprI64x2SConvertI32x4Low,
+ WASM_GET_LOCAL(temp1))),
+ WASM_SET_GLOBAL(2, WASM_SIMD_UNOP(kExprI64x2UConvertI32x4High,
+ WASM_GET_LOCAL(temp1))),
+ WASM_SET_GLOBAL(3, WASM_SIMD_UNOP(kExprI64x2UConvertI32x4Low,
+ WASM_GET_LOCAL(temp1))),
+ WASM_ONE);
+
+ FOR_INT32_INPUTS(x) {
+ r.Call(x);
+ int64_t expected_signed = static_cast<int64_t>(x);
+ int64_t expected_unsigned = static_cast<int64_t>(static_cast<uint32_t>(x));
+ for (int i = 0; i < 2; i++) {
+ CHECK_EQ(expected_signed, ReadLittleEndianValue<int64_t>(&g0[i]));
+ CHECK_EQ(expected_signed, ReadLittleEndianValue<int64_t>(&g1[i]));
+ CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int64_t>(&g2[i]));
+ CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int64_t>(&g3[i]));
+ }
+ }
+}
+#endif // V8_TARGET_ARCH_ARM64
+
void RunI32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, Int32UnOp expected_op) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
@@ -1865,12 +1868,64 @@ WASM_SIMD_TEST(I32x4Neg) {
}
WASM_SIMD_TEST(I32x4Abs) {
- RunI32x4UnOpTest(execution_tier, lower_simd, kExprI32x4Abs, Abs);
+ RunI32x4UnOpTest(execution_tier, lower_simd, kExprI32x4Abs, std::abs);
}
WASM_SIMD_TEST(S128Not) {
- RunI32x4UnOpTest(execution_tier, lower_simd, kExprS128Not, Not);
+ RunI32x4UnOpTest(execution_tier, lower_simd, kExprS128Not,
+ [](int32_t x) { return ~x; });
+}
+
+#if V8_TARGET_ARCH_ARM64
+// TODO(v8:11086) Prototype i32x4.extadd_pairwise_i16x8_{s,u}
+template <typename Narrow, typename Wide>
+void RunExtAddPairwiseTest(TestExecutionTier execution_tier,
+ LowerSimd lower_simd, WasmOpcode ext_add_pairwise,
+ WasmOpcode splat) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
+ constexpr int num_lanes = kSimd128Size / sizeof(Wide);
+ WasmRunner<int32_t, Narrow> r(execution_tier, lower_simd);
+ Wide* g = r.builder().template AddGlobal<Wide>(kWasmS128);
+
+ // TODO(v8:11086) We splat the same value, so pairwise adding ends up adding
+  // the same value to itself. Consider a more complicated test, e.g. having
+  // 2 vectors and shuffling them.
+ BUILD(r, WASM_GET_LOCAL(0), WASM_SIMD_OP(splat),
+ WASM_SIMD_OP(ext_add_pairwise), kExprGlobalSet, 0, WASM_ONE);
+
+ for (Narrow x : compiler::ValueHelper::GetVector<Narrow>()) {
+ r.Call(x);
+ Wide expected = AddLong<Wide>(x, x);
+ for (int i = 0; i < num_lanes; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<Wide>(&g[i]));
+ }
+ }
+}
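+
+// AddLong is assumed to widen both inputs before adding, so the pairwise
+// sum cannot wrap; AddLongRef is a hypothetical sketch of those semantics:
+template <typename Wide, typename Narrow>
+Wide AddLongRef(Narrow a, Narrow b) {
+  return static_cast<Wide>(a) + static_cast<Wide>(b);
+}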
+
+WASM_SIMD_TEST_NO_LOWERING(I32x4ExtAddPairwiseI16x8S) {
+ RunExtAddPairwiseTest<int16_t, int32_t>(execution_tier, lower_simd,
+ kExprI32x4ExtAddPairwiseI16x8S,
+ kExprI16x8Splat);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I32x4ExtAddPairwiseI16x8U) {
+ RunExtAddPairwiseTest<uint16_t, uint32_t>(execution_tier, lower_simd,
+ kExprI32x4ExtAddPairwiseI16x8U,
+ kExprI16x8Splat);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I16x8ExtAddPairwiseI8x16S) {
+ RunExtAddPairwiseTest<int8_t, int16_t>(execution_tier, lower_simd,
+ kExprI16x8ExtAddPairwiseI8x16S,
+ kExprI8x16Splat);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I16x8ExtAddPairwiseI8x16U) {
+ RunExtAddPairwiseTest<uint8_t, uint16_t>(execution_tier, lower_simd,
+ kExprI16x8ExtAddPairwiseI8x16U,
+ kExprI8x16Splat);
}
+#endif // V8_TARGET_ARCH_ARM64
void RunI32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, Int32BinOp expected_op) {
@@ -1932,20 +1987,24 @@ WASM_SIMD_TEST(I32x4MaxU) {
}
WASM_SIMD_TEST(S128And) {
- RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128And, And);
+ RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128And,
+ [](int32_t x, int32_t y) { return x & y; });
}
WASM_SIMD_TEST(S128Or) {
- RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128Or, Or);
+ RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128Or,
+ [](int32_t x, int32_t y) { return x | y; });
}
WASM_SIMD_TEST(S128Xor) {
- RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128Xor, Xor);
+ RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128Xor,
+ [](int32_t x, int32_t y) { return x ^ y; });
}
// Bitwise operation, doesn't really matter what simd type we test it with.
WASM_SIMD_TEST(S128AndNot) {
- RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128AndNot, AndNot);
+ RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128AndNot,
+ [](int32_t x, int32_t y) { return x & ~y; });
}
WASM_SIMD_TEST(I32x4Eq) {
@@ -2061,8 +2120,8 @@ WASM_SIMD_TEST(I16x8ConvertI8x16) {
FOR_INT8_INPUTS(x) {
r.Call(x);
- int16_t expected_signed = static_cast<int16_t>(Widen<int8_t>(x));
- int16_t expected_unsigned = static_cast<int16_t>(UnsignedWiden<int8_t>(x));
+ int16_t expected_signed = static_cast<int16_t>(x);
+ int16_t expected_unsigned = static_cast<int16_t>(static_cast<uint8_t>(x));
for (int i = 0; i < 8; i++) {
CHECK_EQ(expected_signed, ReadLittleEndianValue<int16_t>(&g0[i]));
CHECK_EQ(expected_signed, ReadLittleEndianValue<int16_t>(&g1[i]));
@@ -2092,8 +2151,8 @@ WASM_SIMD_TEST(I16x8ConvertI32x4) {
FOR_INT32_INPUTS(x) {
r.Call(x);
- int16_t expected_signed = Narrow<int16_t>(x);
- int16_t expected_unsigned = Narrow<uint16_t>(x);
+ int16_t expected_signed = Saturate<int16_t>(x);
+ int16_t expected_unsigned = Saturate<uint16_t>(x);
for (int i = 0; i < 8; i++) {
CHECK_EQ(expected_signed, ReadLittleEndianValue<int16_t>(&g0[i]));
CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int16_t>(&g1[i]));
@@ -2163,9 +2222,9 @@ WASM_SIMD_TEST(I16x8Add) {
base::AddWithWraparound);
}
-WASM_SIMD_TEST(I16x8AddSaturateS) {
- RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8AddSaturateS,
- AddSaturate);
+WASM_SIMD_TEST(I16x8AddSatS) {
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8AddSatS,
+ SaturateAdd<int16_t>);
}
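
// SaturateAdd/SaturateSub (like the narrowing Saturate<T> above) are assumed
// to compute in a wider type and clamp to T's range; SaturateAddRef is a
// hypothetical sketch:
template <typename T>
T SaturateAddRef(T a, T b) {
  int64_t sum = static_cast<int64_t>(a) + static_cast<int64_t>(b);
  int64_t lo = static_cast<int64_t>(std::numeric_limits<T>::min());
  int64_t hi = static_cast<int64_t>(std::numeric_limits<T>::max());
  return static_cast<T>(std::max(lo, std::min(hi, sum)));
}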
WASM_SIMD_TEST(I16x8Sub) {
@@ -2173,9 +2232,9 @@ WASM_SIMD_TEST(I16x8Sub) {
base::SubWithWraparound);
}
-WASM_SIMD_TEST(I16x8SubSaturateS) {
- RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8SubSaturateS,
- SubSaturate);
+WASM_SIMD_TEST(I16x8SubSatS) {
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8SubSatS,
+ SaturateSub<int16_t>);
}
WASM_SIMD_TEST(I16x8Mul) {
@@ -2191,14 +2250,14 @@ WASM_SIMD_TEST(I16x8MaxS) {
RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MaxS, Maximum);
}
-WASM_SIMD_TEST(I16x8AddSaturateU) {
- RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8AddSaturateU,
- UnsignedAddSaturate);
+WASM_SIMD_TEST(I16x8AddSatU) {
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8AddSatU,
+ SaturateAdd<uint16_t>);
}
-WASM_SIMD_TEST(I16x8SubSaturateU) {
- RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8SubSaturateU,
- UnsignedSubSaturate);
+WASM_SIMD_TEST(I16x8SubSatU) {
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8SubSatU,
+ SaturateSub<uint16_t>);
}
WASM_SIMD_TEST(I16x8MinU) {
@@ -2259,12 +2318,128 @@ WASM_SIMD_TEST(I16x8RoundingAverageU) {
base::RoundingAverageUnsigned);
}
-// TODO(v8:10583) Prototype i32x4.dot_i16x8_s
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
-WASM_SIMD_TEST_NO_LOWERING(I32x4DotI16x8S) {
+#if V8_TARGET_ARCH_ARM64
+// TODO(v8:10971) Prototype i16x8.q15mulr_sat_s
+WASM_SIMD_TEST_NO_LOWERING(I16x8Q15MulRSatS) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
+ RunI16x8BinOpTest<int16_t>(execution_tier, lower_simd, kExprI16x8Q15MulRSatS,
+ SaturateRoundingQMul<int16_t>);
+}
+
+// TODO(v8:11008) Prototype extended multiplication.
+namespace {
+enum class MulHalf { kLow, kHigh };
+
+// Helper to run ext mul tests. It will splat 2 input values into 2 v128, call
+// the mul op on these operands, and set the result into a global.
+// It will zero the top or bottom half of one of the operands; this will catch
+// mistakes if we multiply the incorrect halves.
+template <typename S, typename T, typename OpType = T (*)(S, S)>
+void RunExtMulTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, OpType expected_op, WasmOpcode splat,
+ MulHalf half) {
FLAG_SCOPE(wasm_simd_post_mvp);
+ WasmRunner<int32_t, S, S> r(execution_tier, lower_simd);
+ int lane_to_zero = half == MulHalf::kLow ? 1 : 0;
+ T* g = r.builder().template AddGlobal<T>(kWasmS128);
+
+ BUILD(r,
+ WASM_SET_GLOBAL(
+ 0, WASM_SIMD_BINOP(
+ opcode,
+ WASM_SIMD_I64x2_REPLACE_LANE(
+ lane_to_zero, WASM_SIMD_UNOP(splat, WASM_GET_LOCAL(0)),
+ WASM_I64V_1(0)),
+ WASM_SIMD_UNOP(splat, WASM_GET_LOCAL(1)))),
+ WASM_ONE);
+ constexpr int lanes = kSimd128Size / sizeof(T);
+ for (S x : compiler::ValueHelper::GetVector<S>()) {
+ for (S y : compiler::ValueHelper::GetVector<S>()) {
+ r.Call(x, y);
+ T expected = expected_op(x, y);
+ for (int i = 0; i < lanes; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<T>(&g[i]));
+ }
+ }
+ }
+}
+} // namespace
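+
+// The MultiplyLong reference used below widens both operands before
+// multiplying; MultiplyLongRef is a hypothetical sketch of the assumed
+// semantics of the shared helper:
+template <typename Wide, typename Narrow>
+Wide MultiplyLongRef(Narrow a, Narrow b) {
+  return static_cast<Wide>(a) * static_cast<Wide>(b);
+}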
+
+WASM_SIMD_TEST_NO_LOWERING(I16x8ExtMulLowI8x16S) {
+ RunExtMulTest<int8_t, int16_t>(execution_tier, lower_simd,
+ kExprI16x8ExtMulLowI8x16S, MultiplyLong,
+ kExprI8x16Splat, MulHalf::kLow);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I16x8ExtMulHighI8x16S) {
+ RunExtMulTest<int8_t, int16_t>(execution_tier, lower_simd,
+ kExprI16x8ExtMulHighI8x16S, MultiplyLong,
+ kExprI8x16Splat, MulHalf::kHigh);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I16x8ExtMulLowI8x16U) {
+ RunExtMulTest<uint8_t, uint16_t>(execution_tier, lower_simd,
+ kExprI16x8ExtMulLowI8x16U, MultiplyLong,
+ kExprI8x16Splat, MulHalf::kLow);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I16x8ExtMulHighI8x16U) {
+ RunExtMulTest<uint8_t, uint16_t>(execution_tier, lower_simd,
+ kExprI16x8ExtMulHighI8x16U, MultiplyLong,
+ kExprI8x16Splat, MulHalf::kHigh);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I32x4ExtMulLowI16x8S) {
+ RunExtMulTest<int16_t, int32_t>(execution_tier, lower_simd,
+ kExprI32x4ExtMulLowI16x8S, MultiplyLong,
+ kExprI16x8Splat, MulHalf::kLow);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I32x4ExtMulHighI16x8S) {
+ RunExtMulTest<int16_t, int32_t>(execution_tier, lower_simd,
+ kExprI32x4ExtMulHighI16x8S, MultiplyLong,
+ kExprI16x8Splat, MulHalf::kHigh);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I32x4ExtMulLowI16x8U) {
+ RunExtMulTest<uint16_t, uint32_t>(execution_tier, lower_simd,
+ kExprI32x4ExtMulLowI16x8U, MultiplyLong,
+ kExprI16x8Splat, MulHalf::kLow);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I32x4ExtMulHighI16x8U) {
+ RunExtMulTest<uint16_t, uint32_t>(execution_tier, lower_simd,
+ kExprI32x4ExtMulHighI16x8U, MultiplyLong,
+ kExprI16x8Splat, MulHalf::kHigh);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2ExtMulLowI32x4S) {
+ RunExtMulTest<int32_t, int64_t>(execution_tier, lower_simd,
+ kExprI64x2ExtMulLowI32x4S, MultiplyLong,
+ kExprI32x4Splat, MulHalf::kLow);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2ExtMulHighI32x4S) {
+ RunExtMulTest<int32_t, int64_t>(execution_tier, lower_simd,
+ kExprI64x2ExtMulHighI32x4S, MultiplyLong,
+ kExprI32x4Splat, MulHalf::kHigh);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2ExtMulLowI32x4U) {
+ RunExtMulTest<uint32_t, uint64_t>(execution_tier, lower_simd,
+ kExprI64x2ExtMulLowI32x4U, MultiplyLong,
+ kExprI32x4Splat, MulHalf::kLow);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2ExtMulHighI32x4U) {
+ RunExtMulTest<uint32_t, uint64_t>(execution_tier, lower_simd,
+ kExprI64x2ExtMulHighI32x4U, MultiplyLong,
+ kExprI32x4Splat, MulHalf::kHigh);
+}
+#endif // V8_TARGET_ARCH_ARM64
+
+WASM_SIMD_TEST(I32x4DotI16x8S) {
WasmRunner<int32_t, int16_t, int16_t> r(execution_tier, lower_simd);
int32_t* g = r.builder().template AddGlobal<int32_t>(kWasmS128);
byte value1 = 0, value2 = 1;
@@ -2288,8 +2463,6 @@ WASM_SIMD_TEST_NO_LOWERING(I32x4DotI16x8S) {
}
}
}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 ||
- // V8_TARGET_ARCH_ARM
void RunI16x8ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, Int16ShiftOp expected_op) {
@@ -2368,6 +2541,31 @@ WASM_SIMD_TEST(I8x16Abs) {
RunI8x16UnOpTest(execution_tier, lower_simd, kExprI8x16Abs, Abs);
}
+#if V8_TARGET_ARCH_ARM64
+// TODO(v8:11002) Prototype i8x16.popcnt.
+WASM_SIMD_TEST_NO_LOWERING(I8x16Popcnt) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
+ // Global to hold output.
+ int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
+ // Build fn to splat test value, perform unop, and write the result.
+ byte value = 0;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value))),
+ WASM_SET_GLOBAL(
+ 0, WASM_SIMD_UNOP(kExprI8x16Popcnt, WASM_GET_LOCAL(temp1))),
+ WASM_ONE);
+
+ FOR_UINT8_INPUTS(x) {
+ r.Call(x);
+ unsigned expected = base::bits::CountPopulation(x);
+ for (int i = 0; i < 16; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g[i]));
+ }
+ }
+}
+#endif // V8_TARGET_ARCH_ARM64
+
// Tests both signed and unsigned conversion from I16x8 (packing).
WASM_SIMD_TEST(I8x16ConvertI16x8) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
@@ -2388,8 +2586,8 @@ WASM_SIMD_TEST(I8x16ConvertI16x8) {
FOR_INT16_INPUTS(x) {
r.Call(x);
- int8_t expected_signed = Narrow<int8_t>(x);
- int8_t expected_unsigned = Narrow<uint8_t>(x);
+ int8_t expected_signed = Saturate<int8_t>(x);
+ int8_t expected_unsigned = Saturate<uint8_t>(x);
for (int i = 0; i < 16; i++) {
CHECK_EQ(expected_signed, ReadLittleEndianValue<int8_t>(&g0[i]));
CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int8_t>(&g1[i]));
@@ -2429,9 +2627,9 @@ WASM_SIMD_TEST(I8x16Add) {
base::AddWithWraparound);
}
-WASM_SIMD_TEST(I8x16AddSaturateS) {
- RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16AddSaturateS,
- AddSaturate);
+WASM_SIMD_TEST(I8x16AddSatS) {
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16AddSatS,
+ SaturateAdd<int8_t>);
}
WASM_SIMD_TEST(I8x16Sub) {
@@ -2439,9 +2637,9 @@ WASM_SIMD_TEST(I8x16Sub) {
base::SubWithWraparound);
}
-WASM_SIMD_TEST(I8x16SubSaturateS) {
- RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16SubSaturateS,
- SubSaturate);
+WASM_SIMD_TEST(I8x16SubSatS) {
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16SubSatS,
+ SaturateSub<int8_t>);
}
WASM_SIMD_TEST(I8x16MinS) {
@@ -2452,14 +2650,14 @@ WASM_SIMD_TEST(I8x16MaxS) {
RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MaxS, Maximum);
}
-WASM_SIMD_TEST(I8x16AddSaturateU) {
- RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16AddSaturateU,
- UnsignedAddSaturate);
+WASM_SIMD_TEST(I8x16AddSatU) {
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16AddSatU,
+ SaturateAdd<uint8_t>);
}
-WASM_SIMD_TEST(I8x16SubSaturateU) {
- RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16SubSaturateU,
- UnsignedSubSaturate);
+WASM_SIMD_TEST(I8x16SubSatU) {
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16SubSatU,
+ SaturateSub<uint8_t>);
}
WASM_SIMD_TEST(I8x16MinU) {
@@ -3110,9 +3308,6 @@ WASM_SIMD_TEST(S8x16MultiShuffleFuzz) {
CHECK_EQ(1, r.Call()); \
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
-WASM_SIMD_BOOL_REDUCTION_TEST(64x2, 2, WASM_I64V)
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_BOOL_REDUCTION_TEST(32x4, 4, WASM_I32V)
WASM_SIMD_BOOL_REDUCTION_TEST(16x8, 8, WASM_I32V)
WASM_SIMD_BOOL_REDUCTION_TEST(8x16, 16, WASM_I32V)
@@ -3481,7 +3676,7 @@ WASM_SIMD_TEST(S128Load32Splat) {
RunLoadSplatTest<int32_t>(execution_tier, lower_simd, kExprS128Load32Splat);
}
-WASM_SIMD_TEST_NO_LOWERING(S128Load64Splat) {
+WASM_SIMD_TEST(S128Load64Splat) {
RunLoadSplatTest<int64_t>(execution_tier, lower_simd, kExprS128Load64Splat);
}
@@ -3589,40 +3784,266 @@ WASM_SIMD_TEST(S128Load32x2S) {
kExprS128Load32x2S);
}
-// TODO(v8:10713): Prototyping v128.load32_zero and v128.load64_zero.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
template <typename S>
void RunLoadZeroTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode op) {
- FLAG_SCOPE(wasm_simd_post_mvp);
constexpr int lanes_s = kSimd128Size / sizeof(S);
constexpr int mem_index = 16; // Load from mem index 16 (bytes).
- WasmRunner<int32_t> r(execution_tier, lower_simd);
- S* memory = r.builder().AddMemoryElems<S>(kWasmPageSize / sizeof(S));
- S* global = r.builder().AddGlobal<S>(kWasmS128);
- BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_LOAD_OP(op, WASM_I32V(mem_index))),
+ constexpr S sentinel = S{-1};
+ S* memory;
+ S* global;
+
+ auto initialize_builder = [=](WasmRunner<int32_t>* r) -> std::tuple<S*, S*> {
+ S* memory = r->builder().AddMemoryElems<S>(kWasmPageSize / sizeof(S));
+ S* global = r->builder().AddGlobal<S>(kWasmS128);
+ r->builder().RandomizeMemory();
+ r->builder().WriteMemory(&memory[lanes_s], sentinel);
+ return std::make_tuple(memory, global);
+ };
+
+ // Check all supported alignments.
+ constexpr int max_alignment = base::bits::CountTrailingZeros(sizeof(S));
+ for (byte alignment = 0; alignment <= max_alignment; alignment++) {
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
+ std::tie(memory, global) = initialize_builder(&r);
+
+ BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_LOAD_OP(op, WASM_I32V(mem_index))),
+ WASM_ONE);
+ r.Call();
+
+ // Only first lane is set to sentinel.
+ CHECK_EQ(sentinel, ReadLittleEndianValue<S>(&global[0]));
+ // The other lanes are zero.
+ for (int i = 1; i < lanes_s; i++) {
+ CHECK_EQ(S{0}, ReadLittleEndianValue<S>(&global[i]));
+ }
+ }
+
+ {
+    // Use memarg to specify offset.
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
+ std::tie(memory, global) = initialize_builder(&r);
+
+ BUILD(
+ r,
+ WASM_SET_GLOBAL(0, WASM_SIMD_LOAD_OP_OFFSET(op, WASM_ZERO, mem_index)),
WASM_ONE);
+ r.Call();
- S sentinel = S{-1};
- r.builder().WriteMemory(&memory[lanes_s], sentinel);
- r.Call();
+ // Only first lane is set to sentinel.
+ CHECK_EQ(sentinel, ReadLittleEndianValue<S>(&global[0]));
+ // The other lanes are zero.
+ for (int i = 1; i < lanes_s; i++) {
+ CHECK_EQ(S{0}, ReadLittleEndianValue<S>(&global[i]));
+ }
+ }
+
+ // Test for OOB.
+ {
+ WasmRunner<int32_t, uint32_t> r(execution_tier, lower_simd);
+ r.builder().AddMemoryElems<S>(kWasmPageSize / sizeof(S));
+ r.builder().AddGlobal<S>(kWasmS128);
+
+ BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_LOAD_OP(op, WASM_GET_LOCAL(0))),
+ WASM_ONE);
+
+    // Load-zero loads sizeof(S) bytes.
+ for (uint32_t offset = kWasmPageSize - (sizeof(S) - 1);
+ offset < kWasmPageSize; ++offset) {
+ CHECK_TRAP(r.Call(offset));
+ }
+ }
+}
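+
+// Wasm memargs encode alignment as log2(bytes), which is why the loop above
+// is bounded by CountTrailingZeros(sizeof(S)). Worked example for 64-bit
+// loads: sizeof(int64_t) == 8 and CountTrailingZeros(8) == 3, so alignments
+// 2^0..2^3 (1, 2, 4 and 8 bytes) are all exercised.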
+
+WASM_SIMD_TEST(S128Load32Zero) {
+ RunLoadZeroTest<int32_t>(execution_tier, lower_simd, kExprS128Load32Zero);
+}
+
+WASM_SIMD_TEST(S128Load64Zero) {
+ RunLoadZeroTest<int64_t>(execution_tier, lower_simd, kExprS128Load64Zero);
+}
+
+#if V8_TARGET_ARCH_X64
+// TODO(v8:10975): Prototyping load lane and store lane.
+template <typename T>
+void RunLoadLaneTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode load_op, WasmOpcode splat_op) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
+ if (execution_tier == TestExecutionTier::kLiftoff) {
+ // Not yet implemented.
+ return;
+ }
+
+ WasmOpcode const_op =
+ splat_op == kExprI64x2Splat ? kExprI64Const : kExprI32Const;
+
+ constexpr int lanes_s = kSimd128Size / sizeof(T);
+ constexpr int mem_index = 16; // Load from mem index 16 (bytes).
+ constexpr int splat_value = 33;
+ T sentinel = T{-1};
+
+ T* memory;
+ T* global;
+
+ auto build_fn = [=, &memory, &global](WasmRunner<int32_t>& r, int mem_index,
+ int lane, int alignment, int offset) {
+ memory = r.builder().AddMemoryElems<T>(kWasmPageSize / sizeof(T));
+ global = r.builder().AddGlobal<T>(kWasmS128);
+ r.builder().WriteMemory(&memory[lanes_s], sentinel);
+ // Splat splat_value, then only load and replace a single lane with the
+ // sentinel value.
+ BUILD(r, WASM_I32V(mem_index), const_op, splat_value,
+ WASM_SIMD_OP(splat_op), WASM_SIMD_OP(load_op), alignment, offset,
+ lane, kExprGlobalSet, 0, WASM_ONE);
+ };
+
+ auto check_results = [=](T* global, int sentinel_lane = 0) {
+ // Only one lane is loaded, the rest of the lanes are unchanged.
+ for (int i = 0; i < lanes_s; i++) {
+ T expected = i == sentinel_lane ? sentinel : static_cast<T>(splat_value);
+ CHECK_EQ(expected, ReadLittleEndianValue<T>(&global[i]));
+ }
+ };
+
+ for (int lane_index = 0; lane_index < lanes_s; ++lane_index) {
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
+ build_fn(r, mem_index, lane_index, /*alignment=*/0, /*offset=*/0);
+ r.Call();
+ check_results(global, lane_index);
+ }
+
+ // Check all possible alignments.
+ constexpr int max_alignment = base::bits::CountTrailingZeros(sizeof(T));
+ for (byte alignment = 0; alignment <= max_alignment; ++alignment) {
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
+ build_fn(r, mem_index, /*lane=*/0, alignment, /*offset=*/0);
+ r.Call();
+ check_results(global);
+ }
+
+ {
+ // Use memarg to specify offset.
+ int lane_index = 0;
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
+ build_fn(r, /*mem_index=*/0, /*lane=*/0, /*alignment=*/0,
+ /*offset=*/mem_index);
+ r.Call();
+ check_results(global, lane_index);
+ }
+
+ // Test for OOB.
+ {
+ WasmRunner<int32_t, uint32_t> r(execution_tier, lower_simd);
+ r.builder().AddMemoryElems<T>(kWasmPageSize / sizeof(T));
+ r.builder().AddGlobal<T>(kWasmS128);
+
+ BUILD(r, WASM_GET_LOCAL(0), const_op, splat_value, WASM_SIMD_OP(splat_op),
+ WASM_SIMD_OP(load_op), ZERO_ALIGNMENT, ZERO_OFFSET, 0, kExprGlobalSet,
+ 0, WASM_ONE);
+
+    // Load lane loads sizeof(T) bytes.
+ for (uint32_t index = kWasmPageSize - (sizeof(T) - 1);
+ index < kWasmPageSize; ++index) {
+ CHECK_TRAP(r.Call(index));
+ }
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(S128Load8Lane) {
+ RunLoadLaneTest<int8_t>(execution_tier, lower_simd, kExprS128Load8Lane,
+ kExprI8x16Splat);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(S128Load16Lane) {
+ RunLoadLaneTest<int16_t>(execution_tier, lower_simd, kExprS128Load16Lane,
+ kExprI16x8Splat);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(S128Load32Lane) {
+ RunLoadLaneTest<int32_t>(execution_tier, lower_simd, kExprS128Load32Lane,
+ kExprI32x4Splat);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(S128Load64Lane) {
+ RunLoadLaneTest<int64_t>(execution_tier, lower_simd, kExprS128Load64Lane,
+ kExprI64x2Splat);
+}
+
+template <typename T>
+void RunStoreLaneTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode store_op, WasmOpcode splat_op) {
+ FLAG_SCOPE(wasm_simd_post_mvp);
+ if (execution_tier == TestExecutionTier::kLiftoff) {
+ // Not yet implemented.
+ return;
+ }
+
+ constexpr int lanes = kSimd128Size / sizeof(T);
+  constexpr int mem_index = 16;  // Store to mem index 16 (bytes).
+ constexpr int splat_value = 33;
+ WasmOpcode const_op =
+ splat_op == kExprI64x2Splat ? kExprI64Const : kExprI32Const;
+
+ for (int lane_index = 0; lane_index < lanes; lane_index++) {
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
+ T* memory = r.builder().AddMemoryElems<T>(kWasmPageSize / sizeof(T));
+
+    // Splat splat_value, then store only a single lane of it to memory.
+ BUILD(r, WASM_I32V(mem_index), const_op, splat_value,
+ WASM_SIMD_OP(splat_op), WASM_SIMD_OP(store_op), ZERO_ALIGNMENT,
+ ZERO_OFFSET, lane_index, WASM_ONE);
- // Only first lane is set to sentinel.
- CHECK_EQ(sentinel, ReadLittleEndianValue<S>(&global[0]));
- // The other lanes are zero.
- for (int i = 1; i < lanes_s; i++) {
- CHECK_EQ(S{0}, ReadLittleEndianValue<S>(&global[i]));
+ r.builder().BlankMemory();
+ r.Call();
+
+ for (int i = 0; i < lanes; i++) {
+ CHECK_EQ(0, r.builder().ReadMemory(&memory[i]));
+ }
+
+ CHECK_EQ(splat_value, r.builder().ReadMemory(&memory[lanes]));
+
+ for (int i = lanes + 1; i < lanes * 2; i++) {
+ CHECK_EQ(0, r.builder().ReadMemory(&memory[i]));
+ }
+ }
+
+ // OOB stores
+ {
+ WasmRunner<int32_t, uint32_t> r(execution_tier, lower_simd);
+ r.builder().AddMemoryElems<T>(kWasmPageSize / sizeof(T));
+
+ BUILD(r, WASM_GET_LOCAL(0), const_op, splat_value, WASM_SIMD_OP(splat_op),
+ WASM_SIMD_OP(store_op), ZERO_ALIGNMENT, ZERO_OFFSET, 0, WASM_ONE);
+
+ // StoreLane stores sizeof(T) bytes.
+ for (uint32_t index = kWasmPageSize - (sizeof(T) - 1);
+ index < kWasmPageSize; ++index) {
+ CHECK_TRAP(r.Call(index));
+ }
}
}
-WASM_SIMD_TEST_NO_LOWERING(S128LoadMem32Zero) {
- RunLoadZeroTest<int32_t>(execution_tier, lower_simd, kExprS128LoadMem32Zero);
+WASM_SIMD_TEST_NO_LOWERING(S128Store8Lane) {
+ RunStoreLaneTest<int8_t>(execution_tier, lower_simd, kExprS128Store8Lane,
+ kExprI8x16Splat);
}
-WASM_SIMD_TEST_NO_LOWERING(S128LoadMem64Zero) {
- RunLoadZeroTest<int64_t>(execution_tier, lower_simd, kExprS128LoadMem64Zero);
+WASM_SIMD_TEST_NO_LOWERING(S128Store16Lane) {
+ RunStoreLaneTest<int16_t>(execution_tier, lower_simd, kExprS128Store16Lane,
+ kExprI16x8Splat);
}
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+
+WASM_SIMD_TEST_NO_LOWERING(S128Store32Lane) {
+ RunStoreLaneTest<int32_t>(execution_tier, lower_simd, kExprS128Store32Lane,
+ kExprI32x4Splat);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(S128Store64Lane) {
+ RunStoreLaneTest<int64_t>(execution_tier, lower_simd, kExprS128Store64Lane,
+ kExprI64x2Splat);
+}
+
+#endif // V8_TARGET_ARCH_X64
#define WASM_SIMD_ANYTRUE_TEST(format, lanes, max, param_type) \
WASM_SIMD_TEST(S##format##AnyTrue) { \
@@ -3634,13 +4055,10 @@ WASM_SIMD_TEST_NO_LOWERING(S128LoadMem64Zero) {
r, \
WASM_SET_LOCAL(simd, WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(0))), \
WASM_SIMD_UNOP(kExprV##format##AnyTrue, WASM_GET_LOCAL(simd))); \
- DCHECK_EQ(1, r.Call(max)); \
- DCHECK_EQ(1, r.Call(5)); \
- DCHECK_EQ(0, r.Call(0)); \
+ CHECK_EQ(1, r.Call(max)); \
+ CHECK_EQ(1, r.Call(5)); \
+ CHECK_EQ(0, r.Call(0)); \
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
-WASM_SIMD_ANYTRUE_TEST(64x2, 2, 0xffffffffffffffff, int64_t)
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_ANYTRUE_TEST(32x4, 4, 0xffffffff, int32_t)
WASM_SIMD_ANYTRUE_TEST(16x8, 8, 0xffff, int32_t)
WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff, int32_t)
@@ -3648,13 +4066,13 @@ WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff, int32_t)
// Special any_true test case that splats a -0.0 double into an i64x2.
// This specifically ensures that our implementation correctly handles that
// 0.0 and -0.0 differ in an anytrue (IEEE 754 says they compare equal).
-WASM_SIMD_TEST_NO_LOWERING(V32x4AnytrueWithNegativeZero) {
+WASM_SIMD_TEST(V32x4AnytrueWithNegativeZero) {
WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(simd, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(0))),
WASM_SIMD_UNOP(kExprV32x4AnyTrue, WASM_GET_LOCAL(simd)));
- DCHECK_EQ(1, r.Call(0x8000000000000000));
- DCHECK_EQ(0, r.Call(0x0000000000000000));
+ CHECK_EQ(1, r.Call(0x8000000000000000));
+ CHECK_EQ(0, r.Call(0x0000000000000000));
}
#define WASM_SIMD_ALLTRUE_TEST(format, lanes, max, param_type) \
@@ -3667,13 +4085,10 @@ WASM_SIMD_TEST_NO_LOWERING(V32x4AnytrueWithNegativeZero) {
r, \
WASM_SET_LOCAL(simd, WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(0))), \
WASM_SIMD_UNOP(kExprV##format##AllTrue, WASM_GET_LOCAL(simd))); \
- DCHECK_EQ(1, r.Call(max)); \
- DCHECK_EQ(1, r.Call(0x1)); \
- DCHECK_EQ(0, r.Call(0)); \
+ CHECK_EQ(1, r.Call(max)); \
+ CHECK_EQ(1, r.Call(0x1)); \
+ CHECK_EQ(0, r.Call(0)); \
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
-WASM_SIMD_ALLTRUE_TEST(64x2, 2, 0xffffffffffffffff, int64_t)
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_SIMD_ALLTRUE_TEST(32x4, 4, 0xffffffff, int32_t)
WASM_SIMD_ALLTRUE_TEST(16x8, 8, 0xffff, int32_t)
WASM_SIMD_ALLTRUE_TEST(8x16, 16, 0xff, int32_t)
@@ -3688,7 +4103,7 @@ WASM_SIMD_TEST(BitSelect) {
WASM_SIMD_I32x4_SPLAT(WASM_I32V(0)),
WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(0)))),
WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_GET_LOCAL(simd)));
- DCHECK_EQ(0x01020304, r.Call(0xFFFFFFFF));
+ CHECK_EQ(0x01020304, r.Call(0xFFFFFFFF));
}
void RunSimdConstTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
@@ -3703,7 +4118,7 @@ void RunSimdConstTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
}
}
-WASM_SIMD_TEST_NO_LOWERING(S128Const) {
+WASM_SIMD_TEST(S128Const) {
std::array<uint8_t, kSimd128Size> expected;
// Test for generic constant
for (int i = 0; i < kSimd128Size; i++) {
@@ -3727,12 +4142,12 @@ WASM_SIMD_TEST_NO_LOWERING(S128Const) {
RunSimdConstTest(execution_tier, lower_simd, expected);
}
-WASM_SIMD_TEST_NO_LOWERING(S128ConstAllZero) {
+WASM_SIMD_TEST(S128ConstAllZero) {
std::array<uint8_t, kSimd128Size> expected = {0};
RunSimdConstTest(execution_tier, lower_simd, expected);
}
-WASM_SIMD_TEST_NO_LOWERING(S128ConstAllOnes) {
+WASM_SIMD_TEST(S128ConstAllOnes) {
std::array<uint8_t, kSimd128Size> expected;
// Test for generic constant
for (int i = 0; i < kSimd128Size; i++) {
@@ -3755,27 +4170,27 @@ void RunI8x16MixedRelationalOpTest(TestExecutionTier execution_tier,
WASM_GET_LOCAL(temp2))),
WASM_SIMD_I8x16_EXTRACT_LANE(0, WASM_GET_LOCAL(temp3)));
- DCHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7fff)),
- r.Call(0xff, 0x7fff));
- DCHECK_EQ(expected_op(0xfe, static_cast<uint8_t>(0x7fff)),
- r.Call(0xfe, 0x7fff));
- DCHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7ffe)),
- r.Call(0xff, 0x7ffe));
+ CHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7fff)),
+ r.Call(0xff, 0x7fff));
+ CHECK_EQ(expected_op(0xfe, static_cast<uint8_t>(0x7fff)),
+ r.Call(0xfe, 0x7fff));
+ CHECK_EQ(expected_op(0xff, static_cast<uint8_t>(0x7ffe)),
+ r.Call(0xff, 0x7ffe));
}
-WASM_SIMD_TEST_NO_LOWERING(I8x16LeUMixed) {
+WASM_SIMD_TEST(I8x16LeUMixed) {
RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16LeU,
UnsignedLessEqual);
}
-WASM_SIMD_TEST_NO_LOWERING(I8x16LtUMixed) {
+WASM_SIMD_TEST(I8x16LtUMixed) {
RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16LtU,
UnsignedLess);
}
-WASM_SIMD_TEST_NO_LOWERING(I8x16GeUMixed) {
+WASM_SIMD_TEST(I8x16GeUMixed) {
RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16GeU,
UnsignedGreaterEqual);
}
-WASM_SIMD_TEST_NO_LOWERING(I8x16GtUMixed) {
+WASM_SIMD_TEST(I8x16GtUMixed) {
RunI8x16MixedRelationalOpTest(execution_tier, lower_simd, kExprI8x16GtU,
UnsignedGreater);
}
@@ -3794,32 +4209,32 @@ void RunI16x8MixedRelationalOpTest(TestExecutionTier execution_tier,
WASM_GET_LOCAL(temp2))),
WASM_SIMD_I16x8_EXTRACT_LANE(0, WASM_GET_LOCAL(temp3)));
- DCHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7fffffff)),
- r.Call(0xffff, 0x7fffffff));
- DCHECK_EQ(expected_op(0xfeff, static_cast<uint16_t>(0x7fffffff)),
- r.Call(0xfeff, 0x7fffffff));
- DCHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7ffffeff)),
- r.Call(0xffff, 0x7ffffeff));
+ CHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7fffffff)),
+ r.Call(0xffff, 0x7fffffff));
+ CHECK_EQ(expected_op(0xfeff, static_cast<uint16_t>(0x7fffffff)),
+ r.Call(0xfeff, 0x7fffffff));
+ CHECK_EQ(expected_op(0xffff, static_cast<uint16_t>(0x7ffffeff)),
+ r.Call(0xffff, 0x7ffffeff));
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8LeUMixed) {
+WASM_SIMD_TEST(I16x8LeUMixed) {
RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8LeU,
UnsignedLessEqual);
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8LtUMixed) {
+WASM_SIMD_TEST(I16x8LtUMixed) {
RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8LtU,
UnsignedLess);
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8GeUMixed) {
+WASM_SIMD_TEST(I16x8GeUMixed) {
RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8GeU,
UnsignedGreaterEqual);
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8GtUMixed) {
+WASM_SIMD_TEST(I16x8GtUMixed) {
RunI16x8MixedRelationalOpTest(execution_tier, lower_simd, kExprI16x8GtU,
UnsignedGreater);
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8ExtractLaneU_I8x16Splat) {
+WASM_SIMD_TEST(I16x8ExtractLaneU_I8x16Splat) {
// Test that we are correctly signed/unsigned extending when extracting.
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
byte simd_val = r.AllocateLocal(kWasmS128);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc b/deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc
new file mode 100644
index 0000000000..6026d63bfc
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc
@@ -0,0 +1,184 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-module-builder.h"
+#include "src/wasm/wasm-objects-inl.h"
+#include "test/cctest/cctest.h"
+#include "test/common/wasm/flag-utils.h"
+#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+#include "test/common/wasm/wasm-module-runner.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace test_run_wasm_wrappers {
+
+using testing::CompileAndInstantiateForTesting;
+
+#ifdef V8_TARGET_ARCH_X64
+namespace {
+void Cleanup() {
+  // By sending a low memory notification, we will try hard to collect all
+ // garbage and will therefore also invoke all weak callbacks of actually
+ // unreachable persistent handles.
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ reinterpret_cast<v8::Isolate*>(isolate)->LowMemoryNotification();
+}
+
+} // namespace
+
+TEST(CallCounter) {
+ {
+ // This test assumes use of the generic wrapper.
+ FlagScope<bool> use_wasm_generic_wrapper(&FLAG_wasm_generic_wrapper, true);
+
+ TestSignatures sigs;
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ // Define the Wasm function.
+ WasmModuleBuilder* builder = zone.New<WasmModuleBuilder>(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_ii());
+ f->builder()->AddExport(CStrVector("main"), f);
+ byte code[] = {WASM_I32_MUL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_END};
+ f->EmitCode(code, sizeof(code));
+
+ // Compile module.
+ ZoneBuffer buffer(&zone);
+ builder->WriteTo(&buffer);
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ testing::SetupIsolateForWasmModule(isolate);
+ ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
+ MaybeHandle<WasmInstanceObject> instance = CompileAndInstantiateForTesting(
+ isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()));
+
+ MaybeHandle<WasmExportedFunction> maybe_export =
+ testing::GetExportedFunction(isolate, instance.ToHandleChecked(),
+ "main");
+ Handle<WasmExportedFunction> main_export = maybe_export.ToHandleChecked();
+
+    // Check that the counter initially has a value of 0.
+ CHECK_EQ(main_export->shared().wasm_exported_function_data().call_count(),
+ 0);
+
+ // Call the exported Wasm function and get the result.
+ Handle<Object> params[2] = {Handle<Object>(Smi::FromInt(6), isolate),
+ Handle<Object>(Smi::FromInt(7), isolate)};
+ static const int32_t kExpectedValue = 42;
+ Handle<Object> receiver = isolate->factory()->undefined_value();
+ MaybeHandle<Object> maybe_result =
+ Execution::Call(isolate, main_export, receiver, 2, params);
+ Handle<Object> result = maybe_result.ToHandleChecked();
+
+    // Check that the counter now has a value of 1.
+ CHECK_EQ(main_export->shared().wasm_exported_function_data().call_count(),
+ 1);
+
+ CHECK(result->IsSmi() && Smi::ToInt(*result) == kExpectedValue);
+ }
+ Cleanup();
+}
+
+TEST(WrapperReplacement) {
+ {
+ // This test assumes use of the generic wrapper.
+ FlagScope<bool> use_wasm_generic_wrapper(&FLAG_wasm_generic_wrapper, true);
+
+ TestSignatures sigs;
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ // Define the Wasm function.
+ WasmModuleBuilder* builder = zone.New<WasmModuleBuilder>(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
+ f->builder()->AddExport(CStrVector("main"), f);
+ byte code[] = {WASM_RETURN1(WASM_GET_LOCAL(0)), WASM_END};
+ f->EmitCode(code, sizeof(code));
+
+ // Compile module.
+ ZoneBuffer buffer(&zone);
+ builder->WriteTo(&buffer);
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ testing::SetupIsolateForWasmModule(isolate);
+ ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
+ MaybeHandle<WasmInstanceObject> instance = CompileAndInstantiateForTesting(
+ isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()));
+
+ // Get the exported function.
+ MaybeHandle<WasmExportedFunction> maybe_export =
+ testing::GetExportedFunction(isolate, instance.ToHandleChecked(),
+ "main");
+ Handle<WasmExportedFunction> main_export = maybe_export.ToHandleChecked();
+
+    // Check that the counter initially has a value of 0.
+ CHECK_EQ(main_export->shared().wasm_exported_function_data().call_count(),
+ 0);
+ CHECK_GT(kGenericWrapperThreshold, 0);
+
+ // Call the exported Wasm function as many times as required to reach the
+ // threshold for compiling the specific wrapper.
+ const int threshold = static_cast<int>(kGenericWrapperThreshold);
+ for (int i = 1; i < threshold; ++i) {
+ // Verify that the wrapper to be used is still the generic one.
+ Code wrapper =
+ main_export->shared().wasm_exported_function_data().wrapper_code();
+ CHECK(wrapper.is_builtin() &&
+ wrapper.builtin_index() == Builtins::kGenericJSToWasmWrapper);
+ // Call the function.
+ int32_t expected_value = i;
+ Handle<Object> params[1] = {
+ Handle<Object>(Smi::FromInt(expected_value), isolate)};
+ Handle<Object> receiver = isolate->factory()->undefined_value();
+ MaybeHandle<Object> maybe_result =
+ Execution::Call(isolate, main_export, receiver, 1, params);
+ Handle<Object> result = maybe_result.ToHandleChecked();
+      // Verify that the counter now has a value of i and that the return
+      // value is correct.
+ CHECK_EQ(main_export->shared().wasm_exported_function_data().call_count(),
+ i);
+ CHECK(result->IsSmi() && Smi::ToInt(*result) == expected_value);
+ }
+
+ // Get the wrapper-code object before making the call that will kick off the
+ // wrapper replacement.
+ Code wrapper_before_call =
+ main_export->shared().wasm_exported_function_data().wrapper_code();
+ // Verify that the wrapper before the call is the generic wrapper.
+ CHECK(wrapper_before_call.is_builtin() &&
+ wrapper_before_call.builtin_index() ==
+ Builtins::kGenericJSToWasmWrapper);
+
+ // Call the exported Wasm function one more time to kick off the wrapper
+ // replacement.
+ int32_t expected_value = 42;
+ Handle<Object> params[1] = {
+ Handle<Object>(Smi::FromInt(expected_value), isolate)};
+ Handle<Object> receiver = isolate->factory()->undefined_value();
+ MaybeHandle<Object> maybe_result =
+ Execution::Call(isolate, main_export, receiver, 1, params);
+ Handle<Object> result = maybe_result.ToHandleChecked();
+ // Check that the counter has the threshold value and the result is correct.
+ CHECK_EQ(main_export->shared().wasm_exported_function_data().call_count(),
+ kGenericWrapperThreshold);
+ CHECK(result->IsSmi() && Smi::ToInt(*result) == expected_value);
+
+ // Verify that the wrapper-code object has changed.
+ Code wrapper_after_call =
+ main_export->shared().wasm_exported_function_data().wrapper_code();
+ CHECK_NE(wrapper_after_call, wrapper_before_call);
+ // Verify that the wrapper is now a specific one.
+ CHECK(wrapper_after_call.kind() == CodeKind::JS_TO_WASM_FUNCTION);
+ }
+ Cleanup();
+}
+#endif
+
+} // namespace test_run_wasm_wrappers
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index d96cd96874..8a18447ceb 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -3490,6 +3490,29 @@ WASM_EXEC_TEST(IfInsideUnreachable) {
CHECK_EQ(17, r.Call());
}
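+
+// Test that a call_indirect through an uninitialized (null) table entry
+// traps instead of crashing.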
+WASM_EXEC_TEST(IndirectNull) {
+ WasmRunner<int32_t> r(execution_tier);
+ FunctionSig sig(1, 0, &kWasmI32);
+ byte sig_index = r.builder().AddSignature(&sig);
+ r.builder().AddIndirectFunctionTable(nullptr, 1);
+
+ BUILD(r, WASM_CALL_INDIRECT(sig_index, WASM_I32V(0)));
+
+ CHECK_TRAP(r.Call());
+}
+
+WASM_EXEC_TEST(IndirectNullTyped) {
+ WasmRunner<int32_t> r(execution_tier);
+ FunctionSig sig(1, 0, &kWasmI32);
+ byte sig_index = r.builder().AddSignature(&sig);
+ r.builder().AddIndirectFunctionTable(nullptr, 1,
+ ValueType::Ref(sig_index, kNullable));
+
+ BUILD(r, WASM_CALL_INDIRECT(sig_index, WASM_I32V(0)));
+
+ CHECK_TRAP(r.Call());
+}
+
// This test targets binops in Liftoff.
// Initialize a number of local variables to force them into different
// registers, then perform a binary operation on two of the locals.
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index 0f594e63fc..9bb5182cfa 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -58,7 +58,7 @@ class MockPlatform final : public TestPlatform {
void ExecuteTasks() {
for (auto* job_handle : job_handles_) {
- if (job_handle->IsRunning()) job_handle->Join();
+ if (job_handle->IsValid()) job_handle->Join();
}
task_runner_->ExecuteTasks();
}
@@ -70,9 +70,18 @@ class MockPlatform final : public TestPlatform {
tasks_.push(std::move(task));
}
+ void PostNonNestableTask(std::unique_ptr<Task> task) override {
+ PostTask(std::move(task));
+ }
+
void PostDelayedTask(std::unique_ptr<Task> task,
double delay_in_seconds) override {
- tasks_.push(std::move(task));
+ PostTask(std::move(task));
+ }
+
+ void PostNonNestableDelayedTask(std::unique_ptr<Task> task,
+ double delay_in_seconds) override {
+ PostTask(std::move(task));
}
void PostIdleTask(std::unique_ptr<IdleTask> task) override {
@@ -80,6 +89,8 @@ class MockPlatform final : public TestPlatform {
}
bool IdleTasksEnabled() override { return false; }
+ bool NonNestableTasksEnabled() const override { return true; }
+ bool NonNestableDelayedTasksEnabled() const override { return true; }
void ExecuteTasks() {
while (!tasks_.empty()) {
@@ -111,8 +122,11 @@ class MockPlatform final : public TestPlatform {
}
void Join() override { orig_handle_->Join(); }
void Cancel() override { orig_handle_->Cancel(); }
+ void CancelAndDetach() override { orig_handle_->CancelAndDetach(); }
bool IsCompleted() override { return orig_handle_->IsCompleted(); }
+ bool IsActive() override { return orig_handle_->IsActive(); }
bool IsRunning() override { return orig_handle_->IsRunning(); }
+ bool IsValid() override { return orig_handle_->IsValid(); }
private:
std::unique_ptr<JobHandle> orig_handle_;
@@ -133,9 +147,11 @@ enum class CompilationState {
class TestResolver : public CompilationResultResolver {
public:
- TestResolver(CompilationState* state, std::string* error_message,
+ TestResolver(i::Isolate* isolate, CompilationState* state,
+ std::string* error_message,
std::shared_ptr<NativeModule>* native_module)
- : state_(state),
+ : isolate_(isolate),
+ state_(state),
error_message_(error_message),
native_module_(native_module) {}
@@ -149,11 +165,12 @@ class TestResolver : public CompilationResultResolver {
void OnCompilationFailed(i::Handle<i::Object> error_reason) override {
*state_ = CompilationState::kFailed;
Handle<String> str =
- Object::ToString(CcTest::i_isolate(), error_reason).ToHandleChecked();
+ Object::ToString(isolate_, error_reason).ToHandleChecked();
error_message_->assign(str->ToCString().get());
}
private:
+ i::Isolate* isolate_;
CompilationState* const state_;
std::string* const error_message_;
std::shared_ptr<NativeModule>* const native_module_;
@@ -161,18 +178,16 @@ class TestResolver : public CompilationResultResolver {
class StreamTester {
public:
- StreamTester()
+ explicit StreamTester(v8::Isolate* isolate)
: zone_(&allocator_, "StreamTester"),
- internal_scope_(CcTest::i_isolate()) {
- v8::Isolate* isolate = CcTest::isolate();
- i::Isolate* i_isolate = CcTest::i_isolate();
-
+ internal_scope_(reinterpret_cast<i::Isolate*>(isolate)) {
+ Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
v8::Local<v8::Context> context = isolate->GetCurrentContext();
stream_ = i_isolate->wasm_engine()->StartStreamingCompilation(
i_isolate, WasmFeatures::All(), v8::Utils::OpenHandle(*context),
"WebAssembly.compileStreaming()",
- std::make_shared<TestResolver>(&state_, &error_message_,
+ std::make_shared<TestResolver>(i_isolate, &state_, &error_message_,
&native_module_));
}
@@ -217,21 +232,29 @@ class StreamTester {
};
} // namespace
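+// Spins up a fresh isolate on the MockPlatform, runs the streaming-test
+// body inside a new context, and disposes the isolate afterwards.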
+#define RUN_STREAM(name) \
+ MockPlatform mock_platform; \
+ CHECK_EQ(V8::GetCurrentPlatform(), &mock_platform); \
+ v8::Isolate::CreateParams create_params; \
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); \
+ v8::Isolate* isolate = v8::Isolate::New(create_params); \
+ { \
+ v8::HandleScope handle_scope(isolate); \
+ v8::Local<v8::Context> context = v8::Context::New(isolate); \
+ v8::Context::Scope context_scope(context); \
+ RunStream_##name(&mock_platform, isolate); \
+ } \
+ isolate->Dispose();
+
#define STREAM_TEST(name) \
- void RunStream_##name(); \
- TEST(Async##name) { \
- MockPlatform platform; \
- CcTest::InitializeVM(); \
- RunStream_##name(); \
- } \
+ void RunStream_##name(MockPlatform*, v8::Isolate*); \
+ UNINITIALIZED_TEST(Async##name) { RUN_STREAM(name); } \
\
- TEST(SingleThreaded##name) { \
+ UNINITIALIZED_TEST(SingleThreaded##name) { \
i::FlagScope<bool> single_threaded_scope(&i::FLAG_single_threaded, true); \
- MockPlatform platform; \
- CcTest::InitializeVM(); \
- RunStream_##name(); \
+ RUN_STREAM(name); \
} \
- void RunStream_##name()
+ void RunStream_##name(MockPlatform* platform, v8::Isolate* isolate)
// Create a valid module with 3 functions.
ZoneBuffer GetValidModuleBytes(Zone* zone) {
@@ -259,9 +282,10 @@ ZoneBuffer GetValidModuleBytes(Zone* zone) {
// Create the same valid module as above and serialize it to test streaming
// with compiled module caching.
-ZoneBuffer GetValidCompiledModuleBytes(Zone* zone, ZoneBuffer wire_bytes) {
+ZoneBuffer GetValidCompiledModuleBytes(v8::Isolate* isolate, Zone* zone,
+ ZoneBuffer wire_bytes) {
// Use a tester to compile to a NativeModule.
- StreamTester tester;
+ StreamTester tester(isolate);
tester.OnBytesReceived(wire_bytes.begin(), wire_bytes.size());
tester.FinishStream();
tester.RunCompilerTasks();
@@ -282,7 +306,7 @@ ZoneBuffer GetValidCompiledModuleBytes(Zone* zone, ZoneBuffer wire_bytes) {
// Test that all bytes arrive before doing any compilation. FinishStream is
// called immediately.
STREAM_TEST(TestAllBytesArriveImmediatelyStreamFinishesFirst) {
- StreamTester tester;
+ StreamTester tester(isolate);
ZoneBuffer buffer = GetValidModuleBytes(tester.zone());
tester.OnBytesReceived(buffer.begin(), buffer.end() - buffer.begin());
@@ -296,7 +320,7 @@ STREAM_TEST(TestAllBytesArriveImmediatelyStreamFinishesFirst) {
// Test that all bytes arrive before doing any compilation. FinishStream is
// called after the compilation is done.
STREAM_TEST(TestAllBytesArriveAOTCompilerFinishesFirst) {
- StreamTester tester;
+ StreamTester tester(isolate);
ZoneBuffer buffer = GetValidModuleBytes(tester.zone());
tester.OnBytesReceived(buffer.begin(), buffer.end() - buffer.begin());
@@ -323,11 +347,12 @@ size_t GetFunctionOffset(i::Isolate* isolate, const uint8_t* buffer,
// Test that some functions come in the beginning, some come after some
// functions already got compiled.
STREAM_TEST(TestCutAfterOneFunctionStreamFinishesFirst) {
- i::Isolate* isolate = CcTest::i_isolate();
- StreamTester tester;
+ StreamTester tester(isolate);
ZoneBuffer buffer = GetValidModuleBytes(tester.zone());
- size_t offset = GetFunctionOffset(isolate, buffer.begin(), buffer.size(), 1);
+ Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ size_t offset =
+ GetFunctionOffset(i_isolate, buffer.begin(), buffer.size(), 1);
tester.OnBytesReceived(buffer.begin(), offset);
tester.RunCompilerTasks();
CHECK(tester.IsPromisePending());
@@ -341,11 +366,12 @@ STREAM_TEST(TestCutAfterOneFunctionStreamFinishesFirst) {
// functions already got compiled. Call FinishStream after the compilation is
// done.
STREAM_TEST(TestCutAfterOneFunctionCompilerFinishesFirst) {
- i::Isolate* isolate = CcTest::i_isolate();
- StreamTester tester;
+ StreamTester tester(isolate);
ZoneBuffer buffer = GetValidModuleBytes(tester.zone());
- size_t offset = GetFunctionOffset(isolate, buffer.begin(), buffer.size(), 1);
+ Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ size_t offset =
+ GetFunctionOffset(i_isolate, buffer.begin(), buffer.size(), 1);
tester.OnBytesReceived(buffer.begin(), offset);
tester.RunCompilerTasks();
CHECK(tester.IsPromisePending());
@@ -384,7 +410,7 @@ ZoneBuffer GetModuleWithInvalidSection(Zone* zone) {
// Test an error in a section, found by the ModuleDecoder.
STREAM_TEST(TestErrorInSectionStreamFinishesFirst) {
- StreamTester tester;
+ StreamTester tester(isolate);
ZoneBuffer buffer = GetModuleWithInvalidSection(tester.zone());
tester.OnBytesReceived(buffer.begin(), buffer.end() - buffer.begin());
@@ -396,7 +422,7 @@ STREAM_TEST(TestErrorInSectionStreamFinishesFirst) {
}
STREAM_TEST(TestErrorInSectionCompilerFinishesFirst) {
- StreamTester tester;
+ StreamTester tester(isolate);
ZoneBuffer buffer = GetModuleWithInvalidSection(tester.zone());
tester.OnBytesReceived(buffer.begin(), buffer.end() - buffer.begin());
@@ -408,7 +434,7 @@ STREAM_TEST(TestErrorInSectionCompilerFinishesFirst) {
}
STREAM_TEST(TestErrorInSectionWithCuts) {
- StreamTester tester;
+ StreamTester tester(isolate);
ZoneBuffer buffer = GetModuleWithInvalidSection(tester.zone());
const uint8_t* current = buffer.begin();
@@ -442,7 +468,7 @@ ZoneBuffer GetModuleWithInvalidSectionSize(Zone* zone) {
}
STREAM_TEST(TestErrorInSectionSizeStreamFinishesFirst) {
- StreamTester tester;
+ StreamTester tester(isolate);
ZoneBuffer buffer = GetModuleWithInvalidSectionSize(tester.zone());
tester.OnBytesReceived(buffer.begin(), buffer.end() - buffer.begin());
tester.FinishStream();
@@ -452,7 +478,7 @@ STREAM_TEST(TestErrorInSectionSizeStreamFinishesFirst) {
}
STREAM_TEST(TestErrorInSectionSizeCompilerFinishesFirst) {
- StreamTester tester;
+ StreamTester tester(isolate);
ZoneBuffer buffer = GetModuleWithInvalidSectionSize(tester.zone());
tester.OnBytesReceived(buffer.begin(), buffer.end() - buffer.begin());
tester.RunCompilerTasks();
@@ -463,7 +489,7 @@ STREAM_TEST(TestErrorInSectionSizeCompilerFinishesFirst) {
}
STREAM_TEST(TestErrorInSectionSizeWithCuts) {
- StreamTester tester;
+ StreamTester tester(isolate);
ZoneBuffer buffer = GetModuleWithInvalidSectionSize(tester.zone());
const uint8_t* current = buffer.begin();
size_t remaining = buffer.end() - buffer.begin();
@@ -485,7 +511,7 @@ STREAM_TEST(TestErrorInSectionSizeWithCuts) {
// functions count in the code section which differs from the functions count in
// the function section.
STREAM_TEST(TestErrorInCodeSectionDetectedByModuleDecoder) {
- StreamTester tester;
+ StreamTester tester(isolate);
uint8_t code[] = {
U32V_1(4), // body size
@@ -524,7 +550,7 @@ STREAM_TEST(TestErrorInCodeSectionDetectedByModuleDecoder) {
// is an invalid function body size, so that there are not enough bytes in the
// code section for the function body.
STREAM_TEST(TestErrorInCodeSectionDetectedByStreamingDecoder) {
- StreamTester tester;
+ StreamTester tester(isolate);
uint8_t code[] = {
U32V_1(26), // !!! invalid body size !!!
@@ -563,7 +589,7 @@ STREAM_TEST(TestErrorInCodeSectionDetectedByStreamingDecoder) {
// Test an error in the code section, found by the Compiler. The error is an
// invalid return type.
STREAM_TEST(TestErrorInCodeSectionDetectedByCompiler) {
- StreamTester tester;
+ StreamTester tester(isolate);
uint8_t code[] = {
U32V_1(4), // !!! invalid body size !!!
@@ -611,14 +637,14 @@ STREAM_TEST(TestErrorInCodeSectionDetectedByCompiler) {
// Test Abort before any bytes arrive.
STREAM_TEST(TestAbortImmediately) {
- StreamTester tester;
+ StreamTester tester(isolate);
tester.stream()->Abort();
tester.RunCompilerTasks();
}
// Test Abort within a section.
STREAM_TEST(TestAbortWithinSection1) {
- StreamTester tester;
+ StreamTester tester(isolate);
const uint8_t bytes[] = {
WASM_MODULE_HEADER, // module header
kTypeSectionCode, // section code
@@ -634,7 +660,7 @@ STREAM_TEST(TestAbortWithinSection1) {
// Test Abort within a section.
STREAM_TEST(TestAbortWithinSection2) {
- StreamTester tester;
+ StreamTester tester(isolate);
const uint8_t bytes[] = {
WASM_MODULE_HEADER, // module header
kTypeSectionCode, // section code
@@ -654,7 +680,7 @@ STREAM_TEST(TestAbortWithinSection2) {
// Test Abort just before the code section.
STREAM_TEST(TestAbortAfterSection) {
- StreamTester tester;
+ StreamTester tester(isolate);
const uint8_t bytes[] = {
WASM_MODULE_HEADER, // module header
kTypeSectionCode, // section code
@@ -671,7 +697,7 @@ STREAM_TEST(TestAbortAfterSection) {
// Test Abort after the function count in the code section. The compiler tasks
// execute before the abort.
STREAM_TEST(TestAbortAfterFunctionsCount1) {
- StreamTester tester;
+ StreamTester tester(isolate);
const uint8_t bytes[] = {
WASM_MODULE_HEADER, // module header
kTypeSectionCode, // section code
@@ -697,7 +723,7 @@ STREAM_TEST(TestAbortAfterFunctionsCount1) {
// Test Abort after the function count in the code section. The compiler tasks
// do not execute before the abort.
STREAM_TEST(TestAbortAfterFunctionsCount2) {
- StreamTester tester;
+ StreamTester tester(isolate);
const uint8_t bytes[] = {
WASM_MODULE_HEADER, // module header
kTypeSectionCode, // section code
@@ -722,7 +748,7 @@ STREAM_TEST(TestAbortAfterFunctionsCount2) {
// Test Abort after some functions got compiled. The compiler tasks execute
// before the abort.
STREAM_TEST(TestAbortAfterFunctionGotCompiled1) {
- StreamTester tester;
+ StreamTester tester(isolate);
uint8_t code[] = {
U32V_1(4), // !!! invalid body size !!!
@@ -756,7 +782,7 @@ STREAM_TEST(TestAbortAfterFunctionGotCompiled1) {
// Test Abort after some functions got compiled. The compiler tasks execute
// before the abort.
STREAM_TEST(TestAbortAfterFunctionGotCompiled2) {
- StreamTester tester;
+ StreamTester tester(isolate);
uint8_t code[] = {
U32V_1(4), // !!! invalid body size !!!
@@ -788,7 +814,7 @@ STREAM_TEST(TestAbortAfterFunctionGotCompiled2) {
// Test Abort after all functions got compiled.
STREAM_TEST(TestAbortAfterCodeSection1) {
- StreamTester tester;
+ StreamTester tester(isolate);
uint8_t code[] = {
U32V_1(4), // body size
@@ -824,7 +850,7 @@ STREAM_TEST(TestAbortAfterCodeSection1) {
// Test Abort after all functions got compiled.
STREAM_TEST(TestAbortAfterCodeSection2) {
- StreamTester tester;
+ StreamTester tester(isolate);
uint8_t code[] = {
U32V_1(4), // body size
@@ -858,7 +884,7 @@ STREAM_TEST(TestAbortAfterCodeSection2) {
}
STREAM_TEST(TestAbortAfterCompilationError1) {
- StreamTester tester;
+ StreamTester tester(isolate);
uint8_t code[] = {
U32V_1(4), // !!! invalid body size !!!
@@ -900,7 +926,7 @@ STREAM_TEST(TestAbortAfterCompilationError1) {
}
STREAM_TEST(TestAbortAfterCompilationError2) {
- StreamTester tester;
+ StreamTester tester(isolate);
uint8_t code[] = {
U32V_1(4), // !!! invalid body size !!!
@@ -941,7 +967,7 @@ STREAM_TEST(TestAbortAfterCompilationError2) {
}
STREAM_TEST(TestOnlyModuleHeader) {
- StreamTester tester;
+ StreamTester tester(isolate);
const uint8_t bytes[] = {
WASM_MODULE_HEADER, // module header
@@ -955,7 +981,7 @@ STREAM_TEST(TestOnlyModuleHeader) {
}
STREAM_TEST(TestModuleWithZeroFunctions) {
- StreamTester tester;
+ StreamTester tester(isolate);
const uint8_t bytes[] = {
WASM_MODULE_HEADER, // module header
@@ -977,7 +1003,7 @@ STREAM_TEST(TestModuleWithZeroFunctions) {
}
STREAM_TEST(TestModuleWithMultipleFunctions) {
- StreamTester tester;
+ StreamTester tester(isolate);
uint8_t code[] = {
U32V_1(4), // body size
@@ -1013,7 +1039,7 @@ STREAM_TEST(TestModuleWithMultipleFunctions) {
}
STREAM_TEST(TestModuleWithDataSection) {
- StreamTester tester;
+ StreamTester tester(isolate);
uint8_t code[] = {
U32V_1(4), // body size
@@ -1057,7 +1083,7 @@ STREAM_TEST(TestModuleWithDataSection) {
// Test that all bytes arrive before doing any compilation. FinishStream is
// called immediately.
STREAM_TEST(TestModuleWithImportedFunction) {
- StreamTester tester;
+ StreamTester tester(isolate);
ZoneBuffer buffer(tester.zone());
TestSignatures sigs;
WasmModuleBuilder builder(tester.zone());
@@ -1078,7 +1104,7 @@ STREAM_TEST(TestModuleWithImportedFunction) {
}
STREAM_TEST(TestModuleWithErrorAfterDataSection) {
- StreamTester tester;
+ StreamTester tester(isolate);
const uint8_t bytes[] = {
WASM_MODULE_HEADER, // module header
@@ -1113,10 +1139,10 @@ STREAM_TEST(TestModuleWithErrorAfterDataSection) {
// Test that cached bytes work.
STREAM_TEST(TestDeserializationBypassesCompilation) {
- StreamTester tester;
+ StreamTester tester(isolate);
ZoneBuffer wire_bytes = GetValidModuleBytes(tester.zone());
ZoneBuffer module_bytes =
- GetValidCompiledModuleBytes(tester.zone(), wire_bytes);
+ GetValidCompiledModuleBytes(isolate, tester.zone(), wire_bytes);
tester.SetCompiledModuleBytes(module_bytes.begin(), module_bytes.size());
tester.OnBytesReceived(wire_bytes.begin(), wire_bytes.size());
tester.FinishStream();
@@ -1128,10 +1154,10 @@ STREAM_TEST(TestDeserializationBypassesCompilation) {
// Test that bad cached bytes don't cause compilation of wire bytes to fail.
STREAM_TEST(TestDeserializationFails) {
- StreamTester tester;
+ StreamTester tester(isolate);
ZoneBuffer wire_bytes = GetValidModuleBytes(tester.zone());
ZoneBuffer module_bytes =
- GetValidCompiledModuleBytes(tester.zone(), wire_bytes);
+ GetValidCompiledModuleBytes(isolate, tester.zone(), wire_bytes);
// corrupt header
byte first_byte = *module_bytes.begin();
module_bytes.patch_u8(0, first_byte + 1);
@@ -1146,7 +1172,7 @@ STREAM_TEST(TestDeserializationFails) {
// Test that a non-empty function section with a missing code section fails.
STREAM_TEST(TestFunctionSectionWithoutCodeSection) {
- StreamTester tester;
+ StreamTester tester(isolate);
const uint8_t bytes[] = {
WASM_MODULE_HEADER, // module header
@@ -1171,7 +1197,7 @@ STREAM_TEST(TestFunctionSectionWithoutCodeSection) {
}
STREAM_TEST(TestSetModuleCompiledCallback) {
- StreamTester tester;
+ StreamTester tester(isolate);
bool callback_called = false;
tester.stream()->SetModuleCompiledCallback(
[&callback_called](const std::shared_ptr<NativeModule> module) {
@@ -1249,7 +1275,7 @@ STREAM_TEST(TestCompileErrorFunctionName) {
};
for (bool late_names : {false, true}) {
- StreamTester tester;
+ StreamTester tester(isolate);
tester.OnBytesReceived(bytes_module_with_code,
arraysize(bytes_module_with_code));
@@ -1268,7 +1294,7 @@ STREAM_TEST(TestCompileErrorFunctionName) {
}
STREAM_TEST(TestSetModuleCodeSection) {
- StreamTester tester;
+ StreamTester tester(isolate);
uint8_t code[] = {
U32V_1(1), // functions count
@@ -1302,9 +1328,8 @@ STREAM_TEST(TestSetModuleCodeSection) {
// Test that profiler does not crash when module is only partly compiled.
STREAM_TEST(TestProfilingMidStreaming) {
- StreamTester tester;
- v8::Isolate* isolate = CcTest::isolate();
- Isolate* i_isolate = CcTest::i_isolate();
+ StreamTester tester(isolate);
+ Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
Zone* zone = tester.zone();
// Build module with one exported (named) function.
diff --git a/deps/v8/test/cctest/wasm/test-wasm-debug-evaluate.cc b/deps/v8/test/cctest/wasm/test-wasm-debug-evaluate.cc
index 1ee8d48504..8696a70b92 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-debug-evaluate.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-debug-evaluate.cc
@@ -38,6 +38,10 @@ namespace v8 {
namespace internal {
namespace wasm {
+static Handle<String> V8String(Isolate* isolate, const char* str) {
+ return isolate->factory()->NewStringFromAsciiChecked(str);
+}
+
namespace {
template <typename... FunctionArgsT>
class TestCode {
@@ -246,6 +250,71 @@ class WasmBreakHandler : public debug::DebugDelegate {
}
};
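+
+// Debug delegate that, on a requested break, evaluates a JavaScript snippet
+// in the paused Wasm frame via DebugEvaluate::WebAssembly and records the
+// stringified result together with any pending exception.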
+class WasmJSBreakHandler : public debug::DebugDelegate {
+ public:
+ struct EvaluationResult {
+ Maybe<std::string> result = Nothing<std::string>();
+ Maybe<std::string> error = Nothing<std::string>();
+ };
+
+ WasmJSBreakHandler(Isolate* isolate, Handle<String> snippet)
+ : isolate_(isolate),
+ snippet_(snippet),
+ result_(Nothing<EvaluationResult>()) {
+ v8::debug::SetDebugDelegate(reinterpret_cast<v8::Isolate*>(isolate_), this);
+ }
+
+ ~WasmJSBreakHandler() override {
+ v8::debug::SetDebugDelegate(reinterpret_cast<v8::Isolate*>(isolate_),
+ nullptr);
+ }
+
+ const Maybe<EvaluationResult>& result() const { return result_; }
+
+ private:
+ Isolate* isolate_;
+ Handle<String> snippet_;
+ Maybe<EvaluationResult> result_;
+
+ Maybe<std::string> GetPendingExceptionAsString() const {
+ if (!isolate_->has_pending_exception()) return Nothing<std::string>();
+ Handle<Object> exception(isolate_->pending_exception(), isolate_);
+ isolate_->clear_pending_exception();
+
+ Handle<String> exception_string;
+ if (!Object::ToString(isolate_, exception).ToHandle(&exception_string)) {
+ return Just<std::string>("");
+ }
+ return Just<std::string>(exception_string->ToCString().get());
+ }
+
+ Maybe<std::string> GetResultAsString(MaybeHandle<Object> result) const {
+ Handle<Object> just_result;
+ if (!result.ToHandle(&just_result)) return Nothing<std::string>();
+ MaybeHandle<String> maybe_string = Object::ToString(isolate_, just_result);
+ Handle<String> just_string;
+ if (!maybe_string.ToHandle(&just_string)) return Nothing<std::string>();
+ return Just<std::string>(just_string->ToCString().get());
+ }
+
+ void BreakProgramRequested(v8::Local<v8::Context> paused_context,
+ const std::vector<int>&) override {
+ StackTraceFrameIterator frame_it(isolate_);
+
+ WasmFrame* frame = WasmFrame::cast(frame_it.frame());
+ Handle<WasmInstanceObject> instance{frame->wasm_instance(), isolate_};
+
+ MaybeHandle<Object> result_handle = DebugEvaluate::WebAssembly(
+ instance, frame_it.frame()->id(), snippet_, false);
+
+ Maybe<std::string> error_message = GetPendingExceptionAsString();
+ Maybe<std::string> result_message = GetResultAsString(result_handle);
+
+ isolate_->clear_pending_exception();
+ result_ = Just<EvaluationResult>({result_message, error_message});
+ }
+};
+
WASM_COMPILED_EXEC_TEST(WasmDebugEvaluate_CompileFailed) {
WasmRunner<int> runner(execution_tier);
@@ -449,6 +518,57 @@ WASM_COMPILED_EXEC_TEST(WasmDebugEvaluate_Operands) {
CHECK_EQ(result.result.ToChecked(), "45");
}
+WASM_COMPILED_EXEC_TEST(WasmDebugEvaluate_JavaScript) {
+ WasmRunner<int> runner(execution_tier);
+ runner.builder().AddGlobal<int32_t>();
+ runner.builder().AddMemoryElems<int32_t>(64);
+ uint16_t index = 0;
+ runner.builder().AddIndirectFunctionTable(&index, 1);
+
+ TestCode<int64_t> code(
+ &runner,
+ {WASM_SET_GLOBAL(0, WASM_I32V_2('B')),
+ WASM_SET_LOCAL(0, WASM_I64V_2('A')), WASM_RETURN1(WASM_GET_LOCAL(0))},
+ {ValueType::kI64});
+ code.BreakOnReturn(&runner);
+
+ Isolate* isolate = runner.main_isolate();
+ Handle<String> snippet =
+ V8String(isolate,
+ "JSON.stringify(["
+ "$global0, "
+ "$table0, "
+ "$var0, "
+ "$main, "
+ "$memory0, "
+ "globals[0], "
+ "tables[0], "
+ "locals[0], "
+ "functions[0], "
+ "memories[0], "
+ "memories, "
+ "tables, "
+ "stack, "
+ "imports, "
+ "exports, "
+ "globals, "
+ "locals, "
+ "functions, "
+ "], (k, v) => k === 'at' || typeof v === 'undefined' || typeof "
+ "v === 'object' ? v : v.toString())");
+
+ WasmJSBreakHandler break_handler(isolate, snippet);
+ CHECK(!code.Run(&runner).is_null());
+
+ WasmJSBreakHandler::EvaluationResult result =
+ break_handler.result().ToChecked();
+ CHECK_WITH_MSG(result.error.IsNothing(), result.error.ToChecked().c_str());
+ CHECK_EQ(result.result.ToChecked(),
+ "[\"66\",{},\"65\",\"function 0() { [native code] }\",{},"
+ "\"66\",{},\"65\",\"function 0() { [native code] }\",{},"
+ "{},{},{\"0\":\"65\"},{},{},{},{},{}]");
+}
+
} // namespace
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/cctest/wasm/test-wasm-metrics.cc b/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
index b05a4e88b1..03e9ea420c 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-metrics.cc
@@ -53,7 +53,7 @@ class MockPlatform final : public TestPlatform {
void ExecuteTasks() {
for (auto* job_handle : job_handles_) {
- if (job_handle->IsRunning()) job_handle->Join();
+ if (job_handle->IsValid()) job_handle->Join();
}
task_runner_->ExecuteTasks();
}
@@ -66,10 +66,18 @@ class MockPlatform final : public TestPlatform {
tasks_.push(std::move(task));
}
+ void PostNonNestableTask(std::unique_ptr<Task> task) override {
+ PostTask(std::move(task));
+ }
+
void PostDelayedTask(std::unique_ptr<Task> task,
double delay_in_seconds) override {
- base::MutexGuard lock_scope(&tasks_lock_);
- tasks_.push(std::move(task));
+ PostTask(std::move(task));
+ }
+
+ void PostNonNestableDelayedTask(std::unique_ptr<Task> task,
+ double delay_in_seconds) override {
+ PostTask(std::move(task));
}
void PostIdleTask(std::unique_ptr<IdleTask> task) override {
@@ -77,6 +85,8 @@ class MockPlatform final : public TestPlatform {
}
bool IdleTasksEnabled() override { return false; }
+ bool NonNestableTasksEnabled() const override { return true; }
+ bool NonNestableDelayedTasksEnabled() const override { return true; }
void ExecuteTasks() {
std::queue<std::unique_ptr<v8::Task>> tasks;
@@ -114,8 +124,11 @@ class MockPlatform final : public TestPlatform {
}
void Join() override { orig_handle_->Join(); }
void Cancel() override { orig_handle_->Cancel(); }
+ void CancelAndDetach() override { orig_handle_->CancelAndDetach(); }
bool IsRunning() override { return orig_handle_->IsRunning(); }
+ bool IsValid() override { return orig_handle_->IsValid(); }
bool IsCompleted() override { return orig_handle_->IsCompleted(); }
+ bool IsActive() override { return orig_handle_->IsActive(); }
private:
std::unique_ptr<JobHandle> orig_handle_;
@@ -134,8 +147,9 @@ enum class CompilationStatus {
class TestInstantiateResolver : public InstantiationResultResolver {
public:
- TestInstantiateResolver(CompilationStatus* status, std::string* error_message)
- : status_(status), error_message_(error_message) {}
+ TestInstantiateResolver(Isolate* isolate, CompilationStatus* status,
+ std::string* error_message)
+ : isolate_(isolate), status_(status), error_message_(error_message) {}
void OnInstantiationSucceeded(
i::Handle<i::WasmInstanceObject> instance) override {
@@ -145,11 +159,12 @@ class TestInstantiateResolver : public InstantiationResultResolver {
void OnInstantiationFailed(i::Handle<i::Object> error_reason) override {
*status_ = CompilationStatus::kFailed;
Handle<String> str =
- Object::ToString(CcTest::i_isolate(), error_reason).ToHandleChecked();
+ Object::ToString(isolate_, error_reason).ToHandleChecked();
error_message_->assign(str->ToCString().get());
}
private:
+ Isolate* isolate_;
CompilationStatus* const status_;
std::string* const error_message_;
};
@@ -169,7 +184,8 @@ class TestCompileResolver : public CompilationResultResolver {
*native_module_ = module->shared_native_module();
isolate_->wasm_engine()->AsyncInstantiate(
isolate_,
- std::make_unique<TestInstantiateResolver>(status_, error_message_),
+ std::make_unique<TestInstantiateResolver>(isolate_, status_,
+ error_message_),
module, MaybeHandle<JSReceiver>());
}
}
@@ -190,20 +206,36 @@ class TestCompileResolver : public CompilationResultResolver {
} // namespace
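+// Spins up a fresh isolate on the MockPlatform, sets it up for Wasm, runs
+// the compile-test body, and disposes the isolate afterwards.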
+#define RUN_COMPILE(name) \
+ MockPlatform mock_platform; \
+ CHECK_EQ(V8::GetCurrentPlatform(), &mock_platform); \
+ v8::Isolate::CreateParams create_params; \
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); \
+ v8::Isolate* isolate = v8::Isolate::New(create_params); \
+ { \
+ v8::HandleScope handle_scope(isolate); \
+ v8::Local<v8::Context> context = v8::Context::New(isolate); \
+ v8::Context::Scope context_scope(context); \
+ Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); \
+ testing::SetupIsolateForWasmModule(i_isolate); \
+ RunCompile_##name(&mock_platform, i_isolate); \
+ } \
+ isolate->Dispose();
+
#define COMPILE_TEST(name) \
- void RunCompile_##name(); \
- TEST(Sync##name) { \
+ void RunCompile_##name(MockPlatform*, i::Isolate*); \
+ UNINITIALIZED_TEST(Sync##name) { \
i::FlagScope<bool> sync_scope(&i::FLAG_wasm_async_compilation, false); \
- RunCompile_##name(); \
+ RUN_COMPILE(name); \
} \
\
- TEST(Async##name) { RunCompile_##name(); } \
+ UNINITIALIZED_TEST(Async##name) { RUN_COMPILE(name); } \
\
- TEST(Streaming##name) { \
+ UNINITIALIZED_TEST(Streaming##name) { \
i::FlagScope<bool> streaming_scope(&i::FLAG_wasm_test_streaming, true); \
- RunCompile_##name(); \
+ RUN_COMPILE(name); \
} \
- void RunCompile_##name()
+ void RunCompile_##name(MockPlatform* platform, i::Isolate* isolate)
class MetricsRecorder : public v8::metrics::Recorder {
public:
@@ -235,14 +267,9 @@ class MetricsRecorder : public v8::metrics::Recorder {
};
COMPILE_TEST(TestEventMetrics) {
- MockPlatform platform;
- Isolate* isolate = CcTest::InitIsolateOnce();
- CHECK_EQ(V8::GetCurrentPlatform(), &platform);
- HandleScope scope(isolate);
- testing::SetupIsolateForWasmModule(isolate);
std::shared_ptr<MetricsRecorder> recorder =
std::make_shared<MetricsRecorder>();
- CcTest::isolate()->SetMetricsRecorder(recorder);
+ reinterpret_cast<v8::Isolate*>(isolate)->SetMetricsRecorder(recorder);
TestSignatures sigs;
v8::internal::AccountingAllocator allocator;
@@ -270,9 +297,9 @@ COMPILE_TEST(TestEventMetrics) {
// Finish compilation tasks.
while (status == CompilationStatus::kPending) {
- platform.ExecuteTasks();
+ platform->ExecuteTasks();
}
- platform.ExecuteTasks(); // Complete pending tasks beyond compilation.
+ platform->ExecuteTasks(); // Complete pending tasks beyond compilation.
CHECK_EQ(CompilationStatus::kFinished, status);
CHECK_EQ(1, recorder->module_decoded_.size());
@@ -284,7 +311,7 @@ COMPILE_TEST(TestEventMetrics) {
CHECK_EQ(buffer.size(),
recorder->module_decoded_.back().module_size_in_bytes);
CHECK_EQ(1, recorder->module_decoded_.back().function_count);
- CHECK_LE(0, recorder->module_decoded_.back().wall_clock_time_in_us);
+ CHECK_LE(0, recorder->module_decoded_.back().wall_clock_duration_in_us);
CHECK_EQ(1, recorder->module_compiled_.size());
CHECK(recorder->module_compiled_.back().success);
@@ -304,14 +331,14 @@ COMPILE_TEST(TestEventMetrics) {
CHECK_GE(native_module->generated_code_size(),
recorder->module_compiled_.back().code_size_in_bytes);
CHECK_EQ(0, recorder->module_compiled_.back().liftoff_bailout_count);
- CHECK_LE(0, recorder->module_compiled_.back().wall_clock_time_in_us);
+ CHECK_LE(0, recorder->module_compiled_.back().wall_clock_duration_in_us);
CHECK_EQ(1, recorder->module_instantiated_.size());
CHECK(recorder->module_instantiated_.back().success);
// We currently don't support true async instantiation.
CHECK(!recorder->module_instantiated_.back().async);
CHECK_EQ(0, recorder->module_instantiated_.back().imported_function_count);
- CHECK_LE(0, recorder->module_instantiated_.back().wall_clock_time_in_us);
+ CHECK_LE(0, recorder->module_instantiated_.back().wall_clock_duration_in_us);
CHECK_EQ(1, recorder->module_tiered_up_.size());
CHECK(!recorder->module_tiered_up_.back().lazy);
@@ -322,7 +349,7 @@ COMPILE_TEST(TestEventMetrics) {
recorder->module_tiered_up_.back().code_size_in_bytes);
CHECK_GE(native_module->committed_code_space(),
recorder->module_tiered_up_.back().code_size_in_bytes);
- CHECK_LE(0, recorder->module_tiered_up_.back().wall_clock_time_in_us);
+ CHECK_LE(0, recorder->module_tiered_up_.back().wall_clock_duration_in_us);
}
} // namespace wasm
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index ea37eb4023..f15d383386 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -170,7 +170,8 @@ Handle<JSFunction> TestingModuleBuilder::WrapCode(uint32_t index) {
}
void TestingModuleBuilder::AddIndirectFunctionTable(
- const uint16_t* function_indexes, uint32_t table_size) {
+ const uint16_t* function_indexes, uint32_t table_size,
+ ValueType table_type) {
Handle<WasmInstanceObject> instance = instance_object();
uint32_t table_index = static_cast<uint32_t>(test_module_->tables.size());
test_module_->tables.emplace_back();
@@ -178,7 +179,7 @@ void TestingModuleBuilder::AddIndirectFunctionTable(
table.initial_size = table_size;
table.maximum_size = table_size;
table.has_maximum_size = true;
- table.type = kWasmFuncRef;
+ table.type = table_type;
{
// Allocate the indirect function table.
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index c02e1b5d70..5c64c2f468 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -120,8 +120,8 @@ class TestingModuleBuilder {
}
byte AddSignature(const FunctionSig* sig) {
- // TODO(7748): This will need updating for struct/array types support.
- DCHECK_EQ(test_module_->types.size(), test_module_->signature_ids.size());
+ DCHECK_EQ(test_module_->types.size(),
+ test_module_->canonicalized_type_ids.size());
test_module_->add_signature(sig);
size_t size = test_module_->types.size();
CHECK_GT(127, size);
@@ -201,7 +201,8 @@ class TestingModuleBuilder {
// If function_indexes is {nullptr}, the contents of the table will be
// initialized with null functions.
void AddIndirectFunctionTable(const uint16_t* function_indexes,
- uint32_t table_size);
+ uint32_t table_size,
+ ValueType table_type = kWasmFuncRef);
uint32_t AddBytes(Vector<const byte> bytes);
diff --git a/deps/v8/test/common/flag-utils.h b/deps/v8/test/common/flag-utils.h
new file mode 100644
index 0000000000..04ee44cb1c
--- /dev/null
+++ b/deps/v8/test/common/flag-utils.h
@@ -0,0 +1,38 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TEST_COMMON_FLAG_UTILS_H
+#define V8_TEST_COMMON_FLAG_UTILS_H
+
+namespace v8 {
+namespace internal {
+
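+// RAII scope that sets a flag to a new value and restores the previous
+// value when the scope ends.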
+template <typename T>
+class FlagScope {
+ public:
+ FlagScope(T* flag, T new_value) : flag_(flag), previous_value_(*flag) {
+ *flag = new_value;
+ }
+ ~FlagScope() { *flag_ = previous_value_; }
+
+ private:
+ T* flag_;
+ T previous_value_;
+};
+
+#define FLAG_SCOPE(flag) \
+ FlagScope<bool> __scope_##flag##__LINE__(&FLAG_##flag, true)
+
+} // namespace internal
+} // namespace v8
+
+#define FLAG_SCOPE_EXTERNAL(flag) \
+ v8::internal::FlagScope<bool> __scope_##flag##__LINE__( \
+ &v8::internal::FLAG_##flag, true)
+
+#define UNFLAG_SCOPE_EXTERNAL(flag) \
+ v8::internal::FlagScope<bool> __scope_##flag##__LINE__( \
+ &v8::internal::FLAG_##flag, false)
+
+#endif // V8_TEST_COMMON_FLAG_UTILS_H
diff --git a/deps/v8/test/common/wasm/flag-utils.h b/deps/v8/test/common/wasm/flag-utils.h
index 98830c3361..115c6033b1 100644
--- a/deps/v8/test/common/wasm/flag-utils.h
+++ b/deps/v8/test/common/wasm/flag-utils.h
@@ -2,30 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_TEST_COMMON_FLAG_UTILS_H
-#define V8_TEST_COMMON_FLAG_UTILS_H
+#ifndef V8_TEST_COMMON_WASM_FLAG_UTILS_H
+#define V8_TEST_COMMON_WASM_FLAG_UTILS_H
#include "src/wasm/wasm-features.h"
+#include "test/common/flag-utils.h"
namespace v8 {
namespace internal {
-template <typename T>
-class FlagScope {
- public:
- FlagScope(T* flag, T new_value) : flag_(flag), previous_value_(*flag) {
- *flag = new_value;
- }
- ~FlagScope() { *flag_ = previous_value_; }
-
- private:
- T* flag_;
- T previous_value_;
-};
-
-#define FLAG_SCOPE(flag) \
- FlagScope<bool> __scope_##flag##__LINE__(&FLAG_##flag, true)
-
#define EXPERIMENTAL_FLAG_SCOPE(flag) FLAG_SCOPE(experimental_wasm_##flag)
namespace wasm {
@@ -65,4 +50,4 @@ class WasmFeatureScope {
} // namespace internal
} // namespace v8
-#endif // V8_TEST_COMMON_FLAG_UTILS_H
+#endif // V8_TEST_COMMON_WASM_FLAG_UTILS_H
diff --git a/deps/v8/test/common/wasm/wasm-interpreter.cc b/deps/v8/test/common/wasm/wasm-interpreter.cc
index b1d57bdd65..cdbcff8b41 100644
--- a/deps/v8/test/common/wasm/wasm-interpreter.cc
+++ b/deps/v8/test/common/wasm/wasm-interpreter.cc
@@ -9,6 +9,7 @@
#include "src/base/overflowing-math.h"
#include "src/codegen/assembler-inl.h"
+#include "src/common/globals.h"
#include "src/compiler/wasm-compiler.h"
#include "src/numbers/conversions.h"
#include "src/objects/objects-inl.h"
@@ -798,8 +799,8 @@ class SideTable : public ZoneObject {
case kExprBlock:
case kExprLoop: {
bool is_loop = opcode == kExprLoop;
- BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
- i.pc() + 1);
+ BlockTypeImmediate<Decoder::kNoValidation> imm(WasmFeatures::All(),
+ &i, i.pc() + 1);
if (imm.type == kWasmBottom) {
imm.sig = module->signature(imm.sig_index);
}
@@ -820,8 +821,8 @@ class SideTable : public ZoneObject {
break;
}
case kExprIf: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
- i.pc() + 1);
+ BlockTypeImmediate<Decoder::kNoValidation> imm(WasmFeatures::All(),
+ &i, i.pc() + 1);
if (imm.type == kWasmBottom) {
imm.sig = module->signature(imm.sig_index);
}
@@ -860,8 +861,8 @@ class SideTable : public ZoneObject {
break;
}
case kExprTry: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
- i.pc() + 1);
+ BlockTypeImmediate<Decoder::kNoValidation> imm(WasmFeatures::All(),
+ &i, i.pc() + 1);
if (imm.type == kWasmBottom) {
imm.sig = module->signature(imm.sig_index);
}
@@ -896,7 +897,8 @@ class SideTable : public ZoneObject {
break;
}
case kExprBrOnExn: {
- BranchOnExceptionImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ BranchOnExceptionImmediate<Decoder::kNoValidation> imm(&i,
+ i.pc() + 1);
uint32_t depth = imm.depth.depth; // Extracted for convenience.
imm.index.exception = &module->exceptions[imm.index.index];
DCHECK_EQ(0, imm.index.exception->sig->return_count());
@@ -925,22 +927,22 @@ class SideTable : public ZoneObject {
break;
}
case kExprBr: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ BranchDepthImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), imm.depth);
Control* c = &control_stack[control_stack.size() - imm.depth - 1];
if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
break;
}
case kExprBrIf: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ BranchDepthImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), imm.depth);
Control* c = &control_stack[control_stack.size() - imm.depth - 1];
if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
break;
}
case kExprBrTable: {
- BranchTableImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
- BranchTableIterator<Decoder::kNoValidate> iterator(&i, imm);
+ BranchTableImmediate<Decoder::kNoValidation> imm(&i, i.pc() + 1);
+ BranchTableIterator<Decoder::kNoValidation> iterator(&i, imm);
TRACE("control @%u: BrTable[count=%u]\n", i.pc_offset(),
imm.table_count);
if (!unreachable) {
@@ -1381,12 +1383,12 @@ class WasmInterpreterInternals {
pc_t ReturnPc(Decoder* decoder, InterpreterCode* code, pc_t pc) {
switch (code->start[pc]) {
case kExprCallFunction: {
- CallFunctionImmediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc + 1));
+ CallFunctionImmediate<Decoder::kNoValidation> imm(decoder,
+ code->at(pc + 1));
return pc + 1 + imm.length;
}
case kExprCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidate> imm(
+ CallIndirectImmediate<Decoder::kNoValidation> imm(
WasmFeatures::All(), decoder, code->at(pc + 1));
return pc + 1 + imm.length;
}
@@ -1529,7 +1531,7 @@ class WasmInterpreterInternals {
// increment pc at the caller, because we want to keep pc to the start of
// the operation to keep trap reporting and tracing accurate, otherwise
// those will report at the middle of an opcode.
- MemoryAccessImmediate<Decoder::kNoValidate> imm(
+ MemoryAccessImmediate<Decoder::kNoValidation> imm(
decoder, code->at(pc + prefix_len), sizeof(ctype));
uint32_t index = Pop().to<uint32_t>();
Address addr = BoundsCheckMem<mtype>(imm.offset, index);
@@ -1561,7 +1563,7 @@ class WasmInterpreterInternals {
// increment pc at the caller, because we want to keep pc to the start of
// the operation to keep trap reporting and tracing accurate, otherwise
// those will report at the middle of an opcode.
- MemoryAccessImmediate<Decoder::kNoValidate> imm(
+ MemoryAccessImmediate<Decoder::kNoValidation> imm(
decoder, code->at(pc + prefix_len), sizeof(ctype));
ctype val = Pop().to<ctype>();
@@ -1588,8 +1590,8 @@ class WasmInterpreterInternals {
bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
Address* address, pc_t pc, int* const len,
type* val = nullptr, type* val2 = nullptr) {
- MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 2),
- sizeof(type));
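+    // The memory-access immediate follows the full (prefixed) opcode, so
+    // read it at pc + *len rather than at a hard-coded offset.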
+ MemoryAccessImmediate<Decoder::kNoValidation> imm(
+ decoder, code->at(pc + *len), sizeof(type));
if (val2) *val2 = static_cast<type>(Pop().to<op_type>());
if (val) *val = static_cast<type>(Pop().to<op_type>());
uint32_t index = Pop().to<uint32_t>();
@@ -1612,8 +1614,8 @@ class WasmInterpreterInternals {
uint32_t* buffer_offset, type* val,
int64_t* timeout = nullptr) {
// TODO(manoskouk): Introduce test which exposes wrong pc offset below.
- MemoryAccessImmediate<Decoder::kValidate> imm(decoder, code->at(pc + *len),
- sizeof(type));
+ MemoryAccessImmediate<Decoder::kFullValidation> imm(
+ decoder, code->at(pc + *len), sizeof(type));
if (timeout) {
*timeout = Pop().to<int64_t>();
}
@@ -1664,8 +1666,8 @@ class WasmInterpreterInternals {
Push(WasmValue(ExecuteI64UConvertSatF64(Pop().to<double>())));
return true;
case kExprMemoryInit: {
- MemoryInitImmediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc + 2));
+ MemoryInitImmediate<Decoder::kNoValidation> imm(decoder,
+ code->at(pc + 2));
// The data segment index must be in bounds since it is required by
// validation.
DCHECK_LT(imm.data_segment_index, module()->num_declared_data_segments);
@@ -1689,7 +1691,8 @@ class WasmInterpreterInternals {
return true;
}
case kExprDataDrop: {
- DataDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 2));
+ DataDropImmediate<Decoder::kNoValidation> imm(decoder,
+ code->at(pc + 2));
// The data segment index must be in bounds since it is required by
// validation.
DCHECK_LT(imm.index, module()->num_declared_data_segments);
@@ -1698,8 +1701,8 @@ class WasmInterpreterInternals {
return true;
}
case kExprMemoryCopy: {
- MemoryCopyImmediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc + 2));
+ MemoryCopyImmediate<Decoder::kNoValidation> imm(decoder,
+ code->at(pc + 2));
*len += imm.length;
auto size = Pop().to<uint32_t>();
auto src = Pop().to<uint32_t>();
@@ -1717,8 +1720,8 @@ class WasmInterpreterInternals {
return true;
}
case kExprMemoryFill: {
- MemoryIndexImmediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc + 2));
+ MemoryIndexImmediate<Decoder::kNoValidation> imm(decoder,
+ code->at(pc + 2));
*len += imm.length;
auto size = Pop().to<uint32_t>();
auto value = Pop().to<uint32_t>();
@@ -1733,7 +1736,8 @@ class WasmInterpreterInternals {
return true;
}
case kExprTableInit: {
- TableInitImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 2));
+ TableInitImmediate<Decoder::kNoValidation> imm(decoder,
+ code->at(pc + 2));
*len += imm.length;
auto size = Pop().to<uint32_t>();
auto src = Pop().to<uint32_t>();
@@ -1746,13 +1750,15 @@ class WasmInterpreterInternals {
return ok;
}
case kExprElemDrop: {
- ElemDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 2));
+ ElemDropImmediate<Decoder::kNoValidation> imm(decoder,
+ code->at(pc + 2));
*len += imm.length;
instance_object_->dropped_elem_segments()[imm.index] = 1;
return true;
}
case kExprTableCopy: {
- TableCopyImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 2));
+ TableCopyImmediate<Decoder::kNoValidation> imm(decoder,
+ code->at(pc + 2));
auto size = Pop().to<uint32_t>();
auto src = Pop().to<uint32_t>();
auto dst = Pop().to<uint32_t>();
@@ -1765,8 +1771,8 @@ class WasmInterpreterInternals {
return ok;
}
case kExprTableGrow: {
- TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc + 2));
+ TableIndexImmediate<Decoder::kNoValidation> imm(decoder,
+ code->at(pc + 2));
HandleScope handle_scope(isolate_);
auto table = handle(
WasmTableObject::cast(instance_object_->tables().get(imm.index)),
@@ -1779,8 +1785,8 @@ class WasmInterpreterInternals {
return true;
}
case kExprTableSize: {
- TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc + 2));
+ TableIndexImmediate<Decoder::kNoValidation> imm(decoder,
+ code->at(pc + 2));
HandleScope handle_scope(isolate_);
auto table = handle(
WasmTableObject::cast(instance_object_->tables().get(imm.index)),
@@ -1791,8 +1797,8 @@ class WasmInterpreterInternals {
return true;
}
case kExprTableFill: {
- TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc + 2));
+ TableIndexImmediate<Decoder::kNoValidation> imm(decoder,
+ code->at(pc + 2));
HandleScope handle_scope(isolate_);
auto count = Pop().to<uint32_t>();
auto value = Pop().to_externref();
@@ -2028,6 +2034,10 @@ class WasmInterpreterInternals {
*len += 1;
break;
case kExprI32AtomicWait: {
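+          // Waiting on non-shared memory traps.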
+ if (!module()->has_shared_memory) {
+ DoTrap(kTrapUnreachable, pc);
+ return false;
+ }
int32_t val;
int64_t timeout;
uint32_t buffer_offset;
@@ -2044,6 +2054,10 @@ class WasmInterpreterInternals {
break;
}
case kExprI64AtomicWait: {
+ if (!module()->has_shared_memory) {
+ DoTrap(kTrapUnreachable, pc);
+ return false;
+ }
int64_t val;
int64_t timeout;
uint32_t buffer_offset;
@@ -2066,6 +2080,10 @@ class WasmInterpreterInternals {
&buffer_offset, &val)) {
return false;
}
+ if (!module()->has_shared_memory) {
+ Push(WasmValue(0));
+ break;
+ }
HandleScope handle_scope(isolate_);
Handle<JSArrayBuffer> array_buffer(
instance_object_->memory_object().array_buffer(), isolate_);
@@ -2080,6 +2098,14 @@ class WasmInterpreterInternals {
return true;
}
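+  // On AIX, route floating-point rounding ops through FpOpWorkaround to
+  // correct platform-specific results; elsewhere call the function directly.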
+ template <typename T, T (*float_round_op)(T)>
+ T AixFpOpWorkaround(T input) {
+#if V8_OS_AIX
+ return FpOpWorkaround<T>(input, float_round_op(input));
+#else
+ return float_round_op(input);
+#endif
+ }
bool ExecuteSimdOp(WasmOpcode opcode, Decoder* decoder, InterpreterCode* code,
pc_t pc, int* const len) {
switch (opcode) {
@@ -2099,15 +2125,16 @@ class WasmInterpreterInternals {
SPLAT_CASE(I16x8, int8, int32_t, 8)
SPLAT_CASE(I8x16, int16, int32_t, 16)
#undef SPLAT_CASE
-#define EXTRACT_LANE_CASE(format, name) \
- case kExpr##format##ExtractLane: { \
- SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + *len)); \
- *len += 1; \
- WasmValue val = Pop(); \
- Simd128 s = val.to_s128(); \
- auto ss = s.to_##name(); \
- Push(WasmValue(ss.val[LANE(imm.lane, ss)])); \
- return true; \
+#define EXTRACT_LANE_CASE(format, name) \
+ case kExpr##format##ExtractLane: { \
+ SimdLaneImmediate<Decoder::kNoValidation> imm(decoder, \
+ code->at(pc + *len)); \
+ *len += 1; \
+ WasmValue val = Pop(); \
+ Simd128 s = val.to_s128(); \
+ auto ss = s.to_##name(); \
+ Push(WasmValue(ss.val[LANE(imm.lane, ss)])); \
+ return true; \
}
EXTRACT_LANE_CASE(F64x2, f64x2)
EXTRACT_LANE_CASE(F32x4, f32x4)
@@ -2121,23 +2148,24 @@ class WasmInterpreterInternals {
// unsigned extracts, we will cast it int8_t -> uint8_t -> uint32_t. We
// add the DCHECK to ensure that if the array type changes, we know to
// change this function.
-#define EXTRACT_LANE_EXTEND_CASE(format, name, sign, extended_type) \
- case kExpr##format##ExtractLane##sign: { \
- SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + *len)); \
- *len += 1; \
- WasmValue val = Pop(); \
- Simd128 s = val.to_s128(); \
- auto ss = s.to_##name(); \
- auto res = ss.val[LANE(imm.lane, ss)]; \
- DCHECK(std::is_signed<decltype(res)>::value); \
- if (std::is_unsigned<extended_type>::value) { \
- using unsigned_type = std::make_unsigned<decltype(res)>::type; \
- Push(WasmValue( \
- static_cast<extended_type>(static_cast<unsigned_type>(res)))); \
- } else { \
- Push(WasmValue(static_cast<extended_type>(res))); \
- } \
- return true; \
+#define EXTRACT_LANE_EXTEND_CASE(format, name, sign, extended_type) \
+ case kExpr##format##ExtractLane##sign: { \
+ SimdLaneImmediate<Decoder::kNoValidation> imm(decoder, \
+ code->at(pc + *len)); \
+ *len += 1; \
+ WasmValue val = Pop(); \
+ Simd128 s = val.to_s128(); \
+ auto ss = s.to_##name(); \
+ auto res = ss.val[LANE(imm.lane, ss)]; \
+ DCHECK(std::is_signed<decltype(res)>::value); \
+ if (std::is_unsigned<extended_type>::value) { \
+ using unsigned_type = std::make_unsigned<decltype(res)>::type; \
+ Push(WasmValue( \
+ static_cast<extended_type>(static_cast<unsigned_type>(res)))); \
+ } else { \
+ Push(WasmValue(static_cast<extended_type>(res))); \
+ } \
+ return true; \
}
EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, S, int32_t)
EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, U, uint32_t)
@@ -2154,10 +2182,10 @@ class WasmInterpreterInternals {
stype res; \
for (size_t i = 0; i < count; ++i) { \
auto a = s1.val[LANE(i, s1)]; \
- auto b = s2.val[LANE(i, s1)]; \
+ auto b = s2.val[LANE(i, s2)]; \
auto result = expr; \
possible_nondeterminism_ |= has_nondeterminism(result); \
- res.val[LANE(i, s1)] = expr; \
+ res.val[LANE(i, res)] = expr; \
} \
Push(WasmValue(Simd128(res))); \
return true; \
@@ -2181,12 +2209,6 @@ class WasmInterpreterInternals {
BINOP_CASE(I64x2Add, i64x2, int2, 2, base::AddWithWraparound(a, b))
BINOP_CASE(I64x2Sub, i64x2, int2, 2, base::SubWithWraparound(a, b))
BINOP_CASE(I64x2Mul, i64x2, int2, 2, base::MulWithWraparound(a, b))
- BINOP_CASE(I64x2MinS, i64x2, int2, 2, a < b ? a : b)
- BINOP_CASE(I64x2MinU, i64x2, int2, 2,
- static_cast<uint64_t>(a) < static_cast<uint64_t>(b) ? a : b)
- BINOP_CASE(I64x2MaxS, i64x2, int2, 2, a > b ? a : b)
- BINOP_CASE(I64x2MaxU, i64x2, int2, 2,
- static_cast<uint64_t>(a) > static_cast<uint64_t>(b) ? a : b)
BINOP_CASE(I32x4Add, i32x4, int4, 4, base::AddWithWraparound(a, b))
BINOP_CASE(I32x4Sub, i32x4, int4, 4, base::SubWithWraparound(a, b))
BINOP_CASE(I32x4Mul, i32x4, int4, 4, base::MulWithWraparound(a, b))
@@ -2209,12 +2231,14 @@ class WasmInterpreterInternals {
BINOP_CASE(I16x8MaxS, i16x8, int8, 8, a > b ? a : b)
BINOP_CASE(I16x8MaxU, i16x8, int8, 8,
static_cast<uint16_t>(a) > static_cast<uint16_t>(b) ? a : b)
- BINOP_CASE(I16x8AddSaturateS, i16x8, int8, 8, SaturateAdd<int16_t>(a, b))
- BINOP_CASE(I16x8AddSaturateU, i16x8, int8, 8, SaturateAdd<uint16_t>(a, b))
- BINOP_CASE(I16x8SubSaturateS, i16x8, int8, 8, SaturateSub<int16_t>(a, b))
- BINOP_CASE(I16x8SubSaturateU, i16x8, int8, 8, SaturateSub<uint16_t>(a, b))
+ BINOP_CASE(I16x8AddSatS, i16x8, int8, 8, SaturateAdd<int16_t>(a, b))
+ BINOP_CASE(I16x8AddSatU, i16x8, int8, 8, SaturateAdd<uint16_t>(a, b))
+ BINOP_CASE(I16x8SubSatS, i16x8, int8, 8, SaturateSub<int16_t>(a, b))
+ BINOP_CASE(I16x8SubSatU, i16x8, int8, 8, SaturateSub<uint16_t>(a, b))
BINOP_CASE(I16x8RoundingAverageU, i16x8, int8, 8,
base::RoundingAverageUnsigned<uint16_t>(a, b))
+ BINOP_CASE(I16x8Q15MulRSatS, i16x8, int8, 8,
+ SaturateRoundingQMul<int16_t>(a, b))
BINOP_CASE(I8x16Add, i8x16, int16, 16, base::AddWithWraparound(a, b))
BINOP_CASE(I8x16Sub, i8x16, int16, 16, base::SubWithWraparound(a, b))
BINOP_CASE(I8x16Mul, i8x16, int16, 16, base::MulWithWraparound(a, b))
@@ -2224,12 +2248,10 @@ class WasmInterpreterInternals {
BINOP_CASE(I8x16MaxS, i8x16, int16, 16, a > b ? a : b)
BINOP_CASE(I8x16MaxU, i8x16, int16, 16,
static_cast<uint8_t>(a) > static_cast<uint8_t>(b) ? a : b)
- BINOP_CASE(I8x16AddSaturateS, i8x16, int16, 16, SaturateAdd<int8_t>(a, b))
- BINOP_CASE(I8x16AddSaturateU, i8x16, int16, 16,
- SaturateAdd<uint8_t>(a, b))
- BINOP_CASE(I8x16SubSaturateS, i8x16, int16, 16, SaturateSub<int8_t>(a, b))
- BINOP_CASE(I8x16SubSaturateU, i8x16, int16, 16,
- SaturateSub<uint8_t>(a, b))
+ BINOP_CASE(I8x16AddSatS, i8x16, int16, 16, SaturateAdd<int8_t>(a, b))
+ BINOP_CASE(I8x16AddSatU, i8x16, int16, 16, SaturateAdd<uint8_t>(a, b))
+ BINOP_CASE(I8x16SubSatS, i8x16, int16, 16, SaturateSub<int8_t>(a, b))
+ BINOP_CASE(I8x16SubSatU, i8x16, int16, 16, SaturateSub<uint8_t>(a, b))
BINOP_CASE(I8x16RoundingAverageU, i8x16, int16, 16,
base::RoundingAverageUnsigned<uint8_t>(a, b))
#undef BINOP_CASE
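The AddSaturate→AddSat renames above track the corresponding opcode renames in the SIMD proposal; the semantics are unchanged. Saturating arithmetic clamps to the lane type's range instead of wrapping. A sketch of that behaviour, assuming the shape of the real SaturateAdd helper (which is defined elsewhere in the V8 tree):

#include <cstdint>
#include <limits>

// Illustrative saturating add for the 8/16-bit lane types used above.
template <typename T>
T SaturateAdd(T a, T b) {
  // Widen to 64-bit, clamp to T's range, then narrow.
  int64_t sum = static_cast<int64_t>(a) + static_cast<int64_t>(b);
  if (sum > std::numeric_limits<T>::max()) return std::numeric_limits<T>::max();
  if (sum < std::numeric_limits<T>::min()) return std::numeric_limits<T>::min();
  return static_cast<T>(sum);
}
// Example: SaturateAdd<int8_t>(120, 20) == 127 rather than the wrapped -116.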
@@ -2239,10 +2261,10 @@ class WasmInterpreterInternals {
stype s = v.to_s128().to_##name(); \
stype res; \
for (size_t i = 0; i < count; ++i) { \
- auto a = s.val[i]; \
+ auto a = s.val[LANE(i, s)]; \
auto result = expr; \
possible_nondeterminism_ |= has_nondeterminism(result); \
- res.val[i] = result; \
+ res.val[LANE(i, res)] = result; \
} \
Push(WasmValue(Simd128(res))); \
return true; \
@@ -2250,19 +2272,27 @@ class WasmInterpreterInternals {
UNOP_CASE(F64x2Abs, f64x2, float2, 2, std::abs(a))
UNOP_CASE(F64x2Neg, f64x2, float2, 2, -a)
UNOP_CASE(F64x2Sqrt, f64x2, float2, 2, std::sqrt(a))
- UNOP_CASE(F64x2Ceil, f64x2, float2, 2, ceil(a))
- UNOP_CASE(F64x2Floor, f64x2, float2, 2, floor(a))
- UNOP_CASE(F64x2Trunc, f64x2, float2, 2, trunc(a))
- UNOP_CASE(F64x2NearestInt, f64x2, float2, 2, nearbyint(a))
+ UNOP_CASE(F64x2Ceil, f64x2, float2, 2,
+ (AixFpOpWorkaround<double, &ceil>(a)))
+ UNOP_CASE(F64x2Floor, f64x2, float2, 2,
+ (AixFpOpWorkaround<double, &floor>(a)))
+ UNOP_CASE(F64x2Trunc, f64x2, float2, 2,
+ (AixFpOpWorkaround<double, &trunc>(a)))
+ UNOP_CASE(F64x2NearestInt, f64x2, float2, 2,
+ (AixFpOpWorkaround<double, &nearbyint>(a)))
UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a))
UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a)
UNOP_CASE(F32x4Sqrt, f32x4, float4, 4, std::sqrt(a))
UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, base::Recip(a))
UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, base::RecipSqrt(a))
- UNOP_CASE(F32x4Ceil, f32x4, float4, 4, ceilf(a))
- UNOP_CASE(F32x4Floor, f32x4, float4, 4, floorf(a))
- UNOP_CASE(F32x4Trunc, f32x4, float4, 4, truncf(a))
- UNOP_CASE(F32x4NearestInt, f32x4, float4, 4, nearbyintf(a))
+ UNOP_CASE(F32x4Ceil, f32x4, float4, 4,
+ (AixFpOpWorkaround<float, &ceilf>(a)))
+ UNOP_CASE(F32x4Floor, f32x4, float4, 4,
+ (AixFpOpWorkaround<float, &floorf>(a)))
+ UNOP_CASE(F32x4Trunc, f32x4, float4, 4,
+ (AixFpOpWorkaround<float, &truncf>(a)))
+ UNOP_CASE(F32x4NearestInt, f32x4, float4, 4,
+ (AixFpOpWorkaround<float, &nearbyintf>(a)))
UNOP_CASE(I64x2Neg, i64x2, int2, 2, base::NegateWithWraparound(a))
UNOP_CASE(I32x4Neg, i32x4, int4, 4, base::NegateWithWraparound(a))
UNOP_CASE(I32x4Abs, i32x4, int4, 4, std::abs(a))
@@ -2271,6 +2301,8 @@ class WasmInterpreterInternals {
UNOP_CASE(I16x8Abs, i16x8, int8, 8, std::abs(a))
UNOP_CASE(I8x16Neg, i8x16, int16, 16, base::NegateWithWraparound(a))
UNOP_CASE(I8x16Abs, i8x16, int16, 16, std::abs(a))
+ UNOP_CASE(I8x16Popcnt, i8x16, int16, 16,
+ base::bits::CountPopulation<uint8_t>(a))
#undef UNOP_CASE
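The new AixFpOpWorkaround wrappers exist because the AIX libm rounding routines can return a NaN with an unexpected payload or sign. A guessed reconstruction of the helper, consistent with the instantiations above but possibly differing from the real definition elsewhere in this file:

#include <cmath>
#include <limits>

template <typename T, T (*float_round_op)(T)>
T AixFpOpWorkaround(T input) {
#if V8_OS_AIX
  // Normalize NaN inputs up front instead of trusting AIX libm's output.
  if (std::isnan(input)) return std::numeric_limits<T>::quiet_NaN();
  return float_round_op(input);
#else
  return float_round_op(input);
#endif
}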
      // Cast to double in call to signbit is due to MSVC issue, see
@@ -2290,6 +2322,7 @@ class WasmInterpreterInternals {
BITMASK_CASE(I8x16BitMask, i8x16, int16, 16)
BITMASK_CASE(I16x8BitMask, i16x8, int8, 8)
BITMASK_CASE(I32x4BitMask, i32x4, int4, 4)
+ BITMASK_CASE(I64x2BitMask, i64x2, int2, 2)
#undef BITMASK_CASE
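Each BITMASK_CASE gathers the sign bit of every lane into the low bits of a scalar i32; the new I64x2BitMask case extends the same scheme to two 64-bit lanes. In plain, non-macro form (illustrative, not verbatim V8 code):

#include <cstdint>

int32_t I8x16BitMaskScalar(const int8_t lanes[16]) {
  int32_t res = 0;
  for (int i = 0; i < 16; ++i) {
    res |= (lanes[i] < 0 ? 1 : 0) << i;  // bit i = sign bit of lane i
  }
  return res;
}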
#define CMPOP_CASE(op, name, stype, out_stype, count, expr) \
@@ -2300,11 +2333,11 @@ class WasmInterpreterInternals {
stype s2 = v2.to_s128().to_##name(); \
out_stype res; \
for (size_t i = 0; i < count; ++i) { \
- auto a = s1.val[i]; \
- auto b = s2.val[i]; \
+ auto a = s1.val[LANE(i, s1)]; \
+ auto b = s2.val[LANE(i, s2)]; \
auto result = expr; \
possible_nondeterminism_ |= has_nondeterminism(result); \
- res.val[i] = result ? -1 : 0; \
+ res.val[LANE(i, res)] = result ? -1 : 0; \
} \
Push(WasmValue(Simd128(res))); \
return true; \
@@ -2322,19 +2355,6 @@ class WasmInterpreterInternals {
CMPOP_CASE(F32x4Lt, f32x4, float4, int4, 4, a < b)
CMPOP_CASE(F32x4Le, f32x4, float4, int4, 4, a <= b)
CMPOP_CASE(I64x2Eq, i64x2, int2, int2, 2, a == b)
- CMPOP_CASE(I64x2Ne, i64x2, int2, int2, 2, a != b)
- CMPOP_CASE(I64x2GtS, i64x2, int2, int2, 2, a > b)
- CMPOP_CASE(I64x2GeS, i64x2, int2, int2, 2, a >= b)
- CMPOP_CASE(I64x2LtS, i64x2, int2, int2, 2, a < b)
- CMPOP_CASE(I64x2LeS, i64x2, int2, int2, 2, a <= b)
- CMPOP_CASE(I64x2GtU, i64x2, int2, int2, 2,
- static_cast<uint64_t>(a) > static_cast<uint64_t>(b))
- CMPOP_CASE(I64x2GeU, i64x2, int2, int2, 2,
- static_cast<uint64_t>(a) >= static_cast<uint64_t>(b))
- CMPOP_CASE(I64x2LtU, i64x2, int2, int2, 2,
- static_cast<uint64_t>(a) < static_cast<uint64_t>(b))
- CMPOP_CASE(I64x2LeU, i64x2, int2, int2, 2,
- static_cast<uint64_t>(a) <= static_cast<uint64_t>(b))
CMPOP_CASE(I32x4Eq, i32x4, int4, int4, 4, a == b)
CMPOP_CASE(I32x4Ne, i32x4, int4, int4, 4, a != b)
CMPOP_CASE(I32x4GtS, i32x4, int4, int4, 4, a > b)
@@ -2378,16 +2398,17 @@ class WasmInterpreterInternals {
CMPOP_CASE(I8x16LeU, i8x16, int16, int16, 16,
static_cast<uint8_t>(a) <= static_cast<uint8_t>(b))
#undef CMPOP_CASE
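Every CMPOP_CASE writes -1 (all bits set) or 0 per lane, so the result doubles as a selection mask for later ops such as S128Select. The deleted I64x2 Ne/Gt/Ge/Lt/Le cases presumably mirror opcodes dropped from the SIMD proposal at this point, leaving only I64x2Eq handled here. The per-lane expansion, in plain form (illustrative only):

#include <cstdint>

void I32x4EqScalar(const int32_t a[4], const int32_t b[4], int32_t out[4]) {
  for (int i = 0; i < 4; ++i) out[i] = (a[i] == b[i]) ? -1 : 0;
}
// {1, 2, 3, 4} vs. {1, 0, 3, 0} yields the mask {-1, 0, -1, 0}.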
-#define REPLACE_LANE_CASE(format, name, stype, ctype) \
- case kExpr##format##ReplaceLane: { \
- SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + *len)); \
- *len += 1; \
- WasmValue new_val = Pop(); \
- WasmValue simd_val = Pop(); \
- stype s = simd_val.to_s128().to_##name(); \
- s.val[LANE(imm.lane, s)] = new_val.to<ctype>(); \
- Push(WasmValue(Simd128(s))); \
- return true; \
+#define REPLACE_LANE_CASE(format, name, stype, ctype) \
+ case kExpr##format##ReplaceLane: { \
+ SimdLaneImmediate<Decoder::kNoValidation> imm(decoder, \
+ code->at(pc + *len)); \
+ *len += 1; \
+ WasmValue new_val = Pop(); \
+ WasmValue simd_val = Pop(); \
+ stype s = simd_val.to_s128().to_##name(); \
+ s.val[LANE(imm.lane, s)] = new_val.to<ctype>(); \
+ Push(WasmValue(Simd128(s))); \
+ return true; \
}
REPLACE_LANE_CASE(F64x2, f64x2, float2, double)
REPLACE_LANE_CASE(F32x4, f32x4, float4, float)
@@ -2411,8 +2432,8 @@ class WasmInterpreterInternals {
stype s = v.to_s128().to_##name(); \
stype res; \
for (size_t i = 0; i < count; ++i) { \
- auto a = s.val[i]; \
- res.val[i] = expr; \
+ auto a = s.val[LANE(i, s)]; \
+ res.val[LANE(i, res)] = expr; \
} \
Push(WasmValue(Simd128(res))); \
return true; \
@@ -2437,6 +2458,42 @@ class WasmInterpreterInternals {
SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> (shift % 8))
SHIFT_CASE(I8x16ShrU, i8x16, int16, 16,
static_cast<uint8_t>(a) >> (shift % 8))
+ case kExprI16x8ExtMulLowI8x16S: {
+ return DoSimdExtMul<int16, int8, int8_t, int16_t>(0);
+ }
+ case kExprI16x8ExtMulHighI8x16S: {
+ return DoSimdExtMul<int16, int8, int8_t, int16_t>(8);
+ }
+ case kExprI16x8ExtMulLowI8x16U: {
+ return DoSimdExtMul<int16, int8, uint8_t, uint16_t>(0);
+ }
+ case kExprI16x8ExtMulHighI8x16U: {
+ return DoSimdExtMul<int16, int8, uint8_t, uint16_t>(8);
+ }
+ case kExprI32x4ExtMulLowI16x8S: {
+ return DoSimdExtMul<int8, int4, int16_t, int32_t>(0);
+ }
+ case kExprI32x4ExtMulHighI16x8S: {
+ return DoSimdExtMul<int8, int4, int16_t, int32_t>(4);
+ }
+ case kExprI32x4ExtMulLowI16x8U: {
+ return DoSimdExtMul<int8, int4, uint16_t, uint32_t>(0);
+ }
+ case kExprI32x4ExtMulHighI16x8U: {
+ return DoSimdExtMul<int8, int4, uint16_t, uint32_t>(4);
+ }
+ case kExprI64x2ExtMulLowI32x4S: {
+ return DoSimdExtMul<int4, int2, int32_t, int64_t>(0);
+ }
+ case kExprI64x2ExtMulHighI32x4S: {
+ return DoSimdExtMul<int4, int2, int32_t, int64_t>(2);
+ }
+ case kExprI64x2ExtMulLowI32x4U: {
+ return DoSimdExtMul<int4, int2, uint32_t, uint64_t>(0);
+ }
+ case kExprI64x2ExtMulHighI32x4U: {
+ return DoSimdExtMul<int4, int2, uint32_t, uint64_t>(2);
+ }
#undef SHIFT_CASE
#define CONVERT_CASE(op, src_type, name, dst_type, count, start_index, ctype, \
expr) \
@@ -2467,6 +2524,13 @@ class WasmInterpreterInternals {
? 0
                         : a < 0 ? 0 : a > kMaxUInt32 ? kMaxUInt32
: static_cast<uint32_t>(a))
+ CONVERT_CASE(I64x2SConvertI32x4Low, int4, i32x4, int2, 2, 0, int32_t, a)
+ CONVERT_CASE(I64x2SConvertI32x4High, int4, i32x4, int2, 2, 2, int32_t,
+ a)
+ CONVERT_CASE(I64x2UConvertI32x4Low, int4, i32x4, int2, 2, 0, uint32_t,
+ a)
+ CONVERT_CASE(I64x2UConvertI32x4High, int4, i32x4, int2, 2, 2, uint32_t,
+ a)
CONVERT_CASE(I32x4SConvertI16x8High, int8, i16x8, int4, 4, 4, int16_t,
a)
CONVERT_CASE(I32x4UConvertI16x8High, int8, i16x8, int4, 4, 4, uint16_t,
@@ -2511,7 +2575,9 @@ class WasmInterpreterInternals {
int4 v1 = Pop().to_s128().to_i32x4();
int4 res;
for (size_t i = 0; i < 4; ++i) {
- res.val[i] = v2.val[i] ^ ((v1.val[i] ^ v2.val[i]) & bool_val.val[i]);
+ res.val[LANE(i, res)] = v2.val[LANE(i, v2)] ^
+ ((v1.val[LANE(i, v1)] ^ v2.val[LANE(i, v2)]) &
+ bool_val.val[LANE(i, bool_val)]);
}
Push(WasmValue(Simd128(res)));
return true;
@@ -2526,10 +2592,10 @@ class WasmInterpreterInternals {
for (size_t i = 0; i < count / 2; ++i) { \
auto result1 = s1.val[LANE(i * 2, s1)] + s1.val[LANE(i * 2 + 1, s1)]; \
possible_nondeterminism_ |= has_nondeterminism(result1); \
- res.val[LANE(i, s1)] = result1; \
- auto result2 = s2.val[LANE(i * 2, s1)] + s2.val[LANE(i * 2 + 1, s1)]; \
+ res.val[LANE(i, res)] = result1; \
+ auto result2 = s2.val[LANE(i * 2, s2)] + s2.val[LANE(i * 2 + 1, s2)]; \
possible_nondeterminism_ |= has_nondeterminism(result2); \
- res.val[LANE(i + count / 2, s1)] = result2; \
+ res.val[LANE(i + count / 2, res)] = result2; \
} \
Push(WasmValue(Simd128(res))); \
return true; \
@@ -2552,8 +2618,8 @@ class WasmInterpreterInternals {
return true;
}
case kExprS128Const: {
- Simd128Immediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc + *len));
+ Simd128Immediate<Decoder::kNoValidation> imm(decoder,
+ code->at(pc + *len));
int16 res;
for (size_t i = 0; i < kSimd128Size; ++i) {
res.val[LANE(i, res)] = imm.value[i];
@@ -2567,35 +2633,35 @@ class WasmInterpreterInternals {
int16 v1 = Pop().to_s128().to_i8x16();
int16 res;
for (size_t i = 0; i < kSimd128Size; ++i) {
- int lane = v2.val[LANE(i, v1)];
- res.val[LANE(i, v1)] =
+ int lane = v2.val[LANE(i, v2)];
+ res.val[LANE(i, res)] =
lane < kSimd128Size && lane >= 0 ? v1.val[LANE(lane, v1)] : 0;
}
Push(WasmValue(Simd128(res)));
return true;
}
case kExprI8x16Shuffle: {
- Simd128Immediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc + *len));
+ Simd128Immediate<Decoder::kNoValidation> imm(decoder,
+ code->at(pc + *len));
*len += 16;
int16 v2 = Pop().to_s128().to_i8x16();
int16 v1 = Pop().to_s128().to_i8x16();
int16 res;
for (size_t i = 0; i < kSimd128Size; ++i) {
int lane = imm.value[i];
- res.val[LANE(i, v1)] = lane < kSimd128Size
- ? v1.val[LANE(lane, v1)]
- : v2.val[LANE(lane - kSimd128Size, v1)];
+ res.val[LANE(i, res)] = lane < kSimd128Size
+ ? v1.val[LANE(lane, v1)]
+ : v2.val[LANE(lane - kSimd128Size, v2)];
}
Push(WasmValue(Simd128(res)));
return true;
}
- case kExprV64x2AnyTrue:
case kExprV32x4AnyTrue:
case kExprV16x8AnyTrue:
case kExprV8x16AnyTrue: {
int4 s = Pop().to_s128().to_i32x4();
- bool res = s.val[0] | s.val[1] | s.val[2] | s.val[3];
+ bool res = s.val[LANE(0, s)] | s.val[LANE(1, s)] | s.val[LANE(2, s)] |
+ s.val[LANE(3, s)];
Push(WasmValue((res)));
return true;
}
@@ -2604,27 +2670,27 @@ class WasmInterpreterInternals {
stype s = Pop().to_s128().to_##name(); \
bool res = true; \
for (size_t i = 0; i < count; ++i) { \
- res = res & static_cast<bool>(s.val[i]); \
+ res = res & static_cast<bool>(s.val[LANE(i, s)]); \
} \
Push(WasmValue(res)); \
return true; \
}
- REDUCTION_CASE(V64x2AllTrue, i64x2, int2, 2, &)
REDUCTION_CASE(V32x4AllTrue, i32x4, int4, 4, &)
REDUCTION_CASE(V16x8AllTrue, i16x8, int8, 8, &)
REDUCTION_CASE(V8x16AllTrue, i8x16, int16, 16, &)
#undef REDUCTION_CASE
-#define QFM_CASE(op, name, stype, count, operation) \
- case kExpr##op: { \
- stype c = Pop().to_s128().to_##name(); \
- stype b = Pop().to_s128().to_##name(); \
- stype a = Pop().to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count; i++) { \
- res.val[i] = a.val[i] operation(b.val[i] * c.val[i]); \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
+#define QFM_CASE(op, name, stype, count, operation) \
+ case kExpr##op: { \
+ stype c = Pop().to_s128().to_##name(); \
+ stype b = Pop().to_s128().to_##name(); \
+ stype a = Pop().to_s128().to_##name(); \
+ stype res; \
+ for (size_t i = 0; i < count; i++) { \
+ res.val[LANE(i, res)] = \
+ a.val[LANE(i, a)] operation(b.val[LANE(i, b)] * c.val[LANE(i, c)]); \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
}
QFM_CASE(F32x4Qfma, f32x4, float4, 4, +)
QFM_CASE(F32x4Qfms, f32x4, float4, 4, -)
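QFM_CASE implements the quasi-fused multiply-add family: per lane, Qfma computes a + (b * c) and Qfms computes a - (b * c), as an unfused multiply followed by a separate add. Reduced to scalars (illustrative):

// Per-lane semantics of the two instantiations above.
float Qfma(float a, float b, float c) { return a + (b * c); }
float Qfms(float a, float b, float c) { return a - (b * c); }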
@@ -2671,14 +2737,70 @@ class WasmInterpreterInternals {
return DoSimdLoadExtend<int2, uint64_t, uint32_t>(
decoder, code, pc, len, MachineRepresentation::kWord64);
}
- case kExprS128LoadMem32Zero: {
+ case kExprS128Load32Zero: {
return DoSimdLoadZeroExtend<int4, uint32_t>(
decoder, code, pc, len, MachineRepresentation::kWord32);
}
- case kExprS128LoadMem64Zero: {
+ case kExprS128Load64Zero: {
return DoSimdLoadZeroExtend<int2, uint64_t>(
decoder, code, pc, len, MachineRepresentation::kWord64);
}
+ case kExprS128Load8Lane: {
+ return DoSimdLoadLane<int16, int32_t, int8_t>(
+ decoder, code, pc, len, MachineRepresentation::kWord8);
+ }
+ case kExprS128Load16Lane: {
+ return DoSimdLoadLane<int8, int32_t, int16_t>(
+ decoder, code, pc, len, MachineRepresentation::kWord16);
+ }
+ case kExprS128Load32Lane: {
+ return DoSimdLoadLane<int4, int32_t, int32_t>(
+ decoder, code, pc, len, MachineRepresentation::kWord32);
+ }
+ case kExprS128Load64Lane: {
+ return DoSimdLoadLane<int2, int64_t, int64_t>(
+ decoder, code, pc, len, MachineRepresentation::kWord64);
+ }
+ case kExprS128Store8Lane: {
+ return DoSimdStoreLane<int16, int32_t, int8_t>(
+ decoder, code, pc, len, MachineRepresentation::kWord8);
+ }
+ case kExprS128Store16Lane: {
+ return DoSimdStoreLane<int8, int32_t, int16_t>(
+ decoder, code, pc, len, MachineRepresentation::kWord16);
+ }
+ case kExprS128Store32Lane: {
+ return DoSimdStoreLane<int4, int32_t, int32_t>(
+ decoder, code, pc, len, MachineRepresentation::kWord32);
+ }
+ case kExprS128Store64Lane: {
+ return DoSimdStoreLane<int2, int64_t, int64_t>(
+ decoder, code, pc, len, MachineRepresentation::kWord64);
+ }
+ case kExprI8x16SignSelect: {
+ return DoSimdSignSelect<int16>();
+ }
+ case kExprI16x8SignSelect: {
+ return DoSimdSignSelect<int8>();
+ }
+ case kExprI32x4SignSelect: {
+ return DoSimdSignSelect<int4>();
+ }
+ case kExprI64x2SignSelect: {
+ return DoSimdSignSelect<int2>();
+ }
+ case kExprI32x4ExtAddPairwiseI16x8S: {
+ return DoSimdExtAddPairwise<int4, int8, int32_t, int16_t>();
+ }
+ case kExprI32x4ExtAddPairwiseI16x8U: {
+ return DoSimdExtAddPairwise<int4, int8, uint32_t, uint16_t>();
+ }
+ case kExprI16x8ExtAddPairwiseI8x16S: {
+ return DoSimdExtAddPairwise<int8, int16, int16_t, int8_t>();
+ }
+ case kExprI16x8ExtAddPairwiseI8x16U: {
+ return DoSimdExtAddPairwise<int8, int16, uint16_t, uint8_t>();
+ }
default:
return false;
}
@@ -2739,6 +2861,96 @@ class WasmInterpreterInternals {
return true;
}
+ template <typename s_type, typename result_type, typename load_type>
+ bool DoSimdLoadLane(Decoder* decoder, InterpreterCode* code, pc_t pc,
+ int* const len, MachineRepresentation rep) {
+ s_type value = Pop().to_s128().to<s_type>();
+ if (!ExecuteLoad<result_type, load_type>(decoder, code, pc, len, rep,
+ /*prefix_len=*/*len)) {
+ return false;
+ }
+
+ SimdLaneImmediate<Decoder::kNoValidation> lane_imm(decoder,
+ code->at(pc + *len));
+ *len += lane_imm.length;
+ result_type loaded = Pop().to<result_type>();
+ value.val[LANE(lane_imm.lane, value)] = loaded;
+ Push(WasmValue(Simd128(value)));
+ return true;
+ }
+
+ template <typename s_type, typename result_type, typename load_type>
+ bool DoSimdStoreLane(Decoder* decoder, InterpreterCode* code, pc_t pc,
+ int* const len, MachineRepresentation rep) {
+ // Extract a single lane, push it onto the stack, then store the lane.
+ s_type value = Pop().to_s128().to<s_type>();
+
+ MemoryAccessImmediate<Decoder::kNoValidation> imm(
+ decoder, code->at(pc + *len), sizeof(load_type));
+
+ SimdLaneImmediate<Decoder::kNoValidation> lane_imm(
+ decoder, code->at(pc + *len + imm.length));
+
+ Push(WasmValue(value.val[LANE(lane_imm.lane, value)]));
+
+    // ExecuteStore will update *len itself, so pass it unchanged here.
+ if (!ExecuteStore<result_type, load_type>(decoder, code, pc, len, rep,
+ /*prefix_len=*/*len)) {
+ return false;
+ }
+
+ *len += lane_imm.length;
+ return true;
+ }
+
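DoSimdStoreLane only adds the lane immediate's length to *len itself because ExecuteStore already advances *len past the memarg. The decoding order above follows the byte layout of the instruction:

// Immediate layout consumed above (illustrative):
//   [SIMD prefix][sub-opcode][memarg: align, offset][lane index]
// ExecuteStore advances *len past the memarg; the trailing lane immediate is
// decoded up front (at pc + *len + imm.length) and accounted for last.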
+ template <typename s_type, typename d_type, typename narrow, typename wide>
+ bool DoSimdExtMul(unsigned start) {
+ WasmValue v2 = Pop();
+ WasmValue v1 = Pop();
+ auto s1 = v1.to_s128().to<s_type>();
+ auto s2 = v2.to_s128().to<s_type>();
+ auto end = start + (kSimd128Size / sizeof(wide));
+ d_type res;
+ for (size_t dst = 0; start < end; ++start, ++dst) {
+ // Need static_cast for unsigned narrow types.
+ res.val[LANE(dst, res)] =
+ MultiplyLong<wide>(static_cast<narrow>(s1.val[LANE(start, s1)]),
+ static_cast<narrow>(s2.val[LANE(start, s2)]));
+ }
+ Push(WasmValue(Simd128(res)));
+ return true;
+ }
+
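DoSimdExtMul widens the selected half of each input vector and multiplies in the wide type, so the product cannot overflow the narrow lanes. MultiplyLong is not shown in this patch; its assumed shape, which may differ from the real helper:

template <typename Wide, typename Narrow>
Wide MultiplyLong(Narrow a, Narrow b) {
  // Widen first, then multiply: e.g. int8_t 100 * 100 == 10000 as int16_t
  // instead of an overflowed 8-bit product.
  return static_cast<Wide>(a) * static_cast<Wide>(b);
}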
+ template <typename s_type>
+ bool DoSimdSignSelect() {
+ constexpr int lanes = kSimd128Size / sizeof(s_type::val[0]);
+ auto c = Pop().to_s128().to<s_type>();
+ auto v2 = Pop().to_s128().to<s_type>();
+ auto v1 = Pop().to_s128().to<s_type>();
+ s_type res;
+ for (int i = 0; i < lanes; ++i) {
+ res.val[LANE(i, res)] =
+ c.val[LANE(i, c)] < 0 ? v1.val[LANE(i, v1)] : v2.val[LANE(i, v2)];
+ }
+ Push(WasmValue(Simd128(res)));
+ return true;
+ }
+
+ template <typename DstSimdType, typename SrcSimdType, typename Wide,
+ typename Narrow>
+ bool DoSimdExtAddPairwise() {
+ constexpr int lanes = kSimd128Size / sizeof(DstSimdType::val[0]);
+ auto v = Pop().to_s128().to<SrcSimdType>();
+ DstSimdType res;
+ for (int i = 0; i < lanes; ++i) {
+ res.val[LANE(i, res)] =
+ AddLong<Wide>(static_cast<Narrow>(v.val[LANE(i * 2, v)]),
+ static_cast<Narrow>(v.val[LANE(i * 2 + 1, v)]));
+ }
+ Push(WasmValue(Simd128(res)));
+ return true;
+ }
+
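DoSimdExtAddPairwise likewise widens before adding, summing each adjacent lane pair of the source into one lane of the destination. AddLong is also not part of this patch; its assumed shape:

template <typename Wide, typename Narrow>
Wide AddLong(Narrow a, Narrow b) {
  // Widen first so the pairwise sum of two narrow lanes cannot wrap.
  return static_cast<Wide>(a) + static_cast<Wide>(b);
}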
   // Check if our control stack (frames_) exceeds the limit. Trigger a stack
   // overflow if it does, unwinding the current frame.
// Returns true if execution can continue, false if the stack was fully
@@ -3000,13 +3212,12 @@ class WasmInterpreterInternals {
byte orig = code->start[pc];
WasmOpcode opcode = static_cast<WasmOpcode>(orig);
- // If the opcode is a prefix, read the suffix and add the extra length to
- // 'len'.
if (WasmOpcodes::IsPrefixOpcode(opcode)) {
uint32_t prefixed_opcode_length = 0;
- opcode = decoder.read_prefixed_opcode<Decoder::kNoValidate>(
+ opcode = decoder.read_prefixed_opcode<Decoder::kNoValidation>(
code->at(pc), &prefixed_opcode_length);
- len += prefixed_opcode_length;
+      // read_prefixed_opcode includes the prefix byte, so overwrite len.
+ len = prefixed_opcode_length;
}
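SIMD and other prefixed instructions encode as a one-byte prefix followed by an LEB128 sub-opcode, and read_prefixed_opcode now reports the combined length of both. Assigning to len therefore stops the prefix byte from being counted twice:

// Illustrative encoding: i8x16.shuffle is [0xfd][LEB128 sub-opcode][imms...].
// For a one-byte sub-opcode, read_prefixed_opcode reports length 2; len = 2
// advances pc past exactly the bytes consumed, whereas adding 2 on top of
// len's initial value of 1 would have skipped one byte too many.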
// If max is 0, break. If max is positive (a limit is set), decrement it.
@@ -3035,13 +3246,13 @@ class WasmInterpreterInternals {
case kExprBlock:
case kExprLoop:
case kExprTry: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(
+ BlockTypeImmediate<Decoder::kNoValidation> imm(
WasmFeatures::All(), &decoder, code->at(pc + 1));
len = 1 + imm.length;
break;
}
case kExprIf: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(
+ BlockTypeImmediate<Decoder::kNoValidation> imm(
WasmFeatures::All(), &decoder, code->at(pc + 1));
WasmValue cond = Pop();
bool is_true = cond.to<uint32_t>() != 0;
@@ -3062,8 +3273,8 @@ class WasmInterpreterInternals {
break;
}
case kExprThrow: {
- ExceptionIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc + 1));
+ ExceptionIndexImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
CommitPc(pc); // Needed for local unwinding.
const WasmException* exception = &module()->exceptions[imm.index];
if (!DoThrowException(exception, imm.index)) return;
@@ -3082,7 +3293,7 @@ class WasmInterpreterInternals {
continue; // Do not bump pc.
}
case kExprBrOnExn: {
- BranchOnExceptionImmediate<Decoder::kNoValidate> imm(
+ BranchOnExceptionImmediate<Decoder::kNoValidation> imm(
&decoder, code->at(pc + 1));
HandleScope handle_scope(isolate_); // Avoid leaking handles.
WasmValue ex = Pop();
@@ -3101,7 +3312,7 @@ class WasmInterpreterInternals {
break;
}
case kExprSelectWithType: {
- SelectTypeImmediate<Decoder::kNoValidate> imm(
+ SelectTypeImmediate<Decoder::kNoValidation> imm(
WasmFeatures::All(), &decoder, code->at(pc + 1));
len = 1 + imm.length;
V8_FALLTHROUGH;
@@ -3115,15 +3326,15 @@ class WasmInterpreterInternals {
break;
}
case kExprBr: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc + 1));
+ BranchDepthImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
len = DoBreak(code, pc, imm.depth);
TRACE(" br => @%zu\n", pc + len);
break;
}
case kExprBrIf: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc + 1));
+ BranchDepthImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
WasmValue cond = Pop();
bool is_true = cond.to<uint32_t>() != 0;
if (is_true) {
@@ -3136,9 +3347,9 @@ class WasmInterpreterInternals {
break;
}
case kExprBrTable: {
- BranchTableImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc + 1));
- BranchTableIterator<Decoder::kNoValidate> iterator(&decoder, imm);
+ BranchTableImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
+ BranchTableIterator<Decoder::kNoValidation> iterator(&decoder, imm);
uint32_t key = Pop().to<uint32_t>();
uint32_t depth = 0;
if (key >= imm.table_count) key = imm.table_count;
@@ -3162,39 +3373,43 @@ class WasmInterpreterInternals {
break;
}
case kExprI32Const: {
- ImmI32Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc + 1));
+ ImmI32Immediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
Push(WasmValue(imm.value));
len = 1 + imm.length;
break;
}
case kExprI64Const: {
- ImmI64Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc + 1));
+ ImmI64Immediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
Push(WasmValue(imm.value));
len = 1 + imm.length;
break;
}
case kExprF32Const: {
- ImmF32Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc + 1));
+ ImmF32Immediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
Push(WasmValue(imm.value));
len = 1 + imm.length;
break;
}
case kExprF64Const: {
- ImmF64Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc + 1));
+ ImmF64Immediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
Push(WasmValue(imm.value));
len = 1 + imm.length;
break;
}
case kExprRefNull: {
- HeapTypeImmediate<Decoder::kNoValidate> imm(
+ HeapTypeImmediate<Decoder::kNoValidation> imm(
WasmFeatures::All(), &decoder, code->at(pc + 1));
len = 1 + imm.length;
Push(WasmValue(isolate_->factory()->null_value()));
break;
}
case kExprRefFunc: {
- FunctionIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc + 1));
+ FunctionIndexImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
HandleScope handle_scope(isolate_); // Avoid leaking handles.
Handle<WasmExternalFunction> function =
@@ -3205,16 +3420,16 @@ class WasmInterpreterInternals {
break;
}
case kExprLocalGet: {
- LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc + 1));
+ LocalIndexImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
HandleScope handle_scope(isolate_); // Avoid leaking handles.
Push(GetStackValue(frames_.back().sp + imm.index));
len = 1 + imm.length;
break;
}
case kExprLocalSet: {
- LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc + 1));
+ LocalIndexImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
HandleScope handle_scope(isolate_); // Avoid leaking handles.
WasmValue val = Pop();
SetStackValue(frames_.back().sp + imm.index, val);
@@ -3222,8 +3437,8 @@ class WasmInterpreterInternals {
break;
}
case kExprLocalTee: {
- LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc + 1));
+ LocalIndexImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
HandleScope handle_scope(isolate_); // Avoid leaking handles.
WasmValue val = Pop();
SetStackValue(frames_.back().sp + imm.index, val);
@@ -3236,8 +3451,8 @@ class WasmInterpreterInternals {
break;
}
case kExprCallFunction: {
- CallFunctionImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc + 1));
+ CallFunctionImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
InterpreterCode* target = codemap_.GetCode(imm.index);
CHECK(!target->function->imported);
// Execute an internal call.
@@ -3247,7 +3462,7 @@ class WasmInterpreterInternals {
} break;
case kExprCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidate> imm(
+ CallIndirectImmediate<Decoder::kNoValidation> imm(
WasmFeatures::All(), &decoder, code->at(pc + 1));
uint32_t entry_index = Pop().to<uint32_t>();
CommitPc(pc); // TODO(wasm): Be more disciplined about committing PC.
@@ -3261,7 +3476,7 @@ class WasmInterpreterInternals {
code = result.interpreter_code;
continue; // Do not bump pc.
case CallResult::INVALID_FUNC:
- return DoTrap(kTrapFuncInvalid, pc);
+ return DoTrap(kTrapTableOutOfBounds, pc);
case CallResult::SIGNATURE_MISMATCH:
return DoTrap(kTrapFuncSigMismatch, pc);
}
@@ -3271,8 +3486,8 @@ class WasmInterpreterInternals {
// Make return calls more expensive, so that return call recursions
// don't cause a timeout.
if (max > 0) max = std::max(0, max - 100);
- CallFunctionImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc + 1));
+ CallFunctionImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
InterpreterCode* target = codemap_.GetCode(imm.index);
CHECK(!target->function->imported);
@@ -3286,7 +3501,7 @@ class WasmInterpreterInternals {
// Make return calls more expensive, so that return call recursions
// don't cause a timeout.
if (max > 0) max = std::max(0, max - 100);
- CallIndirectImmediate<Decoder::kNoValidate> imm(
+ CallIndirectImmediate<Decoder::kNoValidation> imm(
WasmFeatures::All(), &decoder, code->at(pc + 1));
uint32_t entry_index = Pop().to<uint32_t>();
CommitPc(pc); // TODO(wasm): Be more disciplined about committing PC.
@@ -3307,15 +3522,15 @@ class WasmInterpreterInternals {
continue; // Do not bump pc.
}
case CallResult::INVALID_FUNC:
- return DoTrap(kTrapFuncInvalid, pc);
+ return DoTrap(kTrapTableOutOfBounds, pc);
case CallResult::SIGNATURE_MISMATCH:
return DoTrap(kTrapFuncSigMismatch, pc);
}
} break;
case kExprGlobalGet: {
- GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc + 1));
+ GlobalIndexImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
HandleScope handle_scope(isolate_);
Push(WasmInstanceObject::GetGlobalValue(
instance_object_, module()->globals[imm.index]));
@@ -3323,8 +3538,8 @@ class WasmInterpreterInternals {
break;
}
case kExprGlobalSet: {
- GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc + 1));
+ GlobalIndexImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
auto& global = module()->globals[imm.index];
switch (global.type.kind()) {
#define CASE_TYPE(valuetype, ctype) \
@@ -3361,8 +3576,8 @@ class WasmInterpreterInternals {
break;
}
case kExprTableGet: {
- TableIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc + 1));
+ TableIndexImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
HandleScope handle_scope(isolate_);
auto table = handle(
WasmTableObject::cast(instance_object_->tables().get(imm.index)),
@@ -3379,8 +3594,8 @@ class WasmInterpreterInternals {
break;
}
case kExprTableSet: {
- TableIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc + 1));
+ TableIndexImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
HandleScope handle_scope(isolate_);
auto table = handle(
WasmTableObject::cast(instance_object_->tables().get(imm.index)),
@@ -3482,8 +3697,8 @@ class WasmInterpreterInternals {
ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
#undef ASMJS_STORE_CASE
case kExprMemoryGrow: {
- MemoryIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc + 1));
+ MemoryIndexImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
uint32_t delta_pages = Pop().to<uint32_t>();
HandleScope handle_scope(isolate_); // Avoid leaking handles.
Handle<WasmMemoryObject> memory(instance_object_->memory_object(),
@@ -3498,8 +3713,8 @@ class WasmInterpreterInternals {
break;
}
case kExprMemorySize: {
- MemoryIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc + 1));
+ MemoryIndexImmediate<Decoder::kNoValidation> imm(&decoder,
+ code->at(pc + 1));
Push(WasmValue(static_cast<uint32_t>(instance_object_->memory_size() /
kWasmPageSize)));
len = 1 + imm.length;
@@ -3680,7 +3895,8 @@ class WasmInterpreterInternals {
size_t old_size = stack_limit_ - stack_.get();
size_t requested_size =
base::bits::RoundUpToPowerOfTwo64((sp_ - stack_.get()) + size);
- size_t new_size = Max(size_t{8}, Max(2 * old_size, requested_size));
+ size_t new_size =
+ std::max(size_t{8}, std::max(2 * old_size, requested_size));
std::unique_ptr<StackValue[]> new_stack(new StackValue[new_size]);
if (old_size > 0) {
memcpy(new_stack.get(), stack_.get(), old_size * sizeof(*sp_));
@@ -3774,7 +3990,7 @@ class WasmInterpreterInternals {
CallResult CallIndirectFunction(uint32_t table_index, uint32_t entry_index,
uint32_t sig_index) {
HandleScope handle_scope(isolate_); // Avoid leaking handles.
- uint32_t expected_sig_id = module()->signature_ids[sig_index];
+ uint32_t expected_sig_id = module()->canonicalized_type_ids[sig_index];
DCHECK_EQ(expected_sig_id,
module()->signature_map.Find(*module()->signature(sig_index)));
// Bounds check against table size.
diff --git a/deps/v8/test/common/wasm/wasm-interpreter.h b/deps/v8/test/common/wasm/wasm-interpreter.h
index fd541d1a47..4df373df46 100644
--- a/deps/v8/test/common/wasm/wasm-interpreter.h
+++ b/deps/v8/test/common/wasm/wasm-interpreter.h
@@ -44,6 +44,9 @@ using ControlTransferMap = ZoneMap<pc_t, ControlTransferEntry>;
// An interpreter capable of executing WebAssembly.
class WasmInterpreter {
public:
+ WasmInterpreter(const WasmInterpreter&) = delete;
+ WasmInterpreter& operator=(const WasmInterpreter&) = delete;
+
// State machine for the interpreter:
// +----------------------------------------------------------+
// | +--------Run()/Step()---------+ |
@@ -104,8 +107,6 @@ class WasmInterpreter {
private:
Zone zone_;
std::unique_ptr<WasmInterpreterInternals> internals_;
-
- DISALLOW_COPY_AND_ASSIGN(WasmInterpreter);
};
} // namespace wasm
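Deleting the copy constructor and copy assignment in the public section replaces the DISALLOW_COPY_AND_ASSIGN macro removed below; V8's macro (quoted from memory, so treat as approximate) expanded to exactly these two deletions:

#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
  TypeName(const TypeName&) = delete;      \
  TypeName& operator=(const TypeName&) = delete

Spelling the deletions out keeps them visible at the top of the class instead of buried in the private section.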
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index f888922410..4f998a4bc5 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -206,8 +206,10 @@
// Misc expressions.
//------------------------------------------------------------------------------
#define WASM_STMTS(...) __VA_ARGS__
-#define WASM_ZERO kExprI32Const, 0
-#define WASM_ONE kExprI32Const, 1
+#define WASM_ZERO WASM_I32V_1(0)
+#define WASM_ONE WASM_I32V_1(1)
+#define WASM_ZERO64 WASM_I64V_1(0)
+#define WASM_ONE64 WASM_I64V_1(1)
#define I32V_MIN(length) -(1 << (6 + (7 * ((length)-1))))
#define I32V_MAX(length) ((1 << (6 + (7 * ((length)-1)))) - 1)
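WASM_ZERO and WASM_ONE now go through WASM_I32V_1, the one-byte signed-LEB128 form, and 64-bit counterparts are added. The I32V_MIN/I32V_MAX formulas give the value range representable per encoded length:

// From the formulas above:
//   I32V_MIN(1) == -(1 << 6)  == -64,   I32V_MAX(1) == (1 << 6) - 1  == 63
//   I32V_MIN(2) == -(1 << 13) == -8192, I32V_MAX(2) == (1 << 13) - 1 == 8191
// 0 and 1 both fit in one byte, so the new macros still emit kExprI32Const
// followed by a single immediate byte.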
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index e2d8cde531..1a62eea0dd 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -14,6 +14,7 @@
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
#include "test/common/wasm/wasm-interpreter.h"
@@ -137,6 +138,13 @@ WasmInterpretationResult InterpretWasmModule(
v8::internal::HandleScope scope(isolate);
const WasmFunction* func = &instance->module()->functions[function_index];
+ CHECK(func->exported);
+ // This would normally be handled by export wrappers.
+ if (!IsJSCompatibleSignature(func->sig, instance->module(),
+ WasmFeatures::FromIsolate(isolate))) {
+ return WasmInterpretationResult::Trapped(false);
+ }
+
WasmInterpreter interpreter{
isolate, instance->module(),
ModuleWireBytes{instance->module_object().native_module()->wire_bytes()},
diff --git a/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js
index 82c7b1082e..c515c5d589 100644
--- a/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js
+++ b/deps/v8/test/debugger/debug/side-effect/debug-evaluate-no-side-effect-builtins.js
@@ -45,6 +45,8 @@ function listener(event, exec_state, event_data, data) {
success(false, `Object.isFrozen({})`);
success(false, `Object.isSealed({})`);
success([1, 2], `Object.values({a:1, b:2})`);
+ success(["a", 1, "b", 2], `Object.entries({a:1, b:2}).flat()`);
+ success(["a", "b"], `Object.keys({a:1, b:2})`);
fail(`Object.assign({}, {})`);
fail(`Object.defineProperties({}, [{p:{value:3}}])`);
diff --git a/deps/v8/test/debugger/test-api.js b/deps/v8/test/debugger/test-api.js
index 73f16e0acc..dc9cd10411 100644
--- a/deps/v8/test/debugger/test-api.js
+++ b/deps/v8/test/debugger/test-api.js
@@ -593,7 +593,7 @@ class DebugWrapper {
const column = frame.location.columnNumber;
const loc = %ScriptLocationFromLine2(scriptid, line, column, 0);
const func = { name : () => frame.functionName };
- const index = JSON.parse(frame.callFrameId).ordinal;
+ const index = +frame.callFrameId.split(".")[2];
function allScopes() {
const scopes = [];
diff --git a/deps/v8/test/debugging/wasm/gdb-server/DIR_METADATA b/deps/v8/test/debugging/wasm/gdb-server/DIR_METADATA
new file mode 100644
index 0000000000..3b428d9660
--- /dev/null
+++ b/deps/v8/test/debugging/wasm/gdb-server/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>WebAssembly"
+}
\ No newline at end of file
diff --git a/deps/v8/test/debugging/wasm/gdb-server/OWNERS b/deps/v8/test/debugging/wasm/gdb-server/OWNERS
index 4b8c1919e8..e2c94e8d24 100644
--- a/deps/v8/test/debugging/wasm/gdb-server/OWNERS
+++ b/deps/v8/test/debugging/wasm/gdb-server/OWNERS
@@ -1,3 +1 @@
paolosev@microsoft.com
-
-# COMPONENT: Blink>JavaScript>WebAssembly
\ No newline at end of file
diff --git a/deps/v8/test/fuzzer/BUILD.gn b/deps/v8/test/fuzzer/BUILD.gn
index 5b768ea88c..7c837464c5 100644
--- a/deps/v8/test/fuzzer/BUILD.gn
+++ b/deps/v8/test/fuzzer/BUILD.gn
@@ -6,13 +6,14 @@ group("v8_fuzzer") {
testonly = true
data_deps = [
- "../../tools:v8_testrunner",
"../..:v8_fuzzers",
+ "../../tools:v8_testrunner",
]
data = [
"./fuzzer.status",
"./testcfg.py",
+ "./inspector/",
"./json/",
"./parser/",
"./regexp/",
diff --git a/deps/v8/test/fuzzer/inspector-fuzzer.cc b/deps/v8/test/fuzzer/inspector-fuzzer.cc
new file mode 100644
index 0000000000..e136a99d67
--- /dev/null
+++ b/deps/v8/test/fuzzer/inspector-fuzzer.cc
@@ -0,0 +1,616 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !defined(_WIN32) && !defined(_WIN64)
+#include <unistd.h> // NOLINT
+#endif // !defined(_WIN32) && !defined(_WIN64)
+
+#include <locale.h>
+
+#include <string>
+#include <vector>
+
+#include "include/libplatform/libplatform.h"
+#include "include/v8.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
+#include "src/base/small-vector.h"
+#include "src/flags/flags.h"
+#include "src/heap/read-only-heap.h"
+#include "src/libplatform/default-platform.h"
+#include "src/utils/utils.h"
+#include "src/utils/vector.h"
+#include "test/inspector/frontend-channel.h"
+#include "test/inspector/isolate-data.h"
+#include "test/inspector/task-runner.h"
+#include "test/inspector/tasks.h"
+
+namespace v8 {
+namespace internal {
+namespace {
+
+base::SmallVector<TaskRunner*, 2> task_runners;
+
+class UtilsExtension : public IsolateData::SetupGlobalTask {
+ public:
+ ~UtilsExtension() override = default;
+ void Run(v8::Isolate* isolate,
+ v8::Local<v8::ObjectTemplate> global) override {
+ v8::Local<v8::ObjectTemplate> utils = v8::ObjectTemplate::New(isolate);
+ auto Set = [isolate](v8::Local<v8::ObjectTemplate> tmpl, const char* str,
+ v8::Local<v8::Data> value) {
+ tmpl->Set(ToV8String(isolate, str), value,
+ static_cast<v8::PropertyAttribute>(
+ v8::PropertyAttribute::ReadOnly |
+ v8::PropertyAttribute::DontDelete));
+ };
+ Set(utils, "quit",
+ v8::FunctionTemplate::New(isolate, &UtilsExtension::Quit));
+ Set(utils, "compileAndRunWithOrigin",
+ v8::FunctionTemplate::New(isolate,
+ &UtilsExtension::CompileAndRunWithOrigin));
+ Set(utils, "schedulePauseOnNextStatement",
+ v8::FunctionTemplate::New(
+ isolate, &UtilsExtension::SchedulePauseOnNextStatement));
+ Set(utils, "cancelPauseOnNextStatement",
+ v8::FunctionTemplate::New(isolate,
+ &UtilsExtension::CancelPauseOnNextStatement));
+ Set(utils, "createContextGroup",
+ v8::FunctionTemplate::New(isolate,
+ &UtilsExtension::CreateContextGroup));
+ Set(utils, "resetContextGroup",
+ v8::FunctionTemplate::New(isolate, &UtilsExtension::ResetContextGroup));
+ Set(utils, "connectSession",
+ v8::FunctionTemplate::New(isolate, &UtilsExtension::ConnectSession));
+ Set(utils, "disconnectSession",
+ v8::FunctionTemplate::New(isolate, &UtilsExtension::DisconnectSession));
+ Set(utils, "sendMessageToBackend",
+ v8::FunctionTemplate::New(isolate,
+ &UtilsExtension::SendMessageToBackend));
+ Set(global, "utils", utils);
+ }
+
+ static void set_backend_task_runner(TaskRunner* runner) {
+ backend_runner_ = runner;
+ }
+
+ static void ClearAllSessions() { channels_.clear(); }
+
+ private:
+ static TaskRunner* backend_runner_;
+
+ static void Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
+    // Only terminate; do not join the threads here, since joining them
+    // concurrently from multiple threads can be undefined behaviour (see
+    // pthread_join).
+ for (TaskRunner* task_runner : task_runners) task_runner->Terminate();
+ }
+
+ static void CompileAndRunWithOrigin(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 5 || !args[0]->IsInt32() || !args[1]->IsString() ||
+ !args[2]->IsString() || !args[3]->IsInt32() || !args[4]->IsInt32()) {
+ return;
+ }
+
+ backend_runner_->Append(std::make_unique<ExecuteStringTask>(
+ args.GetIsolate(), args[0].As<v8::Int32>()->Value(),
+ ToVector(args.GetIsolate(), args[1].As<v8::String>()),
+ args[2].As<v8::String>(), args[3].As<v8::Int32>(),
+ args[4].As<v8::Int32>(), v8::Boolean::New(args.GetIsolate(), false)));
+ }
+
+ static void SchedulePauseOnNextStatement(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 3 || !args[0]->IsInt32() || !args[1]->IsString() ||
+ !args[2]->IsString()) {
+ return;
+ }
+ std::vector<uint16_t> reason =
+ ToVector(args.GetIsolate(), args[1].As<v8::String>());
+ std::vector<uint16_t> details =
+ ToVector(args.GetIsolate(), args[2].As<v8::String>());
+ int context_group_id = args[0].As<v8::Int32>()->Value();
+ RunSyncTask(backend_runner_,
+ [&context_group_id, &reason, &details](IsolateData* data) {
+ data->SchedulePauseOnNextStatement(
+ context_group_id,
+ v8_inspector::StringView(reason.data(), reason.size()),
+ v8_inspector::StringView(details.data(), details.size()));
+ });
+ }
+
+ static void CancelPauseOnNextStatement(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsInt32()) {
+ return;
+ }
+ int context_group_id = args[0].As<v8::Int32>()->Value();
+ RunSyncTask(backend_runner_, [&context_group_id](IsolateData* data) {
+ data->CancelPauseOnNextStatement(context_group_id);
+ });
+ }
+
+ static void CreateContextGroup(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 0) {
+ return;
+ }
+ int context_group_id = 0;
+ RunSyncTask(backend_runner_, [&context_group_id](IsolateData* data) {
+ context_group_id = data->CreateContextGroup();
+ });
+ args.GetReturnValue().Set(
+ v8::Int32::New(args.GetIsolate(), context_group_id));
+ }
+
+ static void ResetContextGroup(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsInt32()) {
+ return;
+ }
+ int context_group_id = args[0].As<v8::Int32>()->Value();
+ RunSyncTask(backend_runner_, [&context_group_id](IsolateData* data) {
+ data->ResetContextGroup(context_group_id);
+ });
+ }
+
+ static void ConnectSession(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 3 || !args[0]->IsInt32() || !args[1]->IsString() ||
+ !args[2]->IsFunction()) {
+ return;
+ }
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ FrontendChannelImpl* channel = new FrontendChannelImpl(
+ IsolateData::FromContext(context)->task_runner(),
+ IsolateData::FromContext(context)->GetContextGroupId(context),
+ args.GetIsolate(), args[2].As<v8::Function>());
+
+ std::vector<uint8_t> state =
+ ToBytes(args.GetIsolate(), args[1].As<v8::String>());
+ int context_group_id = args[0].As<v8::Int32>()->Value();
+ int session_id = 0;
+ RunSyncTask(backend_runner_, [&context_group_id, &session_id, &channel,
+ &state](IsolateData* data) {
+ session_id = data->ConnectSession(
+ context_group_id,
+ v8_inspector::StringView(state.data(), state.size()), channel);
+ channel->set_session_id(session_id);
+ });
+
+ channels_[session_id].reset(channel);
+ args.GetReturnValue().Set(v8::Int32::New(args.GetIsolate(), session_id));
+ }
+
+ static void DisconnectSession(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsInt32()) {
+ return;
+ }
+ int session_id = args[0].As<v8::Int32>()->Value();
+ std::vector<uint8_t> state;
+ RunSyncTask(backend_runner_, [&session_id, &state](IsolateData* data) {
+ state = data->DisconnectSession(session_id);
+ });
+ channels_.erase(session_id);
+ args.GetReturnValue().Set(ToV8String(args.GetIsolate(), state));
+ }
+
+ static void SendMessageToBackend(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 2 || !args[0]->IsInt32() || !args[1]->IsString()) {
+ return;
+ }
+ backend_runner_->Append(std::make_unique<SendMessageToBackendTask>(
+ args[0].As<v8::Int32>()->Value(),
+ ToVector(args.GetIsolate(), args[1].As<v8::String>())));
+ }
+
+ static std::map<int, std::unique_ptr<FrontendChannelImpl>> channels_;
+};
+
+TaskRunner* UtilsExtension::backend_runner_ = nullptr;
+std::map<int, std::unique_ptr<FrontendChannelImpl>> UtilsExtension::channels_;
+
+bool StrictAccessCheck(v8::Local<v8::Context> accessing_context,
+ v8::Local<v8::Object> accessed_object,
+ v8::Local<v8::Value> data) {
+ CHECK(accessing_context.IsEmpty());
+ return accessing_context.IsEmpty();
+}
+
+class InspectorExtension : public IsolateData::SetupGlobalTask {
+ public:
+ ~InspectorExtension() override = default;
+ void Run(v8::Isolate* isolate,
+ v8::Local<v8::ObjectTemplate> global) override {
+ v8::Local<v8::ObjectTemplate> inspector = v8::ObjectTemplate::New(isolate);
+ inspector->Set(ToV8String(isolate, "fireContextCreated"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::FireContextCreated));
+ inspector->Set(ToV8String(isolate, "fireContextDestroyed"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::FireContextDestroyed));
+ inspector->Set(
+ ToV8String(isolate, "freeContext"),
+ v8::FunctionTemplate::New(isolate, &InspectorExtension::FreeContext));
+ inspector->Set(ToV8String(isolate, "addInspectedObject"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::AddInspectedObject));
+ inspector->Set(ToV8String(isolate, "setMaxAsyncTaskStacks"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::SetMaxAsyncTaskStacks));
+ inspector->Set(
+ ToV8String(isolate, "dumpAsyncTaskStacksStateForTest"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::DumpAsyncTaskStacksStateForTest));
+ inspector->Set(
+ ToV8String(isolate, "breakProgram"),
+ v8::FunctionTemplate::New(isolate, &InspectorExtension::BreakProgram));
+ inspector->Set(
+ ToV8String(isolate, "createObjectWithStrictCheck"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::CreateObjectWithStrictCheck));
+ inspector->Set(ToV8String(isolate, "callWithScheduledBreak"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::CallWithScheduledBreak));
+ inspector->Set(ToV8String(isolate, "allowAccessorFormatting"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::AllowAccessorFormatting));
+ inspector->Set(
+ ToV8String(isolate, "markObjectAsNotInspectable"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::MarkObjectAsNotInspectable));
+ inspector->Set(ToV8String(isolate, "createObjectWithAccessor"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::CreateObjectWithAccessor));
+ inspector->Set(ToV8String(isolate, "storeCurrentStackTrace"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::StoreCurrentStackTrace));
+ inspector->Set(ToV8String(isolate, "externalAsyncTaskStarted"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::ExternalAsyncTaskStarted));
+ inspector->Set(
+ ToV8String(isolate, "externalAsyncTaskFinished"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::ExternalAsyncTaskFinished));
+ inspector->Set(ToV8String(isolate, "scheduleWithAsyncStack"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::ScheduleWithAsyncStack));
+ inspector->Set(
+ ToV8String(isolate, "setAllowCodeGenerationFromStrings"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::SetAllowCodeGenerationFromStrings));
+ inspector->Set(ToV8String(isolate, "setResourceNamePrefix"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::SetResourceNamePrefix));
+ global->Set(ToV8String(isolate, "inspector"), inspector);
+ }
+
+ private:
+ static void FireContextCreated(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ data->FireContextCreated(context, data->GetContextGroupId(context),
+ v8_inspector::StringView());
+ }
+
+ static void FireContextDestroyed(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ data->FireContextDestroyed(context);
+ }
+
+ static void FreeContext(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ data->FreeContext(context);
+ }
+
+ static void AddInspectedObject(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 2 || !args[0]->IsInt32()) {
+ return;
+ }
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ data->AddInspectedObject(args[0].As<v8::Int32>()->Value(), args[1]);
+ }
+
+ static void SetMaxAsyncTaskStacks(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsInt32()) {
+ return;
+ }
+ IsolateData::FromContext(args.GetIsolate()->GetCurrentContext())
+ ->SetMaxAsyncTaskStacksForTest(args[0].As<v8::Int32>()->Value());
+ }
+
+ static void DumpAsyncTaskStacksStateForTest(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 0) {
+ return;
+ }
+ IsolateData::FromContext(args.GetIsolate()->GetCurrentContext())
+ ->DumpAsyncTaskStacksStateForTest();
+ }
+
+ static void BreakProgram(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
+ return;
+ }
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ std::vector<uint16_t> reason =
+ ToVector(args.GetIsolate(), args[0].As<v8::String>());
+ v8_inspector::StringView reason_view(reason.data(), reason.size());
+ std::vector<uint16_t> details =
+ ToVector(args.GetIsolate(), args[1].As<v8::String>());
+ v8_inspector::StringView details_view(details.data(), details.size());
+ data->BreakProgram(data->GetContextGroupId(context), reason_view,
+ details_view);
+ }
+
+ static void CreateObjectWithStrictCheck(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 0) {
+ return;
+ }
+ v8::Local<v8::ObjectTemplate> templ =
+ v8::ObjectTemplate::New(args.GetIsolate());
+ templ->SetAccessCheckCallback(&StrictAccessCheck);
+ args.GetReturnValue().Set(
+ templ->NewInstance(args.GetIsolate()->GetCurrentContext())
+ .ToLocalChecked());
+ }
+
+ static void CallWithScheduledBreak(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 3 || !args[0]->IsFunction() || !args[1]->IsString() ||
+ !args[2]->IsString()) {
+ return;
+ }
+ std::vector<uint16_t> reason =
+ ToVector(args.GetIsolate(), args[1].As<v8::String>());
+ v8_inspector::StringView reason_view(reason.data(), reason.size());
+ std::vector<uint16_t> details =
+ ToVector(args.GetIsolate(), args[2].As<v8::String>());
+ v8_inspector::StringView details_view(details.data(), details.size());
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ int context_group_id = data->GetContextGroupId(context);
+ data->SchedulePauseOnNextStatement(context_group_id, reason_view,
+ details_view);
+ v8::MaybeLocal<v8::Value> result;
+ result = args[0].As<v8::Function>()->Call(context, context->Global(), 0,
+ nullptr);
+ data->CancelPauseOnNextStatement(context_group_id);
+ }
+
+ static void AllowAccessorFormatting(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsObject()) {
+ return;
+ }
+ v8::Local<v8::Object> object = args[0].As<v8::Object>();
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::Private> shouldFormatAccessorsPrivate = v8::Private::ForApi(
+ isolate, ToV8String(isolate, "allowAccessorFormatting"));
+ object
+ ->SetPrivate(isolate->GetCurrentContext(), shouldFormatAccessorsPrivate,
+ v8::Null(isolate))
+ .ToChecked();
+ }
+
+ static void MarkObjectAsNotInspectable(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsObject()) {
+ return;
+ }
+ v8::Local<v8::Object> object = args[0].As<v8::Object>();
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::Private> notInspectablePrivate =
+ v8::Private::ForApi(isolate, ToV8String(isolate, "notInspectable"));
+ object
+ ->SetPrivate(isolate->GetCurrentContext(), notInspectablePrivate,
+ v8::True(isolate))
+ .ToChecked();
+ }
+
+ static void CreateObjectWithAccessor(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsBoolean()) {
+ return;
+ }
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
+ if (args[1].As<v8::Boolean>()->Value()) {
+ templ->SetAccessor(v8::Local<v8::String>::Cast(args[0]), AccessorGetter,
+ AccessorSetter);
+ } else {
+ templ->SetAccessor(v8::Local<v8::String>::Cast(args[0]), AccessorGetter);
+ }
+ args.GetReturnValue().Set(
+ templ->NewInstance(isolate->GetCurrentContext()).ToLocalChecked());
+ }
+
+ static void AccessorGetter(v8::Local<v8::String> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ v8::Isolate* isolate = info.GetIsolate();
+ isolate->ThrowException(ToV8String(isolate, "Getter is called"));
+ }
+
+ static void AccessorSetter(v8::Local<v8::String> property,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ v8::Isolate* isolate = info.GetIsolate();
+ isolate->ThrowException(ToV8String(isolate, "Setter is called"));
+ }
+
+ static void StoreCurrentStackTrace(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsString()) {
+ return;
+ }
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ std::vector<uint16_t> description =
+ ToVector(isolate, args[0].As<v8::String>());
+ v8_inspector::StringView description_view(description.data(),
+ description.size());
+ v8_inspector::V8StackTraceId id =
+ data->StoreCurrentStackTrace(description_view);
+ v8::Local<v8::ArrayBuffer> buffer =
+ v8::ArrayBuffer::New(isolate, sizeof(id));
+ *static_cast<v8_inspector::V8StackTraceId*>(
+ buffer->GetBackingStore()->Data()) = id;
+ args.GetReturnValue().Set(buffer);
+ }
+
+ static void ExternalAsyncTaskStarted(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsArrayBuffer()) {
+ return;
+ }
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ v8_inspector::V8StackTraceId* id =
+ static_cast<v8_inspector::V8StackTraceId*>(
+ args[0].As<v8::ArrayBuffer>()->GetBackingStore()->Data());
+ data->ExternalAsyncTaskStarted(*id);
+ }
+
+ static void ExternalAsyncTaskFinished(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsArrayBuffer()) {
+ return;
+ }
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ v8_inspector::V8StackTraceId* id =
+ static_cast<v8_inspector::V8StackTraceId*>(
+ args[0].As<v8::ArrayBuffer>()->GetBackingStore()->Data());
+ data->ExternalAsyncTaskFinished(*id);
+ }
+
+ static void ScheduleWithAsyncStack(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 3 || !args[0]->IsFunction() || !args[1]->IsString() ||
+ !args[2]->IsBoolean()) {
+ return;
+ }
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ int context_group_id = data->GetContextGroupId(context);
+ bool with_empty_stack = args[2].As<v8::Boolean>()->Value();
+ if (with_empty_stack) context->Exit();
+
+ std::vector<uint16_t> task_name =
+ ToVector(isolate, args[1].As<v8::String>());
+ v8_inspector::StringView task_name_view(task_name.data(), task_name.size());
+
+ RunAsyncTask(
+ data->task_runner(), task_name_view,
+ std::make_unique<SetTimeoutTask>(
+ context_group_id, isolate, v8::Local<v8::Function>::Cast(args[0])));
+ if (with_empty_stack) context->Enter();
+ }
+
+ static void SetAllowCodeGenerationFromStrings(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsBoolean()) {
+ return;
+ }
+ args.GetIsolate()->GetCurrentContext()->AllowCodeGenerationFromStrings(
+ args[0].As<v8::Boolean>()->Value());
+ }
+
+ static void SetResourceNamePrefix(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsString()) {
+ return;
+ }
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ data->SetResourceNamePrefix(v8::Local<v8::String>::Cast(args[0]));
+ }
+};
+
+using CharVector = v8::internal::Vector<const char>;
+
+constexpr auto kMaxExecutionSeconds = v8::base::TimeDelta::FromSeconds(2);
+
+class Watchdog final : public base::Thread {
+ public:
+ explicit Watchdog(base::Semaphore* semaphore)
+ : base::Thread(base::Thread::Options("InspectorFuzzerWatchdog")),
+ semaphore_(semaphore) {
+ CHECK(Start());
+ }
+
+ private:
+ void Run() override {
+ if (semaphore_->WaitFor(kMaxExecutionSeconds)) return;
+ for (TaskRunner* task_runner : task_runners) task_runner->Terminate();
+ }
+
+ base::Semaphore* const semaphore_;
+};
+
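The Watchdog bounds each fuzz case's runtime: it waits on a semaphore for at most kMaxExecutionSeconds and, on timeout, terminates every task runner. The handshake with the driver below, sketched as comments:

// Driver side (see FuzzInspector below):
//   1. construct Watchdog(&ready_semaphore) -- the thread starts waiting;
//   2. queue the fuzz input and Join() both runners;
//   3. Signal() the semaphore and Join() the watchdog.
// If step 2 exceeds 2 seconds, Run() wakes first and Terminate()s the
// runners so the Join()s can complete.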
+void FuzzInspector(const uint8_t* data, size_t size) {
+ base::Semaphore ready_semaphore(0);
+
+ IsolateData::SetupGlobalTasks frontend_extensions;
+ frontend_extensions.emplace_back(new UtilsExtension());
+ TaskRunner frontend_runner(std::move(frontend_extensions),
+ kDontCatchExceptions, &ready_semaphore, nullptr,
+ kNoInspector);
+ ready_semaphore.Wait();
+
+ int frontend_context_group_id = 0;
+ RunSyncTask(&frontend_runner,
+ [&frontend_context_group_id](IsolateData* data) {
+ frontend_context_group_id = data->CreateContextGroup();
+ });
+
+ IsolateData::SetupGlobalTasks backend_extensions;
+ backend_extensions.emplace_back(new SetTimeoutExtension());
+ backend_extensions.emplace_back(new InspectorExtension());
+ TaskRunner backend_runner(std::move(backend_extensions), kDontCatchExceptions,
+ &ready_semaphore, nullptr, kWithInspector);
+ ready_semaphore.Wait();
+ UtilsExtension::set_backend_task_runner(&backend_runner);
+
+ task_runners = {&frontend_runner, &backend_runner};
+
+ Watchdog watchdog(&ready_semaphore);
+
+ frontend_runner.Append(std::make_unique<ExecuteStringTask>(
+ std::string{reinterpret_cast<const char*>(data), size},
+ frontend_context_group_id));
+
+ frontend_runner.Join();
+ backend_runner.Join();
+
+ ready_semaphore.Signal();
+ watchdog.Join();
+
+ UtilsExtension::ClearAllSessions();
+
+  // TaskRunners go out of scope here, which triggers Isolate teardown and
+  // properly joins all running background tasks.
+}
+
+} // namespace
+} // namespace internal
+} // namespace v8
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ v8::internal::FuzzInspector(data, size);
+ return 0;
+}
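Note the semaphore handshake in FuzzInspector above: ready_semaphore first gates the startup of each TaskRunner, then is reused to disarm the watchdog, and is only signalled after both runners have joined, so the timeout can no longer fire mid-teardown. The same shape with standard-library primitives instead of base::Thread/base::Semaphore (a simplified, hypothetical sketch):

    #include <chrono>
    #include <condition_variable>
    #include <functional>
    #include <mutex>
    #include <thread>

    class ScopedWatchdog {
     public:
      ScopedWatchdog(std::chrono::seconds timeout,
                     std::function<void()> on_timeout)
          : thread_([this, timeout, on_timeout] {
              std::unique_lock<std::mutex> lock(mutex_);
              // wait_for returns false if the timeout elapsed before Disarm().
              if (!cv_.wait_for(lock, timeout, [this] { return disarmed_; }))
                on_timeout();  // e.g. Terminate() all task runners
            }) {}

      void Disarm() {
        {
          std::lock_guard<std::mutex> lock(mutex_);
          disarmed_ = true;
        }
        cv_.notify_one();
        thread_.join();
      }

     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      bool disarmed_ = false;
      std::thread thread_;  // declared last: started in the constructor
    };
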
diff --git a/deps/v8/test/fuzzer/inspector/empty b/deps/v8/test/fuzzer/inspector/empty
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/fuzzer/inspector/empty
diff --git a/deps/v8/test/fuzzer/inspector/invalid b/deps/v8/test/fuzzer/inspector/invalid
new file mode 100644
index 0000000000..4476525b2a
--- /dev/null
+++ b/deps/v8/test/fuzzer/inspector/invalid
@@ -0,0 +1 @@
+no valid javascript
diff --git a/deps/v8/test/fuzzer/multi-return.cc b/deps/v8/test/fuzzer/multi-return.cc
index 0b18173f10..e09a7227a5 100644
--- a/deps/v8/test/fuzzer/multi-return.cc
+++ b/deps/v8/test/fuzzer/multi-return.cc
@@ -239,7 +239,8 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
}
callee.Return(static_cast<int>(desc->ReturnCount()), returns.get());
- OptimizedCompilationInfo info(ArrayVector("testing"), &zone, CodeKind::STUB);
+ OptimizedCompilationInfo info(ArrayVector("testing"), &zone,
+ CodeKind::FOR_TESTING);
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&info, i_isolate, desc, callee.graph(),
AssemblerOptions::Default(i_isolate),
@@ -285,7 +286,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
// Call the wrapper.
OptimizedCompilationInfo wrapper_info(ArrayVector("wrapper"), &zone,
- CodeKind::STUB);
+ CodeKind::FOR_TESTING);
Handle<Code> wrapper_code =
Pipeline::GenerateCodeForTesting(
&wrapper_info, i_isolate, wrapper_desc, caller.graph(),
diff --git a/deps/v8/test/fuzzer/regexp-builtins.cc b/deps/v8/test/fuzzer/regexp-builtins.cc
index 61149f134e..f02387bd92 100644
--- a/deps/v8/test/fuzzer/regexp-builtins.cc
+++ b/deps/v8/test/fuzzer/regexp-builtins.cc
@@ -240,9 +240,12 @@ std::string PickLimitForSplit(FuzzerArgs* args) {
}
std::string GenerateRandomFlags(FuzzerArgs* args) {
+ // TODO(mbid,v8:10765): Find a way to generate the kLinear flag sometimes,
+ // but only for patterns that are supported by the experimental engine.
constexpr size_t kFlagCount = JSRegExp::kFlagCount;
- CHECK_EQ(JSRegExp::kDotAll, 1 << (kFlagCount - 1));
- STATIC_ASSERT((1 << kFlagCount) - 1 < 0xFF);
+ CHECK_EQ(JSRegExp::kLinear, 1 << (kFlagCount - 1));
+ CHECK_EQ(JSRegExp::kDotAll, 1 << (kFlagCount - 2));
+ STATIC_ASSERT((1 << kFlagCount) - 1 <= 0xFF);
const size_t flags = RandomByte(args) & ((1 << kFlagCount) - 1);
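With kLinear added, kFlagCount is presumably eight, which is why the assertion above was relaxed from < to <=: the all-flags mask now fills a byte exactly, so a single random byte can still produce every flag combination. A quick check of the arithmetic (the value 8 is inferred from the asserts, not stated in this hunk):

    // (1 << 8) - 1 == 255 == 0xFF, so
    // RandomByte(args) & ((1 << kFlagCount) - 1) covers all combinations.
    static_assert((1 << 8) - 1 == 0xFF, "eight flags fill a byte exactly");
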
diff --git a/deps/v8/test/fuzzer/testcfg.py b/deps/v8/test/fuzzer/testcfg.py
index 08969997b1..8033566597 100644
--- a/deps/v8/test/fuzzer/testcfg.py
+++ b/deps/v8/test/fuzzer/testcfg.py
@@ -8,10 +8,11 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
SUB_TESTS = [
+ 'inspector',
'json',
'parser',
- 'regexp_builtins',
'regexp',
+ 'regexp_builtins',
'multi_return',
'wasm',
'wasm_async',
diff --git a/deps/v8/test/fuzzer/wasm-async.cc b/deps/v8/test/fuzzer/wasm-async.cc
index 5afe483136..4e8949412a 100644
--- a/deps/v8/test/fuzzer/wasm-async.cc
+++ b/deps/v8/test/fuzzer/wasm-async.cc
@@ -48,17 +48,13 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
// We explicitly enable staged WebAssembly features here to increase fuzzer
// coverage. For libfuzzer fuzzers it is not possible for the fuzzer to enable
// the flag by itself.
-#define ENABLE_STAGED_FEATURES(feat, desc, val) \
- i::FlagScope<bool> enable_##feat(&i::FLAG_experimental_wasm_##feat, true);
- FOREACH_WASM_STAGING_FEATURE_FLAG(ENABLE_STAGED_FEATURES)
-#undef ENABLE_STAGED_FEATURES
-
- FlagScope<bool> turn_on_async_compile(
- &v8::internal::FLAG_wasm_async_compilation, true);
- FlagScope<uint32_t> max_mem_flag_scope(&v8::internal::FLAG_wasm_max_mem_pages,
- 32);
- FlagScope<uint32_t> max_table_size_scope(
- &v8::internal::FLAG_wasm_max_table_size, 100);
+ OneTimeEnableStagedWasmFeatures();
+
+ // Set some more flags.
+ FLAG_wasm_async_compilation = true;
+ FLAG_wasm_max_mem_pages = 32;
+ FLAG_wasm_max_table_size = 100;
+
v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
v8::Isolate* isolate = support->GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<v8::internal::Isolate*>(isolate);
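The removed FlagScope instances restored the previous flag value on scope exit, roughly as in the simplified, hypothetical sketch below. Toggling flags back and forth on every fuzzer iteration is what could race with background threads reading them, hence the switch to one-time plain assignments:

    // Simplified RAII flag scope (illustrative, not the real V8 class).
    template <typename T>
    class ScopedFlag {
     public:
      ScopedFlag(T* flag, T new_value) : flag_(flag), old_value_(*flag) {
        *flag_ = new_value;
      }
      ~ScopedFlag() { *flag_ = old_value_; }  // restore on scope exit

     private:
      T* flag_;
      T old_value_;
    };
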
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index 8cace3230e..3f8fe07848 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -39,6 +39,8 @@ class DataRange {
public:
explicit DataRange(Vector<const uint8_t> data) : data_(data) {}
+ DataRange(const DataRange&) = delete;
+ DataRange& operator=(const DataRange&) = delete;
// Don't accidentally pass DataRange by value. This will reuse bytes and might
// lead to OOM because the end might not be reached.
@@ -83,8 +85,6 @@ class DataRange {
data_ += num_bytes;
return result;
}
-
- DISALLOW_COPY_AND_ASSIGN(DataRange);
};
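Replacing the DISALLOW_COPY_AND_ASSIGN macro with explicitly deleted members moves the intent to the top of the class, next to the warning comment about pass-by-value. The shape of the idiom (a simplified, hypothetical class, not the real DataRange):

    #include <cstddef>
    #include <cstdint>

    class ByteRange {
     public:
      ByteRange(const uint8_t* data, size_t size) : data_(data), size_(size) {}
      ByteRange(const ByteRange&) = delete;             // no accidental copies:
      ByteRange& operator=(const ByteRange&) = delete;  // bytes are consumed once
      ByteRange(ByteRange&&) = default;                 // moving remains allowed

     private:
      const uint8_t* data_;
      size_t size_;
    };
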
ValueType GetValueType(DataRange* data) {
@@ -281,6 +281,7 @@ class WasmGenerator {
case kExprS128Load32x2S:
case kExprS128Load32x2U:
case kExprS128Load64Splat:
+ case kExprS128Load64Zero:
return 3;
case kExprI32LoadMem:
case kExprI64LoadMem32S:
@@ -308,6 +309,7 @@ class WasmGenerator {
case kExprI64AtomicExchange32U:
case kExprI64AtomicCompareExchange32U:
case kExprS128Load32Splat:
+ case kExprS128Load32Zero:
return 2;
case kExprI32LoadMem16S:
case kExprI32LoadMem16U:
@@ -1280,15 +1282,15 @@ void WasmGenerator::Generate<ValueType::kS128>(DataRange* data) {
ValueType::kI32>,
&WasmGenerator::op_with_prefix<kExprI8x16Add, ValueType::kS128,
ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16AddSaturateS, ValueType::kS128,
+ &WasmGenerator::op_with_prefix<kExprI8x16AddSatS, ValueType::kS128,
ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16AddSaturateU, ValueType::kS128,
+ &WasmGenerator::op_with_prefix<kExprI8x16AddSatU, ValueType::kS128,
ValueType::kS128>,
&WasmGenerator::op_with_prefix<kExprI8x16Sub, ValueType::kS128,
ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16SubSaturateS, ValueType::kS128,
+ &WasmGenerator::op_with_prefix<kExprI8x16SubSatS, ValueType::kS128,
ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI8x16SubSaturateU, ValueType::kS128,
+ &WasmGenerator::op_with_prefix<kExprI8x16SubSatU, ValueType::kS128,
ValueType::kS128>,
&WasmGenerator::op_with_prefix<kExprI8x16MinS, ValueType::kS128,
ValueType::kS128>,
@@ -1333,15 +1335,15 @@ void WasmGenerator::Generate<ValueType::kS128>(DataRange* data) {
ValueType::kI32>,
&WasmGenerator::op_with_prefix<kExprI16x8Add, ValueType::kS128,
ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8AddSaturateS, ValueType::kS128,
+ &WasmGenerator::op_with_prefix<kExprI16x8AddSatS, ValueType::kS128,
ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8AddSaturateU, ValueType::kS128,
+ &WasmGenerator::op_with_prefix<kExprI16x8AddSatU, ValueType::kS128,
ValueType::kS128>,
&WasmGenerator::op_with_prefix<kExprI16x8Sub, ValueType::kS128,
ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8SubSaturateS, ValueType::kS128,
+ &WasmGenerator::op_with_prefix<kExprI16x8SubSatS, ValueType::kS128,
ValueType::kS128>,
- &WasmGenerator::op_with_prefix<kExprI16x8SubSaturateU, ValueType::kS128,
+ &WasmGenerator::op_with_prefix<kExprI16x8SubSatU, ValueType::kS128,
ValueType::kS128>,
&WasmGenerator::op_with_prefix<kExprI16x8Mul, ValueType::kS128,
ValueType::kS128>,
@@ -1399,6 +1401,8 @@ void WasmGenerator::Generate<ValueType::kS128>(DataRange* data) {
ValueType::kS128>,
&WasmGenerator::op_with_prefix<kExprI32x4MaxU, ValueType::kS128,
ValueType::kS128>,
+ &WasmGenerator::op_with_prefix<kExprI32x4DotI16x8S, ValueType::kS128,
+ ValueType::kS128>,
&WasmGenerator::op_with_prefix<kExprI64x2Splat, ValueType::kI64>,
&WasmGenerator::op_with_prefix<kExprI64x2Neg, ValueType::kS128>,
@@ -1546,7 +1550,10 @@ void WasmGenerator::Generate<ValueType::kS128>(DataRange* data) {
&WasmGenerator::memop<kExprS128Load8Splat>,
&WasmGenerator::memop<kExprS128Load16Splat>,
&WasmGenerator::memop<kExprS128Load32Splat>,
- &WasmGenerator::memop<kExprS128Load64Splat>};
+ &WasmGenerator::memop<kExprS128Load64Splat>,
+ &WasmGenerator::memop<kExprS128Load32Zero>,
+ &WasmGenerator::memop<kExprS128Load64Zero>,
+ };
GenerateOneOf(alternatives, data);
}
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index 3bc2ae5348..bcdba055a2 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -305,15 +305,25 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
}
}
+void OneTimeEnableStagedWasmFeatures() {
+ struct EnableStagedWasmFeatures {
+ EnableStagedWasmFeatures() {
+#define ENABLE_STAGED_FEATURES(feat, desc, val) \
+ FLAG_experimental_wasm_##feat = true;
+ FOREACH_WASM_STAGING_FEATURE_FLAG(ENABLE_STAGED_FEATURES)
+#undef ENABLE_STAGED_FEATURES
+ }
+ };
+ // The compiler will properly synchronize the constructor call.
+ static EnableStagedWasmFeatures one_time_enable_staged_features;
+}
+
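OneTimeEnableStagedWasmFeatures relies on C++11 "magic statics": the constructor of a function-local static object runs exactly once, with the compiler emitting the required synchronization, so concurrent first calls cannot race. The idiom in isolation (a generic sketch, not V8 code):

    void OneTimeInit() {
      struct Once {
        Once() {
          // One-time side effects go here, e.g. flipping global flags.
        }
      };
      // C++11 guarantees thread-safe, exactly-once initialization.
      static Once once;
    }
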
void WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
bool require_valid) {
// We explicitly enable staged WebAssembly features here to increase fuzzer
// coverage. For libfuzzer fuzzers it is not possible for the fuzzer to enable
// the flag by itself.
-#define ENABLE_STAGED_FEATURES(feat, desc, val) \
- FlagScope<bool> enable_##feat(&FLAG_experimental_wasm_##feat, true);
- FOREACH_WASM_STAGING_FEATURE_FLAG(ENABLE_STAGED_FEATURES)
-#undef ENABLE_STAGED_FEATURES
+ OneTimeEnableStagedWasmFeatures();
// Strictly enforce the input size limit. Note that setting "max_len" on the
// fuzzer target is not enough, since different fuzzers are used and not all
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.h b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
index 3c6d7c3752..d74a26ffab 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.h
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
@@ -29,6 +29,12 @@ void InterpretAndExecuteModule(Isolate* isolate,
void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
bool compiles);
+// On the first call, enables all staged wasm features. All subsequent calls are
+// no-ops. This avoids race conditions with threads reading the flags. Fuzzers
+// are executed in their own process anyway, so this should not interfere with
+// anything.
+void OneTimeEnableStagedWasmFeatures();
+
class WasmExecutionFuzzer {
public:
virtual ~WasmExecutionFuzzer() = default;
diff --git a/deps/v8/test/fuzzer/wasm.cc b/deps/v8/test/fuzzer/wasm.cc
index aaa958c4d5..fe3cdfcbea 100644
--- a/deps/v8/test/fuzzer/wasm.cc
+++ b/deps/v8/test/fuzzer/wasm.cc
@@ -24,16 +24,13 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
// We explicitly enable staged WebAssembly features here to increase fuzzer
// coverage. For libfuzzer fuzzers it is not possible for the fuzzer to enable
// the flag by itself.
-#define ENABLE_STAGED_FEATURES(feat, desc, val) \
- i::FlagScope<bool> enable_##feat(&i::FLAG_experimental_wasm_##feat, true);
- FOREACH_WASM_STAGING_FEATURE_FLAG(ENABLE_STAGED_FEATURES)
-#undef ENABLE_STAGED_FEATURES
+ i::wasm::fuzzer::OneTimeEnableStagedWasmFeatures();
// We reduce the maximum memory size and table size of WebAssembly instances
// to avoid OOMs in the fuzzer.
- i::FlagScope<uint32_t> max_mem_flag_scope(&i::FLAG_wasm_max_mem_pages, 32);
- i::FlagScope<uint32_t> max_table_size_scope(&i::FLAG_wasm_max_table_size,
- 100);
+ i::FLAG_wasm_max_mem_pages = 32;
+ i::FLAG_wasm_max_table_size = 100;
+
v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
v8::Isolate* isolate = support->GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
diff --git a/deps/v8/test/fuzzilli/README.md b/deps/v8/test/fuzzilli/README.md
index 42add85368..48a0e5afbd 100644
--- a/deps/v8/test/fuzzilli/README.md
+++ b/deps/v8/test/fuzzilli/README.md
@@ -2,14 +2,13 @@
## Source code
-On low level fuzzilli communicates with v8 through Swift C API library in `Sources/libreprl/libreprl.c`
+At a low level, Fuzzilli communicates with V8 through the REPRL protocol, implemented on the fuzzer side by the libreprl C library in `Sources/libreprl/`. The main way of using the library is through the following three functions:
-`reprl_spawn_child` fucntions spawns child process. It does that by creating pipes, forking itself, then setting filedescriptors, and then transforming itself using `execve` into v8 process. Afterwords it checks for receiving 4 byte string and it sends the exact same string back.
+`reprl_create_context()`: creates a new, empty REPRL context to be used by the following APIs.
-`fetch_output` fetches the output from the child and returns its size and pointer to data.
+`reprl_initialize_context(ctx, argv, envp)`: initializes the given context and sets the argv and envp vectors to use for the child processes.
-`execute script`
-writes `exec`, and size of script, into the command write pipe and sends script through data write pipe
+`reprl_execute(ctx, code)`: executes the given code and returns the exit status. If necessary, a new child process is created for this, which involves creating pipes, forking, setting up file descriptors, and using `execve` to run the d8 binary. A child process can be reused for multiple executions, which increases fuzzing performance because the fork and execve overhead is removed.
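Taken together, the three functions are used roughly as follows. This sketch follows the call shapes in `test/fuzzilli/main.cc` below; details such as the timeout units and whether `reprl_initialize_context` takes additional output-capture flags vary by libreprl revision, so treat the exact signatures as assumptions:

    extern "C" {
    #include "libreprl.h"
    }
    #include <cstdint>
    #include <cstring>

    int run_once(const char* d8_path) {
      struct reprl_context* ctx = reprl_create_context();
      const char* argv[] = {d8_path, nullptr};
      const char* envp[] = {nullptr};
      if (reprl_initialize_context(ctx, argv, envp) != 0) return -1;

      const char* code = "let greeting = 'Hello World!';";
      uint64_t exec_time;
      // Final argument 0: reuse an existing child process when possible.
      return reprl_execute(ctx, code, std::strlen(code), 1000, &exec_time, 0);
    }
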
## Coverage
diff --git a/deps/v8/test/fuzzilli/main.cc b/deps/v8/test/fuzzilli/main.cc
index 8af9788d63..5dd5d4110a 100644
--- a/deps/v8/test/fuzzilli/main.cc
+++ b/deps/v8/test/fuzzilli/main.cc
@@ -5,12 +5,34 @@
extern "C" {
#include <stdio.h>
+#include <stdlib.h>
#include <string.h>
#include "libreprl.h"
+struct reprl_context* ctx;
+
+int execute(const char* code) {
+ uint64_t exec_time;
+ return reprl_execute(ctx, code, strlen(code), 1000, &exec_time, 0);
+}
+
+void expect_success(const char* code) {
+ if (execute(code) != 0) {
+ printf("Execution of \"%s\" failed\n", code);
+ exit(1);
+ }
+}
+
+void expect_failure(const char* code) {
+ if (execute(code) == 0) {
+ printf("Execution of \"%s\" unexpectedly succeeded\n", code);
+ exit(1);
+ }
+}
+
int main(int argc, char** argv) {
- struct reprl_context* ctx = reprl_create_context();
+ ctx = reprl_create_context();
const char* env[] = {nullptr};
const char* prog = argc > 1 ? argv[1] : "./out.gn/x64.debug/d8";
@@ -20,39 +42,27 @@ int main(int argc, char** argv) {
return -1;
}
- uint64_t exec_time;
-
// Basic functionality test
- const char* code = "let greeting = \"Hello World!\";";
- if (reprl_execute(ctx, code, strlen(code), 1000, &exec_time, 0) != 0) {
- printf("Execution of \"%s\" failed\n", code);
- printf("Is %s the path to d8 built with v8_fuzzilli=true?\n", prog);
+ if (execute("let greeting = \"Hello World!\";") != 0) {
+ printf(
+ "Script execution failed, is %s the path to d8 built with "
+ "v8_fuzzilli=true?\n",
+ prog);
return -1;
}
// Verify that runtime exceptions can be detected
- code = "throw 'failure';";
- if (reprl_execute(ctx, code, strlen(code), 1000, &exec_time, 0) == 0) {
- printf("Execution of \"%s\" unexpectedly succeeded\n", code);
- return -1;
- }
+ expect_failure("throw 'failure';");
// Verify that existing state is properly reset between executions
- code = "globalProp = 42; Object.prototype.foo = \"bar\";";
- if (reprl_execute(ctx, code, strlen(code), 1000, &exec_time, 0) != 0) {
- printf("Execution of \"%s\" failed\n", code);
- return -1;
- }
- code = "if (typeof(globalProp) !== 'undefined') throw 'failure'";
- if (reprl_execute(ctx, code, strlen(code), 1000, &exec_time, 0) != 0) {
- printf("Execution of \"%s\" failed\n", code);
- return -1;
- }
- code = "if (typeof(Object.prototype.foo) !== 'undefined') throw 'failure'";
- if (reprl_execute(ctx, code, strlen(code), 1000, &exec_time, 0) != 0) {
- printf("Execution of \"%s\" failed\n", code);
- return -1;
- }
+ expect_success("globalProp = 42; Object.prototype.foo = \"bar\";");
+ expect_success("if (typeof(globalProp) !== 'undefined') throw 'failure'");
+ expect_success("if (typeof(({}).foo) !== 'undefined') throw 'failure'");
+
+ // Verify that rejected promises are properly reset between executions
+ expect_failure("async function fail() { throw 42; }; fail()");
+ expect_success("42");
+ expect_failure("async function fail() { throw 42; }; fail()");
puts("OK");
return 0;
diff --git a/deps/v8/test/inspector/BUILD.gn b/deps/v8/test/inspector/BUILD.gn
index f2faf8bc21..1fef57d14b 100644
--- a/deps/v8/test/inspector/BUILD.gn
+++ b/deps/v8/test/inspector/BUILD.gn
@@ -4,27 +4,41 @@
import("../../gni/v8.gni")
-v8_executable("inspector-test") {
- testonly = true
-
+v8_source_set("inspector_test") {
sources = [
- "inspector-test.cc",
+ "frontend-channel.h",
"isolate-data.cc",
"isolate-data.h",
"task-runner.cc",
"task-runner.h",
+ "tasks.cc",
+ "tasks.h",
+ "utils.cc",
+ "utils.h",
]
+ configs = [ "../..:internal_config_base" ]
+
+ public_deps = [
+ "../..:v8",
+ "../..:v8_libbase",
+ "../..:v8_libplatform",
+ "../../src/inspector:inspector_test_headers",
+ ]
+}
+
+v8_executable("inspector-test") {
+ testonly = true
+
+ sources = [ "inspector-test.cc" ]
+
configs = [
"../..:external_config",
"../..:internal_config_base",
]
deps = [
- "../..:v8",
- "../..:v8_libbase",
- "../..:v8_libplatform",
- "../../src/inspector:inspector_test_headers",
+ ":inspector_test",
"//build/win:default_exe_manifest",
]
diff --git a/deps/v8/test/inspector/DEPS b/deps/v8/test/inspector/DEPS
index 622048015f..543a38f0a7 100644
--- a/deps/v8/test/inspector/DEPS
+++ b/deps/v8/test/inspector/DEPS
@@ -1,12 +1,8 @@
include_rules = [
"-src",
- "+src/base/atomic-utils.h",
- "+src/base/macros.h",
- "+src/base/platform/platform.h",
+ "+src/base",
"+src/flags/flags.h",
"+src/heap/read-only-heap.h",
"+src/inspector/test-interface.h",
- "+src/utils/locked-queue-inl.h",
- "+src/utils/utils.h",
- "+src/utils/vector.h",
+ "+src/utils",
]
diff --git a/deps/v8/test/inspector/DIR_METADATA b/deps/v8/test/inspector/DIR_METADATA
new file mode 100644
index 0000000000..3ba1106a5f
--- /dev/null
+++ b/deps/v8/test/inspector/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Platform>DevTools>JavaScript"
+}
\ No newline at end of file
diff --git a/deps/v8/test/inspector/OWNERS b/deps/v8/test/inspector/OWNERS
index 50cd83c40c..d8bedcc10e 100644
--- a/deps/v8/test/inspector/OWNERS
+++ b/deps/v8/test/inspector/OWNERS
@@ -1,3 +1 @@
file:../../src/inspector/OWNERS
-
-# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js b/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js
index dea818a351..192fdbe212 100644
--- a/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js
+++ b/deps/v8/test/inspector/cpu-profiler/console-profile-wasm.js
@@ -63,9 +63,17 @@ function checkError(message) {
// InspectorTest.log(function_names.join(', '));
// Check for at least one full cycle of
// fib -> wasm-to-js -> imp -> js-to-wasm -> fib.
- const expected = ['fib', 'wasm-to-js:i:i', 'imp', 'js-to-wasm:i:i', 'fib'];
- for (let i = 0; i <= function_names.length - expected.length; ++i) {
- if (expected.every((val, idx) => val == function_names[i + idx])) {
+ // There are two different kinds of js-to-wasm-wrappers, so there are two
+ // possible positive traces.
+ const expected_generic =
+ ['fib', 'wasm-to-js:i:i', 'imp', 'GenericJSToWasmWrapper', 'fib'];
+ const expected_optimized =
+ ['fib', 'wasm-to-js:i:i', 'imp', 'js-to-wasm:i:i', 'fib'];
+ for (let i = 0; i <= function_names.length - expected_generic.length; ++i) {
+ if (expected_generic.every(
+ (val, idx) => val == function_names[i + idx]) ||
+ expected_optimized.every(
+ (val, idx) => val == function_names[i + idx])) {
found_good_profile = true;
}
}
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-static-nested-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-static-nested-expected.txt
index 7c9e6b2e1b..cb12a7446b 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-static-nested-expected.txt
+++ b/deps/v8/test/inspector/debugger/class-private-methods-static-nested-expected.txt
@@ -8,7 +8,7 @@ privateProperties on class A
value : {
className : Function
description : #method() { debugger; }
- objectId : {"injectedScriptId":1,"id":39}
+ objectId : <objectId>
type : function
}
}
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-static-nested.js b/deps/v8/test/inspector/debugger/class-private-methods-static-nested.js
index b26fa13b84..216241a27c 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-static-nested.js
+++ b/deps/v8/test/inspector/debugger/class-private-methods-static-nested.js
@@ -43,7 +43,7 @@ InspectorTest.runAsyncTestSuite([
let { result } = await Protocol.Runtime.getProperties({
objectId: frame.this.objectId
});
- InspectorTest.logObject(result.privateProperties);
+ InspectorTest.logMessage(result.privateProperties);
Protocol.Debugger.resume();
({ params: { callFrames } } = await Protocol.Debugger.oncePaused()); // B.test();
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt b/deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt
index 4f33b3156e..80bb36dd55 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt
+++ b/deps/v8/test/inspector/debugger/class-private-methods-unused-expected.txt
@@ -8,7 +8,7 @@ Get privateProperties of A in testStatic()
value : {
className : Function
description : #staticMethod() { return 1; }
- objectId : {"injectedScriptId":1,"id":34}
+ objectId : <objectId>
type : function
}
}
@@ -20,19 +20,19 @@ Access A.#staticMethod() in testStatic()
exception : {
className : ReferenceError
description : ReferenceError: A is not defined at eval (eval at testStatic (:1:1), <anonymous>:1:1) at Function.testStatic (<anonymous>:6:29) at run (<anonymous>:9:7) at <anonymous>:1:1
- objectId : {"injectedScriptId":1,"id":36}
+ objectId : <objectId>
subtype : error
type : object
}
- exceptionId : 1
+ exceptionId : <exceptionId>
lineNumber : 0
- scriptId : 5
+ scriptId : <scriptId>
text : Uncaught
}
result : {
className : ReferenceError
description : ReferenceError: A is not defined at eval (eval at testStatic (:1:1), <anonymous>:1:1) at Function.testStatic (<anonymous>:6:29) at run (<anonymous>:9:7) at <anonymous>:1:1
- objectId : {"injectedScriptId":1,"id":35}
+ objectId : <objectId>
subtype : error
type : object
}
@@ -44,19 +44,19 @@ Access this.#staticMethod() in testStatic()
exception : {
className : Error
description : Error: Unused static private method '#staticMethod' cannot be accessed at debug time at eval (eval at testStatic (:1:1), <anonymous>:1:1) at Function.testStatic (<anonymous>:6:29) at run (<anonymous>:9:7) at <anonymous>:1:1
- objectId : {"injectedScriptId":1,"id":38}
+ objectId : <objectId>
subtype : error
type : object
}
- exceptionId : 2
+ exceptionId : <exceptionId>
lineNumber : 0
- scriptId : 6
+ scriptId : <scriptId>
text : Uncaught
}
result : {
className : Error
description : Error: Unused static private method '#staticMethod' cannot be accessed at debug time at eval (eval at testStatic (:1:1), <anonymous>:1:1) at Function.testStatic (<anonymous>:6:29) at run (<anonymous>:9:7) at <anonymous>:1:1
- objectId : {"injectedScriptId":1,"id":37}
+ objectId : <objectId>
subtype : error
type : object
}
@@ -68,7 +68,7 @@ get privateProperties of a in testInstance()
value : {
className : Function
description : #instanceMethod() { return 2; }
- objectId : {"injectedScriptId":1,"id":61}
+ objectId : <objectId>
type : function
}
}
diff --git a/deps/v8/test/inspector/debugger/class-private-methods-unused.js b/deps/v8/test/inspector/debugger/class-private-methods-unused.js
index a868735401..0c11277c0d 100644
--- a/deps/v8/test/inspector/debugger/class-private-methods-unused.js
+++ b/deps/v8/test/inspector/debugger/class-private-methods-unused.js
@@ -38,7 +38,7 @@ InspectorTest.runAsyncTestSuite([
let { result } = await Protocol.Runtime.getProperties({
objectId: frame.this.objectId
});
- InspectorTest.logObject(result.privateProperties);
+ InspectorTest.logMessage(result.privateProperties);
// Variables not referenced in the source code are currently
// considered "optimized away".
@@ -47,14 +47,14 @@ InspectorTest.runAsyncTestSuite([
expression: 'A.#staticMethod();',
callFrameId: callFrames[0].callFrameId
}));
- InspectorTest.logObject(result);
+ InspectorTest.logMessage(result);
InspectorTest.log('Access this.#staticMethod() in testStatic()');
({ result } = await Protocol.Debugger.evaluateOnCallFrame({
expression: 'this.#staticMethod();',
callFrameId: callFrames[0].callFrameId
}));
- InspectorTest.logObject(result);
+ InspectorTest.logMessage(result);
Protocol.Debugger.resume();
({ params: { callFrames } } = await Protocol.Debugger.oncePaused()); // a.testInstance();
@@ -64,14 +64,14 @@ InspectorTest.runAsyncTestSuite([
({ result } = await Protocol.Runtime.getProperties({
objectId: frame.this.objectId
}));
- InspectorTest.logObject(result.privateProperties);
+ InspectorTest.logMessage(result.privateProperties);
InspectorTest.log('Evaluating this.#instanceMethod() in testInstance()');
({ result } = await Protocol.Debugger.evaluateOnCallFrame({
expression: 'this.#instanceMethod();',
callFrameId: callFrames[0].callFrameId
}));
- InspectorTest.logObject(result);
+ InspectorTest.logMessage(result);
Protocol.Debugger.resume();
Protocol.Debugger.disable();
diff --git a/deps/v8/test/inspector/debugger/destory-in-break-program-expected.txt b/deps/v8/test/inspector/debugger/destroy-in-break-program-expected.txt
index c0ce88ecbc..c0ce88ecbc 100644
--- a/deps/v8/test/inspector/debugger/destory-in-break-program-expected.txt
+++ b/deps/v8/test/inspector/debugger/destroy-in-break-program-expected.txt
diff --git a/deps/v8/test/inspector/debugger/destory-in-break-program.js b/deps/v8/test/inspector/debugger/destroy-in-break-program.js
index e002328aa1..e002328aa1 100644
--- a/deps/v8/test/inspector/debugger/destory-in-break-program.js
+++ b/deps/v8/test/inspector/debugger/destroy-in-break-program.js
diff --git a/deps/v8/test/inspector/debugger/destroy-in-break-program2-expected.txt b/deps/v8/test/inspector/debugger/destroy-in-break-program2-expected.txt
new file mode 100644
index 0000000000..834179a549
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/destroy-in-break-program2-expected.txt
@@ -0,0 +1,3 @@
+Check we're not pausing on breaks while installing console API
+frobnicate("test", "self") = self test
+paused in: the-right-place.js
diff --git a/deps/v8/test/inspector/debugger/destroy-in-break-program2.js b/deps/v8/test/inspector/debugger/destroy-in-break-program2.js
new file mode 100644
index 0000000000..94df496520
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/destroy-in-break-program2.js
@@ -0,0 +1,49 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Check we\'re not pausing on breaks while installing console API');
+
+(async function test(){
+ // Set up additional console API to give evaluate() a chance to pause
+ // there (which it shouldn't) while installing the API upon first eval.
+ utils.setAdditionalConsoleApi(`function frobnicate() {
+ return [...arguments].reverse().join(' ');
+ }`);
+
+ // Perform a self-test, i.e. ensure that setAdditionalConsoleApi above takes effect.
+ Protocol.Runtime.enable();
+ const expression = 'frobnicate("test", "self")';
+ const {result} = await Protocol.Runtime.evaluate({
+ expression,
+ includeCommandLineAPI: true,
+ returnByValue: true
+ });
+ InspectorTest.log(`${expression} = ${result.result.value}`);
+
+ // Now for the actual test: get a clean context so that Runtime.evaluate
+ // will install the API again.
+ const contextGroup = new InspectorTest.ContextGroup();
+ const session2 = contextGroup.connect();
+ const Protocol2 = session2.Protocol;
+
+ Protocol2.Runtime.enable();
+ Protocol2.Debugger.enable();
+ await Protocol2.Debugger.pause();
+
+ // Add a sourceURL to double check we're paused in the right place.
+ const sourceURL = '//# sourceURL=the-right-place.js';
+ Protocol2.Runtime.evaluate({
+ expression: `frobnicate("real", "test");\n${sourceURL}`,
+ includeCommandLineAPI: true
+ });
+
+ const paused = (await Protocol2.Debugger.oncePaused()).params;
+ InspectorTest.log(`paused in: ${paused.callFrames[0].url}`);
+
+ // Now if we're paused in the wrong place, we will likely crash.
+ session2.disconnect();
+
+ InspectorTest.quitImmediately();
+})();
diff --git a/deps/v8/test/inspector/debugger/pause-on-oom-expected.txt b/deps/v8/test/inspector/debugger/pause-on-oom-expected.txt
index 4ca988deca..7570134c6a 100644
--- a/deps/v8/test/inspector/debugger/pause-on-oom-expected.txt
+++ b/deps/v8/test/inspector/debugger/pause-on-oom-expected.txt
@@ -1,2 +1,3 @@
Check pause on OOM
+nearHeapLimitCallback
reason: OOM
diff --git a/deps/v8/test/inspector/debugger/pause-on-oom-extrawide-expected.txt b/deps/v8/test/inspector/debugger/pause-on-oom-extrawide-expected.txt
index 4ca988deca..7570134c6a 100644
--- a/deps/v8/test/inspector/debugger/pause-on-oom-extrawide-expected.txt
+++ b/deps/v8/test/inspector/debugger/pause-on-oom-extrawide-expected.txt
@@ -1,2 +1,3 @@
Check pause on OOM
+nearHeapLimitCallback
reason: OOM
diff --git a/deps/v8/test/inspector/debugger/pause-on-oom-wide-expected.txt b/deps/v8/test/inspector/debugger/pause-on-oom-wide-expected.txt
index 4ca988deca..7570134c6a 100644
--- a/deps/v8/test/inspector/debugger/pause-on-oom-wide-expected.txt
+++ b/deps/v8/test/inspector/debugger/pause-on-oom-wide-expected.txt
@@ -1,2 +1,3 @@
Check pause on OOM
+nearHeapLimitCallback
reason: OOM
diff --git a/deps/v8/test/inspector/debugger/wasm-breakpoint-reset-on-debugger-restart-expected.txt b/deps/v8/test/inspector/debugger/wasm-breakpoint-reset-on-debugger-restart-expected.txt
index 20d0e846bd..187a415608 100644
--- a/deps/v8/test/inspector/debugger/wasm-breakpoint-reset-on-debugger-restart-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-breakpoint-reset-on-debugger-restart-expected.txt
@@ -3,7 +3,7 @@ Instantiating.
Waiting for wasm script (ignoring first non-wasm script).
Setting breakpoint.
Calling func.
-Script wasm://wasm/8c388106 byte offset 33: Wasm opcode 0x01
+Script wasm://wasm/8c388106 byte offset 33: Wasm opcode 0x01 (kExprNop)
func returned.
Restarting debugger.
Calling func.
diff --git a/deps/v8/test/inspector/debugger/wasm-debug-command-expected.txt b/deps/v8/test/inspector/debugger/wasm-debug-command-expected.txt
index c8b9f18e7b..53f66b8d50 100644
--- a/deps/v8/test/inspector/debugger/wasm-debug-command-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-debug-command-expected.txt
@@ -3,7 +3,7 @@ Waiting for wasm scripts to be parsed.
Ignoring script with url v8://test/instantiate
Got wasm script: wasm://wasm/7d022e0e
paused No 1
-Script wasm://wasm/7d022e0e byte offset 35: Wasm opcode 0x20
+Script wasm://wasm/7d022e0e byte offset 35: Wasm opcode 0x20 (kExprLocalGet)
Debugger.resume
exports.main returned!
Finished!
diff --git a/deps/v8/test/inspector/debugger/wasm-imports-expected.txt b/deps/v8/test/inspector/debugger/wasm-imports-expected.txt
index fffaba9017..ab47c245ee 100644
--- a/deps/v8/test/inspector/debugger/wasm-imports-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-imports-expected.txt
@@ -4,11 +4,11 @@ Calling instantiate function for module A.
Waiting for wasm script to be parsed.
Got wasm script!
Setting breakpoint in line 1:
-Script wasm://wasm/8c388106 byte offset 33: Wasm opcode 0x01
+Script wasm://wasm/8c388106 byte offset 33: Wasm opcode 0x01 (kExprNop)
Calling instantiate function for module B.
Calling main function on module B.
Paused at 0:33.
-Script wasm://wasm/8c388106 byte offset 33: Wasm opcode 0x01
+Script wasm://wasm/8c388106 byte offset 33: Wasm opcode 0x01 (kExprNop)
Getting current stack trace via "new Error().stack".
Error
at v8://test/getStack:1:1
diff --git a/deps/v8/test/inspector/debugger/wasm-remove-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/wasm-remove-breakpoint-expected.txt
index 3ec87ed63c..5edefd6ffa 100644
--- a/deps/v8/test/inspector/debugger/wasm-remove-breakpoint-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-remove-breakpoint-expected.txt
@@ -10,7 +10,7 @@ Setting breakpoint on line 3 of wasm function
scriptId : <scriptId>
}
paused No 1
-Script wasm://wasm/7d022e0e byte offset 39: Wasm opcode 0x6b
+Script wasm://wasm/7d022e0e byte offset 39: Wasm opcode 0x6b (kExprI32Sub)
Remove breakpoint
Debugger.resume
exports.main returned!
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
index 0cb6cb7ff3..834121fef1 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
@@ -4,19 +4,17 @@ Waiting for wasm script to be parsed.
Got wasm script!
Setting breakpoint on first instruction of second function
{
- columnNumber : 145
+ columnNumber : 147
lineNumber : 0
scriptId : <scriptId>
}
Paused:
-Script wasm://wasm/e1bff2da byte offset 145: Wasm opcode 0x41
+Script wasm://wasm/c723f83a byte offset 147: Wasm opcode 0x41 (kExprI32Const)
Scope:
-at func (0:145):
+at func (0:147):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -25,13 +23,12 @@ at func (0:145):
i64_local: 0 (i64)
unicode☼f64: 0 (f64)
var6: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -42,14 +39,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 147: Wasm opcode 0x21
+Script wasm://wasm/c723f83a byte offset 149: Wasm opcode 0x21 (kExprLocalSet)
Scope:
-at func (0:147):
+at func (0:149):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -58,14 +53,13 @@ at func (0:147):
i64_local: 0 (i64)
unicode☼f64: 0 (f64)
var6: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
0: 11 (i32)
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -76,14 +70,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 149: Wasm opcode 0x41
+Script wasm://wasm/c723f83a byte offset 151: Wasm opcode 0x41 (kExprI32Const)
Scope:
-at func (0:149):
+at func (0:151):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -92,13 +84,12 @@ at func (0:149):
i64_local: 0 (i64)
unicode☼f64: 0 (f64)
var6: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -109,14 +100,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 151: Wasm opcode 0x21
+Script wasm://wasm/c723f83a byte offset 153: Wasm opcode 0x21 (kExprLocalSet)
Scope:
-at func (0:151):
+at func (0:153):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -125,14 +114,13 @@ at func (0:151):
i64_local: 0 (i64)
unicode☼f64: 0 (f64)
var6: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
0: 47 (i32)
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -143,14 +131,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 153: Wasm opcode 0x42
+Script wasm://wasm/c723f83a byte offset 155: Wasm opcode 0x42 (kExprI64Const)
Scope:
-at func (0:153):
+at func (0:155):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -159,13 +145,12 @@ at func (0:153):
i64_local: 0 (i64)
unicode☼f64: 0 (f64)
var6: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -176,14 +161,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 164: Wasm opcode 0x21
+Script wasm://wasm/c723f83a byte offset 166: Wasm opcode 0x21 (kExprLocalSet)
Scope:
-at func (0:164):
+at func (0:166):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -192,14 +175,13 @@ at func (0:164):
i64_local: 0 (i64)
unicode☼f64: 0 (f64)
var6: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
0: 9223372036854775807 (i64)
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -210,14 +192,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 166: Wasm opcode 0x42
+Script wasm://wasm/c723f83a byte offset 168: Wasm opcode 0x42 (kExprI64Const)
Scope:
-at func (0:166):
+at func (0:168):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -226,13 +206,12 @@ at func (0:166):
i64_local: 9223372036854775807 (i64)
unicode☼f64: 0 (f64)
var6: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -243,14 +222,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 177: Wasm opcode 0x21
+Script wasm://wasm/c723f83a byte offset 179: Wasm opcode 0x21 (kExprLocalSet)
Scope:
-at func (0:177):
+at func (0:179):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -259,14 +236,13 @@ at func (0:177):
i64_local: 9223372036854775807 (i64)
unicode☼f64: 0 (f64)
var6: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
0: -9223372036854775808 (i64)
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -277,14 +253,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 179: Wasm opcode 0x41
+Script wasm://wasm/c723f83a byte offset 181: Wasm opcode 0x41 (kExprI32Const)
Scope:
-at func (0:179):
+at func (0:181):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -293,13 +267,12 @@ at func (0:179):
i64_local: -9223372036854775808 (i64)
unicode☼f64: 0 (f64)
var6: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -310,14 +283,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 181: Wasm opcode 0xb8
+Script wasm://wasm/c723f83a byte offset 183: Wasm opcode 0xb8 (kExprF64UConvertI32)
Scope:
-at func (0:181):
+at func (0:183):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -326,14 +297,13 @@ at func (0:181):
i64_local: -9223372036854775808 (i64)
unicode☼f64: 0 (f64)
var6: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
0: 1 (i32)
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -344,14 +314,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 182: Wasm opcode 0x41
+Script wasm://wasm/c723f83a byte offset 184: Wasm opcode 0x41 (kExprI32Const)
Scope:
-at func (0:182):
+at func (0:184):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -360,14 +328,13 @@ at func (0:182):
i64_local: -9223372036854775808 (i64)
unicode☼f64: 0 (f64)
var6: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
0: 1 (f64)
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -378,14 +345,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 184: Wasm opcode 0xb8
+Script wasm://wasm/c723f83a byte offset 186: Wasm opcode 0xb8 (kExprF64UConvertI32)
Scope:
-at func (0:184):
+at func (0:186):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -394,6 +359,7 @@ at func (0:184):
i64_local: -9223372036854775808 (i64)
unicode☼f64: 0 (f64)
var6: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
0: 1 (f64)
1: 7 (i32)
@@ -401,8 +367,6 @@ at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -413,14 +377,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 185: Wasm opcode 0xa3
+Script wasm://wasm/c723f83a byte offset 187: Wasm opcode 0xa3 (kExprF64Div)
Scope:
-at func (0:185):
+at func (0:187):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -429,6 +391,7 @@ at func (0:185):
i64_local: -9223372036854775808 (i64)
unicode☼f64: 0 (f64)
var6: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
0: 1 (f64)
1: 7 (f64)
@@ -436,8 +399,6 @@ at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -448,14 +409,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 186: Wasm opcode 0x21
+Script wasm://wasm/c723f83a byte offset 188: Wasm opcode 0x21 (kExprLocalSet)
Scope:
-at func (0:186):
+at func (0:188):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -464,14 +423,13 @@ at func (0:186):
i64_local: -9223372036854775808 (i64)
unicode☼f64: 0 (f64)
var6: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
0: 0.14285714285714285 (f64)
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -482,14 +440,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 188: Wasm opcode 0x41
+Script wasm://wasm/c723f83a byte offset 190: Wasm opcode 0x41 (kExprI32Const)
Scope:
-at func (0:188):
+at func (0:190):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -498,13 +454,12 @@ at func (0:188):
i64_local: -9223372036854775808 (i64)
unicode☼f64: 0.14285714285714285 (f64)
var6: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -515,14 +470,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 190: Wasm opcode 0xfd
+Script wasm://wasm/c723f83a byte offset 192: Wasm opcode 0xfd (kSimdPrefix)
Scope:
-at func (0:190):
+at func (0:192):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -531,14 +484,13 @@ at func (0:190):
i64_local: -9223372036854775808 (i64)
unicode☼f64: 0.14285714285714285 (f64)
var6: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
0: 23 (i32)
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -549,14 +501,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 192: Wasm opcode 0x21
+Script wasm://wasm/c723f83a byte offset 194: Wasm opcode 0x21 (kExprLocalSet)
Scope:
-at func (0:192):
+at func (0:194):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -565,14 +515,13 @@ at func (0:192):
i64_local: -9223372036854775808 (i64)
unicode☼f64: 0.14285714285714285 (f64)
var6: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
0: 17 00 00 00 17 00 00 00 17 00 00 00 17 00 00 00 (v128)
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -583,14 +532,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 194: Wasm opcode 0x41
+Script wasm://wasm/c723f83a byte offset 196: Wasm opcode 0x41 (kExprI32Const)
Scope:
-at func (0:194):
+at func (0:196):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -599,13 +546,12 @@ at func (0:194):
i64_local: -9223372036854775808 (i64)
unicode☼f64: 0.14285714285714285 (f64)
var6: 17 00 00 00 17 00 00 00 17 00 00 00 17 00 00 00 (v128)
+ var7: 0 (f32)
- scope (wasm-expression-stack):
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -616,14 +562,43 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 196: Wasm opcode 0x24
+Script wasm://wasm/c723f83a byte offset 198: Wasm opcode 0xb3 (kExprF32UConvertI32)
Scope:
-at func (0:196):
+at func (0:198):
+ - scope (module):
+ instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
+ exported_memory: Uint8Array(65536)
+ globals: "exported_global": 0 (i32)
+ - scope (local):
+ 0: 0 (f64)
+ i32Arg: 11 (i32)
+ var1: 47 (i32)
+ i64_local: -9223372036854775808 (i64)
+ unicode☼f64: 0.14285714285714285 (f64)
+ var6: 17 00 00 00 17 00 00 00 17 00 00 00 17 00 00 00 (v128)
+ var7: 0 (f32)
+ - scope (wasm-expression-stack):
+ 0: 21 (i32)
+at call_func (0:132):
+ - scope (module):
+ instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
+ exported_memory: Uint8Array(65536)
+ globals: "exported_global": 0 (i32)
+ - scope (local):
+ var0: 4 (i32)
+ var1: 7.199999809265137 (f32)
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped globals
+
+Paused:
+Script wasm://wasm/c723f83a byte offset 199: Wasm opcode 0x21 (kExprLocalSet)
+Scope:
+at func (0:199):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f64)
@@ -632,14 +607,74 @@ at func (0:196):
i64_local: -9223372036854775808 (i64)
unicode☼f64: 0.14285714285714285 (f64)
var6: 17 00 00 00 17 00 00 00 17 00 00 00 17 00 00 00 (v128)
+ var7: 0 (f32)
+ - scope (wasm-expression-stack):
+ 0: 21 (f32)
+at call_func (0:132):
+ - scope (module):
+ instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
+ exported_memory: Uint8Array(65536)
+ globals: "exported_global": 0 (i32)
+ - scope (local):
+ var0: 4 (i32)
+ var1: 7.199999809265137 (f32)
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped globals
+
+Paused:
+Script wasm://wasm/c723f83a byte offset 201: Wasm opcode 0x41 (kExprI32Const)
+Scope:
+at func (0:201):
+ - scope (module):
+ instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
+ exported_memory: Uint8Array(65536)
+ globals: "exported_global": 0 (i32)
+ - scope (local):
+ 0: 0 (f64)
+ i32Arg: 11 (i32)
+ var1: 47 (i32)
+ i64_local: -9223372036854775808 (i64)
+ unicode☼f64: 0.14285714285714285 (f64)
+ var6: 17 00 00 00 17 00 00 00 17 00 00 00 17 00 00 00 (v128)
+ var7: 21 (f32)
+ - scope (wasm-expression-stack):
+at call_func (0:132):
+ - scope (module):
+ instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
+ exported_memory: Uint8Array(65536)
+ globals: "exported_global": 0 (i32)
+ - scope (local):
+ var0: 4 (i32)
+ var1: 7.199999809265137 (f32)
+ - scope (wasm-expression-stack):
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped globals
+
+Paused:
+Script wasm://wasm/c723f83a byte offset 203: Wasm opcode 0x24 (kExprGlobalSet)
+Scope:
+at func (0:203):
+ - scope (module):
+ instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
+ exported_memory: Uint8Array(65536)
+ globals: "exported_global": 0 (i32)
+ - scope (local):
+ 0: 0 (f64)
+ i32Arg: 11 (i32)
+ var1: 47 (i32)
+ i64_local: -9223372036854775808 (i64)
+ unicode☼f64: 0.14285714285714285 (f64)
+ var6: 17 00 00 00 17 00 00 00 17 00 00 00 17 00 00 00 (v128)
+ var7: 21 (f32)
- scope (wasm-expression-stack):
0: 15 (i32)
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 4 (i32)
@@ -650,14 +685,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 198: Wasm opcode 0x0b
+Script wasm://wasm/c723f83a byte offset 205: Wasm opcode 0x0b (kExprEnd)
Scope:
-at func (0:198):
+at func (0:205):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 15 (i32)
- scope (local):
0: 0 (f64)
@@ -666,13 +699,12 @@ at func (0:198):
i64_local: -9223372036854775808 (i64)
unicode☼f64: 0.14285714285714285 (f64)
var6: 17 00 00 00 17 00 00 00 17 00 00 00 17 00 00 00 (v128)
+ var7: 21 (f32)
- scope (wasm-expression-stack):
at call_func (0:132):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 15 (i32)
- scope (local):
var0: 4 (i32)
@@ -683,14 +715,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/e1bff2da byte offset 134: Wasm opcode 0x0b
+Script wasm://wasm/c723f83a byte offset 134: Wasm opcode 0x0b (kExprEnd)
Scope:
at call_func (0:134):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, call_func: function 0() { [native code] }
globals: "exported_global": 15 (i32)
- scope (local):
var0: 4 (i32)
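
A reading aid for the v128 lines in the expectations above: the test splats the i32 constant 23 (0x17) into all four lanes, so the local prints as its little-endian byte pattern, 17 00 00 00 repeated four times. Annotated fragment of the body (see the wasm-scope-info.js hunk later in this diff; comments are illustrative):

  kExprI32Const, 23,             // push 23 (0x17)
  kSimdPrefix, kExprI32x4Splat,  // v128 with 23 in each i32 lane
  kExprLocalSet, 6,              // var6 = 17 00 00 00 (x4), little-endian bytes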
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff-expected.txt b/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff-expected.txt
index 65fb8f42ca..b5a50d57e7 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff-expected.txt
@@ -4,30 +4,27 @@ Waiting for wasm script to be parsed.
Got wasm script!
Setting breakpoint on line 2 (first instruction) of third function
{
- columnNumber : 167
+ columnNumber : 169
lineNumber : 0
scriptId : <scriptId>
}
Paused:
-Script wasm://wasm/ed01bcee byte offset 167: Wasm opcode 0x20
+Script wasm://wasm/e33badc2 byte offset 169: Wasm opcode 0x20 (kExprLocalGet)
Scope:
-at C (interpreted) (0:167):
+at C (interpreted) (0:169):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
i32_arg: 42 (i32)
i32_local: 0 (i32)
+ var2: 0 (f32)
- scope (wasm-expression-stack):
at B (liftoff) (0:158):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f32)
@@ -43,8 +40,6 @@ at A (liftoff) (0:128):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 42 (i32)
@@ -54,26 +49,23 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/ed01bcee byte offset 169: Wasm opcode 0x24
+Script wasm://wasm/e33badc2 byte offset 171: Wasm opcode 0x24 (kExprGlobalSet)
Scope:
-at C (interpreted) (0:169):
+at C (interpreted) (0:171):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
i32_arg: 42 (i32)
i32_local: 0 (i32)
+ var2: 0 (f32)
- scope (wasm-expression-stack):
0: 42 (i32)
at B (liftoff) (0:158):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
0: 0 (f32)
@@ -89,8 +81,6 @@ at A (liftoff) (0:128):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 0 (i32)
- scope (local):
var0: 42 (i32)
@@ -100,25 +90,22 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/ed01bcee byte offset 171: Wasm opcode 0x41
+Script wasm://wasm/e33badc2 byte offset 173: Wasm opcode 0x41 (kExprI32Const)
Scope:
-at C (interpreted) (0:171):
+at C (interpreted) (0:173):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 42 (i32)
- scope (local):
i32_arg: 42 (i32)
i32_local: 0 (i32)
+ var2: 0 (f32)
- scope (wasm-expression-stack):
at B (liftoff) (0:158):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 42 (i32)
- scope (local):
0: 0 (f32)
@@ -134,8 +121,6 @@ at A (liftoff) (0:128):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 42 (i32)
- scope (local):
var0: 42 (i32)
@@ -145,26 +130,23 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/ed01bcee byte offset 173: Wasm opcode 0x21
+Script wasm://wasm/e33badc2 byte offset 175: Wasm opcode 0x21 (kExprLocalSet)
Scope:
-at C (interpreted) (0:173):
+at C (interpreted) (0:175):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 42 (i32)
- scope (local):
i32_arg: 42 (i32)
i32_local: 0 (i32)
+ var2: 0 (f32)
- scope (wasm-expression-stack):
0: 47 (i32)
at B (liftoff) (0:158):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 42 (i32)
- scope (local):
0: 0 (f32)
@@ -180,8 +162,6 @@ at A (liftoff) (0:128):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 42 (i32)
- scope (local):
var0: 42 (i32)
@@ -191,25 +171,22 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/ed01bcee byte offset 175: Wasm opcode 0x0b
+Script wasm://wasm/e33badc2 byte offset 177: Wasm opcode 0x0b (kExprEnd)
Scope:
-at C (interpreted) (0:175):
+at C (interpreted) (0:177):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 42 (i32)
- scope (local):
i32_arg: 42 (i32)
i32_local: 47 (i32)
+ var2: 0 (f32)
- scope (wasm-expression-stack):
at B (liftoff) (0:158):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 42 (i32)
- scope (local):
0: 0 (f32)
@@ -225,8 +202,6 @@ at A (liftoff) (0:128):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 42 (i32)
- scope (local):
var0: 42 (i32)
@@ -236,14 +211,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/ed01bcee byte offset 160: Wasm opcode 0x1a
+Script wasm://wasm/e33badc2 byte offset 160: Wasm opcode 0x1a (kExprDrop)
Scope:
at B (liftoff) (0:160):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 42 (i32)
- scope (local):
0: 0 (f32)
@@ -259,8 +232,6 @@ at A (liftoff) (0:128):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 42 (i32)
- scope (local):
var0: 42 (i32)
@@ -270,14 +241,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/ed01bcee byte offset 161: Wasm opcode 0x1a
+Script wasm://wasm/e33badc2 byte offset 161: Wasm opcode 0x1a (kExprDrop)
Scope:
at B (liftoff) (0:161):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 42 (i32)
- scope (local):
0: 0 (f32)
@@ -292,8 +261,6 @@ at A (liftoff) (0:128):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 42 (i32)
- scope (local):
var0: 42 (i32)
@@ -303,14 +270,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/ed01bcee byte offset 162: Wasm opcode 0x0b
+Script wasm://wasm/e33badc2 byte offset 162: Wasm opcode 0x0b (kExprEnd)
Scope:
at B (liftoff) (0:162):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 42 (i32)
- scope (local):
0: 0 (f32)
@@ -324,8 +289,6 @@ at A (liftoff) (0:128):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 42 (i32)
- scope (local):
var0: 42 (i32)
@@ -335,14 +298,12 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
-Script wasm://wasm/ed01bcee byte offset 130: Wasm opcode 0x0b
+Script wasm://wasm/e33badc2 byte offset 130: Wasm opcode 0x0b (kExprEnd)
Scope:
at A (liftoff) (0:130):
- scope (module):
instance: exports: "exported_global" (Global), "exported_memory" (Memory), "exported_table" (Table), "main" (Function)
exported_memory: Uint8Array(65536)
- function tables:
- exported_table: js_func: function js_func() { [native code] }, anonymous: function () { [native code] }, A (liftoff): function 0() { [native code] }
globals: "exported_global": 42 (i32)
- scope (local):
var0: 42 (i32)
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff.js b/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff.js
index 7a5f8f4edc..4a6a8c7e4d 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff.js
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info-liftoff.js
@@ -104,6 +104,7 @@ async function instantiateWasm() {
// A third function which will be stepped through.
let func = builder.addFunction('C (interpreted)', kSig_v_i, ['i32_arg'])
.addLocals(kWasmI32, 1, ['i32_local'])
+ .addLocals(kWasmF32, 1, [''])
.addBody([
// Set global 0 to param 0.
kExprLocalGet, 0, kExprGlobalSet, 0,
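
Note on the hunk above: the empty string passed to addLocals leaves the new f32 local unnamed, so the inspector falls back to a generated name, which is why the expectations earlier in this diff gain a "var2: 0 (f32)" entry instead of a named local. A minimal sketch of the pattern, assuming the usual wasm-module-builder helpers; the body shown is illustrative, not the test's full body:

  const builder = new WasmModuleBuilder();
  builder.addFunction('C (interpreted)', kSig_v_i, ['i32_arg'])
      .addLocals(kWasmI32, 1, ['i32_local'])  // named local: shown as "i32_local"
      .addLocals(kWasmF32, 1, [''])           // unnamed local: shown as "var2"
      .addBody([kExprLocalGet, 0, kExprGlobalSet, 0]);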
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info.js b/deps/v8/test/inspector/debugger/wasm-scope-info.js
index b1b270edfc..74d024c027 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info.js
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info.js
@@ -90,6 +90,7 @@ async function instantiateWasm() {
.addLocals(kWasmI64, 1, ['i64_local'])
.addLocals(kWasmF64, 3, ['unicode☼f64', '0', '0'])
.addLocals(kWasmS128, 1)
+ .addLocals(kWasmF32, 1, [''])
.addBody([
// Set param 0 to 11.
kExprI32Const, 11, kExprLocalSet, 0,
@@ -108,6 +109,9 @@ async function instantiateWasm() {
kExprI32Const, 23,
kSimdPrefix, kExprI32x4Splat,
kExprLocalSet, 6,
+ // Set local 7 to 21
+ kExprI32Const, 21, kExprF32UConvertI32,
+ kExprLocalSet, 7,
// Set global 0 to 15
kExprI32Const, 15, kExprGlobalSet, 0,
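
For reference, kExprF32UConvertI32 encodes f32.convert_i32_u (0xb3): it pops one i32 and pushes its unsigned numeric value as an f32. The updated expectations above show exactly that sequence: the stack holds "0: 21 (i32)" at the conversion, "0: 21 (f32)" at the following local.set, and afterwards "var7: 21 (f32)". Annotated copy of the added fragment (comments are illustrative):

  kExprI32Const, 21,    // push 21             stack: [21 (i32)]
  kExprF32UConvertI32,  // f32.convert_i32_u   stack: [21 (f32)]
  kExprLocalSet, 7,     // var7 = 21 (f32)     stack: []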
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff-expected.txt b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff-expected.txt
index 8dce88c8cf..9511f986d8 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-liftoff-expected.txt
@@ -4,7 +4,7 @@ Waiting for wasm script (ignoring first non-wasm script).
Setting breakpoint at offset 38 on script wasm://wasm/0c10a5fe
Calling main(4)
Paused:
-Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01
+Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:38):
- scope (module):
@@ -29,7 +29,7 @@ Setting breakpoint at offset 49 on script v8://test/runWasm
Setting breakpoint at offset 45 on script v8://test/runWasm
Setting breakpoint at offset 47 on script v8://test/runWasm
Paused:
-Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01
+Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:39):
- scope (module):
@@ -45,7 +45,7 @@ at wasm_B (0:56):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20
+Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:45):
- scope (module):
@@ -56,7 +56,7 @@ at wasm_B (0:45):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04
+Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04 (kExprIf)
Scope:
at wasm_B (0:47):
- scope (module):
@@ -68,7 +68,7 @@ at wasm_B (0:47):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20
+Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:49):
- scope (module):
@@ -79,7 +79,7 @@ at wasm_B (0:49):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41
+Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41 (kExprI32Const)
Scope:
at wasm_B (0:51):
- scope (module):
@@ -91,7 +91,7 @@ at wasm_B (0:51):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b
+Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b (kExprI32Sub)
Scope:
at wasm_B (0:53):
- scope (module):
@@ -104,7 +104,7 @@ at wasm_B (0:53):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21
+Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
Scope:
at wasm_B (0:54):
- scope (module):
@@ -116,7 +116,7 @@ at wasm_B (0:54):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01
+Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:38):
- scope (module):
@@ -132,7 +132,7 @@ at wasm_B (0:56):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01
+Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:39):
- scope (module):
@@ -148,7 +148,7 @@ at wasm_B (0:56):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20
+Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:45):
- scope (module):
@@ -159,7 +159,7 @@ at wasm_B (0:45):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04
+Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04 (kExprIf)
Scope:
at wasm_B (0:47):
- scope (module):
@@ -171,7 +171,7 @@ at wasm_B (0:47):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20
+Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:49):
- scope (module):
@@ -182,7 +182,7 @@ at wasm_B (0:49):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41
+Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41 (kExprI32Const)
Scope:
at wasm_B (0:51):
- scope (module):
@@ -194,7 +194,7 @@ at wasm_B (0:51):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b
+Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b (kExprI32Sub)
Scope:
at wasm_B (0:53):
- scope (module):
@@ -207,7 +207,7 @@ at wasm_B (0:53):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21
+Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
Scope:
at wasm_B (0:54):
- scope (module):
@@ -219,7 +219,7 @@ at wasm_B (0:54):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01
+Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:38):
- scope (module):
@@ -235,7 +235,7 @@ at wasm_B (0:56):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01
+Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:39):
- scope (module):
@@ -251,7 +251,7 @@ at wasm_B (0:56):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20
+Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:45):
- scope (module):
@@ -262,7 +262,7 @@ at wasm_B (0:45):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04
+Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04 (kExprIf)
Scope:
at wasm_B (0:47):
- scope (module):
@@ -274,7 +274,7 @@ at wasm_B (0:47):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20
+Script wasm://wasm/0c10a5fe byte offset 49: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:49):
- scope (module):
@@ -285,7 +285,7 @@ at wasm_B (0:49):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41
+Script wasm://wasm/0c10a5fe byte offset 51: Wasm opcode 0x41 (kExprI32Const)
Scope:
at wasm_B (0:51):
- scope (module):
@@ -297,7 +297,7 @@ at wasm_B (0:51):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b
+Script wasm://wasm/0c10a5fe byte offset 53: Wasm opcode 0x6b (kExprI32Sub)
Scope:
at wasm_B (0:53):
- scope (module):
@@ -310,7 +310,7 @@ at wasm_B (0:53):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21
+Script wasm://wasm/0c10a5fe byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
Scope:
at wasm_B (0:54):
- scope (module):
@@ -322,7 +322,7 @@ at wasm_B (0:54):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01
+Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:38):
- scope (module):
@@ -338,7 +338,7 @@ at wasm_B (0:56):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01
+Script wasm://wasm/0c10a5fe byte offset 39: Wasm opcode 0x01 (kExprNop)
Scope:
at wasm_A (0:39):
- scope (module):
@@ -354,7 +354,7 @@ at wasm_B (0:56):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20
+Script wasm://wasm/0c10a5fe byte offset 45: Wasm opcode 0x20 (kExprLocalGet)
Scope:
at wasm_B (0:45):
- scope (module):
@@ -365,7 +365,7 @@ at wasm_B (0:45):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04
+Script wasm://wasm/0c10a5fe byte offset 47: Wasm opcode 0x04 (kExprIf)
Scope:
at wasm_B (0:47):
- scope (module):
@@ -377,7 +377,7 @@ at wasm_B (0:47):
at (anonymous) (0:17):
-- skipped
Paused:
-Script wasm://wasm/0c10a5fe byte offset 61: Wasm opcode 0x0b
+Script wasm://wasm/0c10a5fe byte offset 61: Wasm opcode 0x0b (kExprEnd)
Scope:
at wasm_B (0:61):
- scope (module):
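
Every change in this expectation file follows one pattern: the pause message now appends the opcode mnemonic after the raw byte. A hypothetical JavaScript sketch of the message shape, not V8's actual implementation:

  function formatPause(url, offset, opcode, mnemonic) {
    const hex = opcode.toString(16).padStart(2, '0');
    return `Script ${url} byte offset ${offset}: Wasm opcode 0x${hex} (${mnemonic})`;
  }
  formatPause('wasm://wasm/0c10a5fe', 38, 0x01, 'kExprNop');
  // => 'Script wasm://wasm/0c10a5fe byte offset 38: Wasm opcode 0x01 (kExprNop)'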
diff --git a/deps/v8/test/inspector/debugger/wasm-step-after-trap-expected.txt b/deps/v8/test/inspector/debugger/wasm-step-after-trap-expected.txt
index 3424b4e96c..3e3421ff87 100644
--- a/deps/v8/test/inspector/debugger/wasm-step-after-trap-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-step-after-trap-expected.txt
@@ -3,7 +3,7 @@ Instantiating.
Calling div function.
Paused at:
--- 0 ---
-Script wasm://wasm/a9a86c5e byte offset 46: Wasm opcode 0x6d
+Script wasm://wasm/a9a86c5e byte offset 46: Wasm opcode 0x6d (kExprI32DivS)
scope at div (0:46):
a: 1
b: 0
@@ -33,7 +33,7 @@ Paused at:
-> resume
Paused at:
--- 0 ---
-Script wasm://wasm/a9a86c5e byte offset 46: Wasm opcode 0x6d
+Script wasm://wasm/a9a86c5e byte offset 46: Wasm opcode 0x6d (kExprI32DivS)
scope at div (0:46):
a: -2147483648
b: -1
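
Both pauses above sit on i32.div_s (0x6d), which traps in exactly the two cases this test covers: division by zero (a: 1, b: 0) and signed overflow (a: -2147483648, b: -1, whose quotient 2^31 is not representable as an i32). A quick JavaScript cross-check of the overflow case, purely illustrative:

  const kMinInt32 = -(2 ** 31);  // -2147483648
  console.log(kMinInt32 / -1);   // 2147483648, outside i32 range, so wasm traps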
diff --git a/deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position-expected.txt b/deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position-expected.txt
new file mode 100644
index 0000000000..28e1bbe091
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position-expected.txt
@@ -0,0 +1,12 @@
+Step into a function that starts with a non-breakable opcode (i.e. block), then step from there. See https://crbug.com/1137710.
+Setting up global instance variable.
+Got wasm script: wasm://wasm/4658c40e
+Setting breakpoint on offset 44
+Running main function.
+Script wasm://wasm/4658c40e byte offset 44: Wasm opcode 0x10 (kExprCallFunction)
+Debugger.stepInto called
+Script wasm://wasm/4658c40e byte offset 40: Wasm opcode 0x0b (kExprEnd)
+Debugger.stepInto called
+Script wasm://wasm/4658c40e byte offset 41: Wasm opcode 0x0b (kExprEnd)
+Debugger.resume called
+exports.main returned.
diff --git a/deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position.js b/deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position.js
new file mode 100644
index 0000000000..2576d1364a
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-step-from-non-breakable-position.js
@@ -0,0 +1,54 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+utils.load('test/inspector/wasm-inspector-test.js');
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Step into a function that starts with a non-breakable opcode (i.e. ' +
+ 'block), then step from there. See https://crbug.com/1137710.');
+session.setupScriptMap();
+
+var builder = new WasmModuleBuilder();
+
+var callee = builder.addFunction('callee', kSig_v_v)
+ .addBody([kExprBlock, kWasmStmt, kExprEnd])
+ .index;
+
+var main = builder.addFunction('main', kSig_v_i)
+ .addBody([kExprCallFunction, callee])
+ .exportFunc();
+
+var module_bytes = builder.toArray();
+
+(async function test() {
+ InspectorTest.logProtocolCommandCalls('Debugger.stepInto');
+ InspectorTest.logProtocolCommandCalls('Debugger.resume');
+
+ await Protocol.Debugger.enable();
+ InspectorTest.log('Setting up global instance variable.');
+ WasmInspectorTest.instantiate(module_bytes);
+ const [, {params: wasmScript}] = await Protocol.Debugger.onceScriptParsed(2);
+
+ InspectorTest.log(`Got wasm script: ${wasmScript.url}`);
+
+ // Set a breakpoint in 'main', at the call.
+ InspectorTest.log(`Setting breakpoint on offset ${main.body_offset}`);
+ await Protocol.Debugger.setBreakpoint({
+ location: {
+ scriptId: wasmScript.scriptId,
+ lineNumber: 0,
+ columnNumber: main.body_offset
+ }
+ });
+
+ InspectorTest.log('Running main function.');
+ Protocol.Runtime.evaluate({ expression: 'instance.exports.main()' });
+ for (let action of ['stepInto', 'stepInto', 'resume']) {
+ const {params: {callFrames}} = await Protocol.Debugger.oncePaused();
+ await session.logSourceLocation(callFrames[0].location);
+ Protocol.Debugger[action]();
+ }
+ InspectorTest.log('exports.main returned.');
+})().catch(reason => InspectorTest.log(`Failed: ${reason}`))
+ .finally(InspectorTest.completeTest);
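
The behavior under test: kExprBlock is not a breakable position, so stepping into callee cannot pause on its first opcode; per the expected output it lands on the block's kExprEnd at offset 40 and then on the function's closing kExprEnd at offset 41. An annotated copy of the callee definition; the byte offsets are inferred from that output, not stated in the source:

  var callee = builder.addFunction('callee', kSig_v_v)
      .addBody([
        kExprBlock, kWasmStmt,  // assumed offsets 38-39: non-breakable
        kExprEnd,               // offset 40: first stepInto pause
      ])                        // implicit function end, offset 41: second pause
      .index;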
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets-expected.txt
deleted file mode 100644
index 72dd1f9cf6..0000000000
--- a/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets-expected.txt
+++ /dev/null
@@ -1,317 +0,0 @@
-Tests stepping through wasm scripts by byte offsets
-Setting up global instance variable.
-Got wasm script: wasm://wasm/befe41aa
-Setting breakpoint on offset 59 (should be propagated to 60, the offset of the call), url wasm://wasm/befe41aa
-{
- columnNumber : 60
- lineNumber : 0
- scriptId : <scriptId>
-}
-Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10
-at wasm_B (0:60):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":3}
- - scope (wasm-expression-stack):
- {"0":1024}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepInto called
-Script wasm://wasm/befe41aa byte offset 39: Wasm opcode 0x01
-at wasm_A (0:39):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":1024}
- - scope (wasm-expression-stack):
- {}
-at wasm_B (0:60):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":3}
- - scope (wasm-expression-stack):
- {}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepOver called
-Script wasm://wasm/befe41aa byte offset 40: Wasm opcode 0x01
-at wasm_A (0:40):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":1024}
- - scope (wasm-expression-stack):
- {}
-at wasm_B (0:60):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":3}
- - scope (wasm-expression-stack):
- {}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepOut called
-Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
-at wasm_B (0:62):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":3}
- - scope (wasm-expression-stack):
- {}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepOut called
-Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10
-at wasm_B (0:60):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":2}
- - scope (wasm-expression-stack):
- {"0":1024}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepOver called
-Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
-at wasm_B (0:62):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":2}
- - scope (wasm-expression-stack):
- {}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepInto called
-Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20
-at wasm_B (0:46):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":2}
- - scope (wasm-expression-stack):
- {}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.resume called
-Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10
-at wasm_B (0:60):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":1}
- - scope (wasm-expression-stack):
- {"0":1024}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepInto called
-Script wasm://wasm/befe41aa byte offset 39: Wasm opcode 0x01
-at wasm_A (0:39):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":1024}
- - scope (wasm-expression-stack):
- {}
-at wasm_B (0:60):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":1}
- - scope (wasm-expression-stack):
- {}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepOut called
-Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
-at wasm_B (0:62):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":1}
- - scope (wasm-expression-stack):
- {}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepInto called
-Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20
-at wasm_B (0:46):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":1}
- - scope (wasm-expression-stack):
- {}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepInto called
-Script wasm://wasm/befe41aa byte offset 48: Wasm opcode 0x04
-at wasm_B (0:48):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":1}
- - scope (wasm-expression-stack):
- {"0":1}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepInto called
-Script wasm://wasm/befe41aa byte offset 50: Wasm opcode 0x20
-at wasm_B (0:50):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":1}
- - scope (wasm-expression-stack):
- {}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepInto called
-Script wasm://wasm/befe41aa byte offset 52: Wasm opcode 0x41
-at wasm_B (0:52):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":1}
- - scope (wasm-expression-stack):
- {"0":1}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepInto called
-Script wasm://wasm/befe41aa byte offset 54: Wasm opcode 0x6b
-at wasm_B (0:54):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":1}
- - scope (wasm-expression-stack):
- {"0":1,"1":1}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepInto called
-Script wasm://wasm/befe41aa byte offset 55: Wasm opcode 0x21
-at wasm_B (0:55):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":1}
- - scope (wasm-expression-stack):
- {"0":0}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepInto called
-Script wasm://wasm/befe41aa byte offset 57: Wasm opcode 0x41
-at wasm_B (0:57):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":0}
- - scope (wasm-expression-stack):
- {}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepInto called
-Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10
-at wasm_B (0:60):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":0}
- - scope (wasm-expression-stack):
- {"0":1024}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepInto called
-Script wasm://wasm/befe41aa byte offset 39: Wasm opcode 0x01
-at wasm_A (0:39):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":1024}
- - scope (wasm-expression-stack):
- {}
-at wasm_B (0:60):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":0}
- - scope (wasm-expression-stack):
- {}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepInto called
-Script wasm://wasm/befe41aa byte offset 40: Wasm opcode 0x01
-at wasm_A (0:40):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":1024}
- - scope (wasm-expression-stack):
- {}
-at wasm_B (0:60):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":0}
- - scope (wasm-expression-stack):
- {}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepInto called
-Script wasm://wasm/befe41aa byte offset 41: Wasm opcode 0x0b
-at wasm_A (0:41):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":1024}
- - scope (wasm-expression-stack):
- {}
-at wasm_B (0:60):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":0}
- - scope (wasm-expression-stack):
- {}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.stepInto called
-Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
-at wasm_B (0:62):
- - scope (module):
- -- skipped
- - scope (local):
- {"var0":0}
- - scope (wasm-expression-stack):
- {}
-at (anonymous) (0:17):
- - scope (global):
- -- skipped
-Debugger.resume called
-exports.main returned!
-Finished!
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets.js b/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets.js
deleted file mode 100644
index b4fdc7ae3e..0000000000
--- a/deps/v8/test/inspector/debugger/wasm-stepping-byte-offsets.js
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-utils.load('test/inspector/wasm-inspector-test.js');
-
-let {session, contextGroup, Protocol} =
- InspectorTest.start('Tests stepping through wasm scripts by byte offsets');
-session.setupScriptMap();
-
-var builder = new WasmModuleBuilder();
-
-var func_a_idx =
- builder.addFunction('wasm_A', kSig_v_i).addBody([kExprNop, kExprNop]).index;
-
-// wasm_B calls wasm_A <param0> times.
-builder.addFunction('wasm_B', kSig_v_i)
- .addBody([
- // clang-format off
- kExprLoop, kWasmStmt, // while
- kExprLocalGet, 0, // -
- kExprIf, kWasmStmt, // if <param0> != 0
- kExprLocalGet, 0, // -
- kExprI32Const, 1, // -
- kExprI32Sub, // -
- kExprLocalSet, 0, // decrease <param0>
- ...wasmI32Const(1024), // some longer i32 const (2 byte imm)
- kExprCallFunction, func_a_idx, // -
- kExprBr, 1, // continue
- kExprEnd, // -
- kExprEnd, // break
- // clang-format on
- ])
- .exportAs('main');
-
-
-var module_bytes = builder.toArray();
-
-(async function test() {
- for (const action of ['stepInto', 'stepOver', 'stepOut', 'resume'])
- InspectorTest.logProtocolCommandCalls('Debugger.' + action);
-
- await Protocol.Debugger.enable();
- InspectorTest.log('Setting up global instance variable.');
- WasmInspectorTest.instantiate(module_bytes);
- const [, {params: wasmScript}] = await Protocol.Debugger.onceScriptParsed(2);
-
- InspectorTest.log('Got wasm script: ' + wasmScript.url);
-
- // Set the breakpoint on a non-breakable position. This should resolve to the
- // next instruction.
- InspectorTest.log(
- `Setting breakpoint on offset 59 (should be propagated to 60, the ` +
- `offset of the call), url ${wasmScript.url}`);
- const bpmsg = await Protocol.Debugger.setBreakpoint({
- location: {scriptId: wasmScript.scriptId, lineNumber: 0, columnNumber: 59}
- });
-
- const actualLocation = bpmsg.result.actualLocation;
- InspectorTest.logMessage(actualLocation);
- Protocol.Runtime.evaluate({ expression: 'instance.exports.main(4)' });
- await waitForPauseAndStep('stepInto'); // into call to wasm_A
- await waitForPauseAndStep('stepOver'); // over first nop
- await waitForPauseAndStep('stepOut'); // out of wasm_A
- await waitForPauseAndStep('stepOut'); // out of wasm_B, stop on breakpoint
- await waitForPauseAndStep('stepOver'); // over call
- await waitForPauseAndStep('stepInto'); // == stepOver br
- await waitForPauseAndStep('resume'); // to next breakpoint (3rd iteration)
- await waitForPauseAndStep('stepInto'); // into wasm_A
- await waitForPauseAndStep('stepOut'); // out to wasm_B
- // Now step 9 times, until we are in wasm_A again.
- for (let i = 0; i < 9; ++i) await waitForPauseAndStep('stepInto');
- // 3 more times, back to wasm_B.
- for (let i = 0; i < 3; ++i) await waitForPauseAndStep('stepInto');
- // Then just resume.
- await waitForPauseAndStep('resume');
- InspectorTest.log('exports.main returned!');
- InspectorTest.log('Finished!');
-})().catch(reason => InspectorTest.log(`Failed: ${reason}`))
- .finally(InspectorTest.completeTest);
-
-async function waitForPauseAndStep(stepAction) {
- const {params: {callFrames}} = await Protocol.Debugger.oncePaused();
- await session.logSourceLocation(callFrames[0].location);
- for (var frame of callFrames) {
- const functionName = frame.functionName || '(anonymous)';
- const lineNumber = frame.location.lineNumber;
- const columnNumber = frame.location.columnNumber;
- InspectorTest.log(`at ${functionName} (${lineNumber}:${columnNumber}):`);
- for (var scope of frame.scopeChain) {
- InspectorTest.logObject(' - scope (' + scope.type + '):');
- if (scope.type === 'module' || scope.type === 'global') {
- InspectorTest.logObject(' -- skipped');
- } else {
- const {result: {result: {value}}} =
- await Protocol.Runtime.callFunctionOn({
- objectId: scope.object.objectId,
- functionDeclaration: 'function() { return this; }',
- returnByValue: true
- });
- InspectorTest.log(` ${JSON.stringify(value)}`);
- }
- }
- }
- Protocol.Debugger[stepAction]();
-}
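
The deleted helper above also records the standard pattern for dumping a pause's local and expression-stack scopes by value: call Runtime.callFunctionOn against the scope object with returnByValue: true. Condensed from the file for reference (runs inside the async test loop):

  const {result: {result: {value}}} = await Protocol.Runtime.callFunctionOn({
    objectId: scope.object.objectId,  // scope object of the paused frame
    functionDeclaration: 'function() { return this; }',
    returnByValue: true,              // serialize the scope contents as JSON
  });
  InspectorTest.log(`  ${JSON.stringify(value)}`);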
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-expected.txt
new file mode 100644
index 0000000000..5d019fcb6c
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-expected.txt
@@ -0,0 +1,69 @@
+Tests stepping through wasm scripts by byte offsets
+Setting up global instance variable.
+Got wasm script: wasm://wasm/42af3c82
+Setting breakpoint on offset 72 (should be propagated to 73, the offset of the call), url wasm://wasm/42af3c82
+{
+ columnNumber : 73
+ lineNumber : 0
+ scriptId : <scriptId>
+}
+Script wasm://wasm/42af3c82 byte offset 73: Wasm opcode 0x10 (kExprCallFunction)
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 52: Wasm opcode 0x01 (kExprNop)
+Debugger.stepOver called
+Script wasm://wasm/42af3c82 byte offset 53: Wasm opcode 0x01 (kExprNop)
+Debugger.stepOut called
+Script wasm://wasm/42af3c82 byte offset 75: Wasm opcode 0x0c (kExprBr)
+Debugger.stepOut called
+Script wasm://wasm/42af3c82 byte offset 73: Wasm opcode 0x10 (kExprCallFunction)
+Debugger.stepOver called
+Script wasm://wasm/42af3c82 byte offset 75: Wasm opcode 0x0c (kExprBr)
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 59: Wasm opcode 0x20 (kExprLocalGet)
+Debugger.resume called
+Script wasm://wasm/42af3c82 byte offset 73: Wasm opcode 0x10 (kExprCallFunction)
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 52: Wasm opcode 0x01 (kExprNop)
+Debugger.stepOut called
+Script wasm://wasm/42af3c82 byte offset 75: Wasm opcode 0x0c (kExprBr)
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 59: Wasm opcode 0x20 (kExprLocalGet)
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 61: Wasm opcode 0x04 (kExprIf)
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 63: Wasm opcode 0x20 (kExprLocalGet)
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 65: Wasm opcode 0x41 (kExprI32Const)
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 67: Wasm opcode 0x6b (kExprI32Sub)
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 68: Wasm opcode 0x21 (kExprLocalSet)
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 70: Wasm opcode 0x41 (kExprI32Const)
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 73: Wasm opcode 0x10 (kExprCallFunction)
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 52: Wasm opcode 0x01 (kExprNop)
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 53: Wasm opcode 0x01 (kExprNop)
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 54: Wasm opcode 0x0b (kExprEnd)
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 75: Wasm opcode 0x0c (kExprBr)
+Debugger.stepInto called
+Script wasm://wasm/42af3c82 byte offset 59: Wasm opcode 0x20 (kExprLocalGet)
+Debugger.resume called
+exports.main returned!
+Test stepping over a recursive call
+Setting breakpoint on the recursive call instruction @+93, url wasm://wasm/42af3c82
+{
+ columnNumber : 93
+ lineNumber : 0
+ scriptId : <scriptId>
+}
+Script wasm://wasm/42af3c82 byte offset 93: Wasm opcode 0x10 (kExprCallFunction)
+Removing breakpoint
+Debugger.stepOver called
+Script wasm://wasm/42af3c82 byte offset 95: Wasm opcode 0x20 (kExprLocalGet)
+Debugger.resume called
+Finished!
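
The "offset 72 (should be propagated to 73 ...)" lines exercise breakpoint resolution: a breakpoint requested at a non-breakable byte offset is moved forward to the next instruction, and the actualLocation in the response reflects the move. A minimal protocol sketch, reusing the names from the deleted wasm-stepping-byte-offsets.js earlier in this diff (illustrative only):

  const bpmsg = await Protocol.Debugger.setBreakpoint({
    location: {scriptId: wasmScript.scriptId, lineNumber: 0, columnNumber: 72}
  });
  // Expect resolution to the call instruction:
  InspectorTest.logMessage(bpmsg.result.actualLocation);  // columnNumber: 73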
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-in-from-js-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-in-from-js-expected.txt
index d4b4552a74..b22ff77a53 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-in-from-js-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-in-from-js-expected.txt
@@ -22,10 +22,10 @@ paused
Debugger.stepInto
paused
-Script wasm://wasm/7d022e0e byte offset 35: Wasm opcode 0x20
+Script wasm://wasm/7d022e0e byte offset 35: Wasm opcode 0x20 (kExprLocalGet)
Debugger.resume
paused
-Script wasm://wasm/7d022e0e byte offset 37: Wasm opcode 0x41
+Script wasm://wasm/7d022e0e byte offset 37: Wasm opcode 0x41 (kExprI32Const)
Debugger.resume
exports.main returned!
Finished!
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-liftoff-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-liftoff-expected.txt
deleted file mode 100644
index 2f4194cd53..0000000000
--- a/deps/v8/test/inspector/debugger/wasm-stepping-liftoff-expected.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-Tests stepping through wasm scripts by byte offsets
-Setting up global instance variable.
-Got wasm script: wasm://wasm/42af3c82
-Setting breakpoint on offset 72 (should be propagated to 73, the offset of the call), url wasm://wasm/42af3c82
-{
- columnNumber : 73
- lineNumber : 0
- scriptId : <scriptId>
-}
-Script wasm://wasm/42af3c82 byte offset 73: Wasm opcode 0x10
-Debugger.stepInto called
-Script wasm://wasm/42af3c82 byte offset 52: Wasm opcode 0x01
-Debugger.stepOver called
-Script wasm://wasm/42af3c82 byte offset 53: Wasm opcode 0x01
-Debugger.stepOut called
-Script wasm://wasm/42af3c82 byte offset 75: Wasm opcode 0x0c
-Debugger.stepOut called
-Script wasm://wasm/42af3c82 byte offset 73: Wasm opcode 0x10
-Debugger.stepOver called
-Script wasm://wasm/42af3c82 byte offset 75: Wasm opcode 0x0c
-Debugger.stepInto called
-Script wasm://wasm/42af3c82 byte offset 59: Wasm opcode 0x20
-Debugger.resume called
-Script wasm://wasm/42af3c82 byte offset 73: Wasm opcode 0x10
-Debugger.stepInto called
-Script wasm://wasm/42af3c82 byte offset 52: Wasm opcode 0x01
-Debugger.stepOut called
-Script wasm://wasm/42af3c82 byte offset 75: Wasm opcode 0x0c
-Debugger.stepInto called
-Script wasm://wasm/42af3c82 byte offset 59: Wasm opcode 0x20
-Debugger.stepInto called
-Script wasm://wasm/42af3c82 byte offset 61: Wasm opcode 0x04
-Debugger.stepInto called
-Script wasm://wasm/42af3c82 byte offset 63: Wasm opcode 0x20
-Debugger.stepInto called
-Script wasm://wasm/42af3c82 byte offset 65: Wasm opcode 0x41
-Debugger.stepInto called
-Script wasm://wasm/42af3c82 byte offset 67: Wasm opcode 0x6b
-Debugger.stepInto called
-Script wasm://wasm/42af3c82 byte offset 68: Wasm opcode 0x21
-Debugger.stepInto called
-Script wasm://wasm/42af3c82 byte offset 70: Wasm opcode 0x41
-Debugger.stepInto called
-Script wasm://wasm/42af3c82 byte offset 73: Wasm opcode 0x10
-Debugger.stepInto called
-Script wasm://wasm/42af3c82 byte offset 52: Wasm opcode 0x01
-Debugger.stepInto called
-Script wasm://wasm/42af3c82 byte offset 53: Wasm opcode 0x01
-Debugger.stepInto called
-Script wasm://wasm/42af3c82 byte offset 54: Wasm opcode 0x0b
-Debugger.stepInto called
-Script wasm://wasm/42af3c82 byte offset 75: Wasm opcode 0x0c
-Debugger.stepInto called
-Script wasm://wasm/42af3c82 byte offset 59: Wasm opcode 0x20
-Debugger.resume called
-exports.main returned!
-Test stepping over a recursive call
-Setting breakpoint on the recursive call instruction @+93, url wasm://wasm/42af3c82
-{
- columnNumber : 93
- lineNumber : 0
- scriptId : <scriptId>
-}
-Script wasm://wasm/42af3c82 byte offset 93: Wasm opcode 0x10
-Removing breakpoint
-Debugger.stepOver called
-Script wasm://wasm/42af3c82 byte offset 95: Wasm opcode 0x20
-Debugger.resume called
-Finished!
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-to-js-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-to-js-expected.txt
index 37deb683b8..4b3bd5dd60 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-to-js-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-to-js-expected.txt
@@ -17,7 +17,7 @@ function test() {
Debugger.resume
paused
-Script wasm://wasm/242f4a16 byte offset 33: Wasm opcode 0x01
+Script wasm://wasm/242f4a16 byte offset 33: Wasm opcode 0x01 (kExprNop)
Debugger.stepOut
paused
instance.exports.main();
@@ -36,10 +36,10 @@ function test() {
Debugger.resume
paused
-Script wasm://wasm/242f4a16 byte offset 33: Wasm opcode 0x01
+Script wasm://wasm/242f4a16 byte offset 33: Wasm opcode 0x01 (kExprNop)
Debugger.stepOver
paused
-Script wasm://wasm/242f4a16 byte offset 34: Wasm opcode 0x0b
+Script wasm://wasm/242f4a16 byte offset 34: Wasm opcode 0x0b (kExprEnd)
Debugger.resume
exports.main returned!
Finished run 2!
@@ -52,10 +52,10 @@ function test() {
Debugger.resume
paused
-Script wasm://wasm/242f4a16 byte offset 33: Wasm opcode 0x01
+Script wasm://wasm/242f4a16 byte offset 33: Wasm opcode 0x01 (kExprNop)
Debugger.stepInto
paused
-Script wasm://wasm/242f4a16 byte offset 34: Wasm opcode 0x0b
+Script wasm://wasm/242f4a16 byte offset 34: Wasm opcode 0x0b (kExprEnd)
Debugger.resume
exports.main returned!
Finished run 3!
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-with-skiplist-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-with-skiplist-expected.txt
index ce23513810..e501728678 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-with-skiplist-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-with-skiplist-expected.txt
@@ -7,83 +7,83 @@ Got wasm script: wasm://wasm/befe41aa
scriptId : <scriptId>
}
Test with valid skip lists
-Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20
+Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20 (kExprLocalGet)
Test: Stepping over without skip list
Testing stepOver with skipList: []
-Script wasm://wasm/befe41aa byte offset 48: Wasm opcode 0x04
-Script wasm://wasm/befe41aa byte offset 50: Wasm opcode 0x20
-Script wasm://wasm/befe41aa byte offset 52: Wasm opcode 0x41
-Script wasm://wasm/befe41aa byte offset 54: Wasm opcode 0x6b
-Script wasm://wasm/befe41aa byte offset 55: Wasm opcode 0x21
-Script wasm://wasm/befe41aa byte offset 57: Wasm opcode 0x41
-Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10
-Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
-Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20
+Script wasm://wasm/befe41aa byte offset 48: Wasm opcode 0x04 (kExprIf)
+Script wasm://wasm/befe41aa byte offset 50: Wasm opcode 0x20 (kExprLocalGet)
+Script wasm://wasm/befe41aa byte offset 52: Wasm opcode 0x41 (kExprI32Const)
+Script wasm://wasm/befe41aa byte offset 54: Wasm opcode 0x6b (kExprI32Sub)
+Script wasm://wasm/befe41aa byte offset 55: Wasm opcode 0x21 (kExprLocalSet)
+Script wasm://wasm/befe41aa byte offset 57: Wasm opcode 0x41 (kExprI32Const)
+Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10 (kExprCallFunction)
+Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c (kExprBr)
+Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20 (kExprLocalGet)
Test: Stepping over with skip list
Testing stepOver with skipList: [{"scriptId":"4","start":{"lineNumber":0,"columnNumber":48},"end":{"lineNumber":0,"columnNumber":50}},{"scriptId":"4","start":{"lineNumber":0,"columnNumber":60},"end":{"lineNumber":0,"columnNumber":62}}]
-Script wasm://wasm/befe41aa byte offset 50: Wasm opcode 0x20
-Script wasm://wasm/befe41aa byte offset 52: Wasm opcode 0x41
-Script wasm://wasm/befe41aa byte offset 54: Wasm opcode 0x6b
-Script wasm://wasm/befe41aa byte offset 55: Wasm opcode 0x21
-Script wasm://wasm/befe41aa byte offset 57: Wasm opcode 0x41
-Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
-Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20
+Script wasm://wasm/befe41aa byte offset 50: Wasm opcode 0x20 (kExprLocalGet)
+Script wasm://wasm/befe41aa byte offset 52: Wasm opcode 0x41 (kExprI32Const)
+Script wasm://wasm/befe41aa byte offset 54: Wasm opcode 0x6b (kExprI32Sub)
+Script wasm://wasm/befe41aa byte offset 55: Wasm opcode 0x21 (kExprLocalSet)
+Script wasm://wasm/befe41aa byte offset 57: Wasm opcode 0x41 (kExprI32Const)
+Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c (kExprBr)
+Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20 (kExprLocalGet)
Test: Stepping over start location is inclusive
Testing stepOver with skipList: [{"scriptId":"4","start":{"lineNumber":0,"columnNumber":48},"end":{"lineNumber":0,"columnNumber":61}}]
-Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
-Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20
+Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c (kExprBr)
+Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20 (kExprLocalGet)
Test: Stepping over end location is exclusive
Testing stepOver with skipList: [{"scriptId":"4","start":{"lineNumber":0,"columnNumber":49},"end":{"lineNumber":0,"columnNumber":62}}]
-Script wasm://wasm/befe41aa byte offset 48: Wasm opcode 0x04
-Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
-Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20
+Script wasm://wasm/befe41aa byte offset 48: Wasm opcode 0x04 (kExprIf)
+Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c (kExprBr)
+Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20 (kExprLocalGet)
Test: Stepping into without skip list
Testing stepInto with skipList: []
-Script wasm://wasm/befe41aa byte offset 48: Wasm opcode 0x04
-Script wasm://wasm/befe41aa byte offset 50: Wasm opcode 0x20
-Script wasm://wasm/befe41aa byte offset 52: Wasm opcode 0x41
-Script wasm://wasm/befe41aa byte offset 54: Wasm opcode 0x6b
-Script wasm://wasm/befe41aa byte offset 55: Wasm opcode 0x21
-Script wasm://wasm/befe41aa byte offset 57: Wasm opcode 0x41
-Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10
-Script wasm://wasm/befe41aa byte offset 39: Wasm opcode 0x01
-Script wasm://wasm/befe41aa byte offset 40: Wasm opcode 0x01
-Script wasm://wasm/befe41aa byte offset 41: Wasm opcode 0x0b
-Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
-Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20
+Script wasm://wasm/befe41aa byte offset 48: Wasm opcode 0x04 (kExprIf)
+Script wasm://wasm/befe41aa byte offset 50: Wasm opcode 0x20 (kExprLocalGet)
+Script wasm://wasm/befe41aa byte offset 52: Wasm opcode 0x41 (kExprI32Const)
+Script wasm://wasm/befe41aa byte offset 54: Wasm opcode 0x6b (kExprI32Sub)
+Script wasm://wasm/befe41aa byte offset 55: Wasm opcode 0x21 (kExprLocalSet)
+Script wasm://wasm/befe41aa byte offset 57: Wasm opcode 0x41 (kExprI32Const)
+Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10 (kExprCallFunction)
+Script wasm://wasm/befe41aa byte offset 39: Wasm opcode 0x01 (kExprNop)
+Script wasm://wasm/befe41aa byte offset 40: Wasm opcode 0x01 (kExprNop)
+Script wasm://wasm/befe41aa byte offset 41: Wasm opcode 0x0b (kExprEnd)
+Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c (kExprBr)
+Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20 (kExprLocalGet)
Test: Stepping into with skip list, while call itself is skipped
Testing stepInto with skipList: [{"scriptId":"4","start":{"lineNumber":0,"columnNumber":39},"end":{"lineNumber":0,"columnNumber":41}},{"scriptId":"4","start":{"lineNumber":0,"columnNumber":50},"end":{"lineNumber":0,"columnNumber":62}}]
-Script wasm://wasm/befe41aa byte offset 48: Wasm opcode 0x04
-Script wasm://wasm/befe41aa byte offset 41: Wasm opcode 0x0b
-Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
-Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20
+Script wasm://wasm/befe41aa byte offset 48: Wasm opcode 0x04 (kExprIf)
+Script wasm://wasm/befe41aa byte offset 41: Wasm opcode 0x0b (kExprEnd)
+Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c (kExprBr)
+Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20 (kExprLocalGet)
Test: Stepping into start location is inclusive
Testing stepInto with skipList: [{"scriptId":"4","start":{"lineNumber":0,"columnNumber":39},"end":{"lineNumber":0,"columnNumber":40}}]
-Script wasm://wasm/befe41aa byte offset 48: Wasm opcode 0x04
-Script wasm://wasm/befe41aa byte offset 50: Wasm opcode 0x20
-Script wasm://wasm/befe41aa byte offset 52: Wasm opcode 0x41
-Script wasm://wasm/befe41aa byte offset 54: Wasm opcode 0x6b
-Script wasm://wasm/befe41aa byte offset 55: Wasm opcode 0x21
-Script wasm://wasm/befe41aa byte offset 57: Wasm opcode 0x41
-Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10
-Script wasm://wasm/befe41aa byte offset 40: Wasm opcode 0x01
-Script wasm://wasm/befe41aa byte offset 41: Wasm opcode 0x0b
-Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
-Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20
+Script wasm://wasm/befe41aa byte offset 48: Wasm opcode 0x04 (kExprIf)
+Script wasm://wasm/befe41aa byte offset 50: Wasm opcode 0x20 (kExprLocalGet)
+Script wasm://wasm/befe41aa byte offset 52: Wasm opcode 0x41 (kExprI32Const)
+Script wasm://wasm/befe41aa byte offset 54: Wasm opcode 0x6b (kExprI32Sub)
+Script wasm://wasm/befe41aa byte offset 55: Wasm opcode 0x21 (kExprLocalSet)
+Script wasm://wasm/befe41aa byte offset 57: Wasm opcode 0x41 (kExprI32Const)
+Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10 (kExprCallFunction)
+Script wasm://wasm/befe41aa byte offset 40: Wasm opcode 0x01 (kExprNop)
+Script wasm://wasm/befe41aa byte offset 41: Wasm opcode 0x0b (kExprEnd)
+Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c (kExprBr)
+Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20 (kExprLocalGet)
Test: Stepping into end location is exclusive
Testing stepInto with skipList: [{"scriptId":"4","start":{"lineNumber":0,"columnNumber":38},"end":{"lineNumber":0,"columnNumber":41}}]
-Script wasm://wasm/befe41aa byte offset 48: Wasm opcode 0x04
-Script wasm://wasm/befe41aa byte offset 50: Wasm opcode 0x20
-Script wasm://wasm/befe41aa byte offset 52: Wasm opcode 0x41
-Script wasm://wasm/befe41aa byte offset 54: Wasm opcode 0x6b
-Script wasm://wasm/befe41aa byte offset 55: Wasm opcode 0x21
-Script wasm://wasm/befe41aa byte offset 57: Wasm opcode 0x41
-Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10
-Script wasm://wasm/befe41aa byte offset 41: Wasm opcode 0x0b
-Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c
-Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20
+Script wasm://wasm/befe41aa byte offset 48: Wasm opcode 0x04 (kExprIf)
+Script wasm://wasm/befe41aa byte offset 50: Wasm opcode 0x20 (kExprLocalGet)
+Script wasm://wasm/befe41aa byte offset 52: Wasm opcode 0x41 (kExprI32Const)
+Script wasm://wasm/befe41aa byte offset 54: Wasm opcode 0x6b (kExprI32Sub)
+Script wasm://wasm/befe41aa byte offset 55: Wasm opcode 0x21 (kExprLocalSet)
+Script wasm://wasm/befe41aa byte offset 57: Wasm opcode 0x41 (kExprI32Const)
+Script wasm://wasm/befe41aa byte offset 60: Wasm opcode 0x10 (kExprCallFunction)
+Script wasm://wasm/befe41aa byte offset 41: Wasm opcode 0x0b (kExprEnd)
+Script wasm://wasm/befe41aa byte offset 62: Wasm opcode 0x0c (kExprBr)
+Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20 (kExprLocalGet)
Test with invalid skip lists
-Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20
+Script wasm://wasm/befe41aa byte offset 46: Wasm opcode 0x20 (kExprLocalGet)
Test: start position has invalid column number
Testing stepOver with skipList: [{"scriptId":"4","start":{"lineNumber":0,"columnNumber":-1},"end":{"lineNumber":0,"columnNumber":62}}]
Position missing 'column' or 'column' < 0.
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt
index 1e58407b5e..bf423acd1a 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-with-source-map-expected.txt
@@ -9,7 +9,7 @@ Setting breakpoint on offset 54 (on the setlocal before the call), url wasm://wa
lineNumber : 0
scriptId : <scriptId>
}
-Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21
+Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
at wasm_B (0:54):
- scope (module):
-- skipped
@@ -21,7 +21,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10
+Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10 (kExprCallFunction)
at wasm_B (0:56):
- scope (module):
-- skipped
@@ -33,7 +33,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Script wasm://wasm/9b4bf87e byte offset 38: Wasm opcode 0x01
+Script wasm://wasm/9b4bf87e byte offset 38: Wasm opcode 0x01 (kExprNop)
at wasm_A (0:38):
- scope (module):
-- skipped
@@ -52,7 +52,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOver called
-Script wasm://wasm/9b4bf87e byte offset 39: Wasm opcode 0x01
+Script wasm://wasm/9b4bf87e byte offset 39: Wasm opcode 0x01 (kExprNop)
at wasm_A (0:39):
- scope (module):
-- skipped
@@ -71,7 +71,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOut called
-Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c
+Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c (kExprBr)
at wasm_B (0:58):
- scope (module):
-- skipped
@@ -83,7 +83,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOut called
-Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21
+Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
at wasm_B (0:54):
- scope (module):
-- skipped
@@ -95,7 +95,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOver called
-Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10
+Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10 (kExprCallFunction)
at wasm_B (0:56):
- scope (module):
-- skipped
@@ -107,7 +107,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOver called
-Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c
+Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c (kExprBr)
at wasm_B (0:58):
- scope (module):
-- skipped
@@ -119,7 +119,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.resume called
-Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21
+Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
at wasm_B (0:54):
- scope (module):
-- skipped
@@ -131,7 +131,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10
+Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10 (kExprCallFunction)
at wasm_B (0:56):
- scope (module):
-- skipped
@@ -143,7 +143,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Script wasm://wasm/9b4bf87e byte offset 38: Wasm opcode 0x01
+Script wasm://wasm/9b4bf87e byte offset 38: Wasm opcode 0x01 (kExprNop)
at wasm_A (0:38):
- scope (module):
-- skipped
@@ -162,7 +162,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOut called
-Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c
+Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c (kExprBr)
at wasm_B (0:58):
- scope (module):
-- skipped
@@ -174,7 +174,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Script wasm://wasm/9b4bf87e byte offset 45: Wasm opcode 0x20
+Script wasm://wasm/9b4bf87e byte offset 45: Wasm opcode 0x20 (kExprLocalGet)
at wasm_B (0:45):
- scope (module):
-- skipped
@@ -186,7 +186,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Script wasm://wasm/9b4bf87e byte offset 47: Wasm opcode 0x04
+Script wasm://wasm/9b4bf87e byte offset 47: Wasm opcode 0x04 (kExprIf)
at wasm_B (0:47):
- scope (module):
-- skipped
@@ -198,7 +198,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Script wasm://wasm/9b4bf87e byte offset 49: Wasm opcode 0x20
+Script wasm://wasm/9b4bf87e byte offset 49: Wasm opcode 0x20 (kExprLocalGet)
at wasm_B (0:49):
- scope (module):
-- skipped
@@ -210,7 +210,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Script wasm://wasm/9b4bf87e byte offset 51: Wasm opcode 0x41
+Script wasm://wasm/9b4bf87e byte offset 51: Wasm opcode 0x41 (kExprI32Const)
at wasm_B (0:51):
- scope (module):
-- skipped
@@ -222,7 +222,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Script wasm://wasm/9b4bf87e byte offset 53: Wasm opcode 0x6b
+Script wasm://wasm/9b4bf87e byte offset 53: Wasm opcode 0x6b (kExprI32Sub)
at wasm_B (0:53):
- scope (module):
-- skipped
@@ -234,7 +234,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21
+Script wasm://wasm/9b4bf87e byte offset 54: Wasm opcode 0x21 (kExprLocalSet)
at wasm_B (0:54):
- scope (module):
-- skipped
@@ -246,7 +246,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10
+Script wasm://wasm/9b4bf87e byte offset 56: Wasm opcode 0x10 (kExprCallFunction)
at wasm_B (0:56):
- scope (module):
-- skipped
@@ -258,7 +258,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Script wasm://wasm/9b4bf87e byte offset 38: Wasm opcode 0x01
+Script wasm://wasm/9b4bf87e byte offset 38: Wasm opcode 0x01 (kExprNop)
at wasm_A (0:38):
- scope (module):
-- skipped
@@ -277,7 +277,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Script wasm://wasm/9b4bf87e byte offset 39: Wasm opcode 0x01
+Script wasm://wasm/9b4bf87e byte offset 39: Wasm opcode 0x01 (kExprNop)
at wasm_A (0:39):
- scope (module):
-- skipped
@@ -296,7 +296,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Script wasm://wasm/9b4bf87e byte offset 40: Wasm opcode 0x0b
+Script wasm://wasm/9b4bf87e byte offset 40: Wasm opcode 0x0b (kExprEnd)
at wasm_A (0:40):
- scope (module):
-- skipped
@@ -315,7 +315,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c
+Script wasm://wasm/9b4bf87e byte offset 58: Wasm opcode 0x0c (kExprBr)
at wasm_B (0:58):
- scope (module):
-- skipped
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-liftoff.js b/deps/v8/test/inspector/debugger/wasm-stepping.js
index c8dbedef5c..c8dbedef5c 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-liftoff.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping.js
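The expectation updates in the test files above reflect that the inspector now prints a Wasm opcode mnemonic alongside the raw byte. As an informal illustration only, the new "0x01 (kExprNop)" style of output amounts to a byte-to-name lookup along the following lines; this hand-written table is a stand-in for the purposes of this note, not V8's actual opcode-name machinery:

// Hedged sketch: maps the opcode bytes seen in the expectations above to
// their kExpr* mnemonics. V8 derives these names from its opcode macro
// lists; this hand-rolled table is illustrative only.
#include <cstdint>
#include <cstdio>

static const char* OpcodeMnemonic(uint8_t opcode) {
  switch (opcode) {
    case 0x01: return "kExprNop";
    case 0x04: return "kExprIf";
    case 0x0b: return "kExprEnd";
    case 0x0c: return "kExprBr";
    case 0x10: return "kExprCallFunction";
    case 0x20: return "kExprLocalGet";
    case 0x21: return "kExprLocalSet";
    case 0x41: return "kExprI32Const";
    case 0x6b: return "kExprI32Sub";
    default: return "<unknown>";
  }
}

int main() {
  // Prints "Wasm opcode 0x20 (kExprLocalGet)", matching the new format.
  printf("Wasm opcode 0x%02x (%s)\n", 0x20, OpcodeMnemonic(0x20));
  return 0;
}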
diff --git a/deps/v8/test/inspector/frontend-channel.h b/deps/v8/test/inspector/frontend-channel.h
new file mode 100644
index 0000000000..f92940bf76
--- /dev/null
+++ b/deps/v8/test/inspector/frontend-channel.h
@@ -0,0 +1,79 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TEST_INSPECTOR_FRONTEND_CHANNEL_H_
+#define V8_TEST_INSPECTOR_FRONTEND_CHANNEL_H_
+
+#include <vector>
+
+#include "include/v8-inspector.h"
+#include "include/v8.h"
+#include "test/inspector/task-runner.h"
+#include "test/inspector/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class FrontendChannelImpl : public v8_inspector::V8Inspector::Channel {
+ public:
+ FrontendChannelImpl(TaskRunner* task_runner, int context_group_id,
+ v8::Isolate* isolate, v8::Local<v8::Function> function)
+ : task_runner_(task_runner),
+ context_group_id_(context_group_id),
+ function_(isolate, function) {}
+ ~FrontendChannelImpl() override = default;
+ FrontendChannelImpl(const FrontendChannelImpl&) = delete;
+ FrontendChannelImpl& operator=(const FrontendChannelImpl&) = delete;
+
+ void set_session_id(int session_id) { session_id_ = session_id; }
+
+ private:
+ void sendResponse(
+ int callId,
+ std::unique_ptr<v8_inspector::StringBuffer> message) override {
+ task_runner_->Append(
+ std::make_unique<SendMessageTask>(this, ToVector(message->string())));
+ }
+ void sendNotification(
+ std::unique_ptr<v8_inspector::StringBuffer> message) override {
+ task_runner_->Append(
+ std::make_unique<SendMessageTask>(this, ToVector(message->string())));
+ }
+ void flushProtocolNotifications() override {}
+
+ class SendMessageTask : public TaskRunner::Task {
+ public:
+ SendMessageTask(FrontendChannelImpl* channel,
+ const std::vector<uint16_t>& message)
+ : channel_(channel), message_(message) {}
+ ~SendMessageTask() override = default;
+ bool is_priority_task() final { return false; }
+
+ private:
+ void Run(IsolateData* data) override {
+ v8::MicrotasksScope microtasks_scope(data->isolate(),
+ v8::MicrotasksScope::kRunMicrotasks);
+ v8::HandleScope handle_scope(data->isolate());
+ v8::Local<v8::Context> context =
+ data->GetDefaultContext(channel_->context_group_id_);
+ v8::Context::Scope context_scope(context);
+ v8::Local<v8::Value> message = ToV8String(data->isolate(), message_);
+ v8::MaybeLocal<v8::Value> result;
+ result = channel_->function_.Get(data->isolate())
+ ->Call(context, context->Global(), 1, &message);
+ }
+ FrontendChannelImpl* channel_;
+ std::vector<uint16_t> message_;
+ };
+
+ TaskRunner* task_runner_;
+ int context_group_id_;
+ v8::Global<v8::Function> function_;
+ int session_id_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TEST_INSPECTOR_FRONTEND_CHANNEL_H_
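For context, the FrontendChannelImpl extracted into the new header above implements the v8_inspector::V8Inspector::Channel interface. A minimal sketch of how such a channel is typically attached to an inspector session, assuming an already-created V8Inspector instance (the helper name here is illustrative, not part of this patch):

// Hedged sketch: wiring a Channel implementation to an inspector session.
// Responses and notifications produced by the returned session flow back
// through the channel's sendResponse()/sendNotification() overrides.
#include <memory>

#include "include/v8-inspector.h"

std::unique_ptr<v8_inspector::V8InspectorSession> ConnectChannel(
    v8_inspector::V8Inspector* inspector, int context_group_id,
    v8_inspector::V8Inspector::Channel* channel) {
  // An empty state StringView starts a fresh session rather than
  // restoring a previously serialized one.
  return inspector->connect(context_group_id, channel,
                            v8_inspector::StringView());
}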
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index ac56ac9242..8cf5f27d91 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -7,20 +7,23 @@
#endif // !defined(_WIN32) && !defined(_WIN64)
#include <locale.h>
+
#include <string>
#include <vector>
#include "include/libplatform/libplatform.h"
#include "include/v8.h"
-
#include "src/base/platform/platform.h"
+#include "src/base/small-vector.h"
#include "src/flags/flags.h"
#include "src/heap/read-only-heap.h"
#include "src/utils/utils.h"
#include "src/utils/vector.h"
-
+#include "test/inspector/frontend-channel.h"
#include "test/inspector/isolate-data.h"
#include "test/inspector/task-runner.h"
+#include "test/inspector/tasks.h"
+#include "test/inspector/utils.h"
namespace v8 {
namespace internal {
@@ -34,271 +37,9 @@ extern v8::StartupData CreateSnapshotDataBlobInternal(
extern v8::StartupData WarmUpSnapshotDataBlobInternal(
v8::StartupData cold_snapshot_blob, const char* warmup_source);
-} // namespace internal
-} // namespace v8
-
namespace {
-std::vector<TaskRunner*> task_runners;
-
-void Terminate() {
- for (size_t i = 0; i < task_runners.size(); ++i) {
- task_runners[i]->Terminate();
- task_runners[i]->Join();
- }
- std::vector<TaskRunner*> empty;
- task_runners.swap(empty);
-}
-
-void Exit() {
- fflush(stdout);
- fflush(stderr);
- Terminate();
-}
-
-std::vector<uint16_t> ToVector(v8::Isolate* isolate,
- v8::Local<v8::String> str) {
- std::vector<uint16_t> buffer(str->Length());
- str->Write(isolate, buffer.data(), 0, str->Length());
- return buffer;
-}
-
-std::vector<uint8_t> ToBytes(v8::Isolate* isolate, v8::Local<v8::String> str) {
- std::vector<uint8_t> buffer(str->Length());
- str->WriteOneByte(isolate, buffer.data(), 0, str->Length());
- return buffer;
-}
-
-v8::Local<v8::String> ToV8String(v8::Isolate* isolate, const char* str) {
- return v8::String::NewFromUtf8(isolate, str).ToLocalChecked();
-}
-
-v8::Local<v8::String> ToV8String(v8::Isolate* isolate,
- const std::vector<uint8_t>& bytes) {
- return v8::String::NewFromOneByte(isolate, bytes.data(),
- v8::NewStringType::kNormal,
- static_cast<int>(bytes.size()))
- .ToLocalChecked();
-}
-
-v8::Local<v8::String> ToV8String(v8::Isolate* isolate,
- const std::string& buffer) {
- int length = static_cast<int>(buffer.size());
- return v8::String::NewFromUtf8(isolate, buffer.data(),
- v8::NewStringType::kNormal, length)
- .ToLocalChecked();
-}
-
-v8::Local<v8::String> ToV8String(v8::Isolate* isolate,
- const std::vector<uint16_t>& buffer) {
- int length = static_cast<int>(buffer.size());
- return v8::String::NewFromTwoByte(isolate, buffer.data(),
- v8::NewStringType::kNormal, length)
- .ToLocalChecked();
-}
-
-std::vector<uint16_t> ToVector(const v8_inspector::StringView& string) {
- std::vector<uint16_t> buffer(string.length());
- for (size_t i = 0; i < string.length(); i++) {
- if (string.is8Bit())
- buffer[i] = string.characters8()[i];
- else
- buffer[i] = string.characters16()[i];
- }
- return buffer;
-}
-
-class FrontendChannelImpl : public v8_inspector::V8Inspector::Channel {
- public:
- FrontendChannelImpl(TaskRunner* task_runner, int context_group_id,
- v8::Isolate* isolate,
- v8::Local<v8::Function> dispatch_message_callback)
- : task_runner_(task_runner),
- context_group_id_(context_group_id),
- dispatch_message_callback_(isolate, dispatch_message_callback) {}
- ~FrontendChannelImpl() override = default;
-
- void set_session_id(int session_id) { session_id_ = session_id; }
-
- private:
- void sendResponse(
- int callId,
- std::unique_ptr<v8_inspector::StringBuffer> message) override {
- task_runner_->Append(
- new SendMessageTask(this, ToVector(message->string())));
- }
- void sendNotification(
- std::unique_ptr<v8_inspector::StringBuffer> message) override {
- task_runner_->Append(
- new SendMessageTask(this, ToVector(message->string())));
- }
- void flushProtocolNotifications() override {}
-
- class SendMessageTask : public TaskRunner::Task {
- public:
- SendMessageTask(FrontendChannelImpl* channel,
- const std::vector<uint16_t>& message)
- : channel_(channel), message_(message) {}
- ~SendMessageTask() override = default;
- bool is_priority_task() final { return false; }
-
- private:
- void Run(IsolateData* data) override {
- v8::MicrotasksScope microtasks_scope(data->isolate(),
- v8::MicrotasksScope::kRunMicrotasks);
- v8::HandleScope handle_scope(data->isolate());
- v8::Local<v8::Context> context =
- data->GetContext(channel_->context_group_id_);
- v8::Context::Scope context_scope(context);
- v8::Local<v8::Value> message = ToV8String(data->isolate(), message_);
- v8::MaybeLocal<v8::Value> result;
- result = channel_->dispatch_message_callback_.Get(data->isolate())
- ->Call(context, context->Global(), 1, &message);
- }
- FrontendChannelImpl* channel_;
- std::vector<uint16_t> message_;
- };
-
- TaskRunner* task_runner_;
- int context_group_id_;
- v8::Global<v8::Function> dispatch_message_callback_;
- int session_id_;
- DISALLOW_COPY_AND_ASSIGN(FrontendChannelImpl);
-};
-
-template <typename T>
-void RunSyncTask(TaskRunner* task_runner, T callback) {
- class SyncTask : public TaskRunner::Task {
- public:
- SyncTask(v8::base::Semaphore* ready_semaphore, T callback)
- : ready_semaphore_(ready_semaphore), callback_(callback) {}
- ~SyncTask() override = default;
- bool is_priority_task() final { return true; }
-
- private:
- void Run(IsolateData* data) override {
- callback_(data);
- if (ready_semaphore_) ready_semaphore_->Signal();
- }
-
- v8::base::Semaphore* ready_semaphore_;
- T callback_;
- };
-
- v8::base::Semaphore ready_semaphore(0);
- task_runner->Append(new SyncTask(&ready_semaphore, callback));
- ready_semaphore.Wait();
-}
-
-class SendMessageToBackendTask : public TaskRunner::Task {
- public:
- SendMessageToBackendTask(int session_id, const std::vector<uint16_t>& message)
- : session_id_(session_id), message_(message) {}
- bool is_priority_task() final { return true; }
-
- private:
- void Run(IsolateData* data) override {
- v8_inspector::StringView message_view(message_.data(), message_.size());
- data->SendMessage(session_id_, message_view);
- }
-
- int session_id_;
- std::vector<uint16_t> message_;
-};
-
-void RunAsyncTask(TaskRunner* task_runner,
- const v8_inspector::StringView& task_name,
- TaskRunner::Task* task) {
- class AsyncTask : public TaskRunner::Task {
- public:
- explicit AsyncTask(TaskRunner::Task* inner) : inner_(inner) {}
- ~AsyncTask() override = default;
- bool is_priority_task() override { return inner_->is_priority_task(); }
- void Run(IsolateData* data) override {
- data->AsyncTaskStarted(inner_.get());
- inner_->Run(data);
- data->AsyncTaskFinished(inner_.get());
- }
-
- private:
- std::unique_ptr<TaskRunner::Task> inner_;
- DISALLOW_COPY_AND_ASSIGN(AsyncTask);
- };
-
- task_runner->data()->AsyncTaskScheduled(task_name, task, false);
- task_runner->Append(new AsyncTask(task));
-}
-
-class ExecuteStringTask : public TaskRunner::Task {
- public:
- ExecuteStringTask(v8::Isolate* isolate, int context_group_id,
- const std::vector<uint16_t>& expression,
- v8::Local<v8::String> name,
- v8::Local<v8::Integer> line_offset,
- v8::Local<v8::Integer> column_offset,
- v8::Local<v8::Boolean> is_module)
- : expression_(expression),
- name_(ToVector(isolate, name)),
- line_offset_(line_offset.As<v8::Int32>()->Value()),
- column_offset_(column_offset.As<v8::Int32>()->Value()),
- is_module_(is_module->Value()),
- context_group_id_(context_group_id) {}
- ExecuteStringTask(const std::string& expression, int context_group_id)
- : expression_utf8_(expression), context_group_id_(context_group_id) {}
-
- ~ExecuteStringTask() override = default;
- bool is_priority_task() override { return false; }
- void Run(IsolateData* data) override {
- v8::MicrotasksScope microtasks_scope(data->isolate(),
- v8::MicrotasksScope::kRunMicrotasks);
- v8::HandleScope handle_scope(data->isolate());
- v8::Local<v8::Context> context = data->GetContext(context_group_id_);
- v8::Context::Scope context_scope(context);
- v8::ScriptOrigin origin(
- ToV8String(data->isolate(), name_),
- v8::Integer::New(data->isolate(), line_offset_),
- v8::Integer::New(data->isolate(), column_offset_),
- /* resource_is_shared_cross_origin */ v8::Local<v8::Boolean>(),
- /* script_id */ v8::Local<v8::Integer>(),
- /* source_map_url */ v8::Local<v8::Value>(),
- /* resource_is_opaque */ v8::Local<v8::Boolean>(),
- /* is_wasm */ v8::Local<v8::Boolean>(),
- v8::Boolean::New(data->isolate(), is_module_));
- v8::Local<v8::String> source;
- if (expression_.size() != 0)
- source = ToV8String(data->isolate(), expression_);
- else
- source = ToV8String(data->isolate(), expression_utf8_);
-
- v8::ScriptCompiler::Source scriptSource(source, origin);
- v8::Isolate::SafeForTerminationScope allowTermination(data->isolate());
- if (!is_module_) {
- v8::Local<v8::Script> script;
- if (!v8::ScriptCompiler::Compile(context, &scriptSource).ToLocal(&script))
- return;
- v8::MaybeLocal<v8::Value> result;
- result = script->Run(context);
- } else {
- // Register Module takes ownership of {buffer}, so we need to make a copy.
- int length = static_cast<int>(name_.size());
- v8::internal::Vector<uint16_t> buffer =
- v8::internal::Vector<uint16_t>::New(length);
- std::copy(name_.begin(), name_.end(), buffer.begin());
- data->RegisterModule(context, buffer, &scriptSource);
- }
- }
-
- private:
- std::vector<uint16_t> expression_;
- std::string expression_utf8_;
- std::vector<uint16_t> name_;
- int32_t line_offset_ = 0;
- int32_t column_offset_ = 0;
- bool is_module_ = false;
- int context_group_id_;
-
- DISALLOW_COPY_AND_ASSIGN(ExecuteStringTask);
-};
+base::SmallVector<TaskRunner*, 2> task_runners;
class UtilsExtension : public IsolateData::SetupGlobalTask {
public:
@@ -334,6 +75,9 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
utils->Set(isolate, "setLogConsoleApiMessageCalls",
v8::FunctionTemplate::New(
isolate, &UtilsExtension::SetLogConsoleApiMessageCalls));
+ utils->Set(isolate, "setAdditionalConsoleApi",
+ v8::FunctionTemplate::New(
+ isolate, &UtilsExtension::SetAdditionalConsoleApi));
utils->Set(
isolate, "setLogMaxAsyncCallStackDepthChanged",
v8::FunctionTemplate::New(
@@ -342,6 +86,9 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
v8::FunctionTemplate::New(isolate,
&UtilsExtension::CreateContextGroup));
utils->Set(
+ isolate, "createContext",
+ v8::FunctionTemplate::New(isolate, &UtilsExtension::CreateContext));
+ utils->Set(
isolate, "resetContextGroup",
v8::FunctionTemplate::New(isolate, &UtilsExtension::ResetContextGroup));
utils->Set(
@@ -390,20 +137,24 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
int n =
static_cast<int>(fwrite(*str, sizeof(**str), str.length(), stdout));
if (n != str.length()) {
- printf("Error in fwrite\n");
- Quit(args);
+ FATAL("Error in fwrite\n");
}
}
printf("\n");
fflush(stdout);
}
- static void Quit(const v8::FunctionCallbackInfo<v8::Value>& args) { Exit(); }
+ static void Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ fflush(stdout);
+ fflush(stderr);
+    // Only terminate; do not join the threads here, since joining concurrently
+ // from multiple threads can be undefined behaviour (see pthread_join).
+ for (TaskRunner* task_runner : task_runners) task_runner->Terminate();
+ }
static void Setlocale(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsString()) {
- fprintf(stderr, "Internal error: setlocale get one string argument.");
- Exit();
+      FATAL("Internal error: setlocale gets one string argument.");
}
   v8::String::Utf8Value str(args.GetIsolate(), args[0]);
@@ -425,8 +176,7 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
static void Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsString()) {
- fprintf(stderr, "Internal error: read gets one string argument.");
- Exit();
+ FATAL("Internal error: read gets one string argument.");
}
std::string chars;
v8::Isolate* isolate = args.GetIsolate();
@@ -437,8 +187,7 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
static void Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsString()) {
- fprintf(stderr, "Internal error: load gets one string argument.");
- Exit();
+ FATAL("Internal error: load gets one string argument.");
}
std::string chars;
v8::Isolate* isolate = args.GetIsolate();
@@ -455,14 +204,12 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
if (args.Length() != 6 || !args[0]->IsInt32() || !args[1]->IsString() ||
!args[2]->IsString() || !args[3]->IsInt32() || !args[4]->IsInt32() ||
!args[5]->IsBoolean()) {
- fprintf(stderr,
- "Internal error: compileAndRunWithOrigin(context_group_id, "
- "source, name, line, "
- "column, is_module).");
- Exit();
+ FATAL(
+ "Internal error: compileAndRunWithOrigin(context_group_id, source, "
+ "name, line, column, is_module).");
}
- backend_runner_->Append(new ExecuteStringTask(
+ backend_runner_->Append(std::make_unique<ExecuteStringTask>(
args.GetIsolate(), args[0].As<v8::Int32>()->Value(),
ToVector(args.GetIsolate(), args[1].As<v8::String>()),
args[2].As<v8::String>(), args[3].As<v8::Int32>(),
@@ -472,8 +219,7 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
static void SetCurrentTimeMSForTest(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsNumber()) {
- fprintf(stderr, "Internal error: setCurrentTimeMSForTest(time).");
- Exit();
+ FATAL("Internal error: setCurrentTimeMSForTest(time).");
}
backend_runner_->data()->SetCurrentTimeMS(
args[0].As<v8::Number>()->Value());
@@ -482,8 +228,7 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
static void SetMemoryInfoForTest(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
- fprintf(stderr, "Internal error: setMemoryInfoForTest(value).");
- Exit();
+ FATAL("Internal error: setMemoryInfoForTest(value).");
}
backend_runner_->data()->SetMemoryInfo(args[0]);
}
@@ -492,10 +237,9 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 3 || !args[0]->IsInt32() || !args[1]->IsString() ||
!args[2]->IsString()) {
- fprintf(stderr,
- "Internal error: schedulePauseOnNextStatement(context_group_id, "
- "'reason', 'details').");
- Exit();
+ FATAL(
+ "Internal error: schedulePauseOnNextStatement(context_group_id, "
+ "'reason', 'details').");
}
std::vector<uint16_t> reason =
ToVector(args.GetIsolate(), args[1].As<v8::String>());
@@ -514,9 +258,7 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
static void CancelPauseOnNextStatement(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsInt32()) {
- fprintf(stderr,
- "Internal error: cancelPauseOnNextStatement(context_group_id).");
- Exit();
+ FATAL("Internal error: cancelPauseOnNextStatement(context_group_id).");
}
int context_group_id = args[0].As<v8::Int32>()->Value();
RunSyncTask(backend_runner_, [&context_group_id](IsolateData* data) {
@@ -527,8 +269,7 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
static void SetLogConsoleApiMessageCalls(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsBoolean()) {
- fprintf(stderr, "Internal error: setLogConsoleApiMessageCalls(bool).");
- Exit();
+ FATAL("Internal error: setLogConsoleApiMessageCalls(bool).");
}
backend_runner_->data()->SetLogConsoleApiMessageCalls(
args[0].As<v8::Boolean>()->Value());
@@ -537,19 +278,29 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
static void SetLogMaxAsyncCallStackDepthChanged(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsBoolean()) {
- fprintf(stderr,
- "Internal error: setLogMaxAsyncCallStackDepthChanged(bool).");
- Exit();
+ FATAL("Internal error: setLogMaxAsyncCallStackDepthChanged(bool).");
}
backend_runner_->data()->SetLogMaxAsyncCallStackDepthChanged(
args[0].As<v8::Boolean>()->Value());
}
+ static void SetAdditionalConsoleApi(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsString()) {
+      FATAL("Internal error: setAdditionalConsoleApi(string).");
+ }
+ std::vector<uint16_t> script =
+ ToVector(args.GetIsolate(), args[0].As<v8::String>());
+ RunSyncTask(backend_runner_, [&script](IsolateData* data) {
+ data->SetAdditionalConsoleApi(
+ v8_inspector::StringView(script.data(), script.size()));
+ });
+ }
+
static void CreateContextGroup(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 0) {
- fprintf(stderr, "Internal error: createContextGroup().");
- Exit();
+ FATAL("Internal error: createContextGroup().");
}
int context_group_id = 0;
RunSyncTask(backend_runner_, [&context_group_id](IsolateData* data) {
@@ -559,11 +310,24 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
v8::Int32::New(args.GetIsolate(), context_group_id));
}
+ static void CreateContext(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 2) {
+      FATAL("Internal error: createContext(context_group_id, name).");
+ }
+ int context_group_id = args[0].As<v8::Int32>()->Value();
+ std::vector<uint16_t> name =
+ ToVector(args.GetIsolate(), args[1].As<v8::String>());
+
+ RunSyncTask(backend_runner_, [&context_group_id, name](IsolateData* data) {
+ data->CreateContext(context_group_id,
+ v8_inspector::StringView(name.data(), name.size()));
+ });
+ }
+
static void ResetContextGroup(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsInt32()) {
- fprintf(stderr, "Internal error: resetContextGroup(context_group_id).");
- Exit();
+ FATAL("Internal error: resetContextGroup(context_group_id).");
}
int context_group_id = args[0].As<v8::Int32>()->Value();
RunSyncTask(backend_runner_, [&context_group_id](IsolateData* data) {
@@ -574,10 +338,9 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
static void ConnectSession(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 3 || !args[0]->IsInt32() || !args[1]->IsString() ||
!args[2]->IsFunction()) {
- fprintf(stderr,
- "Internal error: connectionSession(context_group_id, state, "
- "dispatch).");
- Exit();
+ FATAL(
+          "Internal error: connectSession(context_group_id, state, "
+ "dispatch).");
}
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
FrontendChannelImpl* channel = new FrontendChannelImpl(
@@ -604,8 +367,7 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
static void DisconnectSession(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsInt32()) {
- fprintf(stderr, "Internal error: disconnectionSession(session_id).");
- Exit();
+      FATAL("Internal error: disconnectSession(session_id).");
}
int session_id = args[0].As<v8::Int32>()->Value();
std::vector<uint8_t> state;
@@ -619,11 +381,9 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
static void SendMessageToBackend(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2 || !args[0]->IsInt32() || !args[1]->IsString()) {
- fprintf(stderr,
- "Internal error: sendMessageToBackend(session_id, message).");
- Exit();
+ FATAL("Internal error: sendMessageToBackend(session_id, message).");
}
- backend_runner_->Append(new SendMessageToBackendTask(
+ backend_runner_->Append(std::make_unique<SendMessageToBackendTask>(
args[0].As<v8::Int32>()->Value(),
ToVector(args.GetIsolate(), args[1].As<v8::String>())));
}
@@ -634,73 +394,6 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
TaskRunner* UtilsExtension::backend_runner_ = nullptr;
std::map<int, std::unique_ptr<FrontendChannelImpl>> UtilsExtension::channels_;
-class SetTimeoutTask : public TaskRunner::Task {
- public:
- SetTimeoutTask(int context_group_id, v8::Isolate* isolate,
- v8::Local<v8::Function> callback)
- : callback_(isolate, callback), context_group_id_(context_group_id) {}
- ~SetTimeoutTask() override = default;
- bool is_priority_task() final { return false; }
-
- private:
- void Run(IsolateData* data) override {
- v8::MicrotasksScope microtasks_scope(data->isolate(),
- v8::MicrotasksScope::kRunMicrotasks);
- v8::HandleScope handle_scope(data->isolate());
- v8::Local<v8::Context> context = data->GetContext(context_group_id_);
- v8::Context::Scope context_scope(context);
-
- v8::Local<v8::Function> callback = callback_.Get(data->isolate());
- v8::MaybeLocal<v8::Value> result;
- result = callback->Call(context, context->Global(), 0, nullptr);
- }
-
- v8::Global<v8::Function> callback_;
- int context_group_id_;
-};
-
-class SetTimeoutExtension : public IsolateData::SetupGlobalTask {
- public:
- void Run(v8::Isolate* isolate,
- v8::Local<v8::ObjectTemplate> global) override {
- global->Set(
- ToV8String(isolate, "setTimeout"),
- v8::FunctionTemplate::New(isolate, &SetTimeoutExtension::SetTimeout));
- }
-
- private:
- static void SetTimeout(const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 2 || !args[1]->IsNumber() ||
- (!args[0]->IsFunction() && !args[0]->IsString()) ||
- args[1].As<v8::Number>()->Value() != 0.0) {
- fprintf(
- stderr,
- "Internal error: only setTimeout(function|code, 0) is supported.");
- Exit();
- }
- v8::Isolate* isolate = args.GetIsolate();
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
- int context_group_id = data->GetContextGroupId(context);
- const char* task_name = "setTimeout";
- v8_inspector::StringView task_name_view(
- reinterpret_cast<const uint8_t*>(task_name), strlen(task_name));
- if (args[0]->IsFunction()) {
- RunAsyncTask(data->task_runner(), task_name_view,
- new SetTimeoutTask(context_group_id, isolate,
- v8::Local<v8::Function>::Cast(args[0])));
- } else {
- RunAsyncTask(
- data->task_runner(), task_name_view,
- new ExecuteStringTask(
- isolate, context_group_id,
- ToVector(isolate, args[0].As<v8::String>()),
- v8::String::Empty(isolate), v8::Integer::New(isolate, 0),
- v8::Integer::New(isolate, 0), v8::Boolean::New(isolate, false)));
- }
- }
-};
-
bool StrictAccessCheck(v8::Local<v8::Context> accessing_context,
v8::Local<v8::Object> accessed_object,
v8::Local<v8::Value> data) {
@@ -781,7 +474,8 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
IsolateData* data = IsolateData::FromContext(context);
- data->FireContextCreated(context, data->GetContextGroupId(context));
+ data->FireContextCreated(context, data->GetContextGroupId(context),
+ v8_inspector::StringView());
}
static void FireContextDestroyed(
@@ -800,9 +494,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
static void AddInspectedObject(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2 || !args[0]->IsInt32()) {
- fprintf(stderr,
- "Internal error: addInspectedObject(session_id, object).");
- Exit();
+ FATAL("Internal error: addInspectedObject(session_id, object).");
}
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
IsolateData* data = IsolateData::FromContext(context);
@@ -812,8 +504,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
static void SetMaxAsyncTaskStacks(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsInt32()) {
- fprintf(stderr, "Internal error: setMaxAsyncTaskStacks(max).");
- Exit();
+ FATAL("Internal error: setMaxAsyncTaskStacks(max).");
}
IsolateData::FromContext(args.GetIsolate()->GetCurrentContext())
->SetMaxAsyncTaskStacksForTest(args[0].As<v8::Int32>()->Value());
@@ -822,8 +513,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
static void DumpAsyncTaskStacksStateForTest(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 0) {
- fprintf(stderr, "Internal error: dumpAsyncTaskStacksStateForTest().");
- Exit();
+ FATAL("Internal error: dumpAsyncTaskStacksStateForTest().");
}
IsolateData::FromContext(args.GetIsolate()->GetCurrentContext())
->DumpAsyncTaskStacksStateForTest();
@@ -831,8 +521,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
static void BreakProgram(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
- fprintf(stderr, "Internal error: breakProgram('reason', 'details').");
- Exit();
+ FATAL("Internal error: breakProgram('reason', 'details').");
}
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
IsolateData* data = IsolateData::FromContext(context);
@@ -849,8 +538,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
static void CreateObjectWithStrictCheck(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 0) {
- fprintf(stderr, "Internal error: createObjectWithStrictCheck().");
- Exit();
+ FATAL("Internal error: createObjectWithStrictCheck().");
}
v8::Local<v8::ObjectTemplate> templ =
v8::ObjectTemplate::New(args.GetIsolate());
@@ -864,9 +552,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 3 || !args[0]->IsFunction() || !args[1]->IsString() ||
!args[2]->IsString()) {
- fprintf(stderr,
- "Internal error: callWithScheduledBreak('reason', 'details').");
- Exit();
+      FATAL("Internal error: callWithScheduledBreak(function, 'reason', 'details').");
}
std::vector<uint16_t> reason =
ToVector(args.GetIsolate(), args[1].As<v8::String>());
@@ -888,8 +574,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
static void AllowAccessorFormatting(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsObject()) {
- fprintf(stderr, "Internal error: allowAccessorFormatting('object').");
- Exit();
+ FATAL("Internal error: allowAccessorFormatting('object').");
}
v8::Local<v8::Object> object = args[0].As<v8::Object>();
v8::Isolate* isolate = args.GetIsolate();
@@ -904,8 +589,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
static void MarkObjectAsNotInspectable(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsObject()) {
- fprintf(stderr, "Internal error: markObjectAsNotInspectable(object).");
- Exit();
+ FATAL("Internal error: markObjectAsNotInspectable(object).");
}
v8::Local<v8::Object> object = args[0].As<v8::Object>();
v8::Isolate* isolate = args.GetIsolate();
@@ -920,10 +604,9 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
static void CreateObjectWithAccessor(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsBoolean()) {
- fprintf(stderr,
- "Internal error: createObjectWithAccessor('accessor name', "
- "hasSetter)\n");
- Exit();
+ FATAL(
+ "Internal error: createObjectWithAccessor('accessor name', "
+ "hasSetter)\n");
}
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
@@ -953,9 +636,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
static void StoreCurrentStackTrace(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsString()) {
- fprintf(stderr,
- "Internal error: storeCurrentStackTrace('description')\n");
- Exit();
+ FATAL("Internal error: storeCurrentStackTrace('description')\n");
}
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
@@ -976,8 +657,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
static void ExternalAsyncTaskStarted(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsArrayBuffer()) {
- fprintf(stderr, "Internal error: externalAsyncTaskStarted(id)\n");
- Exit();
+ FATAL("Internal error: externalAsyncTaskStarted(id)\n");
}
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
IsolateData* data = IsolateData::FromContext(context);
@@ -990,8 +670,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
static void ExternalAsyncTaskFinished(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsArrayBuffer()) {
- fprintf(stderr, "Internal error: externalAsyncTaskFinished(id)\n");
- Exit();
+ FATAL("Internal error: externalAsyncTaskFinished(id)\n");
}
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
IsolateData* data = IsolateData::FromContext(context);
@@ -1005,10 +684,9 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 3 || !args[0]->IsFunction() || !args[1]->IsString() ||
!args[2]->IsBoolean()) {
- fprintf(stderr,
- "Internal error: scheduleWithAsyncStack(function, "
- "'task-name', with_empty_stack).");
- Exit();
+ FATAL(
+ "Internal error: scheduleWithAsyncStack(function, 'task-name', "
+ "with_empty_stack).");
}
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
@@ -1021,18 +699,17 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
ToVector(isolate, args[1].As<v8::String>());
v8_inspector::StringView task_name_view(task_name.data(), task_name.size());
- RunAsyncTask(data->task_runner(), task_name_view,
- new SetTimeoutTask(context_group_id, isolate,
- v8::Local<v8::Function>::Cast(args[0])));
+ RunAsyncTask(
+ data->task_runner(), task_name_view,
+ std::make_unique<SetTimeoutTask>(
+ context_group_id, isolate, v8::Local<v8::Function>::Cast(args[0])));
if (with_empty_stack) context->Enter();
}
static void SetAllowCodeGenerationFromStrings(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsBoolean()) {
- fprintf(stderr,
- "Internal error: setAllowCodeGenerationFromStrings(allow).");
- Exit();
+ FATAL("Internal error: setAllowCodeGenerationFromStrings(allow).");
}
args.GetIsolate()->GetCurrentContext()->AllowCodeGenerationFromStrings(
args[0].As<v8::Boolean>()->Value());
@@ -1041,8 +718,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
static void SetResourceNamePrefix(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsString()) {
- fprintf(stderr, "Internal error: setResourceNamePrefix('prefix').");
- Exit();
+ FATAL("Internal error: setResourceNamePrefix('prefix').");
}
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
@@ -1051,27 +727,25 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
}
};
-} // namespace
-
-int main(int argc, char* argv[]) {
+int InspectorTestMain(int argc, char* argv[]) {
v8::V8::InitializeICUDefaultLocation(argv[0]);
- std::unique_ptr<v8::Platform> platform(v8::platform::NewDefaultPlatform());
+ std::unique_ptr<Platform> platform(platform::NewDefaultPlatform());
v8::V8::InitializePlatform(platform.get());
- v8::internal::FLAG_abort_on_contradictory_flags = true;
+ FLAG_abort_on_contradictory_flags = true;
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::V8::InitializeExternalStartupData(argv[0]);
v8::V8::Initialize();
i::DisableEmbeddedBlobRefcounting();
- v8::base::Semaphore ready_semaphore(0);
+ base::Semaphore ready_semaphore(0);
- v8::StartupData startup_data = {nullptr, 0};
+ StartupData startup_data = {nullptr, 0};
for (int i = 1; i < argc; ++i) {
if (strcmp(argv[i], "--embed") == 0) {
argv[i++] = nullptr;
printf("Embedding script '%s'\n", argv[i]);
startup_data = i::CreateSnapshotDataBlobInternal(
- v8::SnapshotCreator::FunctionCodeHandling::kClear, argv[i], nullptr);
+ SnapshotCreator::FunctionCodeHandling::kClear, argv[i], nullptr);
argv[i] = nullptr;
}
}
@@ -1080,8 +754,8 @@ int main(int argc, char* argv[]) {
IsolateData::SetupGlobalTasks frontend_extensions;
frontend_extensions.emplace_back(new UtilsExtension());
TaskRunner frontend_runner(
- std::move(frontend_extensions), true, &ready_semaphore,
- startup_data.data ? &startup_data : nullptr, false);
+ std::move(frontend_extensions), kDoCatchExceptions, &ready_semaphore,
+ startup_data.data ? &startup_data : nullptr, kNoInspector);
ready_semaphore.Wait();
int frontend_context_group_id = 0;
@@ -1094,34 +768,31 @@ int main(int argc, char* argv[]) {
backend_extensions.emplace_back(new SetTimeoutExtension());
backend_extensions.emplace_back(new InspectorExtension());
TaskRunner backend_runner(
- std::move(backend_extensions), false, &ready_semaphore,
- startup_data.data ? &startup_data : nullptr, true);
+ std::move(backend_extensions), kDontCatchExceptions, &ready_semaphore,
+ startup_data.data ? &startup_data : nullptr, kWithInspector);
ready_semaphore.Wait();
UtilsExtension::set_backend_task_runner(&backend_runner);
- task_runners.push_back(&frontend_runner);
- task_runners.push_back(&backend_runner);
+ task_runners = {&frontend_runner, &backend_runner};
for (int i = 1; i < argc; ++i) {
// Ignore unknown flags.
if (argv[i] == nullptr || argv[i][0] == '-') continue;
bool exists = false;
- std::string chars = v8::internal::ReadFile(argv[i], &exists, true);
+ std::string chars = ReadFile(argv[i], &exists, true);
if (!exists) {
- fprintf(stderr, "Internal error: script file doesn't exists: %s\n",
- argv[i]);
- Exit();
+      FATAL("Internal error: script file doesn't exist: %s\n", argv[i]);
}
- frontend_runner.Append(
- new ExecuteStringTask(chars, frontend_context_group_id));
+ frontend_runner.Append(std::make_unique<ExecuteStringTask>(
+ chars, frontend_context_group_id));
}
frontend_runner.Join();
backend_runner.Join();
UtilsExtension::ClearAllSessions();
- delete startup_data.data;
+ delete[] startup_data.data;
// TaskRunners go out of scope here, which causes Isolate teardown and all
// running background tasks to be properly joined.
@@ -1130,3 +801,11 @@ int main(int argc, char* argv[]) {
i::FreeCurrentEmbeddedBlob();
return 0;
}
+} // namespace
+
+} // namespace internal
+} // namespace v8
+
+int main(int argc, char* argv[]) {
+ return v8::internal::InspectorTestMain(argc, argv);
+}
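Several of the rewritten UtilsExtension methods above funnel work through RunSyncTask, which posts a task to the backend runner and blocks the calling thread until it completes. A rough stand-in for that semaphore-based pattern using only the standard library (the names and the queue-append parameter are assumptions for illustration, not the code moved into test/inspector/tasks.h):

// Hedged sketch of the post-and-wait pattern behind RunSyncTask. A real
// TaskRunner drains tasks on its own thread; here the queue is abstracted
// as any callable that accepts a task.
#include <condition_variable>
#include <functional>
#include <mutex>

template <typename AppendFn>
void RunSyncSketch(AppendFn&& append, std::function<void()> callback) {
  std::mutex mutex;
  std::condition_variable cv;
  bool done = false;
  // Wrap the callback so the worker signals completion after running it,
  // mirroring ready_semaphore->Signal() in the original.
  append([&] {
    callback();
    {
      std::lock_guard<std::mutex> lock(mutex);
      done = true;
    }
    cv.notify_one();
  });
  // Block like ready_semaphore.Wait() until the worker thread finishes.
  std::unique_lock<std::mutex> lock(mutex);
  cv.wait(lock, [&] { return done; });
}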
diff --git a/deps/v8/test/inspector/inspector.status b/deps/v8/test/inspector/inspector.status
index e7a69f9402..22f5dd0842 100644
--- a/deps/v8/test/inspector/inspector.status
+++ b/deps/v8/test/inspector/inspector.status
@@ -22,11 +22,12 @@
}], # ALWAYS
##############################################################################
-['arch == x64 and mode == debug', {
- # Flaky tests: https://crbug.com/v8/10876
- 'debugger/pause-on-oom-extrawide': [PASS, FAIL],
- 'debugger/pause-on-oom-wide': [PASS, FAIL],
-}], # 'arch == x64 and mode == debug'
+['mode != debug or dcheck_always_on', {
+  # Investigating flaky tests: https://crbug.com/v8/10876. These tests are enabled only in pure debug builds.
+ 'debugger/pause-on-oom': [SKIP],
+ 'debugger/pause-on-oom-wide': [SKIP],
+ 'debugger/pause-on-oom-extrawide': [SKIP],
+}], # 'mode != debug or dcheck_always_on'
##############################################################################
['system == android', {
@@ -76,6 +77,12 @@
}], # (arch == arm or arch == arm64) and simulator_run
##############################################################################
+['(arch == ppc64) and simulator_run', {
+ # Slow test: https://crbug.com/v8/10965
+ 'runtime/console-messages-limits': [PASS, SLOW],
+}], # (arch == ppc64) and simulator_run
+
+##############################################################################
['variant == no_wasm_traps', {
'*': [SKIP],
}], # variant == no_wasm_traps
@@ -94,6 +101,12 @@
'debugger/wasm-*': [SKIP],
}], # 'arch == s390 or arch == s390x'
+##############################################################################
+['(arch == mipsel or arch == mips64el) and not simd_mips', {
+ # Skip tests that fail on MIPS architectures that don't support SIMD.
+ 'debugger/wasm-scope-info*': [SKIP],
+}], # '(arch == mipsel or arch == mips64el) and not simd_mips'
+
################################################################################
['variant == stress_snapshot', {
'*': [SKIP], # only relevant for mjsunit tests.
@@ -143,4 +156,20 @@
'debugger/wasm-stepping-with-skiplist': [SKIP],
}], # stress_js_bg_compile_wasm_code_gc
+##############################################################################
+['variant == stress_concurrent_allocation', {
+ # TODO(dinfuehr): Fix tests such that we can remove these lines.
+ 'cpu-profiler/coverage': [SKIP],
+ 'cpu-profiler/coverage-block': [SKIP],
+ 'runtime/internal-properties-entries': [SKIP],
+ 'runtime-call-stats/collection': [SKIP],
+}], # stress_concurrent_allocation
+
+##############################################################################
+['asan == True', {
+ # There are still memory leaks in some inspector tests
+ # (https://crbug.com/v8/11107).
+ 'runtime/evaluate-async': [SKIP],
+}], # asan == True
+
]
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index 2d69ba4a43..c58e3d20b9 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -5,37 +5,20 @@
#include "test/inspector/isolate-data.h"
#include "src/inspector/test-interface.h"
+#include "src/utils/vector.h"
#include "test/inspector/task-runner.h"
+#include "test/inspector/utils.h"
+
+namespace v8 {
+namespace internal {
namespace {
const int kIsolateDataIndex = 2;
const int kContextGroupIdIndex = 3;
-v8::internal::Vector<uint16_t> ToVector(v8::Isolate* isolate,
- v8::Local<v8::String> str) {
- v8::internal::Vector<uint16_t> buffer =
- v8::internal::Vector<uint16_t>::New(str->Length());
- str->Write(isolate, buffer.begin(), 0, str->Length());
- return buffer;
-}
-
-v8::Local<v8::String> ToString(v8::Isolate* isolate,
- const v8_inspector::StringView& string) {
- if (string.is8Bit())
- return v8::String::NewFromOneByte(isolate, string.characters8(),
- v8::NewStringType::kNormal,
- static_cast<int>(string.length()))
- .ToLocalChecked();
- else
- return v8::String::NewFromTwoByte(isolate, string.characters16(),
- v8::NewStringType::kNormal,
- static_cast<int>(string.length()))
- .ToLocalChecked();
-}
-
void Print(v8::Isolate* isolate, const v8_inspector::StringView& string) {
- v8::Local<v8::String> v8_string = ToString(isolate, string);
+ v8::Local<v8::String> v8_string = ToV8String(isolate, string);
v8::String::Utf8Value utf8_string(isolate, v8_string);
fwrite(*utf8_string, sizeof(**utf8_string), utf8_string.length(), stdout);
}
@@ -57,7 +40,8 @@ class Inspectable : public v8_inspector::V8InspectorSession::Inspectable {
IsolateData::IsolateData(TaskRunner* task_runner,
IsolateData::SetupGlobalTasks setup_global_tasks,
- v8::StartupData* startup_data, bool with_inspector)
+ v8::StartupData* startup_data,
+ WithInspector with_inspector)
: task_runner_(task_runner),
setup_global_tasks_(std::move(setup_global_tasks)) {
v8::Isolate::CreateParams params;
@@ -87,6 +71,13 @@ IsolateData* IsolateData::FromContext(v8::Local<v8::Context> context) {
}
int IsolateData::CreateContextGroup() {
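+  // Allocate a fresh group id and create the group's default (first) context.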
+ int context_group_id = ++last_context_group_id_;
+ CreateContext(context_group_id, v8_inspector::StringView());
+ return context_group_id;
+}
+
+void IsolateData::CreateContext(int context_group_id,
+ v8_inspector::StringView name) {
v8::HandleScope handle_scope(isolate_.get());
v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate_.get());
@@ -97,17 +88,15 @@ int IsolateData::CreateContextGroup() {
v8::Local<v8::Context> context =
v8::Context::New(isolate_.get(), nullptr, global_template);
context->SetAlignedPointerInEmbedderData(kIsolateDataIndex, this);
- int context_group_id = ++last_context_group_id_;
// Should be 2-byte aligned.
context->SetAlignedPointerInEmbedderData(
kContextGroupIdIndex, reinterpret_cast<void*>(context_group_id * 2));
- contexts_[context_group_id].Reset(isolate_.get(), context);
- if (inspector_) FireContextCreated(context, context_group_id);
- return context_group_id;
+ contexts_[context_group_id].emplace_back(isolate_.get(), context);
+ if (inspector_) FireContextCreated(context, context_group_id, name);
}
-v8::Local<v8::Context> IsolateData::GetContext(int context_group_id) {
- return contexts_[context_group_id].Get(isolate_.get());
+v8::Local<v8::Context> IsolateData::GetDefaultContext(int context_group_id) {
+ return contexts_[context_group_id].begin()->Get(isolate_.get());
}
void IsolateData::ResetContextGroup(int context_group_id) {
@@ -123,7 +112,7 @@ int IsolateData::GetContextGroupId(v8::Local<v8::Context> context) {
}
void IsolateData::RegisterModule(v8::Local<v8::Context> context,
- v8::internal::Vector<uint16_t> name,
+ std::vector<uint16_t> name,
v8::ScriptCompiler::Source* source) {
v8::Local<v8::Module> module;
if (!v8::ScriptCompiler::CompileModule(isolate(), source).ToLocal(&module))
@@ -279,16 +268,15 @@ int IsolateData::HandleMessage(v8::Local<v8::Message> message,
column_number = message->GetStartColumn(context).FromJust() + 1;
v8_inspector::StringView detailed_message;
- v8::internal::Vector<uint16_t> message_text_string =
- ToVector(isolate, message->Get());
- v8_inspector::StringView message_text(message_text_string.begin(),
- message_text_string.length());
- v8::internal::Vector<uint16_t> url_string;
+ std::vector<uint16_t> message_text_string = ToVector(isolate, message->Get());
+ v8_inspector::StringView message_text(message_text_string.data(),
+ message_text_string.size());
+ std::vector<uint16_t> url_string;
if (message->GetScriptOrigin().ResourceName()->IsString()) {
url_string = ToVector(
isolate, message->GetScriptOrigin().ResourceName().As<v8::String>());
}
- v8_inspector::StringView url(url_string.begin(), url_string.length());
+ v8_inspector::StringView url(url_string.data(), url_string.size());
v8::SealHandleScope seal_handle_scope(isolate);
return inspector->exceptionThrown(
@@ -338,9 +326,9 @@ void IsolateData::PromiseRejectHandler(v8::PromiseRejectMessage data) {
}
void IsolateData::FireContextCreated(v8::Local<v8::Context> context,
- int context_group_id) {
- v8_inspector::V8ContextInfo info(context, context_group_id,
- v8_inspector::StringView());
+ int context_group_id,
+ v8_inspector::StringView name) {
+ v8_inspector::V8ContextInfo info(context, context_group_id, name);
info.hasMemoryOnConsole = true;
v8::SealHandleScope seal_handle_scope(isolate());
inspector_->contextCreated(info);
@@ -388,7 +376,7 @@ bool IsolateData::isInspectableHeapObject(v8::Local<v8::Object> object) {
v8::Local<v8::Context> IsolateData::ensureDefaultContextInGroup(
int context_group_id) {
- return GetContext(context_group_id);
+ return GetDefaultContext(context_group_id);
}
void IsolateData::SetCurrentTimeMS(double time) {
@@ -398,7 +386,7 @@ void IsolateData::SetCurrentTimeMS(double time) {
double IsolateData::currentTimeMS() {
if (current_time_set_) return current_time_;
- return v8::internal::V8::GetCurrentPlatform()->CurrentClockTimeMillis();
+ return V8::GetCurrentPlatform()->CurrentClockTimeMillis();
}
void IsolateData::SetMemoryInfo(v8::Local<v8::Value> memory_info) {
@@ -413,6 +401,11 @@ void IsolateData::SetLogMaxAsyncCallStackDepthChanged(bool log) {
log_max_async_call_stack_depth_changed_ = log;
}
+void IsolateData::SetAdditionalConsoleApi(v8_inspector::StringView api_script) {
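+  // Stash the script; it is installed later via installAdditionalCommandLineAPI.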
+ v8::HandleScope handle_scope(isolate());
+ additional_console_api_.Reset(isolate(), ToV8String(isolate(), api_script));
+}
+
v8::MaybeLocal<v8::Value> IsolateData::memoryInfo(v8::Isolate* isolate,
v8::Local<v8::Context>) {
if (memory_info_.IsEmpty()) return v8::MaybeLocal<v8::Value>();
@@ -429,6 +422,21 @@ void IsolateData::quitMessageLoopOnPause() {
task_runner_->QuitMessageLoop();
}
+void IsolateData::installAdditionalCommandLineAPI(
+ v8::Local<v8::Context> context, v8::Local<v8::Object> object) {
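+  // Compile and run the script registered via SetAdditionalConsoleApi in |context|.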
+ if (additional_console_api_.IsEmpty()) return;
+ CHECK(context->GetIsolate() == isolate());
+ v8::HandleScope handle_scope(isolate());
+ v8::Context::Scope context_scope(context);
+ v8::ScriptOrigin origin(
+ v8::String::NewFromUtf8Literal(isolate(), "internal-console-api"));
+ v8::ScriptCompiler::Source scriptSource(
+ additional_console_api_.Get(isolate()), origin);
+ v8::MaybeLocal<v8::Script> script =
+ v8::ScriptCompiler::Compile(context, &scriptSource);
+ CHECK(!script.ToLocalChecked()->Run(context).IsEmpty());
+}
+
void IsolateData::consoleAPIMessage(int contextGroupId,
v8::Isolate::MessageErrorLevel level,
const v8_inspector::StringView& message,
@@ -460,11 +468,11 @@ class StringBufferImpl : public v8_inspector::StringBuffer {
: data_(ToVector(isolate, string)) {}
v8_inspector::StringView string() const override {
- return v8_inspector::StringView(data_.begin(), data_.length());
+ return v8_inspector::StringView(data_.data(), data_.size());
}
private:
- v8::internal::Vector<uint16_t> data_;
+ std::vector<uint16_t> data_;
};
} // anonymous namespace
@@ -472,9 +480,11 @@ std::unique_ptr<v8_inspector::StringBuffer> IsolateData::resourceNameToUrl(
const v8_inspector::StringView& resourceName) {
if (resource_name_prefix_.IsEmpty()) return nullptr;
v8::HandleScope handle_scope(isolate());
- v8::Local<v8::String> name = ToString(isolate(), resourceName);
+ v8::Local<v8::String> name = ToV8String(isolate(), resourceName);
v8::Local<v8::String> prefix = resource_name_prefix_.Get(isolate());
v8::Local<v8::String> url = v8::String::Concat(isolate(), prefix, name);
- return std::unique_ptr<StringBufferImpl>(
- new StringBufferImpl(isolate(), url));
+ return std::make_unique<StringBufferImpl>(isolate(), url);
}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/inspector/isolate-data.h b/deps/v8/test/inspector/isolate-data.h
index fc15c3b5f3..6cab3de108 100644
--- a/deps/v8/test/inspector/isolate-data.h
+++ b/deps/v8/test/inspector/isolate-data.h
@@ -15,8 +15,13 @@
#include "src/base/platform/platform.h"
#include "src/utils/vector.h"
+namespace v8 {
+namespace internal {
+
class TaskRunner;
+enum WithInspector : bool { kWithInspector = true, kNoInspector = false };
+
class IsolateData : public v8_inspector::V8InspectorClient {
public:
class SetupGlobalTask {
@@ -27,20 +32,30 @@ class IsolateData : public v8_inspector::V8InspectorClient {
};
using SetupGlobalTasks = std::vector<std::unique_ptr<SetupGlobalTask>>;
+ IsolateData(const IsolateData&) = delete;
+ IsolateData& operator=(const IsolateData&) = delete;
IsolateData(TaskRunner* task_runner, SetupGlobalTasks setup_global_tasks,
- v8::StartupData* startup_data, bool with_inspector);
+ v8::StartupData* startup_data, WithInspector with_inspector);
static IsolateData* FromContext(v8::Local<v8::Context> context);
+ ~IsolateData() override {
+ // Enter the isolate before destructing this IsolateData, so that
+ // destructors that run before the Isolate's destructor still see it as
+ // entered.
+ isolate()->Enter();
+ }
+
v8::Isolate* isolate() const { return isolate_.get(); }
TaskRunner* task_runner() const { return task_runner_; }
// Setting things up.
int CreateContextGroup();
+ void CreateContext(int context_group_id, v8_inspector::StringView name);
void ResetContextGroup(int context_group_id);
- v8::Local<v8::Context> GetContext(int context_group_id);
+ v8::Local<v8::Context> GetDefaultContext(int context_group_id);
int GetContextGroupId(v8::Local<v8::Context> context);
void RegisterModule(v8::Local<v8::Context> context,
- v8::internal::Vector<uint16_t> name,
+ std::vector<uint16_t> name,
v8::ScriptCompiler::Source* source);
// Working with V8Inspector api.
@@ -73,23 +88,16 @@ class IsolateData : public v8_inspector::V8InspectorClient {
void SetMemoryInfo(v8::Local<v8::Value> memory_info);
void SetLogConsoleApiMessageCalls(bool log);
void SetLogMaxAsyncCallStackDepthChanged(bool log);
+ void SetAdditionalConsoleApi(v8_inspector::StringView api_script);
void SetMaxAsyncTaskStacksForTest(int limit);
void DumpAsyncTaskStacksStateForTest();
- void FireContextCreated(v8::Local<v8::Context> context, int context_group_id);
+ void FireContextCreated(v8::Local<v8::Context> context, int context_group_id,
+ v8_inspector::StringView name);
void FireContextDestroyed(v8::Local<v8::Context> context);
void FreeContext(v8::Local<v8::Context> context);
void SetResourceNamePrefix(v8::Local<v8::String> prefix);
private:
- struct VectorCompare {
- bool operator()(const v8::internal::Vector<uint16_t>& lhs,
- const v8::internal::Vector<uint16_t>& rhs) const {
- for (int i = 0; i < lhs.length() && i < rhs.length(); ++i) {
- if (lhs[i] != rhs[i]) return lhs[i] < rhs[i];
- }
- return false;
- }
- };
static v8::MaybeLocal<v8::Module> ModuleResolveCallback(
v8::Local<v8::Context> context, v8::Local<v8::String> specifier,
v8::Local<v8::Module> referrer);
@@ -109,6 +117,8 @@ class IsolateData : public v8_inspector::V8InspectorClient {
v8::Local<v8::Context>) override;
void runMessageLoopOnPause(int context_group_id) override;
void quitMessageLoopOnPause() override;
+ void installAdditionalCommandLineAPI(v8::Local<v8::Context>,
+ v8::Local<v8::Object>) override;
void consoleAPIMessage(int contextGroupId,
v8::Isolate::MessageErrorLevel level,
const v8_inspector::StringView& message,
@@ -125,7 +135,11 @@ class IsolateData : public v8_inspector::V8InspectorClient {
// call {Dispose}. We have to use the unique_ptr so that the isolate gets
// disposed in the right order, relative to other member variables.
struct IsolateDeleter {
- void operator()(v8::Isolate* isolate) const { isolate->Dispose(); }
+ void operator()(v8::Isolate* isolate) const {
+ // Exit the isolate after it was entered by ~IsolateData.
+ isolate->Exit();
+ isolate->Dispose();
+ }
};
TaskRunner* task_runner_;
@@ -134,10 +148,8 @@ class IsolateData : public v8_inspector::V8InspectorClient {
std::unique_ptr<v8::Isolate, IsolateDeleter> isolate_;
std::unique_ptr<v8_inspector::V8Inspector> inspector_;
int last_context_group_id_ = 0;
- std::map<int, v8::Global<v8::Context>> contexts_;
- std::map<v8::internal::Vector<uint16_t>, v8::Global<v8::Module>,
- VectorCompare>
- modules_;
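+  // A context group may own several contexts; the first one is the default.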
+ std::map<int, std::vector<v8::Global<v8::Context>>> contexts_;
+ std::map<std::vector<uint16_t>, v8::Global<v8::Module>> modules_;
int last_session_id_ = 0;
std::map<int, std::unique_ptr<v8_inspector::V8InspectorSession>> sessions_;
std::map<v8_inspector::V8InspectorSession*, int> context_group_by_session_;
@@ -148,8 +160,10 @@ class IsolateData : public v8_inspector::V8InspectorClient {
bool log_max_async_call_stack_depth_changed_ = false;
v8::Global<v8::Private> not_inspectable_private_;
v8::Global<v8::String> resource_name_prefix_;
-
- DISALLOW_COPY_AND_ASSIGN(IsolateData);
+ v8::Global<v8::String> additional_console_api_;
};
+} // namespace internal
+} // namespace v8
+
#endif // V8_TEST_INSPECTOR_PROTOCOL_ISOLATE_DATA_H_
diff --git a/deps/v8/test/inspector/protocol-test.js b/deps/v8/test/inspector/protocol-test.js
index 5f115ae91f..89b663f052 100644
--- a/deps/v8/test/inspector/protocol-test.js
+++ b/deps/v8/test/inspector/protocol-test.js
@@ -142,6 +142,10 @@ InspectorTest.ContextGroup = class {
this.id = utils.createContextGroup();
}
+ createContext(name) {
+ utils.createContext(this.id, name || '');
+ }
+
schedulePauseOnNextStatement(reason, details) {
utils.schedulePauseOnNextStatement(this.id, reason, details);
}
@@ -295,9 +299,14 @@ InspectorTest.Session = class {
if (location.lineNumber != 0) {
InspectorTest.log('Unexpected wasm line number: ' + location.lineNumber);
}
- let wasm_opcode = script.bytecode[location.columnNumber].toString(16);
- if (wasm_opcode.length % 2) wasm_opcode = '0' + wasm_opcode;
- InspectorTest.log(`Script ${script.url} byte offset ${location.columnNumber}: Wasm opcode 0x${wasm_opcode}`);
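+      // Pad the opcode to an even number of hex digits and append its mnemonic when available.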
+ let wasm_opcode = script.bytecode[location.columnNumber];
+ let opcode_str = wasm_opcode.toString(16);
+ if (opcode_str.length % 2) opcode_str = `0${opcode_str}`;
+ if (InspectorTest.getWasmOpcodeName) {
+ opcode_str += ` (${InspectorTest.getWasmOpcodeName(wasm_opcode)})`;
+ }
+ InspectorTest.log(`Script ${script.url} byte offset ${
+ location.columnNumber}: Wasm opcode 0x${opcode_str}`);
} else {
var lines = script.scriptSource.split('\n');
var line = lines[location.lineNumber];
diff --git a/deps/v8/test/inspector/runtime/add-binding-expected.txt b/deps/v8/test/inspector/runtime/add-binding-expected.txt
index 94d5ed4e0a..38ecb05825 100644
--- a/deps/v8/test/inspector/runtime/add-binding-expected.txt
+++ b/deps/v8/test/inspector/runtime/add-binding-expected.txt
@@ -37,6 +37,15 @@ binding called in session2
Disable agent inside session1..
Call binding..
+binding called in session1
+{
+ method : Runtime.bindingCalled
+ params : {
+ executionContextId : <executionContextId>
+ name : send
+ payload : payload
+ }
+}
binding called in session2
{
method : Runtime.bindingCalled
@@ -49,9 +58,45 @@ binding called in session2
Disable agent inside session2..
Call binding..
+binding called in session1
+{
+ method : Runtime.bindingCalled
+ params : {
+ executionContextId : <executionContextId>
+ name : send
+ payload : payload
+ }
+}
+binding called in session2
+{
+ method : Runtime.bindingCalled
+ params : {
+ executionContextId : <executionContextId>
+ name : send
+ payload : payload
+ }
+}
Enable agent inside session1..
Call binding..
+binding called in session1
+{
+ method : Runtime.bindingCalled
+ params : {
+ executionContextId : <executionContextId>
+ name : send
+ payload : payload
+ }
+}
+binding called in session2
+{
+ method : Runtime.bindingCalled
+ params : {
+ executionContextId : <executionContextId>
+ name : send
+ payload : payload
+ }
+}
Running test: testReconnect
@@ -97,3 +142,65 @@ binding called in session1
}
Remove binding inside session..
Call binding..
+
+Running test: testAddBindingToContextById
+Call binding in default context (binding should NOT be exposed)
+Call binding in target context (binding should be exposed)
+binding called in session1
+{
+ method : Runtime.bindingCalled
+ params : {
+ executionContextId : <executionContextId>
+ name : frobnicate
+ payload : message
+ }
+}
+Call binding in newly created context (binding should NOT be exposed)
+
+Running test: testAddBindingToContextByName
+Call binding in default context (binding should NOT be exposed)
+Call binding in Foo (binding should be exposed)
+binding called in session1
+{
+ method : Runtime.bindingCalled
+ params : {
+ executionContextId : <executionContextId>
+ name : frobnicate
+ payload : message
+ }
+}
+Call binding in Bar (binding should NOT be exposed)
+Call binding in newly-created Foo (binding should be exposed)
+binding called in session1
+{
+ method : Runtime.bindingCalled
+ params : {
+ executionContextId : <executionContextId>
+ name : frobnicate
+ payload : message
+ }
+}
+Call binding in newly-created Bazz (binding should NOT be exposed)
+
+Running test: testErrors
+{
+ error : {
+ code : -32602
+ message : Invalid executionContextName
+ }
+ id : <messageId>
+}
+{
+ error : {
+ code : -32602
+ message : executionContextName is mutually exclusive with executionContextId
+ }
+ id : <messageId>
+}
+{
+ error : {
+ code : -32602
+ message : Cannot find execution context with given executionContextId
+ }
+ id : <messageId>
+}
diff --git a/deps/v8/test/inspector/runtime/add-binding.js b/deps/v8/test/inspector/runtime/add-binding.js
index 78e8d00be7..95f4549d4f 100644
--- a/deps/v8/test/inspector/runtime/add-binding.js
+++ b/deps/v8/test/inspector/runtime/add-binding.js
@@ -63,7 +63,75 @@ InspectorTest.runAsyncTestSuite([
session.Protocol.Runtime.removeBinding({name: 'send'});
InspectorTest.log('Call binding..');
await session.Protocol.Runtime.evaluate({expression: `send('payload')`});
+ },
+
+ async function testAddBindingToContextById() {
+ const {contextGroup, sessions: [session]} = setupSessions(1);
+ const contextId1 = (await session.Protocol.Runtime.onceExecutionContextCreated()).params.context.id;
+
+ contextGroup.createContext();
+ const contextId2 = (await session.Protocol.Runtime.onceExecutionContextCreated()).params.context.id;
+
+ await session.Protocol.Runtime.addBinding({name: 'frobnicate', executionContextId: contextId2});
+ const expression = `frobnicate('message')`;
+
+ InspectorTest.log('Call binding in default context (binding should NOT be exposed)');
+ await session.Protocol.Runtime.evaluate({expression});
+
+ InspectorTest.log('Call binding in target context (binding should be exposed)');
+ await session.Protocol.Runtime.evaluate({expression, contextId: contextId2});
+
+ InspectorTest.log('Call binding in newly created context (binding should NOT be exposed)');
+ contextGroup.createContext();
+ const contextId3 = (await session.Protocol.Runtime.onceExecutionContextCreated()).params.context.id;
+ await session.Protocol.Runtime.evaluate({expression, contextId: contextId3});
+ },
+
+ async function testAddBindingToContextByName() {
+ const {contextGroup, sessions: [session]} = setupSessions(1);
+ const defaultContext = (await session.Protocol.Runtime.onceExecutionContextCreated()).params.context.id;
+
+ contextGroup.createContext("foo");
+ const contextFoo = (await session.Protocol.Runtime.onceExecutionContextCreated()).params.context.id;
+
+ contextGroup.createContext("bar");
+ const contextBar = (await session.Protocol.Runtime.onceExecutionContextCreated()).params.context.id;
+
+ await session.Protocol.Runtime.addBinding({name: 'frobnicate', executionContextName: 'foo'});
+ const expression = `frobnicate('message')`;
+
+ InspectorTest.log('Call binding in default context (binding should NOT be exposed)');
+ await session.Protocol.Runtime.evaluate({expression});
+
+ InspectorTest.log('Call binding in Foo (binding should be exposed)');
+ await session.Protocol.Runtime.evaluate({expression, contextId: contextFoo});
+
+ InspectorTest.log('Call binding in Bar (binding should NOT be exposed)');
+ await session.Protocol.Runtime.evaluate({expression, contextId: contextBar});
+
+ contextGroup.createContext("foo");
+ const contextFoo2 = (await session.Protocol.Runtime.onceExecutionContextCreated()).params.context.id;
+
+ InspectorTest.log('Call binding in newly-created Foo (binding should be exposed)');
+ await session.Protocol.Runtime.evaluate({expression, contextId: contextFoo2});
+
+ contextGroup.createContext("bazz");
+ const contextBazz = (await session.Protocol.Runtime.onceExecutionContextCreated()).params.context.id;
+
+ InspectorTest.log('Call binding in newly-created Bazz (binding should NOT be exposed)');
+ await session.Protocol.Runtime.evaluate({expression, contextId: contextBazz});
+ },
+
+ async function testErrors() {
+ const {contextGroup, sessions: [session]} = setupSessions(1);
+ let err = await session.Protocol.Runtime.addBinding({name: 'frobnicate', executionContextName: ''});
+ InspectorTest.logMessage(err);
+ err = await session.Protocol.Runtime.addBinding({name: 'frobnicate', executionContextName: 'foo', executionContextId: 1});
+ InspectorTest.logMessage(err);
+ err = await session.Protocol.Runtime.addBinding({name: 'frobnicate', executionContextId: 2128506});
+ InspectorTest.logMessage(err);
}
+
]);
function setupSessions(num) {
diff --git a/deps/v8/test/inspector/runtime/custom-preview-expected.txt b/deps/v8/test/inspector/runtime/custom-preview-expected.txt
index f32d899e5d..55df125b15 100644
--- a/deps/v8/test/inspector/runtime/custom-preview-expected.txt
+++ b/deps/v8/test/inspector/runtime/custom-preview-expected.txt
@@ -2,7 +2,13 @@ RemoteObject.CustomPreview
Dump custom previews..
{
bodyGetterId : <bodyGetterId>
- header : ["span",{},"Header formatted by 1 ","a"]
+ header : [
+ [0] : span
+ [1] : {
+ }
+ [2] : Header formatted by 1
+ [3] : a
+ ]
}
{
id : <messageId>
@@ -30,7 +36,13 @@ Dump custom previews..
}
{
bodyGetterId : <bodyGetterId>
- header : ["span",{},"Header formatted by 2 ","b"]
+ header : [
+ [0] : span
+ [1] : {
+ }
+ [2] : Header formatted by 2
+ [3] : b
+ ]
}
{
id : <messageId>
@@ -49,7 +61,13 @@ Dump custom previews..
}
{
bodyGetterId : <bodyGetterId>
- header : ["span",{},"Header formatted by 1 ","c"]
+ header : [
+ [0] : span
+ [1] : {
+ }
+ [2] : Header formatted by 1
+ [3] : c
+ ]
}
{
id : <messageId>
@@ -76,12 +94,36 @@ Dump custom previews..
}
}
{
- header : ["span",{},"Formatter with config ",["object",{"type":"object","className":"Object","description":"Object","objectId":"{\"injectedScriptId\":1,\"id\":10}","customPreview":{"header":"[\"span\",{},\"Header \",\"info: \",\"additional info\"]","bodyGetterId":"{\"injectedScriptId\":1,\"id\":11}"}}]]
+ header : [
+ [0] : span
+ [1] : {
+ }
+ [2] : Formatter with config
+ [3] : [
+ [0] : object
+ [1] : {
+ className : Object
+ customPreview : {
+ bodyGetterId : <bodyGetterId>
+ header : ["span",{},"Header ","info: ","additional info"]
+ }
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ ]
+ ]
}
Change formatters order and dump again..
{
bodyGetterId : <bodyGetterId>
- header : ["span",{},"Header formatted by 1 ","a"]
+ header : [
+ [0] : span
+ [1] : {
+ }
+ [2] : Header formatted by 1
+ [3] : a
+ ]
}
{
id : <messageId>
@@ -109,7 +151,13 @@ Change formatters order and dump again..
}
{
bodyGetterId : <bodyGetterId>
- header : ["span",{},"Header formatted by 2 ","b"]
+ header : [
+ [0] : span
+ [1] : {
+ }
+ [2] : Header formatted by 2
+ [3] : b
+ ]
}
{
id : <messageId>
@@ -128,7 +176,13 @@ Change formatters order and dump again..
}
{
bodyGetterId : <bodyGetterId>
- header : ["span",{},"Header formatted by 2 ","c"]
+ header : [
+ [0] : span
+ [1] : {
+ }
+ [2] : Header formatted by 2
+ [3] : c
+ ]
}
{
id : <messageId>
@@ -146,12 +200,36 @@ Change formatters order and dump again..
}
}
{
- header : ["span",{},"Formatter with config ",["object",{"type":"object","className":"Object","description":"Object","objectId":"{\"injectedScriptId\":1,\"id\":21}","customPreview":{"header":"[\"span\",{},\"Header \",\"info: \",\"additional info\"]","bodyGetterId":"{\"injectedScriptId\":1,\"id\":22}"}}]]
+ header : [
+ [0] : span
+ [1] : {
+ }
+ [2] : Formatter with config
+ [3] : [
+ [0] : object
+ [1] : {
+ className : Object
+ customPreview : {
+ bodyGetterId : <bodyGetterId>
+ header : ["span",{},"Header ","info: ","additional info"]
+ }
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ ]
+ ]
}
Test Runtime.getProperties
{
bodyGetterId : <bodyGetterId>
- header : ["span",{},"Header formatted by 1 ","a"]
+ header : [
+ [0] : span
+ [1] : {
+ }
+ [2] : Header formatted by 1
+ [3] : a
+ ]
}
{
id : <messageId>
@@ -162,7 +240,7 @@ Test Runtime.getProperties
[0] : span
[1] : {
}
- [2] : Body formatted by 1
+ [2] : Body formatted by 1
[3] : a
[4] : [
[0] : object
diff --git a/deps/v8/test/inspector/runtime/custom-preview.js b/deps/v8/test/inspector/runtime/custom-preview.js
index 62bb848f26..a875bce79b 100644
--- a/deps/v8/test/inspector/runtime/custom-preview.js
+++ b/deps/v8/test/inspector/runtime/custom-preview.js
@@ -120,6 +120,8 @@ function dumpCustomPreviewForEvaluate(result) {
async function dumpCustomPreview(result) {
const { objectId, customPreview } = result;
+ if (customPreview.header)
+ customPreview.header = JSON.parse(customPreview.header);
InspectorTest.logMessage(customPreview);
if (customPreview.bodyGetterId) {
const body = await Protocol.Runtime.callFunctionOn({
diff --git a/deps/v8/test/inspector/runtime/regression-1140845-expected.txt b/deps/v8/test/inspector/runtime/regression-1140845-expected.txt
new file mode 100644
index 0000000000..50f35863cc
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/regression-1140845-expected.txt
@@ -0,0 +1,38 @@
+Regression test for crbug.com/1140845. Check that a "then" getter on the object prototype does not crash V8
+Evaluating a simple string 'foo' does not cause a crash, but throws a side-effect exception.
+{
+ id : <messageId>
+ result : {
+ exceptionDetails : {
+ columnNumber : -1
+ exception : {
+ className : EvalError
+ description : EvalError: Possible side-effect in debug-evaluate
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ exceptionId : <exceptionId>
+ lineNumber : -1
+ scriptId : <scriptId>
+ text : Uncaught
+ }
+ result : {
+ className : EvalError
+ description : EvalError: Possible side-effect in debug-evaluate
+ objectId : <objectId>
+ subtype : error
+ type : object
+ }
+ }
+}
+Evaluating a simple string 'foo' with side-effects should give us the string.
+{
+ id : <messageId>
+ result : {
+ result : {
+ type : string
+ value : foo
+ }
+ }
+}
diff --git a/deps/v8/test/inspector/runtime/regression-1140845.js b/deps/v8/test/inspector/runtime/regression-1140845.js
new file mode 100644
index 0000000000..0a09729ee7
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/regression-1140845.js
@@ -0,0 +1,36 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} =
+    InspectorTest.start('Regression test for crbug.com/1140845. Check that a "then" getter on the object prototype does not crash V8');
+
+const setupScript = `
+ let obj = Object.prototype;
+ obj.__defineGetter__('then', function() {console.log("foo")});
+`;
+
+(async function() {
+ await Protocol.Debugger.enable();
+
+  // Set a custom `then` getter on the Object prototype. This causes termination
+ // when 'then' is retrieved, as the 'then' getter is side-effecting.
+ await Protocol.Runtime.evaluate({
+ expression: setupScript,
+ });
+
+  InspectorTest.log(`Evaluating a simple string 'foo' does not cause a crash, but throws a side-effect exception.`);
+ InspectorTest.logMessage(await Protocol.Runtime.evaluate({
+ expression: `"foo"`,
+ replMode: true,
+ throwOnSideEffect: true,
+ }));
+
+  InspectorTest.log(`Evaluating a simple string 'foo' with side-effects should give us the string.`);
+ InspectorTest.logMessage(await Protocol.Runtime.evaluate({
+ expression: `"foo"`,
+ replMode: true,
+ }));
+
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/task-runner.cc b/deps/v8/test/inspector/task-runner.cc
index 7237aad4e0..bc12c6f6a4 100644
--- a/deps/v8/test/inspector/task-runner.cc
+++ b/deps/v8/test/inspector/task-runner.cc
@@ -11,6 +11,9 @@
#include <unistd.h> // NOLINT
#endif // !defined(_WIN32) && !defined(_WIN64)
+namespace v8 {
+namespace internal {
+
namespace {
void ReportUncaughtException(v8::Isolate* isolate,
@@ -26,16 +29,17 @@ void ReportUncaughtException(v8::Isolate* isolate,
isolate, try_catch.Message()
->GetSourceLine(isolate->GetCurrentContext())
.ToLocalChecked());
- fprintf(stderr, "Unhandle exception: %s @%s[%d]\n", message.data(),
+ fprintf(stderr, "Unhandled exception: %s @%s[%d]\n", message.data(),
source_line.data(), line);
}
} // namespace
TaskRunner::TaskRunner(IsolateData::SetupGlobalTasks setup_global_tasks,
- bool catch_exceptions,
+ CatchExceptions catch_exceptions,
v8::base::Semaphore* ready_semaphore,
- v8::StartupData* startup_data, bool with_inspector)
+ v8::StartupData* startup_data,
+ WithInspector with_inspector)
: Thread(Options("Task Runner")),
setup_global_tasks_(std::move(setup_global_tasks)),
startup_data_(startup_data),
@@ -61,13 +65,12 @@ void TaskRunner::Run() {
void TaskRunner::RunMessageLoop(bool only_protocol) {
int loop_number = ++nested_loop_count_;
while (nested_loop_count_ == loop_number && !is_terminated_) {
- TaskRunner::Task* task = GetNext(only_protocol);
+ std::unique_ptr<TaskRunner::Task> task = GetNext(only_protocol);
if (!task) return;
v8::Isolate::Scope isolate_scope(isolate());
if (catch_exceptions_) {
v8::TryCatch try_catch(isolate());
task->Run(data_.get());
- delete task;
if (try_catch.HasCaught()) {
ReportUncaughtException(isolate(), try_catch);
fflush(stdout);
@@ -76,8 +79,8 @@ void TaskRunner::RunMessageLoop(bool only_protocol) {
}
} else {
task->Run(data_.get());
- delete task;
}
+ task.reset();
// Also pump isolate's foreground task queue to ensure progress.
// This can be removed once https://crbug.com/v8/10747 is fixed.
// TODO(10748): Enable --stress-incremental-marking after the existing
@@ -95,8 +98,8 @@ void TaskRunner::QuitMessageLoop() {
--nested_loop_count_;
}
-void TaskRunner::Append(Task* task) {
- queue_.Enqueue(task);
+void TaskRunner::Append(std::unique_ptr<Task> task) {
+ queue_.Enqueue(std::move(task));
process_queue_semaphore_.Signal();
}
@@ -105,21 +108,24 @@ void TaskRunner::Terminate() {
process_queue_semaphore_.Signal();
}
-TaskRunner::Task* TaskRunner::GetNext(bool only_protocol) {
+std::unique_ptr<TaskRunner::Task> TaskRunner::GetNext(bool only_protocol) {
for (;;) {
if (is_terminated_) return nullptr;
if (only_protocol) {
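+      // Only priority (protocol) tasks may run now; defer everything else.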
- Task* task = nullptr;
+ std::unique_ptr<Task> task;
if (queue_.Dequeue(&task)) {
if (task->is_priority_task()) return task;
- deffered_queue_.Enqueue(task);
+ deferred_queue_.Enqueue(std::move(task));
}
} else {
- Task* task = nullptr;
- if (deffered_queue_.Dequeue(&task)) return task;
+ std::unique_ptr<Task> task;
+ if (deferred_queue_.Dequeue(&task)) return task;
if (queue_.Dequeue(&task)) return task;
}
process_queue_semaphore_.Wait();
}
return nullptr;
}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/inspector/task-runner.h b/deps/v8/test/inspector/task-runner.h
index afc3c39ab2..039fb61da8 100644
--- a/deps/v8/test/inspector/task-runner.h
+++ b/deps/v8/test/inspector/task-runner.h
@@ -17,6 +17,14 @@
#include "src/utils/vector.h"
#include "test/inspector/isolate-data.h"
+namespace v8 {
+namespace internal {
+
+enum CatchExceptions : bool {
+ kDoCatchExceptions = true,
+ kDontCatchExceptions = false
+};
+
class TaskRunner : public v8::base::Thread {
public:
class Task {
@@ -27,9 +35,12 @@ class TaskRunner : public v8::base::Thread {
};
TaskRunner(IsolateData::SetupGlobalTasks setup_global_tasks,
- bool catch_exceptions, v8::base::Semaphore* ready_semaphore,
- v8::StartupData* startup_data, bool with_inspector);
+ CatchExceptions catch_exceptions,
+ v8::base::Semaphore* ready_semaphore,
+ v8::StartupData* startup_data, WithInspector with_inspector);
~TaskRunner() override;
+ TaskRunner(const TaskRunner&) = delete;
+ TaskRunner& operator=(const TaskRunner&) = delete;
IsolateData* data() const { return data_.get(); }
// Thread implementation.
@@ -39,34 +50,34 @@ class TaskRunner : public v8::base::Thread {
void RunMessageLoop(bool only_protocol);
void QuitMessageLoop();
- // TaskRunner takes ownership.
- void Append(Task* task);
+ void Append(std::unique_ptr<Task>);
void Terminate();
private:
- Task* GetNext(bool only_protocol);
+ std::unique_ptr<Task> GetNext(bool only_protocol);
v8::Isolate* isolate() const { return data_->isolate(); }
IsolateData::SetupGlobalTasks setup_global_tasks_;
v8::StartupData* startup_data_;
- bool with_inspector_;
- bool catch_exceptions_;
+ WithInspector with_inspector_;
+ CatchExceptions catch_exceptions_;
v8::base::Semaphore* ready_semaphore_;
std::unique_ptr<IsolateData> data_;
// deferred_queue_ combined with queue_ (in this order) have all tasks in the
// correct order. Sometimes we skip non-protocol tasks by moving them from
// queue_ to deferred_queue_.
- v8::internal::LockedQueue<Task*> queue_;
- v8::internal::LockedQueue<Task*> deffered_queue_;
+ v8::internal::LockedQueue<std::unique_ptr<Task>> queue_;
+ v8::internal::LockedQueue<std::unique_ptr<Task>> deferred_queue_;
v8::base::Semaphore process_queue_semaphore_;
int nested_loop_count_;
std::atomic<int> is_terminated_;
-
- DISALLOW_COPY_AND_ASSIGN(TaskRunner);
};
+} // namespace internal
+} // namespace v8
+
#endif // V8_TEST_INSPECTOR_PROTOCOL_TASK_RUNNER_H_
diff --git a/deps/v8/test/inspector/tasks.cc b/deps/v8/test/inspector/tasks.cc
new file mode 100644
index 0000000000..bf40847aac
--- /dev/null
+++ b/deps/v8/test/inspector/tasks.cc
@@ -0,0 +1,53 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/inspector/tasks.h"
+
+#include <vector>
+
+#include "include/v8-inspector.h"
+#include "include/v8.h"
+#include "test/inspector/isolate-data.h"
+#include "test/inspector/utils.h"
+
+namespace v8 {
+namespace internal {
+
+void ExecuteStringTask::Run(IsolateData* data) {
+ v8::MicrotasksScope microtasks_scope(data->isolate(),
+ v8::MicrotasksScope::kRunMicrotasks);
+ v8::HandleScope handle_scope(data->isolate());
+ v8::Local<v8::Context> context = data->GetDefaultContext(context_group_id_);
+ v8::Context::Scope context_scope(context);
+ v8::ScriptOrigin origin(
+ ToV8String(data->isolate(), name_),
+ v8::Integer::New(data->isolate(), line_offset_),
+ v8::Integer::New(data->isolate(), column_offset_),
+ /* resource_is_shared_cross_origin */ v8::Local<v8::Boolean>(),
+ /* script_id */ v8::Local<v8::Integer>(),
+ /* source_map_url */ v8::Local<v8::Value>(),
+ /* resource_is_opaque */ v8::Local<v8::Boolean>(),
+ /* is_wasm */ v8::Local<v8::Boolean>(),
+ v8::Boolean::New(data->isolate(), is_module_));
+ v8::Local<v8::String> source;
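+  // Prefer the UTF-16 expression when one was supplied; otherwise fall back
+  // to the UTF-8 copy.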
+ if (expression_.size() != 0)
+ source = ToV8String(data->isolate(), expression_);
+ else
+ source = ToV8String(data->isolate(), expression_utf8_);
+
+ v8::ScriptCompiler::Source scriptSource(source, origin);
+ v8::Isolate::SafeForTerminationScope allowTermination(data->isolate());
+ if (!is_module_) {
+ v8::Local<v8::Script> script;
+ if (!v8::ScriptCompiler::Compile(context, &scriptSource).ToLocal(&script))
+ return;
+ v8::MaybeLocal<v8::Value> result;
+ result = script->Run(context);
+ } else {
+ data->RegisterModule(context, name_, &scriptSource);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/inspector/tasks.h b/deps/v8/test/inspector/tasks.h
new file mode 100644
index 0000000000..28d38f2a2a
--- /dev/null
+++ b/deps/v8/test/inspector/tasks.h
@@ -0,0 +1,187 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TEST_INSPECTOR_TASKS_H_
+#define V8_TEST_INSPECTOR_TASKS_H_
+
+#include <vector>
+
+#include "include/v8-inspector.h"
+#include "include/v8.h"
+#include "src/base/platform/semaphore.h"
+#include "test/inspector/isolate-data.h"
+#include "test/inspector/task-runner.h"
+#include "test/inspector/utils.h"
+
+namespace v8 {
+namespace internal {
+
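+// Posts |callback| to |task_runner| as a priority task and blocks the calling
+// thread until the callback has finished running on the runner's thread.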
+template <typename T>
+void RunSyncTask(TaskRunner* task_runner, T callback) {
+ class SyncTask : public TaskRunner::Task {
+ public:
+ SyncTask(v8::base::Semaphore* ready_semaphore, T callback)
+ : ready_semaphore_(ready_semaphore), callback_(callback) {}
+ ~SyncTask() override = default;
+ bool is_priority_task() final { return true; }
+
+ private:
+ void Run(IsolateData* data) override {
+ callback_(data);
+ if (ready_semaphore_) ready_semaphore_->Signal();
+ }
+
+ v8::base::Semaphore* ready_semaphore_;
+ T callback_;
+ };
+
+ v8::base::Semaphore ready_semaphore(0);
+ task_runner->Append(std::make_unique<SyncTask>(&ready_semaphore, callback));
+ ready_semaphore.Wait();
+}
+
+class SendMessageToBackendTask : public TaskRunner::Task {
+ public:
+ SendMessageToBackendTask(int session_id, const std::vector<uint16_t>& message)
+ : session_id_(session_id), message_(message) {}
+ bool is_priority_task() final { return true; }
+
+ private:
+ void Run(IsolateData* data) override {
+ v8_inspector::StringView message_view(message_.data(), message_.size());
+ data->SendMessage(session_id_, message_view);
+ }
+
+ int session_id_;
+ std::vector<uint16_t> message_;
+};
+
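+// Schedules |task_name| with the inspector and wraps |task| so that matching
+// AsyncTaskStarted/AsyncTaskFinished events surround its run.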
+inline void RunAsyncTask(TaskRunner* task_runner,
+ const v8_inspector::StringView& task_name,
+ std::unique_ptr<TaskRunner::Task> task) {
+ class AsyncTask : public TaskRunner::Task {
+ public:
+ explicit AsyncTask(std::unique_ptr<TaskRunner::Task> inner)
+ : inner_(std::move(inner)) {}
+ ~AsyncTask() override = default;
+ AsyncTask(const AsyncTask&) = delete;
+ AsyncTask& operator=(const AsyncTask&) = delete;
+ bool is_priority_task() override { return inner_->is_priority_task(); }
+ void Run(IsolateData* data) override {
+ data->AsyncTaskStarted(inner_.get());
+ inner_->Run(data);
+ data->AsyncTaskFinished(inner_.get());
+ }
+
+ private:
+ std::unique_ptr<TaskRunner::Task> inner_;
+ };
+
+ task_runner->data()->AsyncTaskScheduled(task_name, task.get(), false);
+ task_runner->Append(std::make_unique<AsyncTask>(std::move(task)));
+}
+
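+// Compiles and runs a script or module in the given context group. The source
+// is stored either as UTF-16 (expression_) or UTF-8 (expression_utf8_),
+// depending on which constructor was used.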
+class ExecuteStringTask : public TaskRunner::Task {
+ public:
+ ExecuteStringTask(v8::Isolate* isolate, int context_group_id,
+ const std::vector<uint16_t>& expression,
+ v8::Local<v8::String> name,
+ v8::Local<v8::Integer> line_offset,
+ v8::Local<v8::Integer> column_offset,
+ v8::Local<v8::Boolean> is_module)
+ : expression_(expression),
+ name_(ToVector(isolate, name)),
+ line_offset_(line_offset.As<v8::Int32>()->Value()),
+ column_offset_(column_offset.As<v8::Int32>()->Value()),
+ is_module_(is_module->Value()),
+ context_group_id_(context_group_id) {}
+
+ ExecuteStringTask(const std::string& expression, int context_group_id)
+ : expression_utf8_(expression), context_group_id_(context_group_id) {}
+
+ ~ExecuteStringTask() override = default;
+ ExecuteStringTask(const ExecuteStringTask&) = delete;
+ ExecuteStringTask& operator=(const ExecuteStringTask&) = delete;
+ bool is_priority_task() override { return false; }
+ void Run(IsolateData* data) override;
+
+ private:
+ std::vector<uint16_t> expression_;
+ std::string expression_utf8_;
+ std::vector<uint16_t> name_;
+ int32_t line_offset_ = 0;
+ int32_t column_offset_ = 0;
+ bool is_module_ = false;
+ int context_group_id_;
+};
+
+class SetTimeoutTask : public TaskRunner::Task {
+ public:
+ SetTimeoutTask(int context_group_id, v8::Isolate* isolate,
+ v8::Local<v8::Function> function)
+ : function_(isolate, function), context_group_id_(context_group_id) {}
+ ~SetTimeoutTask() override = default;
+ bool is_priority_task() final { return false; }
+
+ private:
+ void Run(IsolateData* data) override {
+ v8::MicrotasksScope microtasks_scope(data->isolate(),
+ v8::MicrotasksScope::kRunMicrotasks);
+ v8::HandleScope handle_scope(data->isolate());
+ v8::Local<v8::Context> context = data->GetDefaultContext(context_group_id_);
+ v8::Context::Scope context_scope(context);
+
+ v8::Local<v8::Function> function = function_.Get(data->isolate());
+ v8::MaybeLocal<v8::Value> result;
+ result = function->Call(context, context->Global(), 0, nullptr);
+ }
+
+ v8::Global<v8::Function> function_;
+ int context_group_id_;
+};
+
+class SetTimeoutExtension : public IsolateData::SetupGlobalTask {
+ public:
+ void Run(v8::Isolate* isolate,
+ v8::Local<v8::ObjectTemplate> global) override {
+ global->Set(
+ ToV8String(isolate, "setTimeout"),
+ v8::FunctionTemplate::New(isolate, &SetTimeoutExtension::SetTimeout));
+ }
+
+ private:
+ static void SetTimeout(const v8::FunctionCallbackInfo<v8::Value>& args) {
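+    // Only zero-delay timeouts whose callback is a function or a string are
+    // supported.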
+ if (args.Length() != 2 || !args[1]->IsNumber() ||
+ (!args[0]->IsFunction() && !args[0]->IsString()) ||
+ args[1].As<v8::Number>()->Value() != 0.0) {
+ return;
+ }
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ int context_group_id = data->GetContextGroupId(context);
+ const char* task_name = "setTimeout";
+ v8_inspector::StringView task_name_view(
+ reinterpret_cast<const uint8_t*>(task_name), strlen(task_name));
+ if (args[0]->IsFunction()) {
+ RunAsyncTask(data->task_runner(), task_name_view,
+ std::make_unique<SetTimeoutTask>(
+ context_group_id, isolate,
+ v8::Local<v8::Function>::Cast(args[0])));
+ } else {
+ RunAsyncTask(
+ data->task_runner(), task_name_view,
+ std::make_unique<ExecuteStringTask>(
+ isolate, context_group_id,
+ ToVector(isolate, args[0].As<v8::String>()),
+ v8::String::Empty(isolate), v8::Integer::New(isolate, 0),
+ v8::Integer::New(isolate, 0), v8::Boolean::New(isolate, false)));
+ }
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TEST_INSPECTOR_TASKS_H_
diff --git a/deps/v8/test/inspector/utils.cc b/deps/v8/test/inspector/utils.cc
new file mode 100644
index 0000000000..c70382f57f
--- /dev/null
+++ b/deps/v8/test/inspector/utils.cc
@@ -0,0 +1,82 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/inspector/utils.h"
+
+#include <vector>
+
+#include "include/v8-inspector.h"
+#include "include/v8.h"
+
+namespace v8 {
+namespace internal {
+
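+// Extracts |str| as one-byte data; lossy for characters outside Latin-1.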
+std::vector<uint8_t> ToBytes(v8::Isolate* isolate, v8::Local<v8::String> str) {
+ std::vector<uint8_t> buffer(str->Length());
+ str->WriteOneByte(isolate, buffer.data(), 0, str->Length());
+ return buffer;
+}
+
+v8::Local<v8::String> ToV8String(v8::Isolate* isolate, const char* str) {
+ return v8::String::NewFromUtf8(isolate, str).ToLocalChecked();
+}
+
+v8::Local<v8::String> ToV8String(v8::Isolate* isolate,
+ const std::vector<uint8_t>& bytes) {
+ return v8::String::NewFromOneByte(isolate, bytes.data(),
+ v8::NewStringType::kNormal,
+ static_cast<int>(bytes.size()))
+ .ToLocalChecked();
+}
+
+v8::Local<v8::String> ToV8String(v8::Isolate* isolate,
+ const std::string& buffer) {
+ int length = static_cast<int>(buffer.size());
+ return v8::String::NewFromUtf8(isolate, buffer.data(),
+ v8::NewStringType::kNormal, length)
+ .ToLocalChecked();
+}
+
+v8::Local<v8::String> ToV8String(v8::Isolate* isolate,
+ const std::vector<uint16_t>& buffer) {
+ int length = static_cast<int>(buffer.size());
+ return v8::String::NewFromTwoByte(isolate, buffer.data(),
+ v8::NewStringType::kNormal, length)
+ .ToLocalChecked();
+}
+
+v8::Local<v8::String> ToV8String(v8::Isolate* isolate,
+ const v8_inspector::StringView& string) {
+ if (string.is8Bit()) {
+ return v8::String::NewFromOneByte(isolate, string.characters8(),
+ v8::NewStringType::kNormal,
+ static_cast<int>(string.length()))
+ .ToLocalChecked();
+ }
+ return v8::String::NewFromTwoByte(isolate, string.characters16(),
+ v8::NewStringType::kNormal,
+ static_cast<int>(string.length()))
+ .ToLocalChecked();
+}
+
+std::vector<uint16_t> ToVector(v8::Isolate* isolate,
+ v8::Local<v8::String> str) {
+ std::vector<uint16_t> buffer(str->Length());
+ str->Write(isolate, buffer.data(), 0, str->Length());
+ return buffer;
+}
+
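+// Converts a StringView to UTF-16 code units, widening 8-bit characters as
+// needed.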
+std::vector<uint16_t> ToVector(const v8_inspector::StringView& string) {
+ std::vector<uint16_t> buffer(string.length());
+ for (size_t i = 0; i < string.length(); i++) {
+ if (string.is8Bit())
+ buffer[i] = string.characters8()[i];
+ else
+ buffer[i] = string.characters16()[i];
+ }
+ return buffer;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/inspector/utils.h b/deps/v8/test/inspector/utils.h
new file mode 100644
index 0000000000..845a1c0311
--- /dev/null
+++ b/deps/v8/test/inspector/utils.h
@@ -0,0 +1,36 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TEST_INSPECTOR_UTILS_H_
+#define V8_TEST_INSPECTOR_UTILS_H_
+
+#include <vector>
+
+#include "include/v8-inspector.h"
+#include "include/v8.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+std::vector<uint8_t> ToBytes(v8::Isolate*, v8::Local<v8::String>);
+
+v8::Local<v8::String> ToV8String(v8::Isolate*, const char*);
+
+v8::Local<v8::String> ToV8String(v8::Isolate*, const std::vector<uint8_t>&);
+
+v8::Local<v8::String> ToV8String(v8::Isolate*, const std::string&);
+
+v8::Local<v8::String> ToV8String(v8::Isolate*, const std::vector<uint16_t>&);
+
+v8::Local<v8::String> ToV8String(v8::Isolate*, const v8_inspector::StringView&);
+
+std::vector<uint16_t> ToVector(v8::Isolate*, v8::Local<v8::String>);
+
+std::vector<uint16_t> ToVector(const v8_inspector::StringView&);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TEST_INSPECTOR_UTILS_H_
diff --git a/deps/v8/test/inspector/wasm-inspector-test.js b/deps/v8/test/inspector/wasm-inspector-test.js
index 3e9b18907a..e8ebe21439 100644
--- a/deps/v8/test/inspector/wasm-inspector-test.js
+++ b/deps/v8/test/inspector/wasm-inspector-test.js
@@ -5,6 +5,7 @@
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
WasmInspectorTest = {}
+InspectorTest.getWasmOpcodeName = getOpcodeName;
WasmInspectorTest.evalWithUrl = (code, url) =>
Protocol.Runtime
diff --git a/deps/v8/test/intl/DIR_METADATA b/deps/v8/test/intl/DIR_METADATA
new file mode 100644
index 0000000000..8592c0704c
--- /dev/null
+++ b/deps/v8/test/intl/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>Internationalization"
+} \ No newline at end of file
diff --git a/deps/v8/test/intl/OWNERS b/deps/v8/test/intl/OWNERS
index 1fac3a1841..a212e71b22 100644
--- a/deps/v8/test/intl/OWNERS
+++ b/deps/v8/test/intl/OWNERS
@@ -1,4 +1,2 @@
ftang@chromium.org
jshin@chromium.org
-
-# COMPONENT: Blink>JavaScript>Internationalization
diff --git a/deps/v8/test/intl/number-format/check-minimum-fraction-digits.js b/deps/v8/test/intl/number-format/check-minimum-fraction-digits.js
index b7d41dfca1..b3f8c87a00 100755
--- a/deps/v8/test/intl/number-format/check-minimum-fraction-digits.js
+++ b/deps/v8/test/intl/number-format/check-minimum-fraction-digits.js
@@ -55,3 +55,12 @@ nf = new Intl.NumberFormat('en', {maximumFractionDigits: 3, style: 'currency', c
assertEquals("$54,306.405", nf.format(54306.4047970));
assertEquals("$54,306.40", nf.format(54306.4));
assertEquals("$54,306.00", nf.format(54306));
+
+nf = new Intl.NumberFormat('en', {maximumFractionDigits: 0, style: 'currency', currency: 'USD'});
+
+assertEquals("$54,306", nf.format(54306.4047970));
+assertEquals("$54,306", nf.format(54306.4));
+assertEquals("$54,306", nf.format(54306));
+
+assertThrows(() => new Intl.NumberFormat('en',
+ {minimumFractionDigits: 1, maximumFractionDigits: 0, style: 'currency', currency: 'USD'}));
diff --git a/deps/v8/test/intl/regress-1074578.js b/deps/v8/test/intl/regress-1074578.js
index c8fa56e8a2..3240b97545 100644
--- a/deps/v8/test/intl/regress-1074578.js
+++ b/deps/v8/test/intl/regress-1074578.js
@@ -30,10 +30,12 @@ const df2 = new Intl.DateTimeFormat(
const d3 = new Date("2020-03-09T00:00Z");
const d4 = new Date("2021-03-09T00:00Z");
-// Before tz202a change will get "March 8, 2020 at 5:00:00 PM PDT"
-assertEquals("March 8, 2020 at 5:00:00 PM MST", df2.format(d3));
+// Before the tz2020a change this would get "March 8, 2020 at 5:00:00 PM PDT".
+// In tz2020a it should be "March 8, 2020 at 5:00:00 PM MST",
+// but tz2020b rolls this back.
+assertEquals("March 8, 2020 at 5:00:00 PM PDT", df2.format(d3));
-// Before tz202a change will get "March 8, 2021 at 4:00:00 PM PST"
+// Before the tz2020a change this would get "March 8, 2021 at 4:00:00 PM PST"
assertEquals("March 8, 2021 at 5:00:00 PM MST", df2.format(d4));
// C. Test America/Nuuk renamed from America/Godthab.
diff --git a/deps/v8/test/intl/regress-10960.js b/deps/v8/test/intl/regress-10960.js
new file mode 100644
index 0000000000..45c3e10fe4
--- /dev/null
+++ b/deps/v8/test/intl/regress-10960.js
@@ -0,0 +1,38 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let d1 = new Date(Date.UTC(2019, 4, 23));
+
+// Ensure calendar: "japanese" under "ja" locale is correct.
+assertEquals("R1/5/23", d1.toLocaleDateString(
+ "ja", {calendar: "japanese", timeZone:"UTC"}));
+
+assertEquals("令和元年5月23日木曜日", d1.toLocaleDateString(
+ "ja", {calendar: "japanese", timeZone:"UTC", dateStyle: "full"}));
+
+assertEquals("令和元年5月23日", d1.toLocaleDateString(
+ "ja", {calendar: "japanese", timeZone:"UTC", dateStyle: "long"}));
+
+assertEquals("令和元年5月23日", d1.toLocaleDateString(
+ "ja", {calendar: "japanese", timeZone:"UTC", dateStyle: "medium"}));
+
+assertEquals("R1/5/23", d1.toLocaleDateString(
+ "ja", {calendar: "japanese", timeZone:"UTC", dateStyle: "short"}));
+
+// Ensure calendar: "chinese" under "zh" locale is correct.
+d1 = new Date(Date.UTC(2020, 4, 23));
+assertEquals("2020年闰四月1", d1.toLocaleDateString(
+ "zh", {calendar: "chinese", timeZone:"UTC"}));
+
+assertEquals("2020庚子年闰四月初一星期六", d1.toLocaleDateString(
+ "zh", {calendar: "chinese", timeZone:"UTC", dateStyle: "full"}));
+
+assertEquals("2020庚子年闰四月初一", d1.toLocaleDateString(
+ "zh", {calendar: "chinese", timeZone:"UTC", dateStyle: "long"}));
+
+assertEquals("2020年闰四月初一", d1.toLocaleDateString(
+ "zh", {calendar: "chinese", timeZone:"UTC", dateStyle: "medium"}));
+
+assertEquals("2020/闰4/1", d1.toLocaleDateString(
+ "zh", {calendar: "chinese", timeZone:"UTC", dateStyle: "short"}));
diff --git a/deps/v8/test/intl/regress-1107661.js b/deps/v8/test/intl/regress-1107661.js
index 23738bd5fb..25a16fb087 100644
--- a/deps/v8/test/intl/regress-1107661.js
+++ b/deps/v8/test/intl/regress-1107661.js
@@ -11,11 +11,16 @@ const algorithmicNumberingSystems = [
algorithmicNumberingSystems.forEach(function(numberingSystem) {
let df = new Intl.DateTimeFormat("en", {dateStyle: "full", numberingSystem});
- assertEquals("latn", df.resolvedOptions().numberingSystem);
+ if (df.resolvedOptions().numberingSystem != numberingSystem) {
+ assertEquals("latn", df.resolvedOptions().numberingSystem);
+ }
let df2 = new Intl.DateTimeFormat("en-u-nu-" + numberingSystem,
{dateStyle: "full"});
- assertEquals("latn", df2.resolvedOptions().numberingSystem);
+
+ if (df2.resolvedOptions().numberingSystem != numberingSystem) {
+ assertEquals("latn", df2.resolvedOptions().numberingSystem);
+ }
// Just verify it won't crash
(new Date()).toLocaleString("en-u-nu-" + numberingSystem, {dateStyle: "full"});
diff --git a/deps/v8/test/memory/Memory.json b/deps/v8/test/memory/Memory.json
index 0ddb8b3840..f2b362768b 100644
--- a/deps/v8/test/memory/Memory.json
+++ b/deps/v8/test/memory/Memory.json
@@ -18,7 +18,7 @@
},
{
"name": "SnapshotSizeStartup",
- "results_regexp": "(\\d+) bytes in \\d+ chunks for startup$"
+ "results_regexp": "(\\d+) bytes for startup$"
},
{
"name": "SnapshotSizeReadOnly",
@@ -26,7 +26,7 @@
},
{
"name": "SnapshotSizeContext",
- "results_regexp": "(\\d+) bytes in \\d+ chunks for context #0$"
+ "results_regexp": "(\\d+) bytes for context #0$"
},
{
"name": "DeserializationTimeIsolate",
diff --git a/deps/v8/test/message/fail/dynamic-import-missing-specifier.js b/deps/v8/test/message/fail/dynamic-import-missing-specifier.js
index c2af815f12..a78ce35e03 100644
--- a/deps/v8/test/message/fail/dynamic-import-missing-specifier.js
+++ b/deps/v8/test/message/fail/dynamic-import-missing-specifier.js
@@ -1,7 +1,5 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-dynamic-import
import();
diff --git a/deps/v8/test/message/fail/dynamic-import-missing-specifier.out b/deps/v8/test/message/fail/dynamic-import-missing-specifier.out
index 2f8c1cfa0c..fc455c5752 100644
--- a/deps/v8/test/message/fail/dynamic-import-missing-specifier.out
+++ b/deps/v8/test/message/fail/dynamic-import-missing-specifier.out
@@ -1,4 +1,4 @@
-*%(basename)s:7: SyntaxError: import() requires a specifier
+*%(basename)s:5: SyntaxError: import() requires a specifier
import();
^
SyntaxError: import() requires a specifier
diff --git a/deps/v8/test/message/fail/modules-duplicate-export5.mjs b/deps/v8/test/message/fail/modules-duplicate-export5.mjs
index e936914eab..5d4ad3415e 100644
--- a/deps/v8/test/message/fail/modules-duplicate-export5.mjs
+++ b/deps/v8/test/message/fail/modules-duplicate-export5.mjs
@@ -2,7 +2,5 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-namespace-exports
-
export let foo = 42;
export * as foo from "./doesnt-even-matter.mjs";
diff --git a/deps/v8/test/message/fail/modules-duplicate-export5.out b/deps/v8/test/message/fail/modules-duplicate-export5.out
index 2efaef10c4..7d62b69dc0 100644
--- a/deps/v8/test/message/fail/modules-duplicate-export5.out
+++ b/deps/v8/test/message/fail/modules-duplicate-export5.out
@@ -1,4 +1,4 @@
-*%(basename)s:8: SyntaxError: Duplicate export of 'foo'
+*%(basename)s:6: SyntaxError: Duplicate export of 'foo'
export * as foo from "./doesnt-even-matter.mjs";
^^^
SyntaxError: Duplicate export of 'foo'
diff --git a/deps/v8/test/message/wasm-trace-memory-liftoff.out b/deps/v8/test/message/wasm-trace-memory-liftoff.out
index 5682bca57a..7228cb4607 100644
--- a/deps/v8/test/message/wasm-trace-memory-liftoff.out
+++ b/deps/v8/test/message/wasm-trace-memory-liftoff.out
@@ -1,14 +1,14 @@
-liftoff func: 0+0x3 load from 00000004 val: i32:0 / 00000000
-liftoff func: 1+0x3 load from 00000001 val: i8:0 / 00
-liftoff func: 3+0x5 store to 00000004 val: i32:305419896 / 12345678
-liftoff func: 0+0x3 load from 00000002 val: i32:1450704896 / 56780000
-liftoff func: 1+0x3 load from 00000006 val: i8:52 / 34
-liftoff func: 2+0x3 load from 00000002 val: f32:68169720922112.000000 / 56780000
-liftoff func: 4+0x5 store to 00000004 val: i8:171 / ab
-liftoff func: 0+0x3 load from 00000002 val: i32:1454047232 / 56ab0000
-liftoff func: 2+0x3 load from 00000002 val: f32:94008244174848.000000 / 56ab0000
-liftoff func: 6+0x7 store to 00000004 val: s128:48879 48879 48879 48879 / 0000beef 0000beef 0000beef 0000beef
-liftoff func: 5+0x3 load from 00000002 val: s128:-1091633152 -1091633152 -1091633152 -1091633152 / beef0000 beef0000 beef0000 beef0000
-liftoff func: 7+0x3 load from 00000004 val: i16:48879 / beef
-liftoff func: 8+0x3 load from 00000002 val: i64:-4688528683866062848 / beef0000beef0000
-liftoff func: 9+0x3 load from 00000002 val: f64:-0.000015 / beef0000beef0000
+liftoff func: 0+0x3 load from 0000000000000004 val: i32:0 / 00000000
+liftoff func: 1+0x3 load from 0000000000000001 val: i8:0 / 00
+liftoff func: 3+0x5 store to 0000000000000004 val: i32:305419896 / 12345678
+liftoff func: 0+0x3 load from 0000000000000002 val: i32:1450704896 / 56780000
+liftoff func: 1+0x3 load from 0000000000000006 val: i8:52 / 34
+liftoff func: 2+0x3 load from 0000000000000002 val: f32:68169720922112.000000 / 56780000
+liftoff func: 4+0x5 store to 0000000000000004 val: i8:171 / ab
+liftoff func: 0+0x3 load from 0000000000000002 val: i32:1454047232 / 56ab0000
+liftoff func: 2+0x3 load from 0000000000000002 val: f32:94008244174848.000000 / 56ab0000
+liftoff func: 6+0x7 store to 0000000000000004 val: s128:48879 48879 48879 48879 / 0000beef 0000beef 0000beef 0000beef
+liftoff func: 5+0x3 load from 0000000000000002 val: s128:-1091633152 -1091633152 -1091633152 -1091633152 / beef0000 beef0000 beef0000 beef0000
+liftoff func: 7+0x3 load from 0000000000000004 val: i16:48879 / beef
+liftoff func: 8+0x3 load from 0000000000000002 val: i64:-4688528683866062848 / beef0000beef0000
+liftoff func: 9+0x3 load from 0000000000000002 val: f64:-0.000015 / beef0000beef0000
diff --git a/deps/v8/test/message/wasm-trace-memory.out b/deps/v8/test/message/wasm-trace-memory.out
index cfb707b543..373ff8f78b 100644
--- a/deps/v8/test/message/wasm-trace-memory.out
+++ b/deps/v8/test/message/wasm-trace-memory.out
@@ -1,14 +1,14 @@
-turbofan func: 0+0x3 load from 00000004 val: i32:0 / 00000000
-turbofan func: 1+0x3 load from 00000001 val: i8:0 / 00
-turbofan func: 3+0x5 store to 00000004 val: i32:305419896 / 12345678
-turbofan func: 0+0x3 load from 00000002 val: i32:1450704896 / 56780000
-turbofan func: 1+0x3 load from 00000006 val: i8:52 / 34
-turbofan func: 2+0x3 load from 00000002 val: f32:68169720922112.000000 / 56780000
-turbofan func: 4+0x5 store to 00000004 val: i8:171 / ab
-turbofan func: 0+0x3 load from 00000002 val: i32:1454047232 / 56ab0000
-turbofan func: 2+0x3 load from 00000002 val: f32:94008244174848.000000 / 56ab0000
-turbofan func: 6+0x7 store to 00000004 val: s128:48879 48879 48879 48879 / 0000beef 0000beef 0000beef 0000beef
-turbofan func: 5+0x3 load from 00000002 val: s128:-1091633152 -1091633152 -1091633152 -1091633152 / beef0000 beef0000 beef0000 beef0000
-turbofan func: 7+0x3 load from 00000004 val: i16:48879 / beef
-turbofan func: 8+0x3 load from 00000002 val: i64:-4688528683866062848 / beef0000beef0000
-turbofan func: 9+0x3 load from 00000002 val: f64:-0.000015 / beef0000beef0000
+turbofan func: 0+0x3 load from 0000000000000004 val: i32:0 / 00000000
+turbofan func: 1+0x3 load from 0000000000000001 val: i8:0 / 00
+turbofan func: 3+0x5 store to 0000000000000004 val: i32:305419896 / 12345678
+turbofan func: 0+0x3 load from 0000000000000002 val: i32:1450704896 / 56780000
+turbofan func: 1+0x3 load from 0000000000000006 val: i8:52 / 34
+turbofan func: 2+0x3 load from 0000000000000002 val: f32:68169720922112.000000 / 56780000
+turbofan func: 4+0x5 store to 0000000000000004 val: i8:171 / ab
+turbofan func: 0+0x3 load from 0000000000000002 val: i32:1454047232 / 56ab0000
+turbofan func: 2+0x3 load from 0000000000000002 val: f32:94008244174848.000000 / 56ab0000
+turbofan func: 6+0x7 store to 0000000000000004 val: s128:48879 48879 48879 48879 / 0000beef 0000beef 0000beef 0000beef
+turbofan func: 5+0x3 load from 0000000000000002 val: s128:-1091633152 -1091633152 -1091633152 -1091633152 / beef0000 beef0000 beef0000 beef0000
+turbofan func: 7+0x3 load from 0000000000000004 val: i16:48879 / beef
+turbofan func: 8+0x3 load from 0000000000000002 val: i64:-4688528683866062848 / beef0000beef0000
+turbofan func: 9+0x3 load from 0000000000000002 val: f64:-0.000015 / beef0000beef0000
diff --git a/deps/v8/test/mjsunit/BUILD.gn b/deps/v8/test/mjsunit/BUILD.gn
index f6259de56c..b8de0da11c 100644
--- a/deps/v8/test/mjsunit/BUILD.gn
+++ b/deps/v8/test/mjsunit/BUILD.gn
@@ -13,20 +13,25 @@ group("v8_mjsunit") {
data = [
"./",
"../../tools/arguments.js",
+ "../../tools/arguments.mjs",
"../../tools/clusterfuzz/v8_mock.js",
"../../tools/clusterfuzz/v8_mock_archs.js",
"../../tools/clusterfuzz/v8_mock_webassembly.js",
"../../tools/codemap.mjs",
"../../tools/consarray.mjs",
"../../tools/csvparser.mjs",
+ "../../tools/dumpcpp.mjs",
"../../tools/logreader.mjs",
- "../../tools/arguments.mjs",
"../../tools/profile.mjs",
"../../tools/profile_view.mjs",
"../../tools/splaytree.mjs",
- "../../tools/tickprocessor.mjs",
+ "../../tools/system-analyzer/helper.mjs",
+ "../../tools/system-analyzer/log/deopt.mjs",
+ "../../tools/system-analyzer/log/ic.mjs",
"../../tools/system-analyzer/log/log.mjs",
+ "../../tools/system-analyzer/log/map.mjs",
+ "../../tools/system-analyzer/processor.mjs",
"../../tools/system-analyzer/timeline.mjs",
- "../../tools/dumpcpp.mjs",
+ "../../tools/tickprocessor.mjs",
]
}
diff --git a/deps/v8/test/mjsunit/array-concat.js b/deps/v8/test/mjsunit/array-concat.js
index 6e25b5c5cd..14ca1e8786 100644
--- a/deps/v8/test/mjsunit/array-concat.js
+++ b/deps/v8/test/mjsunit/array-concat.js
@@ -225,15 +225,33 @@ assertEquals([undefined,2,1,3,"X"], r2);
// Make first array change length of second array massively.
arr2.length = 2;
+var largeLength = 500000;
Object.defineProperty(arr1, 0, {get: function() {
- arr2[500000] = "X";
+ arr2[largeLength] = "X";
return undefined;
}, configurable: true})
var r3 = [].concat(arr1, arr2); // [undefined,2,1,3,"X"]
var expected = [undefined,2,1,3];
-expected[500000 + 2] = "X";
-
-assertEquals(expected, r3);
+var initialLen = expected.length;
+expected[largeLength + 2] = "X";
+
+var numElementsToCheck = 10;
+
+// Checking the entire massive array is too slow, so check:
+// - the length,
+assertEquals(expected.length, r3.length);
+var slicesToCheck = [
+ // - the first few elements,
+ {start: 0, end: initialLen},
+ // - arbitrary number of elements past the first few elements,
+ {start: initialLen, end: initialLen + numElementsToCheck},
+ // - arbitrary number of elements in the middle of the array
+ {start: largeLength / 2, end: largeLength / 2 + numElementsToCheck},
+ // - last few elements
+ {start: largeLength, end: largeLength + 3}];
+for (const {start, end} of slicesToCheck) {
+ assertEquals(expected.slice(start, end), r3.slice(start, end));
+}
var arr3 = [];
var trace = [];
diff --git a/deps/v8/test/mjsunit/code-coverage-block.js b/deps/v8/test/mjsunit/code-coverage-block.js
index e9d38d7146..ea1c2ea5fc 100644
--- a/deps/v8/test/mjsunit/code-coverage-block.js
+++ b/deps/v8/test/mjsunit/code-coverage-block.js
@@ -1177,59 +1177,4 @@ a(true); // 0500
{"start":0,"end":401,"count":2},
{"start":154,"end":254,"count":0}]);
-TestCoverage(
-"https://crbug.com/v8/11231 - nullish coalescing",
-`
-const a = true // 0000
-const b = false // 0050
-const c = undefined // 0100
-const d = a ?? 99 // 0150
-const e = 33 // 0200
-const f = b ?? (c ?? 99) // 0250
-const g = 33 // 0300
-const h = c ?? (c ?? 'hello') // 0350
-const i = c ?? b ?? 'hello' // 0400
-`,
-[{"start":0,"end":449,"count":1},
- {"start":162,"end":167,"count":0},
- {"start":262,"end":274,"count":0},
- {"start":417,"end":427,"count":0}]);
-
-TestCoverage(
-"Optional Chaining",
-`
-const a = undefined || null // 0000
-const b = a?.b // 0050
-const c = a?.['b'] // 0100
-const d = { // 0150
- e: {f: 99, g: () => {return undefined}} // 0200
-} // 0250
-const e = d?.e?.f // 0300
-const f = d?.e?.['f'] // 0350
-const g = d?.e?.f?.g // 0400
-const h = d?.e?.f?.g?.h // 0450
-const i = d?.['d']?.['e']?.['h'] // 0500
-const k = a?.('b') // 0550
-const l = d?.e?.g?.() // 0600
-const m = d?.e?.g?.()?.a?.b // 0650
-delete a?.b // 0700
-const n = d?.[d?.x?.f] // 0750
-if (a?.[d?.x?.f]) { const p = 99 } else {}// 0800
-const p = d?.[d?.x?.f]?.x // 0850
-`,
-[{"start":0,"end":899,"count":1},
- {"start":61,"end":64,"count":0},
- {"start":111,"end":118,"count":0},
- {"start":470,"end":473,"count":0},
- {"start":518,"end":532,"count":0},
- {"start":561,"end":568,"count":0},
- {"start":671,"end":677,"count":0},
- {"start":708,"end":711,"count":0},
- {"start":768,"end":771,"count":0},
- {"start":805,"end":816,"count":0},
- {"start":818,"end":834,"count":0},
- {"start":868,"end":871,"count":0},
- {"start":872,"end":875,"count":0},
- {"start":216,"end":240,"count":2}]);
-
%DebugToggleBlockCoverage(false);
diff --git a/deps/v8/test/mjsunit/compiler/regress-1125145.js b/deps/v8/test/mjsunit/compiler/regress-1125145.js
new file mode 100644
index 0000000000..58ae8640d8
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1125145.js
@@ -0,0 +1,20 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function foo() {}
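+// Repeatedly rebinding wraps foo in a very long chain of bound functions.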
+for (let i = 0; i < 100000; ++i) {
+ foo = foo.bind();
+}
+
+function main() {
+ foo();
+ foo();
+}
+
+%PrepareFunctionForOptimization(main);
+main();
+%OptimizeFunctionOnNextCall(main);
+main();
diff --git a/deps/v8/test/mjsunit/compiler/regress-1146652.js b/deps/v8/test/mjsunit/compiler/regress-1146652.js
new file mode 100644
index 0000000000..77c2c7ea85
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1146652.js
@@ -0,0 +1,26 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
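+// Treat any object that exposes a getFloat64 method as a DataView.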
+function IsDataView(obj) {
+ return obj.getFloat64;
+}
+%NeverOptimizeFunction(IsDataView);
+
+function bar(obj) {
+ if (IsDataView(obj)) obj.getFloat64(0);
+}
+
+%PrepareFunctionForOptimization(bar);
+bar(new DataView(new ArrayBuffer(42)));
+
+const proxy = new Proxy({}, {});
+function foo() { bar(proxy) }
+
+%PrepareFunctionForOptimization(foo);
+foo();
+
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-1150649.js b/deps/v8/test/mjsunit/compiler/regress-1150649.js
new file mode 100644
index 0000000000..a193481a3a
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1150649.js
@@ -0,0 +1,24 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(a) {
+ var y = 0x7fffffff; // 2^31 - 1
+
+ // Widen the static type of y (this condition never holds).
+ if (a == NaN) y = NaN;
+
+ // The next condition holds only in the warmup run. It leads to Smi
+ // (SignedSmall) feedback being collected for the addition below.
+ if (a) y = -1;
+
+ const z = (y + 1)|0;
+ return z < 0;
+}
+
+%PrepareFunctionForOptimization(foo);
+assertFalse(foo(true));
+%OptimizeFunctionOnNextCall(foo);
+assertTrue(foo(false));
diff --git a/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps-polymorphic.js b/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps-polymorphic.js
index 663de4a5bf..e373b03be0 100644
--- a/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps-polymorphic.js
+++ b/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps-polymorphic.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --turboprop --dynamic-map-checks --opt
+// Flags: --allow-natives-syntax --turboprop --turboprop-dynamic-map-checks --opt
// Flags: --no-always-opt
function f(o) {
diff --git a/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps.js b/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps.js
index b9cee843aa..b9f61ef852 100644
--- a/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps.js
+++ b/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --turboprop --dynamic-map-checks --opt
+// Flags: --allow-natives-syntax --turboprop --turboprop-dynamic-map-checks --opt
// Flags: --no-always-opt
function f(o) {
@@ -17,25 +17,20 @@ f(o);
%OptimizeFunctionOnNextCall(f);
f(o);
assertOptimized(f);
-%PrepareFunctionForOptimization(f);
-f(o);
-// Deprecates O's map.
+// Deprecates o's map.
o1.b = 10.23;
-// Deoptimizes but retains code.
+
+// Bails out but retains code.
f(o1);
assertOptimized(f);
-// Deoptimizes and discards code.
+// Passing in the original object should not cause any deopts.
f(o);
f(o);
-assertUnoptimized(f);
-
-// When we reoptimize we should include code for migrating deprecated maps.
-%OptimizeFunctionOnNextCall(f);
-f(o);
assertOptimized(f);
+// o and o2 have the same Map, so there should be no deopts.
f(o2);
f(o2);
assertOptimized(f);
diff --git a/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps2.js b/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps2.js
new file mode 100644
index 0000000000..0c16ff36b4
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/test-dynamic-map-check-deprecated-maps2.js
@@ -0,0 +1,44 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turboprop --turboprop-dynamic-map-checks
+// Flags: --opt --no-always-opt --deopt-every-n-times=0
+
+function b(a) { return a; }
+
+function f(o, should_bailout) {
+ b(o.a);
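+  // A dynamic map check bailout leaves the topmost frame in non-TurboFan code.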
+ let did_bailout = (%GetOptimizationStatus(f) &
+ V8OptimizationStatus.kTopmostFrameIsTurboFanned) == 0;
+ assertEquals(should_bailout, did_bailout);
+}
+
+var o = {a:10, b:20, c:30};
+var o1 = {a:10, b:20, c:30};
+var o2 = {a:10, b:20, c:30};
+%PrepareFunctionForOptimization(f);
+f(o, true);
+%OptimizeFunctionOnNextCall(f);
+f(o, false);
+assertOptimized(f);
+
+// Transition o to a new map and deprecate the old one (which is embedded in the
+// optimized code for the dynamic map check).
+o.b = 10.23;
+f(o, true);
+f(o1, false);
+f(o2, false);
+assertOptimized(f);
+
+// Deprecate o's new map again and update the feedback vector but don't migrate
+// o.
+o1.c = 20.23;
+f(o1, true);
+assertOptimized(f);
+
+// We should migrate o's map without bailing out.
+f(o, false);
+f(o1, false);
+f(o2, false);
+assertOptimized(f);
diff --git a/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-poly-mono.js b/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-poly-mono.js
index 4f3b90bb14..c29f64a835 100644
--- a/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-poly-mono.js
+++ b/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-poly-mono.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --turboprop --dynamic-map-checks --opt
+// Flags: --allow-natives-syntax --turboprop --turboprop-dynamic-map-checks --opt
// Flags: --no-always-opt
function load(obj){
diff --git a/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler.js b/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler.js
index eaae56dc8d..660f3f3081 100644
--- a/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler.js
+++ b/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --turboprop --dynamic-map-checks --opt
+// Flags: --allow-natives-syntax --turboprop --turboprop-dynamic-map-checks --opt
// Flags: --no-always-opt
function load(obj){
diff --git a/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler1.js b/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler1.js
index c77bcb1bb2..c907d1ad6c 100644
--- a/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler1.js
+++ b/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks-wrong-handler1.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --turboprop --dynamic-map-checks --opt
+// Flags: --allow-natives-syntax --turboprop --turboprop-dynamic-map-checks --opt
// Flags: --no-always-opt
function load(obj){
diff --git a/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks.js b/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks.js
index a6d0710100..cddba98858 100644
--- a/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks.js
+++ b/deps/v8/test/mjsunit/compiler/test-dynamic-map-checks.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --turboprop --dynamic-map-checks --opt
+// Flags: --allow-natives-syntax --turboprop --turboprop-dynamic-map-checks --opt
// Flags: --no-always-opt
function load(obj){
diff --git a/deps/v8/test/mjsunit/es6/object-assign.js b/deps/v8/test/mjsunit/es6/object-assign.js
index 9c8e349ac4..f842d8edd4 100644
--- a/deps/v8/test/mjsunit/es6/object-assign.js
+++ b/deps/v8/test/mjsunit/es6/object-assign.js
@@ -4,6 +4,8 @@
// Based on Mozilla Object.assign() tests
+// Flags: --allow-natives-syntax
+
function checkDataProperty(object, propertyKey, value, writable, enumerable, configurable) {
var desc = Object.getOwnPropertyDescriptor(object, propertyKey);
assertFalse(desc === undefined);
@@ -227,19 +229,59 @@ assertSame(Object.assign(o, {}), o);
assertEquals(log, ["get a", "get b", "get c", "get sym1", "get sym2"]);
})();
+(function proxy() {
+ const fast_source = { key1: "value1", key2: "value2"};
+ const slow_source = {__proto__:null};
+ for (let i = 0; i < 2000; i++) {
+ slow_source["key" + i] = i;
+ }
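+  // With this many keys, slow_source is a dictionary-mode object, unlike fast_source.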
+
+ const empty_handler = {};
+ let target = {};
+ let proxy = new Proxy(target, empty_handler);
+ assertArrayEquals(Object.keys(target), []);
+ let result = Object.assign(proxy, fast_source);
+ %HeapObjectVerify(result);
+ assertArrayEquals(Object.keys(result), Object.keys(target));
+ assertArrayEquals(Object.keys(result), Object.keys(fast_source));
+ assertArrayEquals(Object.values(result), Object.values(fast_source));
+
+ target = {};
+ proxy = new Proxy(target, empty_handler);
+ assertArrayEquals(Object.keys(target), []);
+ result = Object.assign(proxy, slow_source);
+ %HeapObjectVerify(result);
+ assertEquals(Object.keys(result).length, Object.keys(target).length);
+ assertEquals(Object.keys(result).length, Object.keys(slow_source).length);
+})();
+
(function global_object() {
let source = {
global1: "global1",
get global2() { return "global2" },
};
let result = Object.assign(globalThis, source);
+ %HeapObjectVerify(result);
assertTrue(result === globalThis);
assertTrue(result.global1 === source.global1);
assertTrue(result.global2 === source.global2);
let target = {};
result = Object.assign(target, globalThis);
+ %HeapObjectVerify(result);
assertTrue(result === target);
assertTrue(result.global1 === source.global1);
assertTrue(result.global2 === source.global2);
+
+ for (let i = 0; i < 2000; i++) {
+ source["property" + i] = i;
+ }
+ result = Object.assign(globalThis, source);
+ %HeapObjectVerify(result);
+ assertTrue(result === globalThis);
+ for (let i = 0; i < 2000; i++) {
+ const key = "property" + i;
+ assertEquals(result[key], i);
+ }
+
})();
diff --git a/deps/v8/test/mjsunit/es6/super-ic-opt-dynamic-map-checks.js b/deps/v8/test/mjsunit/es6/super-ic-opt-dynamic-map-checks.js
new file mode 100644
index 0000000000..b05bc9435a
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/super-ic-opt-dynamic-map-checks.js
@@ -0,0 +1,42 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --super-ic --opt
+// Flags: --no-always-opt --no-stress-opt --turboprop
+// Flags: --turboprop-dynamic-map-checks --deopt-every-n-times=0
+
+// This file contains tests which require --turboprop-dynamic-map-checks.
+
+(function TestMinimorphicPropertyAccess() {
+ class A {}
+ A.prototype.bar = "wrong value: A.prototype.bar";
+
+ class B extends A {};
+ B.prototype.bar = "correct value";
+
+ class C extends B {
+ foo(should_bailout) {
+ const r = super.bar;
+ const did_bailout = (
+ %GetOptimizationStatus(C.prototype.foo) &
+ V8OptimizationStatus.kTopmostFrameIsTurboFanned) == 0;
+ assertEquals(should_bailout, did_bailout);
+ return r;
+ }
+ }
+ C.prototype.bar = "wrong value: C.prototype.bar";
+ %PrepareFunctionForOptimization(C.prototype.foo);
+
+ let o = new C();
+ o.bar = "wrong value: o.bar";
+
+ // Fill in the feedback.
+ let r = o.foo(true);
+ assertEquals("correct value", r);
+ %OptimizeFunctionOnNextCall(C.prototype.foo);
+
+ // Test the optimized function.
+ r = o.foo(false);
+ assertEquals("correct value", r);
+})();
diff --git a/deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js b/deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js
new file mode 100644
index 0000000000..8653ffbe3d
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/super-ic-opt-no-turboprop.js
@@ -0,0 +1,51 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --super-ic --opt
+// Flags: --no-always-opt --no-stress-opt --deopt-every-n-times=0
+
+// This file contains tests which are disabled for TurboProp. TurboProp deopts
+// differently than TurboFan, so the assertions about when a function is
+// deoptimized won't hold.
+
+(function TestPropertyIsConstant() {
+ // Test for a case where the property is a constant found in the lookup start
+ // object.
+ class A {}
+ A.prototype.bar = "wrong value: A.prototype.bar";
+
+ class B extends A {};
+ B.prototype.bar = "correct value";
+
+ class C extends B {
+ foo() { return super.bar; }
+ }
+ C.prototype.bar = "wrong value: C.prototype.bar";
+ %PrepareFunctionForOptimization(C.prototype.foo);
+
+ let o = new C();
+ o.bar = "wrong value: o.bar";
+
+ // Fill in the feedback.
+ r = o.foo();
+ assertEquals("correct value", r);
+
+ %OptimizeFunctionOnNextCall(C.prototype.foo);
+
+ // Test the optimized function.
+ r = o.foo();
+ assertEquals("correct value", r);
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(C.prototype.foo);
+
+ // Change the property value.
+ B.prototype.bar = "new value";
+ r = o.foo();
+ assertEquals("new value", r);
+
+ // Assert that the function was deoptimized (dependency to the constant
+ // value).
+ assertFalse(isOptimized(C.prototype.foo));
+})();
diff --git a/deps/v8/test/mjsunit/es6/super-ic-opt.js b/deps/v8/test/mjsunit/es6/super-ic-opt.js
new file mode 100644
index 0000000000..13b39bdec1
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/super-ic-opt.js
@@ -0,0 +1,608 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --super-ic --opt
+// Flags: --no-always-opt --no-stress-opt --deopt-every-n-times=0
+
+(function TestPropertyIsInTheHomeObjectsProto() {
+  // Test where the property is a constant found on the home object's proto. This
+ // will generate a minimorphic property load.
+ class A {}
+ A.prototype.bar = "wrong value: A.prototype.bar";
+
+ class B extends A {};
+ B.prototype.bar = "correct value";
+
+ class C extends B {
+ foo() { return super.bar; }
+ }
+  C.prototype.bar = "wrong value: C.prototype.bar";
+ %PrepareFunctionForOptimization(C.prototype.foo);
+
+ let o = new C();
+ o.bar = "wrong value: o.bar";
+
+ // Fill in the feedback.
+ let r = o.foo();
+ assertEquals("correct value", r);
+ %OptimizeFunctionOnNextCall(C.prototype.foo);
+
+ // Test the optimized function.
+ r = o.foo();
+ assertEquals("correct value", r);
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(C.prototype.foo);
+
+ // Change the property value.
+ B.prototype.bar = "new value";
+ r = o.foo();
+ assertEquals("new value", r);
+})();
+
+(function TestPropertyIsGetterInTheHomeObjectsProto() {
+  // Test where the property is a constant found on the home object's proto. This
+ // will generate a minimorphic property load.
+ class A {}
+ A.prototype.bar = "wrong value: A.prototype.bar";
+
+ class B extends A {
+ get bar() { return this.this_value; }
+ }
+ class C extends B {
+ foo() { return super.bar; }
+ }
+  C.prototype.bar = "wrong value: C.prototype.bar";
+ %PrepareFunctionForOptimization(C.prototype.foo);
+
+ let o = new C();
+ o.bar = "wrong value: o.bar";
+ o.this_value = "correct value";
+
+ // Fill in the feedback.
+ let r = o.foo();
+ assertEquals("correct value", r);
+ %OptimizeFunctionOnNextCall(C.prototype.foo);
+
+ // Test the optimized function.
+ r = o.foo();
+ assertEquals("correct value", r);
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(C.prototype.foo);
+
+ // Change the property value.
+ o.this_value = "new value";
+ r = o.foo();
+ assertEquals("new value", r);
+})();
+
+(function TestPropertyIsConstantInThePrototypeChain() {
+ // Test where the property is a constant found on the prototype chain of the
+ // lookup start object.
+ class A {}
+ A.prototype.bar = "wrong value: A.prototype.bar";
+
+ class B extends A {};
+ B.prototype.bar = "correct value";
+
+ class C extends B {};
+
+ class D extends C {
+ foo() { return super.bar; }
+ }
+ D.prototype.bar = "wrong value: D.prototype.bar";
+ %PrepareFunctionForOptimization(D.prototype.foo);
+
+ let o = new D();
+ o.bar = "wrong value: o.bar";
+
+ // Fill in the feedback.
+ let r = o.foo();
+ assertEquals("correct value", r);
+
+ %OptimizeFunctionOnNextCall(D.prototype.foo);
+
+ // Test the optimized function.
+ r = o.foo();
+ assertEquals("correct value", r);
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(D.prototype.foo);
+
+ // Change the property value.
+ B.prototype.bar = "new value";
+ r = o.foo();
+ assertEquals("new value", r);
+
+ // Assert that the function was deoptimized (dependency to the constant
+ // value).
+ assertFalse(isOptimized(D.prototype.foo));
+})();
+
+(function TestPropertyIsNonConstantData() {
+ // Test for a case where the property is a non-constant data property found
+ // in the lookup start object.
+ class A {}
+ A.prototype.bar = "wrong value: A.prototype.bar";
+
+ class B extends A {};
+ B.prototype.bar = "initial value";
+
+ class C extends B {
+ foo() { return super.bar; }
+ }
+ C.prototype.bar = "wrong value: C.prototype.bar";
+ %PrepareFunctionForOptimization(C.prototype.foo);
+
+ let o = new C();
+ o.bar = "wrong value: o.bar";
+
+ // Make the property look like a non-constant.
+ B.prototype.bar = "correct value";
+
+ // Fill in the feedback.
+ let r = o.foo();
+ assertEquals("correct value", r);
+ %OptimizeFunctionOnNextCall(C.prototype.foo);
+
+ // Test the optimized function.
+ r = o.foo();
+ assertEquals("correct value", r);
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(C.prototype.foo);
+
+ // Change the property value.
+ B.prototype.bar = "new value";
+ r = o.foo();
+ assertEquals("new value", r);
+
+ // Assert that the function was still not deoptimized (the value was not a
+ // constant to begin with).
+ assertOptimized(C.prototype.foo);
+})();
+
+(function TestPropertyIsGetter() {
+ class A {}
+ A.prototype.bar = "wrong value: A.prototype.bar";
+
+ class B extends A {
+ get bar() {
+ return this.test_value;
+ }
+ };
+
+ class C extends B {}
+
+ class D extends C {
+ foo() {
+ const b = super.bar;
+ return b;
+ }
+ }
+ %PrepareFunctionForOptimization(D.prototype.foo);
+ D.prototype.bar = "wrong value: D.prototype.bar";
+
+ let o = new D();
+ o.bar = "wrong value: o.bar";
+ o.test_value = "correct value";
+
+ // Fill in the feedback.
+ let r = o.foo();
+ assertEquals("correct value", r);
+
+ %OptimizeFunctionOnNextCall(D.prototype.foo);
+
+ // Test the optimized function.
+ r = o.foo();
+ assertEquals("correct value", r);
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(D.prototype.foo);
+})();
+
+(function TestPropertyInsertedInTheMiddle() {
+ // Test for a case where the property is a constant found in the lookup start
+ // object.
+ class A {}
+ A.prototype.bar = "correct value";
+
+ class B extends A {};
+
+ class C extends B {
+ foo() { return super.bar; }
+ }
+ C.prototype.bar = "wrong value: C.prototype.bar";
+ %PrepareFunctionForOptimization(C.prototype.foo);
+
+ let o = new C();
+ o.bar = "wrong value: o.bar";
+
+ // Fill in the feedback.
+ let r = o.foo();
+ assertEquals("correct value", r);
+ %OptimizeFunctionOnNextCall(C.prototype.foo);
+
+ // Test the optimized function.
+ r = o.foo();
+ assertEquals("correct value", r);
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(C.prototype.foo);
+
+ // Insert the property into the prototype chain between the lookup start
+ // object and the old holder.
+ B.prototype.bar = "new value";
+ r = o.foo();
+ assertEquals("new value", r);
+
+ // Assert that the function was deoptimized (holder changed).
+ assertFalse(isOptimized(C.prototype.foo));
+})();
+
+(function TestUnexpectedHomeObjectPrototypeDeoptimizes() {
+ class A {}
+ A.prototype.bar = "wrong value: A.prototype.bar";
+
+ class B extends A {}
+ B.prototype.bar = "correct value";
+
+ class C extends B {}
+
+ class D extends C {
+ foo() { return super.bar; }
+ }
+ %PrepareFunctionForOptimization(D.prototype.foo);
+ D.prototype.bar = "wrong value: D.prototype.bar";
+
+ const o = new D();
+
+ // Fill in the feedback.
+ let r = o.foo();
+ assertEquals("correct value", r);
+
+ %OptimizeFunctionOnNextCall(D.prototype.foo);
+
+ // Test the optimized function.
+ r = o.foo();
+ assertEquals("correct value", r);
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(D.prototype.foo);
+
+ // Change the home object's prototype.
+ D.prototype.__proto__ = {"bar": "new value"};
+ r = o.foo();
+ assertEquals("new value", r);
+
+ // Assert that the function was deoptimized.
+ assertEquals(false, isOptimized(D.prototype.foo));
+})();
+
+(function TestUnexpectedReceiverDoesNotDeoptimize() {
+ class A {}
+ A.prototype.bar = "wrong value: A.prototype.bar";
+
+ class B extends A {};
+ B.prototype.bar = "correct value";
+
+ class C extends B {
+ foo() { return super.bar; }
+ }
+ C.prototype.bar = "wrong value: C.prototype.bar";
+ %PrepareFunctionForOptimization(C.prototype.foo);
+
+ let o = new C();
+ o.bar = "wrong value: o.bar";
+
+ // Fill in the feedback.
+ let r = o.foo();
+ assertEquals("correct value", r);
+
+ %OptimizeFunctionOnNextCall(C.prototype.foo);
+ o.foo();
+ assertOptimized(C.prototype.foo);
+
+ // Test the optimized function with an unexpected receiver.
+ r = C.prototype.foo.call({'lol': 5});
+ assertEquals("correct value", r);
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(C.prototype.foo);
+})();
+
+(function TestPolymorphic() {
+ class A {}
+ A.prototype.bar = "wrong value: A.prototype.bar";
+
+ class B extends A {}
+ B.prototype.bar = "correct value";
+
+ class C extends B {}
+
+ class D extends C {
+ foo() { return super.bar; }
+ }
+ %PrepareFunctionForOptimization(D.prototype.foo);
+ D.prototype.bar = "wrong value: D.prototype.bar";
+
+ const o = new D();
+
+ // Create objects which will act as the "home object's prototype" later.
+ const prototypes = [{"a": 0}, {"b": 0}];
+ for (p of prototypes) {
+ p.__proto__ = B.prototype;
+ }
+
+ // Fill in the feedback (polymorphic).
+ for (p of prototypes) {
+ D.prototype.__proto__ = p;
+ const r = o.foo();
+ assertEquals("correct value", r);
+ }
+
+ %OptimizeFunctionOnNextCall(D.prototype.foo);
+
+ // Test the optimized function - don't change the home object's proto any
+ // more.
+ let r = o.foo();
+ assertEquals("correct value", r);
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(D.prototype.foo);
+})();
+
+(function TestPolymorphicWithGetter() {
+ class A {}
+ A.prototype.bar = "wrong value: A.prototype.bar";
+
+ class B extends A {
+ get bar() {
+ return this.test_value;
+ }
+ }
+
+ class C extends B {}
+
+ class D extends C {
+ foo() { return super.bar; }
+ }
+ %PrepareFunctionForOptimization(D.prototype.foo);
+ D.prototype.bar = "wrong value: D.prototype.bar";
+
+ const o = new D();
+ o.test_value = "correct value";
+
+ // Create objects which will act as the "home object's prototype" later.
+ const prototypes = [{"a": 0}, {"b": 0}];
+ for (p of prototypes) {
+ p.__proto__ = B.prototype;
+ }
+
+ // Fill in the feedback.
+ for (p of prototypes) {
+ D.prototype.__proto__ = p;
+ const r = o.foo();
+ assertEquals("correct value", r);
+ }
+
+ %OptimizeFunctionOnNextCall(D.prototype.foo);
+
+ // Test the optimized function - don't change the home object's proto any
+ // more.
+ const r = o.foo();
+ assertEquals("correct value", r);
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(D.prototype.foo);
+})();
+
+(function TestPolymorphicMixinDoesNotDeopt() {
+ function createClasses() {
+ class A {}
+ A.prototype.bar = "correct value";
+ class B extends A {
+ foo() { return super.bar; }
+ }
+ return B;
+ }
+
+ const b1 = createClasses();
+ %PrepareFunctionForOptimization(b1.prototype.foo);
+ const b2 = createClasses();
+ %PrepareFunctionForOptimization(b2.prototype.foo);
+
+ class c1 extends b1 {};
+ class c2 extends b2 {};
+
+ const objects = [new c1(), new c2()];
+
+ // Fill in the feedback.
+ for (o of objects) {
+ const r = o.foo();
+ assertEquals("correct value", r);
+ }
+ %OptimizeFunctionOnNextCall(b1.prototype.foo);
+ %OptimizeFunctionOnNextCall(b2.prototype.foo);
+
+ // Test the optimized function.
+ for (o of objects) {
+ const r = o.foo();
+ assertEquals("correct value", r);
+ }
+ assertOptimized(b1.prototype.foo);
+ assertOptimized(b2.prototype.foo);
+})();
+
+(function TestHomeObjectProtoIsGlobalThis() {
+ class A {}
+
+ class B extends A {
+ foo() { return super.bar; }
+ }
+ B.prototype.__proto__ = globalThis;
+ globalThis.bar = "correct value";
+ %PrepareFunctionForOptimization(B.prototype.foo);
+
+ let o = new B();
+
+ // Fill in the feedback.
+ let r = o.foo();
+ assertEquals("correct value", r);
+
+ %OptimizeFunctionOnNextCall(B.prototype.foo);
+
+ // Test the optimized function.
+ r = o.foo();
+ assertEquals("correct value", r);
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(B.prototype.foo);
+
+ globalThis.bar = "new value";
+
+ r = o.foo();
+ assertEquals("new value", r);
+})();
+
+(function TestMegamorphic() {
+ class A {}
+ A.prototype.bar = "wrong value: A.prototype.bar";
+
+ class B extends A {}
+ B.prototype.bar = "correct value";
+
+ class C extends B {}
+
+ class D extends C {
+ foo() { return super.bar; }
+ }
+ %PrepareFunctionForOptimization(D.prototype.foo);
+ D.prototype.bar = "wrong value: D.prototype.bar";
+
+ const o = new D();
+
+ // Create objects which will act as the "home object's prototype" later.
+  const prototypes = [{"a": 0}, {"b": 0}, {"c": 0}, {"d": 0}, {"e": 0},
+                      {"f": 0}, {"g": 0}, {"h": 0}];
+ for (p of prototypes) {
+ p.__proto__ = B.prototype;
+ }
+
+ // Fill in the feedback (megamorphic).
+ for (p of prototypes) {
+ D.prototype.__proto__ = p;
+ const r = o.foo();
+ assertEquals("correct value", r);
+ }
+
+ %OptimizeFunctionOnNextCall(D.prototype.foo);
+
+ // Test the optimized function - don't change the home object's proto any
+ // more.
+ let r = o.foo();
+ assertEquals("correct value", r);
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(D.prototype.foo);
+})();
+
+(function TestMegamorphicWithGetter() {
+ class A {}
+ A.prototype.bar = "wrong value: A.prototype.bar";
+
+ class B extends A {
+ get bar() {
+ return this.test_value;
+ }
+ };
+
+ class C extends B {}
+
+ class D extends C {
+ foo() { return super.bar;}
+ }
+ %PrepareFunctionForOptimization(D.prototype.foo);
+ D.prototype.bar = "wrong value: D.prototype.bar";
+
+ const o = new D();
+ o.test_value = "correct value";
+
+ // Create objects which will act as the "home object's prototype" later.
+  const prototypes = [{"a": 0}, {"b": 0}, {"c": 0}, {"d": 0}, {"e": 0},
+                      {"f": 0}, {"g": 0}, {"h": 0}];
+ for (p of prototypes) {
+ p.__proto__ = B.prototype;
+ }
+
+ // Fill in the feedback (megamorphic).
+ for (p of prototypes) {
+ D.prototype.__proto__ = p;
+ const r = o.foo();
+ assertEquals("correct value", r);
+ }
+
+ %OptimizeFunctionOnNextCall(D.prototype.foo);
+
+ // Test the optimized function - don't change the home object's proto any
+ // more.
+ const r = o.foo();
+ assertEquals("correct value", r);
+})();
+
+(function TestHomeObjectProtoIsGlobalThisGetterProperty() {
+ class A {}
+
+ class B extends A {
+ foo() { return super.bar; }
+ }
+ B.prototype.__proto__ = globalThis;
+ Object.defineProperty(globalThis, "bar", {get: function() { return this.this_value; }});
+ %PrepareFunctionForOptimization(B.prototype.foo);
+
+ let o = new B();
+ o.this_value = "correct value";
+
+ // Fill in the feedback.
+ let r = o.foo();
+ assertEquals("correct value", r);
+
+ %OptimizeFunctionOnNextCall(B.prototype.foo);
+
+ // Test the optimized function.
+ r = o.foo();
+ assertEquals("correct value", r);
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(B.prototype.foo);
+})();
+
+(function TestHomeObjectProtoIsFunctionAndPropertyIsPrototype() {
+ // There are special optimizations for accessing Function.prototype. Test
+ // that super property access which ends up accessing it works.
+ class A {}
+
+ class B extends A {
+ foo() { return super.prototype; }
+ }
+ function f() {}
+ B.prototype.__proto__ = f;
+ %PrepareFunctionForOptimization(B.prototype.foo);
+
+ let o = new B();
+
+ // Fill in the feedback.
+ let r = o.foo();
+ assertEquals(f.prototype, r);
+
+ %OptimizeFunctionOnNextCall(B.prototype.foo);
+
+ // Test the optimized function.
+ r = o.foo();
+ assertEquals(f.prototype, r);
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(B.prototype.foo);
+})();
diff --git a/deps/v8/test/mjsunit/es6/super-ic.js b/deps/v8/test/mjsunit/es6/super-ic.js
index 60f6a2394a..04223c2c6b 100644
--- a/deps/v8/test/mjsunit/es6/super-ic.js
+++ b/deps/v8/test/mjsunit/es6/super-ic.js
@@ -454,3 +454,13 @@ function forceDictionaryMode(obj) {
obj1.x = "added";
assertEquals("added", obj1.x);
})();
+
+// Regression test for crbug.com/1139786
+(function HomeObjectProtoIsInt8ArrayAndReceiverIsSmi() {
+ class A extends Int8Array {
+ f() {
+ super.toString();
+ }
+ };
+ A.prototype.f.call(42);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/import-from-instantiation-errored.js b/deps/v8/test/mjsunit/harmony/import-from-instantiation-errored.js
index 958ba55e5e..160888c246 100644
--- a/deps/v8/test/mjsunit/harmony/import-from-instantiation-errored.js
+++ b/deps/v8/test/mjsunit/harmony/import-from-instantiation-errored.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-dynamic-import
+// Flags: --allow-natives-syntax --harmony-dynamic-import --no-stress-snapshot
var error1, error2;
import('modules-skip-10.mjs').catch(e => error1 = e);
diff --git a/deps/v8/test/mjsunit/harmony/promise-all-settled.js b/deps/v8/test/mjsunit/harmony/promise-all-settled.js
index 7571cd4604..5535a6d265 100644
--- a/deps/v8/test/mjsunit/harmony/promise-all-settled.js
+++ b/deps/v8/test/mjsunit/harmony/promise-all-settled.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-promise-all-settled --ignore-unhandled-promises
+// Flags: --allow-natives-syntax --ignore-unhandled-promises
class MyError extends Error {}
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index b95bc69760..398e7c111a 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -70,10 +70,6 @@
# Enable once serializing a running isolate is fully implemented.
'serialize-deserialize-now': [SKIP],
- # https://crbug.com/v8/issues/10486
- # Enable once multi-byte prefixed opcodes are correctly handled
- 'regress/wasm/regress-1065599': [SKIP],
-
# BUG(v8:9506): times out.
'wasm/shared-memory-worker-explicit-gc-stress': [PASS, SLOW],
'wasm/shared-memory-worker-gc-stress': [PASS, SLOW],
@@ -81,6 +77,10 @@
# https://crbug.com/1129854
'tools/log': ['arch == arm or arch == arm64', SKIP],
+ # crbug.com/1161357
+ # TODO(solanes): Remove this entry once the underlying issue is fixed.
+ 'regress/regress-1161357': [PASS, FAIL],
+
##############################################################################
# Tests where variants make no sense.
'd8/enable-tracing': [PASS, NO_VARIANTS],
@@ -150,11 +150,8 @@
'regress/wasm/regress-810973': [PASS, SLOW],
'sealed-array-reduce': [PASS, SLOW],
'string-replace-gc': [PASS, SLOW],
- 'wasm/asm-wasm-f32': [PASS, SLOW],
- 'wasm/asm-wasm-f64': [PASS, SLOW],
'wasm/embenchen/*': [PASS, SLOW],
'wasm/futex': [PASS, SLOW],
- 'wasm/grow-memory': [PASS, SLOW],
'wasm/unreachable-validation': [PASS, SLOW],
'wasm/atomics-stress': [PASS, SLOW, NO_VARIANTS, ['mode != release or dcheck_always_on', SKIP], ['tsan', SKIP]],
'wasm/atomics64-stress': [PASS, SLOW, NO_VARIANTS, ['mode != release or dcheck_always_on', SKIP], ['tsan', SKIP]],
@@ -273,6 +270,8 @@
'regress/regress-1122': [SKIP],
'regress/regress-331444': [SKIP],
'regress/regress-353551': [SKIP],
+ 'regress/regress-1138075': [SKIP],
+ 'regress/regress-1138611': [SKIP],
'regress/regress-crbug-119926': [SKIP],
'regress/short-circuit': [SKIP],
'stack-traces-overflow': [SKIP],
@@ -498,7 +497,6 @@
'compiler/regress-9017': [SKIP],
# Slow tests.
- 'array-concat': [PASS, SLOW],
'array-indexing': [PASS, SLOW],
'array-reduce': [PASS, SLOW],
'array-sort': [PASS, SLOW],
@@ -625,6 +623,7 @@
'es6/large-classes-properties': [SKIP],
# Slow tests.
+ 'compiler/regress-1125145': [SKIP],
'es6/block-conflicts-sloppy': [PASS, SLOW],
'math-floor-part1': [PASS, SLOW],
'regress/regress-430201': [SKIP],
@@ -808,10 +807,17 @@
##############################################################################
['system == android', {
# Tests consistently failing on Android.
- # Setting the locale with environment variables unavailable
+  # Setting the timezone and locale via environment variables is unavailable
'icu-date-to-string': [SKIP],
'icu-date-lord-howe': [SKIP],
'regress/regress-6288': [SKIP],
+ 'tzoffset-transition-apia': [SKIP],
+ 'tzoffset-transition-lord-howe': [SKIP],
+ 'tzoffset-transition-moscow': [SKIP],
+ 'tzoffset-transition-new-york': [SKIP],
+ 'tzoffset-transition-new-york-noi18n': [SKIP],
+ 'tzoffset-seoul': [SKIP],
+ 'tzoffset-seoul-noi18n': [SKIP],
# OOM:
'regress/regress-752764': [FAIL],
# Flaky OOM:
@@ -1059,16 +1065,12 @@
'es6/large-classes-properties': [SKIP],
'generated-transition-stub': [SKIP],
'regress/regress-336820': [SKIP],
- 'wasm/grow-memory': [SKIP],
}], # variant == stress and (arch == arm or arch == arm64) and simulator_run
##############################################################################
['variant in (nooptimization, jitless) and arch in (arm, arm64) and simulator_run', {
# Slow tests: https://crbug.com/v8/7783
'regress/regress-crbug-319860': [SKIP],
- 'wasm/asm-wasm-f32': [SKIP],
- 'wasm/asm-wasm-f64': [SKIP],
- 'wasm/grow-memory': [SKIP],
}], # variant == nooptimization and (arch == arm or arch == arm64) and simulator_run
##############################################################################
@@ -1153,6 +1155,7 @@
'compiler/number-comparison-truncations': [SKIP],
'compiler/redundancy-elimination': [SKIP],
'compiler/regress-9945-*': [SKIP],
+ 'es6/super-ic-opt-no-turboprop': [SKIP],
# Static asserts for optimizations don't hold due to removed optimization
# phases.
@@ -1196,7 +1199,12 @@
# fixed in a later cl.
# TODO(mythria): Reenable this when the cl that migrates deprecated maps
# lands
- 'regress/regress-932953': [FAIL],
+ 'regress/regress-932953': [PASS],
+
+ # Tests failing for the lack of function context specialization in Turboprop.
+ 'compiler/abstract-equal-receiver': [FAIL],
+ 'compiler/constant-fold-cow-array': [FAIL],
+ 'compiler/promise-resolve-stable-maps': [FAIL],
# https://crbug.com/v8/10894
'math-floor-of-div': [SLOW],
@@ -1275,6 +1283,8 @@
# Investigate (IsScript).
'harmony/import-from-compilation-errored': [SKIP],
'harmony/private-fields-special-object': [SKIP],
+  # Skip, since import errors refer to the script via debug symbols
+ 'harmony/import-from-instantiation-errored': [SKIP],
# Investigate (JSFunction in startup serializer).
'regress/regress-1034394': [SKIP],
'regress/regress-863810': [SKIP],
@@ -1291,6 +1301,9 @@
'regress/regress-813440': [SKIP],
# Investigate (segfault).
'regress/regress-crbug-397662': [SKIP],
+ # Script referenced only through context-dependent SourceTextModule
+ # https://bugs.chromium.org/p/v8/issues/detail?id=11073
+ 'tools/processor': [SKIP],
}], # variant == stress_snapshot and arch == x64
##############################################################################
@@ -1318,10 +1331,6 @@
'compiler/test-dynamic-map-*': [SKIP],
'es6/collections-constructor-iterator-side-effect': [SKIP],
'es6/collections-constructor-with-modified-protoype': [SKIP],
- # Deopts due to different behavior in BytecodeGraphBuilder::GetForInMode. In
- # default TF, the ForInHint is kAny, in NCI mode kNone (because we currently
- # don't use feedback).
- 'regress/regress-3650-3': [SKIP],
# assertUnoptimized: assumes full turbofan pipeline.
'allocation-site-info': [SKIP],
'array-bounds-check-removal': [SKIP],
@@ -1435,6 +1444,8 @@
'compiler/serializer-feedback-propagation-1': [SKIP],
'compiler/serializer-feedback-propagation-2': [SKIP],
'compiler/serializer-transition-propagation': [SKIP],
+ # crbug.com/v8/11110
+ 'es6/super-ic-opt*': [SKIP],
}], # variant == nci or variant == nci_as_midtier
['((arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips) or (arch in [ppc64, s390x])', {
diff --git a/deps/v8/test/mjsunit/promise-perform-all-settled-resolve-lookup.js b/deps/v8/test/mjsunit/promise-perform-all-settled-resolve-lookup.js
index a2f5f01837..89965c851c 100644
--- a/deps/v8/test/mjsunit/promise-perform-all-settled-resolve-lookup.js
+++ b/deps/v8/test/mjsunit/promise-perform-all-settled-resolve-lookup.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --allow-natives-syntax --harmony-promise-all-settled
+// Flags: --allow-natives-syntax
let count = 0;
class MyPromise extends Promise {
diff --git a/deps/v8/test/mjsunit/regexp-backtrack-limit.js b/deps/v8/test/mjsunit/regexp-backtrack-limit.js
index 6cf1cafbe2..14fcbd9ee4 100644
--- a/deps/v8/test/mjsunit/regexp-backtrack-limit.js
+++ b/deps/v8/test/mjsunit/regexp-backtrack-limit.js
@@ -3,6 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --no-enable-experimental-regexp-engine
+// Flags: --no-enable-experimental-regexp-engine-on-excessive-backtracks
const kNoBacktrackLimit = 0; // To match JSRegExp::kNoBacktrackLimit.
const re0 = %NewRegExpWithBacktrackLimit("(\\d+)+x", "", kNoBacktrackLimit);
diff --git a/deps/v8/test/mjsunit/regexp-experimental.js b/deps/v8/test/mjsunit/regexp-experimental.js
index 469da37d7f..991fb4b576 100644
--- a/deps/v8/test/mjsunit/regexp-experimental.js
+++ b/deps/v8/test/mjsunit/regexp-experimental.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --enable-experimental-regexp-engine
+// Flags: --allow-natives-syntax --default-to-experimental-regexp-engine
function Test(regexp, subject, expectedResult, expectedLastIndex) {
assertEquals(%RegexpTypeTag(regexp), "EXPERIMENTAL");
diff --git a/deps/v8/test/mjsunit/regexp-fallback-large-default.js b/deps/v8/test/mjsunit/regexp-fallback-large-default.js
new file mode 100644
index 0000000000..5196ab6065
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-fallback-large-default.js
@@ -0,0 +1,20 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+// Flags: --no-enable-experimental-regexp-engine
+// Flags: --enable-experimental-regexp-engine-on-excessive-backtracks
+// Flags: --regexp-backtracks-before-fallback=1000000000
+
+// This test is similar to regexp-fallback.js but with a large
+// --regexp-backtracks-before-fallback value.
+//
+// If the backtrack limit from --regexp-backtracks-before-fallback is larger
+// than an explicit limit, then we should take the explicit limit.
+let regexp = %NewRegExpWithBacktrackLimit(".+".repeat(100) + "x", "", 5000);
+let subject = "a".repeat(100) + "x" + "a".repeat(99);
+let result = ["a".repeat(100) + "x"];
+
+assertArrayEquals(result, regexp.exec(subject));
+assertArrayEquals(result, regexp.exec(subject));
diff --git a/deps/v8/test/mjsunit/regexp-fallback.js b/deps/v8/test/mjsunit/regexp-fallback.js
new file mode 100644
index 0000000000..a2f447b4b6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-fallback.js
@@ -0,0 +1,37 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+// Flags: --no-enable-experimental-regexp-engine
+// Flags: --enable-experimental-regexp-engine-on-excessive-backtracks
+// Flags: --regexp-tier-up --regexp-tier-up-ticks 1
+
+// We should report accurate results on patterns for which irregexp suffers
+// from catastrophic backtracking.
+let regexp = new RegExp("a+".repeat(100) + "x");
+let match = "a".repeat(100) + "x";
+let subject = match.repeat(3);
+
+// First for the irregexp interpreter:
+assertArrayEquals([match], regexp.exec(subject));
+// Now for native irregexp:
+assertArrayEquals([match], regexp.exec(subject));
+
+// Now the same again with String.replace and a replacement function to
+// exercise the RegExpGlobalCache.
+regexp = new RegExp(regexp.source, "g");
+assertEquals("", subject.replace(regexp, function () { return ""; }));
+assertEquals("", subject.replace(regexp, function () { return ""; }));
+
+// If an explicit backtrack limit is larger than the default, then we should
+// take the default limit.
+regexp = %NewRegExpWithBacktrackLimit(regexp.source, "", 1000000000)
+assertArrayEquals([match], regexp.exec(subject));
+assertArrayEquals([match], regexp.exec(subject));
+
+// If the experimental engine can't handle a regexp with an explicit backtrack
+// limit, we should abort and return null on excessive backtracking.
+regexp = %NewRegExpWithBacktrackLimit(regexp.source + "(?=a)", "", 100)
+assertEquals(null, regexp.exec(subject));
+assertEquals(null, regexp.exec(subject));
diff --git a/deps/v8/test/mjsunit/regexp-linear-flag.js b/deps/v8/test/mjsunit/regexp-linear-flag.js
new file mode 100644
index 0000000000..029db097ce
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-linear-flag.js
@@ -0,0 +1,35 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+// Flags: --enable-experimental-regexp-engine
+// Flags: --no-default-to-experimental-regexp-engine
+// Flags: --no-force-slow-path
+
+// We shouldn't assign the experimental engine to regexps without the 'l' flag.
+assertNotEquals("EXPERIMENTAL", %RegexpTypeTag(/asdf/));
+assertNotEquals("EXPERIMENTAL", %RegexpTypeTag(/123|asdf/));
+assertNotEquals("EXPERIMENTAL", %RegexpTypeTag(/(a*)*x/));
+assertNotEquals("EXPERIMENTAL", %RegexpTypeTag(/(a*)\1/));
+
+// We should assign the experimental engine to regexps with the 'l' flag.
+assertEquals("EXPERIMENTAL", %RegexpTypeTag(/asdf/l));
+assertEquals("EXPERIMENTAL", %RegexpTypeTag(/123|asdf/l));
+assertEquals("EXPERIMENTAL", %RegexpTypeTag(/(a*)*x/l));
+
+// We should throw if a regexp with 'l' flag can't be handled by the
+// experimental engine.
+assertThrows(() => /(a*)\1/l, SyntaxError);
+
+// The flags field of a regexp should be sorted.
+assertEquals("glmsy", (/asdf/lymsg).flags);
+
+// The 'linear' member should be set according to the linear flag.
+assertTrue((/asdf/lymsg).linear);
+assertFalse((/asdf/ymsg).linear);
+
+// The new fields installed on the regexp prototype map shouldn't make
+// unmodified regexps slow.
+assertTrue(%RegexpIsUnmodified(/asdf/));
+assertTrue(%RegexpIsUnmodified(/asdf/l));
diff --git a/deps/v8/test/mjsunit/regexp-no-linear-flag.js b/deps/v8/test/mjsunit/regexp-no-linear-flag.js
new file mode 100644
index 0000000000..7df34aa830
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-no-linear-flag.js
@@ -0,0 +1,22 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+// Flags: --no-enable-experimental-regexp-engine
+
+// We shouldn't recognize the 'l' flag.
+assertThrows(() => new RegExp("asdf", "l"), SyntaxError)
+assertThrows(() => new RegExp("123|xyz", "l"), SyntaxError)
+assertThrows(() => new RegExp("((a*)*)*", "yls"), SyntaxError)
+assertThrows(() => new RegExp("((a*)*)*\1", "l"), SyntaxError)
+
+// RegExps shouldn't have a 'linear' property.
+assertFalse(RegExp.prototype.hasOwnProperty('linear'));
+assertFalse(/123/.hasOwnProperty('linear'));
+
+{
+ let re = /./;
+ re.linear = true;
+ assertEquals("", re.flags);
+}
diff --git a/deps/v8/test/mjsunit/regress-1146106.js b/deps/v8/test/mjsunit/regress-1146106.js
new file mode 100644
index 0000000000..04205ff7bd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-1146106.js
@@ -0,0 +1,18 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --concurrent-inlining --no-use-ic --super-ic
+
+class A {
+ bar() { }
+}
+class B extends A {
+ foo() {
+ return super.bar();
+ }
+}
+%PrepareFunctionForOptimization(B.prototype.foo);
+new B().foo();
+%OptimizeFunctionOnNextCall(B.prototype.foo);
+new B().foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-10908.js b/deps/v8/test/mjsunit/regress/regress-10908.js
new file mode 100644
index 0000000000..7daf1bcadb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-10908.js
@@ -0,0 +1,19 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let a = [];
+Object.defineProperty(a, "length", {writable: false});
+function f() {
+ return a.pop();
+}
+assertThrows(f, TypeError, /Cannot assign to read only property 'length'/);
+
+%PrepareFunctionForOptimization(f);
+for (let i = 0; i < 3; i++) {
+ assertThrows(f, TypeError, /Cannot assign to read only property 'length'/);
+}
+%OptimizeFunctionOnNextCall(f);
+assertThrows(f, TypeError, /Cannot assign to read only property 'length'/);
diff --git a/deps/v8/test/mjsunit/regress/regress-10931.js b/deps/v8/test/mjsunit/regress/regress-10931.js
new file mode 100644
index 0000000000..09c84cd562
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-10931.js
@@ -0,0 +1,14 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const kLargerThanFixedArrayMaxLength = 200_000_000;
+var x = new Int8Array(kLargerThanFixedArrayMaxLength);
+try {
+ var y = x.sort((a, b) => b - a);
+} catch (e) {
+ // Throwing is okay, crashing is not.
+ assertInstanceof(e, TypeError);
+ assertMatches(
+ /not supported for huge TypedArrays/, e.message, 'Error message');
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-1112155.js b/deps/v8/test/mjsunit/regress/regress-1112155.js
index ada3b847a7..b7955894c7 100644
--- a/deps/v8/test/mjsunit/regress/regress-1112155.js
+++ b/deps/v8/test/mjsunit/regress/regress-1112155.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --dynamic-map-checks --allow-natives-syntax --opt --no-always-opt
+// Flags: --turboprop-dynamic-map-checks --allow-natives-syntax --opt --no-always-opt
function f(v) {
return v.b;
diff --git a/deps/v8/test/mjsunit/regress/regress-1125871.js b/deps/v8/test/mjsunit/regress/regress-1125871.js
index b062961dd8..706d0b96d8 100644
--- a/deps/v8/test/mjsunit/regress/regress-1125871.js
+++ b/deps/v8/test/mjsunit/regress/regress-1125871.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --dynamic-map-checks --opt --no-always-opt
+// Flags: --allow-natives-syntax --turboprop-dynamic-map-checks --opt --no-always-opt
function bar(obj) {
// Add two dummy loads to make sure obj.b is in the same slot index as obj.a
diff --git a/deps/v8/test/mjsunit/regress/regress-1132111.js b/deps/v8/test/mjsunit/regress/regress-1132111.js
new file mode 100644
index 0000000000..1dd1b58806
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1132111.js
@@ -0,0 +1,23 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Public function field with computed name
+eval(`
+ buggy = ((bug = new class { [0] = x => 1337.0; }) => bug);
+`);
+
+// Public method with computed name
+eval(`
+ buggy = ((bug = new class { [0](x) { return 1337.0}; }) => bug);
+`);
+
+// Private function field with computed name
+eval(`
+ buggy = ((bug = new class { #foo = x => 1337.0; }) => bug);
+`);
+
+// Private method with computed name
+eval(`
+ buggy = ((bug = new class { #foo(x) { return 1337.0; } }) => bug);
+`);
diff --git a/deps/v8/test/mjsunit/regress/regress-1137979.js b/deps/v8/test/mjsunit/regress/regress-1137979.js
new file mode 100644
index 0000000000..2e06a9c3c3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1137979.js
@@ -0,0 +1,21 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turboprop --no-lazy-feedback-allocation
+// Flags: --noanalyze-environment-liveness
+
+function foo() {
+ try {
+ bar();
+ } catch (e) {}
+ for (var i = 0; i < 3; i++) {
+ try {
+ %PrepareFunctionForOptimization(foo);
+ %OptimizeOsr();
+ } catch (e) {}
+ }
+}
+
+foo();
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-1138075.js b/deps/v8/test/mjsunit/regress/regress-1138075.js
new file mode 100644
index 0000000000..e68e1b5471
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1138075.js
@@ -0,0 +1,27 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turboprop --max-semi-space-size=1
+
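+// Recurse until the stack overflows, then invoke f() while still close to the
+// stack limit.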
+function runNearStackLimit(f) {
+ function t() {
+ try {
+ return t();
+ } catch (e) {
+ return f();
+ }
+ }
+ %PrepareFunctionForOptimization(t);
+ %OptimizeFunctionOnNextCall(t);
+ return t();
+}
+
+function foo(a) {}
+function bar(a, b) {}
+
+for (let i = 0; i < 150; i++) {
+ runNearStackLimit(() => {
+ return foo(bar(3, 4) === false);
+ });
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-1138611.js b/deps/v8/test/mjsunit/regress/regress-1138611.js
new file mode 100644
index 0000000000..bca6a4bd09
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1138611.js
@@ -0,0 +1,34 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turboprop --gc-interval=1000
+
+function runNearStackLimit(f) {
+ function t() {
+ try {
+ return t();
+ } catch (e) {
+ return f();
+ }
+ }
+ %PrepareFunctionForOptimization(t);
+ %OptimizeFunctionOnNextCall(t);
+ return t();
+}
+
+function foo() {
+ runNearStackLimit(() => {});
+}
+
+(function () {
+ var a = 42;
+ var b = 153;
+ try {
+ Object.defineProperty({});
+ } catch (e) {}
+ foo();
+ foo();
+})();
+
+runNearStackLimit(() => {});
diff --git a/deps/v8/test/mjsunit/regress/regress-1139782.js b/deps/v8/test/mjsunit/regress/regress-1139782.js
new file mode 100644
index 0000000000..d6dda01769
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1139782.js
@@ -0,0 +1,37 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function main() {
+ for (let v3 = 0; v3 < 120; v3++) {
+ const v6 = [Int16Array,1111];
+ let v12 = 577623200;
+ const v14 = [2];
+ const v18 = [1.7976931348623157e+308,1.7976931348623157e+308,1.7976931348623157e+308,1.7976931348623157e+308];
+ const v20 = [1111,Uint8Array];
+ const v21 = [v20,v20,v18,v20,1111,1111,1111,-1111];
+ const v23 = [11.11,11.11,1.7976931348623157e+308,11.11,11.11];
+ const v26 = -Infinity;
+ const v27 = [v23,v26,1111,v18,Date,1111,-9007199254740992,v21];
+ const v31 = [v14];
+ const v32 = [v31,v12,"object",v21,1111,6.0,v18,v27,Int8Array];
+ const v33 = ["65555",v26,v32];
+ const v34 = v33.toLocaleString();
+ let v35 = "659874589";
+ v35 = v34;
+ const v37 = [11.11,11.11,1111];
+ const v38 = [v6];
+ const v39 = [v38,v37,v38];
+ v37[10000] = v23;
+ v12 = v35;
+ const v54 = [parseInt,v39];
+ const v56 = String.fromCharCode();
+ const v61 = [v12,1111,-9007199254740991,1111];
+ const v63 = [11.11,v54,JSON,v61,11.11,v56,v61];
+ const v64 = JSON.stringify(v63);
+ const v65 = RegExp(v64);
+ const v66 = v65.exec(v64);
+ }
+}
+
+main();
diff --git a/deps/v8/test/mjsunit/regress/regress-1141502.js b/deps/v8/test/mjsunit/regress/regress-1141502.js
new file mode 100644
index 0000000000..b516c1c0c5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1141502.js
@@ -0,0 +1,21 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-lazy-feedback-allocation
+// Flags: --turboprop-dynamic-map-checks
+
+function bar(a) {
+ return a.x;
+}
+
+function foo(a) {
+ return 1 * bar(a);
+}
+
+var obj = {x: 2};
+
+%PrepareFunctionForOptimization(foo);
+foo(obj, obj);
+%OptimizeFunctionOnNextCall(foo);
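+// Calling foo() without arguments makes bar() read a property off undefined;
+// the optimized code must throw a TypeError instead of crashing.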
+assertThrows(() => foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-1142158.js b/deps/v8/test/mjsunit/regress/regress-1142158.js
new file mode 100644
index 0000000000..efbfb1a6cd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1142158.js
@@ -0,0 +1,37 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+var __v_0 = {};
+var __v_13 = {};
+var __v_14 = {};
+var __v_15 = {};
+var __v_16 = {};
+var __v_17 = {};
+var __v_18 = {};
+function __f_6(x, deopt) {
+ var __v_1 = x;
+ var __v_2 = 2 * x;
+ var __v_3 = 3 * x;
+ var __v_4 = 4 * x;
+ var __v_5 = 5 * x;
+ var __v_6 = 6 * x;
+ var __v_7 = 7 * x;
+ var __v_9 = 9 * x;
+ var __v_10 = 10 * x;
+ var __v_11 = 11 * x;
+ var __v_12 = 12 * x;
+ var __v_20 = 18 * x;
+ var __v_19 = 19 * x;
+ var __v_8 = 20 * x;
+ __v_0 = 1;
+ deopt + -2147483648;
+ return __v_1 + __v_2 + __v_3 + __v_4 + __v_5 + __v_6 + __v_7 + __v_8 + __v_9 + __v_10 + __v_11 + __v_12 + __v_13 +
+ __v_14 + __v_15 + __v_16 + __v_17 + __v_18 + __v_19 + __v_20;
+};
+%PrepareFunctionForOptimization(__f_6);
+__f_6();
+%OptimizeFunctionOnNextCall(__f_6);
+assertEquals("45[object Object][object Object][object Object][object Object][object Object][object Object]9.59", __f_6(0.5, ""));
diff --git a/deps/v8/test/mjsunit/regress/regress-1144672.js b/deps/v8/test/mjsunit/regress/regress-1144672.js
new file mode 100644
index 0000000000..eaecf2b831
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1144672.js
@@ -0,0 +1,20 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function g(b) {
+ const _ = Object.getOwnPropertyDescriptors(g);
+ // for (const _ of b) {}
+}
+
+function f(...a) {
+ g(a);
+}
+
+%PrepareFunctionForOptimization(f);
+%PrepareFunctionForOptimization(g);
+f([]);
+%OptimizeFunctionOnNextCall(f);
+f([]);
diff --git a/deps/v8/test/mjsunit/regress/regress-1161357.js b/deps/v8/test/mjsunit/regress/regress-1161357.js
new file mode 100644
index 0000000000..b6f03b92ac
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1161357.js
@@ -0,0 +1,15 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+for (let i = 0; i < 3; i++) {
+ for (let j = 0; j < 32767; j++) {
+ Number;
+ }
+ for (let j = 0; j < 2335; j++) {
+ Number;
+ }
+ var arr = [, ...(new Int16Array(0xffff)), 4294967296];
+ arr.concat(Number, arr)
+}
+eval(``);
diff --git a/deps/v8/test/mjsunit/regress/regress-542823.js b/deps/v8/test/mjsunit/regress/regress-542823.js
index d9c23396d4..cc1a80d5a4 100644
--- a/deps/v8/test/mjsunit/regress/regress-542823.js
+++ b/deps/v8/test/mjsunit/regress/regress-542823.js
@@ -1,12 +1,21 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
-__v_0 = 100000;
-__v_1 = new Array();
-for (var __v_2 = 0; __v_2 < __v_0; __v_2++) {
- __v_1[__v_2] = 0.5;
-}
-for (var __v_2 = 0; __v_2 < 10; __v_2++) {
- var __v_0 = __v_1 + 0.5;
-}
+(function() {
+ // kPageSizeBits is 19 on PPC.
+ const kPageSizeBits = 19;
+ const kMaxHeapObjectSize = (1 << (kPageSizeBits - 1));
+
+ const filler = "Large amount of text per element, so that the joined array is"
+ + "large enough to be allocated in the large object space"
+ const size = Math.ceil(kMaxHeapObjectSize / filler.length + 1);
+ const arr = Array(size).fill(filler);
+
+ for (let i = 0; i < 10; i++) {
+ assertTrue(%InLargeObjectSpace(arr.join("")));
+ }
+
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-6248.js b/deps/v8/test/mjsunit/regress/regress-6248.js
index e16452df92..dd8943b827 100644
--- a/deps/v8/test/mjsunit/regress/regress-6248.js
+++ b/deps/v8/test/mjsunit/regress/regress-6248.js
@@ -23,4 +23,4 @@ assertSame(sentinelObject, new C());
assertSame(sentinelObject, new C());
%OptimizeFunctionOnNextCall(C)
assertSame(sentinelObject, new C());
-assertFalse(evaluatedArg);
+assertTrue(evaluatedArg);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1038178.js b/deps/v8/test/mjsunit/regress/regress-crbug-1038178.js
index 0362f69bcd..3a84066b83 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-1038178.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1038178.js
@@ -15,7 +15,7 @@ function opt(){
(((function(){})())?.v)()
}
%PrepareFunctionForOptimization(opt)
-assertThrows(opt());
-assertThrows(opt());
+assertThrows(() => opt());
+assertThrows(() => opt());
%OptimizeFunctionOnNextCall(opt)
-assertThrows(opt());
+assertThrows(() => opt());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1130213.js b/deps/v8/test/mjsunit/regress/regress-crbug-1130213.js
new file mode 100644
index 0000000000..a2d03d39c2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1130213.js
@@ -0,0 +1,9 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Realm.createAllowCrossRealmAccess();
+const global = Realm.global(1);
+assertSame(1, Realm.owner(global));
+Realm.detachGlobal(1);
+assertSame(undefined, Realm.owner(global));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1137586.js b/deps/v8/test/mjsunit/regress/regress-crbug-1137586.js
new file mode 100644
index 0000000000..97cb82c08d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1137586.js
@@ -0,0 +1,16 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f() {
+ for (var i = 0; i < 100000; i++) {
+ var a = arguments[0] + 2;
+ var b = arguments[1] + 2;
+ var c = a + i + 5;
+ var d = c + 3;
+ }
+}
+
+for (var j = 0; j < 3; j++) {
+ f(2, 3);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1137594.js b/deps/v8/test/mjsunit/regress/regress-crbug-1137594.js
new file mode 100644
index 0000000000..d17a5e6341
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1137594.js
@@ -0,0 +1,17 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+v = { symbol: Symbol() };
+function f() {
+ for (var i = 0; i < 1; ++i) {
+ try { v.symbol(); } catch (e) {}
+ }
+}
+
+%PrepareFunctionForOptimization(f);
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1151890.js b/deps/v8/test/mjsunit/regress/regress-crbug-1151890.js
new file mode 100644
index 0000000000..70a3d6bbf0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1151890.js
@@ -0,0 +1,11 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
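+// A right shift of a 128-bit BigInt by 64 must still produce the correct
+// result when new space is full.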
+for (let i = 0, j = 0; i < 10; ++i) {
+ let x = (-0xffffffffffffffff_ffffffffffffffffn >> 0x40n);
+ assertEquals(-0x10000000000000000n, x);
+ %SimulateNewspaceFull();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1171954.js b/deps/v8/test/mjsunit/regress/regress-crbug-1171954.js
new file mode 100644
index 0000000000..94fbb329bc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1171954.js
@@ -0,0 +1,19 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-opt
+
+// This causes the register used by the call in the later try-catch block to be
+// used by the ToName conversion for null, which causes a DCHECK failure when
+// compiling. If register allocation changes, this test may no longer reproduce
+// the crash, but it is not easy to write a proper test because the bug is tied
+// to register allocation. The test itself should always pass, so it shouldn't
+// cause any flakes.
+try {
+ var { [null]: __v_12, } = {};
+} catch (e) {}
+
+try {
+ assertEquals((__v_40?.o?.m)().p);
+} catch (e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-696622.js b/deps/v8/test/mjsunit/regress/regress-crbug-696622.js
index 79c4144101..52c0f66f8b 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-696622.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-696622.js
@@ -9,7 +9,9 @@ class D extends C { constructor() { super(...unresolved, 75) } }
D.__proto__ = null;
%PrepareFunctionForOptimization(D);
-assertThrows(() => new D(), TypeError);
-assertThrows(() => new D(), TypeError);
+// ReferenceError because argument evaluation happens before calling the super
+// constructor.
+assertThrows(() => new D(), ReferenceError);
+assertThrows(() => new D(), ReferenceError);
%OptimizeFunctionOnNextCall(D);
-assertThrows(() => new D(), TypeError);
+assertThrows(() => new D(), ReferenceError);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1065599.js b/deps/v8/test/mjsunit/regress/wasm/regress-1065599.js
index 85057104cc..55833a76b6 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1065599.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1065599.js
@@ -13,14 +13,14 @@ builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
builder.addFunction(undefined, 0 /* sig */).addBodyWithEnd([
// signature: i_iii
// body:
- kExprI32Const, 0xba, 0x01, // i32.const
- kSimdPrefix, kExprI16x8Splat, // i16x8.splat
- kExprMemorySize, 0x00, // memory.size
- kSimdPrefix, kExprI16x8ShrS, // i16x8.shr_s
- kSimdPrefix, kExprV8x16AnyTrue, // v8x16.any_true
- kExprMemorySize, 0x00, // memory.size
- kExprI32RemS, // i32.rem_s
- kExprEnd, // end @15
+ kExprI32Const, 0xba, 0x01, // i32.const
+ kSimdPrefix, kExprI16x8Splat, // i16x8.splat
+ kExprMemorySize, 0x00, // memory.size
+ kSimdPrefix, kExprI16x8ShrS, 0x01, // i16x8.shr_s
+ kSimdPrefix, kExprV8x16AnyTrue, // v8x16.any_true
+ kExprMemorySize, 0x00, // memory.size
+ kExprI32RemS, // i32.rem_s
+ kExprEnd, // end @15
]);
builder.addExport('main', 0);
const instance = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-11024.js b/deps/v8/test/mjsunit/regress/wasm/regress-11024.js
new file mode 100644
index 0000000000..8f6545acf2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-11024.js
@@ -0,0 +1,22 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The test needs --wasm-tier-up because we can't serialize and deserialize
+// Liftoff code.
+// Flags: --expose-wasm --allow-natives-syntax --expose-gc --wasm-tier-up
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+
+const wire_bytes = new WasmModuleBuilder().toBuffer();
+
+const serialized = (() => {
+ return %SerializeWasmModule(new WebAssembly.Module(wire_bytes));
+})();
+
+// Collect the compiled module to avoid sharing the NativeModule.
+gc();
+
+const module = %DeserializeWasmModule(serialized, wire_bytes);
+%SerializeWasmModule(module);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1132461.js b/deps/v8/test/mjsunit/regress/wasm/regress-1132461.js
new file mode 100644
index 0000000000..325ba08873
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1132461.js
@@ -0,0 +1,27 @@
+
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging
+
+// We load-splat a value, then drop it. Verify that the OOB load is not
+// eliminated; it should trap. This test case is simplified from the
+// fuzzer-provided test case in https://crbug.com/1132461.
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32, false, true);
+builder.addFunction(undefined, makeSig([], [kWasmI32]))
+ .addBodyWithEnd([
+kExprI32Const, 0x00,
+kExprI32Const, 0x00,
+kSimdPrefix, kExprS128Load32Splat, 0x00, 0xb6, 0xec, 0xd8, 0xb1, 0x03,
+kSimdPrefix, kExprI32x4ExtractLane, 0x00,
+kExprDrop,
+kExprEnd,
+]);
+
+builder.addExport('main', 0);
+const instance = builder.instantiate();
+assertThrows(() => instance.exports.main());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1137582.js b/deps/v8/test/mjsunit/regress/wasm/regress-1137582.js
new file mode 100644
index 0000000000..c2d2a09e90
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1137582.js
@@ -0,0 +1,10 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+const results = new Array(9).fill(kWasmI32);
+builder.addFunction('foo', makeSig([], results)).addBody([kExprUnreachable]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1146861.js b/deps/v8/test/mjsunit/regress/wasm/regress-1146861.js
new file mode 100644
index 0000000000..d9d80e58cc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1146861.js
@@ -0,0 +1,56 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addGlobal(kWasmI32, 1);
+builder.addType(makeSig([], [kWasmF64]));
+// Generate function 1 (out of 1).
+builder.addFunction(undefined, 0 /* sig */)
+ .addLocals(kWasmI32, 8).addLocals(kWasmI64, 3)
+ .addBodyWithEnd([
+// signature: d_v
+// body:
+kExprGlobalGet, 0x00, // global.get
+kExprLocalSet, 0x00, // local.set
+kExprI32Const, 0x00, // i32.const
+kExprI32Eqz, // i32.eqz
+kExprLocalSet, 0x01, // local.set
+kExprGlobalGet, 0x00, // global.get
+kExprLocalSet, 0x02, // local.set
+kExprI32Const, 0x01, // i32.const
+kExprI32Const, 0x01, // i32.const
+kExprI32Sub, // i32.sub
+kExprLocalSet, 0x03, // local.set
+kExprGlobalGet, 0x00, // global.get
+kExprLocalSet, 0x04, // local.set
+kExprI32Const, 0x00, // i32.const
+kExprI32Eqz, // i32.eqz
+kExprLocalSet, 0x05, // local.set
+kExprGlobalGet, 0x00, // global.get
+kExprLocalSet, 0x06, // local.set
+kExprI32Const, 0x00, // i32.const
+kExprI32Const, 0x01, // i32.const
+kExprI32Sub, // i32.sub
+kExprLocalSet, 0x07, // local.set
+kExprBlock, kWasmStmt, // block @45
+ kExprI32Const, 0x00, // i32.const
+ kExprIf, kWasmStmt, // if @49
+ kExprLocalGet, 0x0a, // local.get
+ kExprLocalSet, 0x08, // local.set
+ kExprElse, // else @55
+ kExprNop, // nop
+ kExprEnd, // end @57
+ kExprLocalGet, 0x08, // local.get
+ kExprLocalSet, 0x09, // local.set
+ kExprLocalGet, 0x09, // local.get
+ kExprI64Const, 0xff, 0x01, // i64.const
+ kExprI64Add, // i64.add
+ kExprDrop, // drop
+ kExprEnd, // end @69
+kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, // f64.const
+kExprEnd, // end @79
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1153442.js b/deps/v8/test/mjsunit/regress/wasm/regress-1153442.js
new file mode 100644
index 0000000000..989da11a25
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1153442.js
@@ -0,0 +1,61 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-threads
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(1, 1, false, true);
+builder.addGlobal(kWasmI32, 1);
+builder.addGlobal(kWasmI32, 1);
+builder.addType(makeSig([kWasmI32, kWasmI64, kWasmI32], []));
+// Generate function 1 (out of 1).
+builder.addFunction(undefined, 0 /* sig */)
+ .addLocals(kWasmI32, 10)
+ .addBodyWithEnd([
+// signature: v_ili
+// body:
+kExprI32Const, 0x00, // i32.const
+kExprLocalSet, 0x04, // local.set
+kExprI32Const, 0x01, // i32.const
+kExprLocalSet, 0x05, // local.set
+kExprBlock, kWasmStmt, // block @11
+ kExprBr, 0x00, // br depth=0
+ kExprEnd, // end @15
+kExprGlobalGet, 0x01, // global.get
+kExprLocalSet, 0x03, // local.set
+kExprLocalGet, 0x03, // local.get
+kExprI32Const, 0x01, // i32.const
+kExprI32Sub, // i32.sub
+kExprLocalSet, 0x06, // local.set
+kExprI64Const, 0x01, // i64.const
+kExprLocalSet, 0x01, // local.set
+kExprI32Const, 0x00, // i32.const
+kExprI32Eqz, // i32.eqz
+kExprLocalSet, 0x07, // local.set
+kExprBlock, kWasmStmt, // block @36
+ kExprBr, 0x00, // br depth=0
+ kExprEnd, // end @40
+kExprGlobalGet, 0x01, // global.get
+kExprLocalSet, 0x08, // local.set
+kExprI32Const, 0x01, // i32.const
+kExprI32Const, 0x01, // i32.const
+kExprI32Sub, // i32.sub
+kExprLocalSet, 0x09, // local.set
+kExprLocalGet, 0x00, // local.get
+kExprLocalSet, 0x0a, // local.set
+kExprGlobalGet, 0x00, // global.get
+kExprLocalSet, 0x0b, // local.set
+kExprI32Const, 0x00, // i32.const
+kExprI32Const, 0x0f, // i32.const
+kExprI32And, // i32.and
+kExprLocalSet, 0x0c, // local.set
+kExprI32Const, 0x00, // i32.const
+kAtomicPrefix, kExprI64AtomicLoad, 0x03, 0x04, // i64.atomic.load64
+kExprDrop, // drop
+kExprUnreachable, // unreachable
+kExprEnd, // end @75
+]);
+builder.toModule();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1161654.js b/deps/v8/test/mjsunit/regress/wasm/regress-1161654.js
new file mode 100644
index 0000000000..93f2c3b556
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1161654.js
@@ -0,0 +1,56 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging
+
+// This is a fuzzer-generated test case that exposed a bug in Liftoff that only
+// affects ARM, where the fp register aliasing is different from other archs.
+// We were incorrectly clearing the high fp register in a LiftoffRegList
+// indicating registers to load, hitting a DCHECK.
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(19, 32, false);
+builder.addGlobal(kWasmI32, 0);
+builder.addType(makeSig([], []));
+builder.addType(makeSig([kWasmI64, kWasmS128, kWasmF32], [kWasmI32]));
+// Generate function 1 (out of 5).
+builder.addFunction(undefined, 0 /* sig */)
+ .addBodyWithEnd([
+// signature: v_v
+// body:
+kExprI32Const, 0x05, // i32.const
+kExprReturn, // return
+kExprUnreachable, // unreachable
+kExprEnd, // end @5
+]);
+// Generate function 4 (out of 5).
+builder.addFunction(undefined, 1 /* sig */)
+ .addBodyWithEnd([
+// signature: i_lsf
+// body:
+kExprLocalGet, 0x01, // local.get
+kExprLocalGet, 0x01, // local.get
+kExprGlobalGet, 0x00, // global.get
+kExprDrop, // drop
+kExprLoop, kWasmStmt, // loop @8
+ kExprLoop, 0x00, // loop @10
+ kExprI32Const, 0x01, // i32.const
+ kExprMemoryGrow, 0x00, // memory.grow
+ kExprI64LoadMem8U, 0x00, 0x70, // i64.load8_u
+ kExprLoop, 0x00, // loop @19
+ kExprCallFunction, 0x00, // call function #0: v_v
+ kExprEnd, // end @23
+ kExprI64Const, 0xf1, 0x24, // i64.const
+ kExprGlobalGet, 0x00, // global.get
+ kExprDrop, // drop
+ kExprBr, 0x00, // br depth=0
+ kExprEnd, // end @32
+ kExprEnd, // end @33
+kExprI32Const, 0x5b, // i32.const
+kExprReturn, // return
+kExprEnd, // end @37
+]);
+// Instantiation is enough to cause a crash.
+const instance = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-9447.js b/deps/v8/test/mjsunit/regress/wasm/regress-9447.js
index 80d64b9b2d..77d819d48c 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-9447.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-9447.js
@@ -33,5 +33,7 @@ var fun2 = (function GenerateFun2() {
})();
// Both exported functions should throw, no matter how often they get wrapped.
-assertThrows(fun1, TypeError, /wasm function signature contains illegal type/);
-assertThrows(fun2, TypeError, /wasm function signature contains illegal type/);
+assertThrows(fun1, TypeError,
+ /type incompatibility when transforming from\/to JS/);
+assertThrows(fun2, TypeError,
+ /type incompatibility when transforming from\/to JS/);
diff --git a/deps/v8/test/mjsunit/runtime-callstats-helpers.js b/deps/v8/test/mjsunit/runtime-callstats-helpers.js
new file mode 100644
index 0000000000..6ca4347284
--- /dev/null
+++ b/deps/v8/test/mjsunit/runtime-callstats-helpers.js
@@ -0,0 +1,21 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Add --allow-natives-syntax --runtime-call-stats to your test file in order to
+// use this function. You can suppress the extra printout by calling
+// %GetAndResetRuntimeCallStats() at the end of the test.
+function getRuntimeFunctionCallCount(function_name) {
+ const stats = %GetAndResetRuntimeCallStats();
+ const lines = stats.split("\n");
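+  // The printed stats table has a three-line header and a three-line footer;
+  // only scan the body in between.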
+ for (let i = 3; i < lines.length - 3; ++i) {
+ const line = lines[i];
+ const m = line.match(/(?<name>\S+)\s+\S+\s+\S+\s+(?<count>\S+)/);
+ if (function_name == m.groups.name) {
+ return m.groups.count;
+ }
+ }
+ return 0;
+}
diff --git a/deps/v8/test/mjsunit/smi-mul-const.js b/deps/v8/test/mjsunit/smi-mul-const.js
index 1501231ff5..fd26c835c3 100644
--- a/deps/v8/test/mjsunit/smi-mul-const.js
+++ b/deps/v8/test/mjsunit/smi-mul-const.js
@@ -73,16 +73,14 @@ assertUnoptimized(mul_by_2);
// Deopt on overflow.
-// 2^30 is a smi boundary on arm and ia32.
+// -2^30 is in Smi range on most configurations, +2^30 is not.
var two_30 = 1 << 30;
-// 2^31 is a smi boundary on arm64 and x64.
-var two_31 = 2 * two_30;
+assertEquals(two_30, mul_by_neg_1(-two_30));
-// TODO(rmcilroy): replace after r16361 with: if (%IsValidSmi(two_31)) {
-if (true) {
- assertEquals(two_31, mul_by_neg_1(-two_31));
- assertUnoptimized(mul_by_neg_1);
-} else {
- assertEquals(two_30, mul_by_neg_1(-two_30));
- assertUnoptimized(mul_by_neg_1);
-}
+// For good measure, check that overflowing int32 range (or Smi range
+// without pointer compression) works too.
+var two_31 = two_30 * 2;
+assertEquals(two_31, mul_by_neg_1(-two_31));
+
+// One of the two situations deoptimized the code.
+assertUnoptimized(mul_by_neg_1);
diff --git a/deps/v8/test/mjsunit/smi-mul.js b/deps/v8/test/mjsunit/smi-mul.js
index a99b27af13..2d5dcab619 100644
--- a/deps/v8/test/mjsunit/smi-mul.js
+++ b/deps/v8/test/mjsunit/smi-mul.js
@@ -53,17 +53,14 @@ mul2(-1, 2);
mul2(-1, 2);
%OptimizeFunctionOnNextCall(mul2);
-// 2^30 is a smi boundary on arm and ia32.
+// -2^30 is in Smi range on most configurations, +2^30 is not.
var two_30 = 1 << 30;
-// 2^31 is a smi boundary on x64.
-var two_31 = 2 * two_30;
+assertEquals(two_30, mul2(-two_30, -1));
-if (%IsValidSmi(two_31)) {
- // Deopt on two_31 on x64.
- assertEquals(two_31, mul2(-two_31, -1));
- assertUnoptimized(mul2);
-} else {
- // Deopt on two_30 on ia32.
- assertEquals(two_30, mul2(-two_30, -1));
- assertUnoptimized(mul2);
-}
+// For good measure, check that overflowing int32 range (or Smi range
+// without pointer compression) works too.
+var two_31 = two_30 * 2;
+assertEquals(two_31, mul2(-two_31, -1));
+
+// One of the two situations deoptimized the code.
+assertUnoptimized(mul2);
diff --git a/deps/v8/test/mjsunit/stack-traces.js b/deps/v8/test/mjsunit/stack-traces.js
index 949d8390b4..a46b2b3940 100644
--- a/deps/v8/test/mjsunit/stack-traces.js
+++ b/deps/v8/test/mjsunit/stack-traces.js
@@ -439,23 +439,3 @@ var constructor = new Error().stack[0].constructor;
assertThrows(() => constructor.call());
assertThrows(() => constructor.call(
null, {}, () => undefined, {valueOf() { return 0 }}, false));
-
-// Test stack frames populated with line/column information for both call site
-// and enclosing function:
-Error.prepareStackTrace = function(e, frames) {
- assertMatches(/stack-traces\.js/, frames[0].getFileName());
- assertEquals(3, frames[0].getEnclosingColumnNumber());
- assertEquals(11, frames[0].getColumnNumber());
- assertTrue(frames[0].getEnclosingLineNumber() < frames[0].getLineNumber());
-}
-try {
- function a() {
- b();
- }
- function b() {
- throw Error('hello world');
- }
- a();
-} catch (err) {
- err.stack;
-}
diff --git a/deps/v8/test/mjsunit/stackoverflow-underapplication.js b/deps/v8/test/mjsunit/stackoverflow-underapplication.js
new file mode 100644
index 0000000000..0f2da170fc
--- /dev/null
+++ b/deps/v8/test/mjsunit/stackoverflow-underapplication.js
@@ -0,0 +1,54 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stack-size=100 --no-opt
+
+function f(
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x
+) { }
+
+function runNearStackLimit(f) {
+ let recursing_towards_stack_limit = true;
+ let f_succeeded = false;
+
+ function t() {
+ try {
+ t();
+ if (f_succeeded) return;
+ // Keep calling f until it stops throwing stack overflow exceptions.
+ f();
+ // f didn't throw, so we are done.
+ f_succeeded = true;
+ } catch(e) {
+ if (recursing_towards_stack_limit) {
+ recursing_towards_stack_limit = false;
+        // We reached the near-stack-limit state; call f for the first time.
+ f();
+ // f didn't throw, so we are done.
+ f_succeeded = true;
+ }
+ }
+ };
+
+ try {
+ t();
+ } catch(e) {}
+}
+
+runNearStackLimit(f);
diff --git a/deps/v8/test/mjsunit/tools/codemap.mjs b/deps/v8/test/mjsunit/tools/codemap.mjs
index f1150fe364..e70e6a0980 100644
--- a/deps/v8/test/mjsunit/tools/codemap.mjs
+++ b/deps/v8/test/mjsunit/tools/codemap.mjs
@@ -25,12 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import { CodeMap } from "../../../tools/codemap.mjs";
-
-function newCodeEntry(size, name) {
- return new CodeMap.CodeEntry(size, name);
-};
-
+import { CodeMap, CodeEntry } from "../../../tools/codemap.mjs";
function assertEntry(codeMap, expected_name, addr) {
var entry = codeMap.findEntry(addr);
@@ -46,9 +41,9 @@ function assertNoEntry(codeMap, addr) {
(function testLibrariesAndStaticCode() {
var codeMap = new CodeMap();
- codeMap.addLibrary(0x1500, newCodeEntry(0x3000, 'lib1'));
- codeMap.addLibrary(0x15500, newCodeEntry(0x5000, 'lib2'));
- codeMap.addLibrary(0x155500, newCodeEntry(0x10000, 'lib3'));
+ codeMap.addLibrary(0x1500, new CodeEntry(0x3000, 'lib1'));
+ codeMap.addLibrary(0x15500, new CodeEntry(0x5000, 'lib2'));
+ codeMap.addLibrary(0x155500, new CodeEntry(0x10000, 'lib3'));
assertNoEntry(codeMap, 0);
assertNoEntry(codeMap, 0x1500 - 1);
assertEntry(codeMap, 'lib1', 0x1500);
@@ -70,9 +65,9 @@ function assertNoEntry(codeMap, addr) {
assertNoEntry(codeMap, 0x155500 + 0x10000);
assertNoEntry(codeMap, 0xFFFFFFFF);
- codeMap.addStaticCode(0x1510, newCodeEntry(0x30, 'lib1-f1'));
- codeMap.addStaticCode(0x1600, newCodeEntry(0x50, 'lib1-f2'));
- codeMap.addStaticCode(0x15520, newCodeEntry(0x100, 'lib2-f1'));
+ codeMap.addStaticCode(0x1510, new CodeEntry(0x30, 'lib1-f1'));
+ codeMap.addStaticCode(0x1600, new CodeEntry(0x50, 'lib1-f2'));
+ codeMap.addStaticCode(0x15520, new CodeEntry(0x100, 'lib2-f1'));
assertEntry(codeMap, 'lib1', 0x1500);
assertEntry(codeMap, 'lib1', 0x1510 - 1);
assertEntry(codeMap, 'lib1-f1', 0x1510);
@@ -96,10 +91,10 @@ function assertNoEntry(codeMap, addr) {
(function testDynamicCode() {
var codeMap = new CodeMap();
- codeMap.addCode(0x1500, newCodeEntry(0x200, 'code1'));
- codeMap.addCode(0x1700, newCodeEntry(0x100, 'code2'));
- codeMap.addCode(0x1900, newCodeEntry(0x50, 'code3'));
- codeMap.addCode(0x1950, newCodeEntry(0x10, 'code4'));
+ codeMap.addCode(0x1500, new CodeEntry(0x200, 'code1'));
+ codeMap.addCode(0x1700, new CodeEntry(0x100, 'code2'));
+ codeMap.addCode(0x1900, new CodeEntry(0x50, 'code3'));
+ codeMap.addCode(0x1950, new CodeEntry(0x10, 'code4'));
assertNoEntry(codeMap, 0);
assertNoEntry(codeMap, 0x1500 - 1);
assertEntry(codeMap, 'code1', 0x1500);
@@ -122,8 +117,8 @@ function assertNoEntry(codeMap, addr) {
(function testCodeMovesAndDeletions() {
var codeMap = new CodeMap();
- codeMap.addCode(0x1500, newCodeEntry(0x200, 'code1'));
- codeMap.addCode(0x1700, newCodeEntry(0x100, 'code2'));
+ codeMap.addCode(0x1500, new CodeEntry(0x200, 'code1'));
+ codeMap.addCode(0x1700, new CodeEntry(0x100, 'code2'));
assertEntry(codeMap, 'code1', 0x1500);
assertEntry(codeMap, 'code2', 0x1700);
codeMap.moveCode(0x1500, 0x1800);
@@ -139,8 +134,8 @@ function assertNoEntry(codeMap, addr) {
(function testDynamicNamesDuplicates() {
var codeMap = new CodeMap();
// Code entries with same names but different addresses.
- codeMap.addCode(0x1500, newCodeEntry(0x200, 'code'));
- codeMap.addCode(0x1700, newCodeEntry(0x100, 'code'));
+ codeMap.addCode(0x1500, new CodeEntry(0x200, 'code'));
+ codeMap.addCode(0x1700, new CodeEntry(0x100, 'code'));
assertEntry(codeMap, 'code', 0x1500);
assertEntry(codeMap, 'code {1}', 0x1700);
// Test name stability.
@@ -151,9 +146,9 @@ function assertNoEntry(codeMap, addr) {
(function testStaticEntriesExport() {
var codeMap = new CodeMap();
- codeMap.addStaticCode(0x1500, newCodeEntry(0x3000, 'lib1'));
- codeMap.addStaticCode(0x15500, newCodeEntry(0x5000, 'lib2'));
- codeMap.addStaticCode(0x155500, newCodeEntry(0x10000, 'lib3'));
+ codeMap.addStaticCode(0x1500, new CodeEntry(0x3000, 'lib1'));
+ codeMap.addStaticCode(0x15500, new CodeEntry(0x5000, 'lib2'));
+ codeMap.addStaticCode(0x155500, new CodeEntry(0x10000, 'lib3'));
var allStatics = codeMap.getAllStaticEntries();
allStatics = allStatics.map(String);
allStatics.sort();
@@ -163,9 +158,9 @@ function assertNoEntry(codeMap, addr) {
(function testDynamicEntriesExport() {
var codeMap = new CodeMap();
- codeMap.addCode(0x1500, newCodeEntry(0x200, 'code1'));
- codeMap.addCode(0x1700, newCodeEntry(0x100, 'code2'));
- codeMap.addCode(0x1900, newCodeEntry(0x50, 'code3'));
+ codeMap.addCode(0x1500, new CodeEntry(0x200, 'code1'));
+ codeMap.addCode(0x1700, new CodeEntry(0x100, 'code2'));
+ codeMap.addCode(0x1900, new CodeEntry(0x50, 'code3'));
var allDynamics = codeMap.getAllDynamicEntries();
allDynamics = allDynamics.map(String);
allDynamics.sort();
diff --git a/deps/v8/test/mjsunit/tools/log_two_byte.js b/deps/v8/test/mjsunit/tools/log_two_byte.js
index 5e6f89fe16..e181b88cd2 100644
--- a/deps/v8/test/mjsunit/tools/log_two_byte.js
+++ b/deps/v8/test/mjsunit/tools/log_two_byte.js
@@ -20,9 +20,6 @@ function testFunctionWithFunnyName(o) {
for (let i = 0; i < 1000; i++) {
result += o[twoByteName](object);
}
- console.log(result);
- console.log(twoByteName);
-
})();
var __v_3 = {};
diff --git a/deps/v8/test/mjsunit/tools/processor.mjs b/deps/v8/test/mjsunit/tools/processor.mjs
new file mode 100644
index 0000000000..04e7684e3d
--- /dev/null
+++ b/deps/v8/test/mjsunit/tools/processor.mjs
@@ -0,0 +1,56 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --logfile='+' --log --trace-maps --trace-ic --log-code
+// Flags: --log-function-events --no-stress-opt
+
+import { Processor } from "../../../tools/system-analyzer/processor.mjs";
+
+// log code start
+function doWork() {
+ let array = [];
+ for (let i = 0; i < 500; i++) {
+ doWorkStep(i, array);
+ }
+ let sum = 0;
+ for (let i = 0; i < 500; i++) {
+ sum += array[i]["property" + i];
+ }
+ return sum;
+}
+
+function doWorkStep(i, array) {
+ const obj = {
+ ["property" + i]: i,
+ };
+ array.push(obj);
+ obj.custom1 = 1;
+ obj.custom2 = 2;
+}
+
+const result = doWork();
+// log code end
+
+const logString = d8.log.getAndStop();
+const processor = new Processor();
+processor.processString(logString);
+
+const maps = processor.mapTimeline;
+const ics = processor.icTimeline;
+const scripts = processor.scripts;
+
+(function testResults() {
+ assertEquals(result, 124750);
+ assertTrue(maps.length > 0);
+ assertTrue(ics.length > 0);
+ assertTrue(scripts.length > 0);
+})();
+
+(function testIcKeys() {
+ const keys = new Set();
+ ics.forEach(ic => keys.add(ic.key));
+ assertTrue(keys.has("custom1"));
+ assertTrue(keys.has("custom2"));
+ assertTrue(keys.has("push"));
+})();
diff --git a/deps/v8/test/mjsunit/tools/timeline.mjs b/deps/v8/test/mjsunit/tools/timeline.mjs
index 07fd605992..5d3eca92d6 100644
--- a/deps/v8/test/mjsunit/tools/timeline.mjs
+++ b/deps/v8/test/mjsunit/tools/timeline.mjs
@@ -3,7 +3,7 @@
// found in the LICENSE file.
import { Timeline } from "../../../tools/system-analyzer/timeline.mjs";
-import { Event } from "../../../tools/system-analyzer/log/log.mjs";
+import { LogEntry } from "../../../tools/system-analyzer/log/log.mjs";
(function testTimeline() {
@@ -11,20 +11,20 @@ import { Event } from "../../../tools/system-analyzer/log/log.mjs";
let id1 = "0x3e7e082470cd";
let id2 = "0x3e7e082470ad";
let time = 12;
- let event1 = new Event(id1, time);
- let event2 = new Event(id1, time + 1);
- let event3 = new Event(id1, time + 2);
- let event4 = new Event(id1, time + 3);
- let event5 = new Event(id2, time + 3);
- timeline.push(event1);
- timeline.push(event2);
- timeline.push(event3);
- timeline.push(event4);
- timeline.push(event5);
+ let entry1 = new LogEntry(id1, time);
+ let entry2 = new LogEntry(id1, time + 1);
+ let entry3 = new LogEntry(id1, time + 2);
+ let entry4 = new LogEntry(id1, time + 3);
+ let entry5 = new LogEntry(id2, time + 3);
+ timeline.push(entry1);
+ timeline.push(entry2);
+ timeline.push(entry3);
+ timeline.push(entry4);
+ timeline.push(entry5);
let startTime = time;
let endTime = time + 2;
timeline.selectTimeRange(startTime, endTime);
- assertArrayEquals(timeline.selection, [event1, event2, event3]);
+ assertArrayEquals(timeline.selection, [entry1, entry2, entry3]);
let entryIdx = timeline.find(time + 1);
let entry = timeline.at(entryIdx);
assertEquals(entry.time, time + 1);
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-stack.js b/deps/v8/test/mjsunit/wasm/asm-wasm-stack.js
index 9e09419d06..b416aaa141 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-stack.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-stack.js
@@ -154,18 +154,3 @@ function generateOverflowWasmFromAsmJs() {
['f', 135, 12] // --
]);
})();
-
-(function EnclosingFunctionOffsets() {
- const fun = generateWasmFromAsmJs(this, {throwFunc: throwException});
- assertTrue(%IsWasmCode(fun));
- let e = null;
- try {
- fun(0);
- } catch (ex) {
- e = ex;
- }
- assertEquals(68, e.stack[2].getLineNumber());
- assertEquals(15, e.stack[2].getColumnNumber());
- assertEquals(65, e.stack[2].getEnclosingLineNumber());
- assertEquals(3, e.stack[2].getEnclosingColumnNumber());
-})();
diff --git a/deps/v8/test/mjsunit/wasm/atomics-non-shared.js b/deps/v8/test/mjsunit/wasm/atomics-non-shared.js
index 510a2ce165..06eb638dd2 100644
--- a/deps/v8/test/mjsunit/wasm/atomics-non-shared.js
+++ b/deps/v8/test/mjsunit/wasm/atomics-non-shared.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-threads --wasm-atomics-on-non-shared-memory
+// Flags: --experimental-wasm-threads
load("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/atomics.js b/deps/v8/test/mjsunit/wasm/atomics.js
index 9826a155a8..6d37ba5548 100644
--- a/deps/v8/test/mjsunit/wasm/atomics.js
+++ b/deps/v8/test/mjsunit/wasm/atomics.js
@@ -47,7 +47,7 @@ function GetAtomicCmpExchangeFunction(wasmExpression, alignment, offset) {
kExprLocalGet, 1,
kExprLocalGet, 2,
kAtomicPrefix,
- wasmExpression, alignment, offset])
+ wasmExpression, alignment, ...wasmSignedLeb(offset, 5)])
.exportAs("main");
// Instantiate module, get function exports
@@ -251,15 +251,15 @@ function Test8Op(operation, func) {
Test8Op(Exchange, wasmExchange);
})();
-function TestCmpExchange(func, buffer, params, size) {
- for (let i = 0; i < buffer.length; i = inc(i)) {
+function TestCmpExchange(func, buffer, params, size, offset = 0) {
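+  // offset is in bytes; convert it to a typed-array index via offset / size.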
+ for (let i = 0; i + (offset / size) < buffer.length; i = inc(i)) {
for (let j = 0; j < params.length; j++) {
for (let k = 0; k < params.length; k++) {
- buffer[i] = params[j];
+ buffer[i + (offset / size)] = params[j];
let loaded = func(i * size, params[k], params[j]) >>> 0;
let expected = (params[k] == loaded) ? params[j] : loaded;
assertEquals(loaded, params[j]);
- assertEquals(expected, buffer[i]);
+ assertEquals(expected, buffer[i + (offset / size)]);
}
}
}
@@ -268,11 +268,14 @@ function TestCmpExchange(func, buffer, params, size) {
(function TestAtomicCompareExchange() {
print(arguments.callee.name);
+ // Offset is big enough to not fit in a 12-bit immediate on arm64, but small
+ // enough to fit in the maxSize wasm pages.
+ const offset = 0x1234;
let wasmCmpExchange =
- GetAtomicCmpExchangeFunction(kExprI32AtomicCompareExchange, 2, 0);
+ GetAtomicCmpExchangeFunction(kExprI32AtomicCompareExchange, 2, offset);
let i32 = new Uint32Array(memory.buffer);
let params = [0x00000001, 0x00000555, 0x00099999, 0xffffffff];
- TestCmpExchange(wasmCmpExchange, i32, params, kMemtypeSize32);
+ TestCmpExchange(wasmCmpExchange, i32, params, kMemtypeSize32, offset);
})();
(function TestAtomicCompareExchange16U() {
diff --git a/deps/v8/test/mjsunit/wasm/call-ref.js b/deps/v8/test/mjsunit/wasm/call-ref.js
index 1112ed8e3d..7ad056c1a3 100644
--- a/deps/v8/test/mjsunit/wasm/call-ref.js
+++ b/deps/v8/test/mjsunit/wasm/call-ref.js
@@ -22,8 +22,8 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
var sig_index = builder.addType(kSig_i_ii);
- var imported_type_reflection_function_index =
- builder.addImport("imports", "mul", sig_index);
+ var imported_js_api_function_index =
+ builder.addImport("imports", "js_api_mul", sig_index);
var imported_js_function_index =
builder.addImport("imports", "js_add", sig_index);
@@ -31,10 +31,6 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
var imported_wasm_function_index =
builder.addImport("imports", "wasm_add", sig_index);
- builder.addExport("unused", imported_wasm_function_index);
- builder.addExport("reexported_js_function", imported_js_function_index);
- builder.addExport("reexported_webassembly_function",
- imported_type_reflection_function_index);
var locally_defined_function =
builder.addFunction("sub", sig_index)
@@ -57,34 +53,35 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
kExprRefFunc, imported_js_function_index, kExprCallRef])
.exportFunc();
- builder.addFunction("test_wasm_import", kSig_i_v)
- .addBody([kExprI32Const, 15, kExprI32Const, 42,
- kExprRefFunc, imported_wasm_function_index, kExprCallRef])
- .exportFunc();
+ builder.addFunction("test_wasm_import", kSig_i_v)
+ .addBody([kExprI32Const, 15, kExprI32Const, 42,
+ kExprRefFunc, imported_wasm_function_index, kExprCallRef])
+ .exportFunc();
- /* Future use
- builder.addFunction("test_webassembly_import", kSig_i_v)
+ builder.addFunction("test_js_api_import", kSig_i_v)
.addBody([kExprI32Const, 3, kExprI32Const, 7,
- kExprRefFunc, imported_type_reflection_function_index,
+ kExprRefFunc, imported_js_api_function_index,
kExprCallRef])
.exportFunc();
- */
+
+ builder.addExport("reexported_js_function", imported_js_function_index);
+
+ // Just to make these functions eligible for call_ref.
+ builder.addDeclarativeElementSegment([imported_wasm_function_index,
+ imported_js_api_function_index]);
return builder.instantiate({imports: {
js_add: function(a, b) { return a + b; },
wasm_add: exporting_instance.exports.addition,
- mul: new WebAssembly.Function({parameters:['i32', 'i32'],
- results: ['i32']},
- function(a, b) { return a * b; })
+ js_api_mul: new WebAssembly.Function(
+ {parameters:['i32', 'i32'], results: ['i32']},
+ function(a, b) { return a * b; })
}});
})();
- // Check the modules exist.
- assertFalse(instance === undefined);
- assertFalse(instance === null);
- assertFalse(instance === 0);
- assertEquals("object", typeof instance.exports);
- assertEquals("function", typeof instance.exports.main);
+ // Check that the modules exist.
+ assertTrue(!!exporting_instance);
+ assertTrue(!!instance);
print("--locally defined func--");
assertEquals(13, instance.exports.test_local());
@@ -103,9 +100,18 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(19, instance.exports.main(
exporting_instance.exports.addition, 12, 7));
- // TODO(7748): Make these work once we know how we interact
- // with the 'type reflection' proposal.
- //print("--imported WebAssembly.Function--")
- //assertEquals(21, instance.exports.test_webassembly_import());
- //print(" --not imported WebAssembly.Function--")
+ print("--imported WebAssembly.Function--")
+ assertEquals(21, instance.exports.test_js_api_import());
+ print("--not imported WebAssembly.Function--")
+ assertEquals(-5, instance.exports.main(
+ new WebAssembly.Function(
+ {parameters:['i32', 'i32'], results: ['i32']},
+ function(a, b) { return a - b; }),
+ 10, 15));
+ print("--not imported WebAssembly.Function, arity mismatch--")
+ assertEquals(100, instance.exports.main(
+ new WebAssembly.Function(
+ {parameters:['i32', 'i32'], results: ['i32']},
+ function(a) { return a * a; }),
+ 10, 15));
})();
diff --git a/deps/v8/test/mjsunit/wasm/externref-globals-liftoff.js b/deps/v8/test/mjsunit/wasm/externref-globals-liftoff.js
index 53fb6ea0ab..35edc19077 100644
--- a/deps/v8/test/mjsunit/wasm/externref-globals-liftoff.js
+++ b/deps/v8/test/mjsunit/wasm/externref-globals-liftoff.js
@@ -3,6 +3,6 @@
// found in the LICENSE file.
// Flags: --experimental-wasm-reftypes --expose-gc --liftoff
-// Flags: --no-wasm-tier-up --liftoff-extern-ref
+// Flags: --no-wasm-tier-up --experimental-liftoff-extern-ref
load("test/mjsunit/wasm/externref-globals.js");
diff --git a/deps/v8/test/mjsunit/wasm/externref-liftoff.js b/deps/v8/test/mjsunit/wasm/externref-liftoff.js
index bf10030837..dbfff539a0 100644
--- a/deps/v8/test/mjsunit/wasm/externref-liftoff.js
+++ b/deps/v8/test/mjsunit/wasm/externref-liftoff.js
@@ -3,6 +3,6 @@
// found in the LICENSE file.
// Flags: --expose-wasm --experimental-wasm-reftypes --expose-gc --liftoff
-// Flags: --no-wasm-tier-up --liftoff-extern-ref
+// Flags: --no-wasm-tier-up --experimental-liftoff-extern-ref
load("test/mjsunit/wasm/externref.js");
diff --git a/deps/v8/test/mjsunit/wasm/generic-wrapper.js b/deps/v8/test/mjsunit/wasm/generic-wrapper.js
index 1f0845a968..16e5668eb3 100644
--- a/deps/v8/test/mjsunit/wasm/generic-wrapper.js
+++ b/deps/v8/test/mjsunit/wasm/generic-wrapper.js
@@ -730,6 +730,23 @@ let kSig_f_iiliiiffddlifffdi = makeSig([kWasmI32, kWasmI32, kWasmI64, kWasmI32,
assertEquals(360, x);
})();
+(function testCallFromOptimizedFunction() {
+ print(arguments.callee.name);
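+  // Calling the Wasm export from optimized JS code exercises the generic
+  // JS-to-Wasm wrapper.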
+ const builder = new WasmModuleBuilder();
+ builder.addFunction('wasm_fn', kSig_v_v).addBody([
+ kExprNop,
+ ]).exportFunc();
+
+ instance = builder.instantiate();
+ function js_caller() {
+ return instance.exports.wasm_fn();
+ }
+ %PrepareFunctionForOptimization(js_caller);
+ js_caller();
+ %OptimizeFunctionOnNextCall(js_caller);
+ js_caller();
+})();
+
(function Regression1130385() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/wasm/imported-function-types.js b/deps/v8/test/mjsunit/wasm/imported-function-types.js
new file mode 100644
index 0000000000..5a06da5964
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/imported-function-types.js
@@ -0,0 +1,44 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-typed-funcref
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+var exporting_module = (function() {
+ var builder = new WasmModuleBuilder();
+
+ var binaryType = builder.addType(kSig_i_ii);
+ var unaryType = builder.addType(kSig_i_i);
+
+ builder.addFunction("func1", makeSig([wasmRefType(binaryType)], [kWasmI32])).
+ addBody([kExprI32Const, 42, kExprI32Const, 12, kExprLocalGet, 0,
+ kExprCallRef]).
+ exportFunc();
+
+ builder.addFunction("func2", makeSig([wasmRefType(unaryType)], [kWasmI32])).
+ addBody([kExprI32Const, 42, kExprLocalGet, 0, kExprCallRef]).
+ exportFunc();
+
+ return builder.instantiate({});
+})();
+
+var importing_module = function(imported_function) {
+ var builder = new WasmModuleBuilder();
+
+ var unaryType = builder.addType(kSig_i_i);
+
+ builder.addImport("other", "func",
+ makeSig([wasmRefType(unaryType)], [kWasmI32]));
+
+ return builder.instantiate({other: {func: imported_function}});
+};
+
+// Same form/different index should be fine.
+importing_module(exporting_module.exports.func2);
+// Same index/different form should throw.
+assertThrows(
+ () => importing_module(exporting_module.exports.func1),
+ WebAssembly.LinkError,
+ /imported function does not match the expected type/);
diff --git a/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js b/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js
index 8102ca188e..821bc47c46 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js
@@ -79,11 +79,11 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(v1, instance.exports.call1(0));
assertEquals(v2, instance.exports.call1(1));
assertEquals(v3, instance.exports.call1(2));
- assertTraps(kTrapFuncInvalid, () => instance.exports.call1(3));
+ assertTraps(kTrapTableOutOfBounds, () => instance.exports.call1(3));
assertEquals(v1, instance.exports.return_call1(0));
assertEquals(v2, instance.exports.return_call1(1));
assertEquals(v3, instance.exports.return_call1(2));
- assertTraps(kTrapFuncInvalid, () => instance.exports.return_call1(3));
+ assertTraps(kTrapTableOutOfBounds, () => instance.exports.return_call1(3));
// Try to call through the uninitialized table entry.
assertTraps(kTrapFuncSigMismatch, () => instance.exports.call2(0));
diff --git a/deps/v8/test/mjsunit/wasm/indirect-calls.js b/deps/v8/test/mjsunit/wasm/indirect-calls.js
index 603d7561ec..f866b41d10 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-calls.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-calls.js
@@ -55,7 +55,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
print(" --z1--");
assertTraps(kTrapFuncSigMismatch, () => module.exports.main(2, 12, 33));
print(" --w1--");
- assertTraps(kTrapFuncInvalid, () => module.exports.main(3, 12, 33));
+ assertTraps(kTrapTableOutOfBounds, () => module.exports.main(3, 12, 33));
})();
(function Test2() {
@@ -99,7 +99,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
print(" --q2--");
assertTraps(kTrapFuncSigMismatch, () => module.exports.main(3, 12, 33));
print(" --t2--");
- assertTraps(kTrapFuncInvalid, () => module.exports.main(4, 12, 33));
+ assertTraps(kTrapTableOutOfBounds, () => module.exports.main(4, 12, 33));
})();
@@ -151,7 +151,7 @@ function AddFunctions(builder) {
assertEquals(35, module.exports.main(2, 1));
assertEquals(32, module.exports.main(1, 2));
assertEquals(31, module.exports.main(2, 2));
- assertTraps(kTrapFuncInvalid, () => module.exports.main(12, 3));
+ assertTraps(kTrapTableOutOfBounds, () => module.exports.main(12, 3));
})();
(function ConstBaseTest() {
@@ -187,7 +187,7 @@ function AddFunctions(builder) {
assertEquals(31, main(2, i + 1));
assertEquals(33, main(1, i + 2));
assertEquals(66, main(2, i + 2));
- assertTraps(kTrapFuncInvalid, () => main(12, 10));
+ assertTraps(kTrapTableOutOfBounds, () => main(12, 10));
}
})();
@@ -224,6 +224,6 @@ function AddFunctions(builder) {
assertEquals(35, main(2, i + 1));
assertEquals(32, main(1, i + 2));
assertEquals(31, main(2, i + 2));
- assertTraps(kTrapFuncInvalid, () => main(12, 10));
+ assertTraps(kTrapTableOutOfBounds, () => main(12, 10));
}
})();
diff --git a/deps/v8/test/mjsunit/wasm/indirect-tables.js b/deps/v8/test/mjsunit/wasm/indirect-tables.js
index e48157001b..8296e97b8e 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-tables.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-tables.js
@@ -333,8 +333,8 @@ function js_div(a, b) { return (a / b) | 0; }
assertTraps(kTrapFuncSigMismatch, () => i1.exports.main(2));
assertTraps(kTrapFuncSigMismatch, () => i2.exports.main(2));
- assertTraps(kTrapFuncInvalid, () => i1.exports.main(3));
- assertTraps(kTrapFuncInvalid, () => i2.exports.main(3));
+ assertTraps(kTrapTableOutOfBounds, () => i1.exports.main(3));
+ assertTraps(kTrapTableOutOfBounds, () => i2.exports.main(3));
})();
(function MismatchedTableSize() {
diff --git a/deps/v8/test/mjsunit/wasm/many-memories-no-trap-handler.js b/deps/v8/test/mjsunit/wasm/many-memories-no-trap-handler.js
new file mode 100644
index 0000000000..02a3869a9a
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/many-memories-no-trap-handler.js
@@ -0,0 +1,22 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-wasm-trap-handler
+
+// No reason to stress-opt this; save some time.
+// Flags: --no-stress-opt
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+// Without trap handlers, we are able to allocate basically arbitrarily many
+// memories, because we don't need to reserve a huge amount of virtual address
+// space.
+
+const num_memories = 10000;
+
+const memories = [];
+while (memories.length < num_memories) {
+ print('Allocating memory #' + memories.length);
+ memories.push(new WebAssembly.Memory({initial: 1, maximum: 1}));
+}
diff --git a/deps/v8/test/mjsunit/wasm/many-memories.js b/deps/v8/test/mjsunit/wasm/many-memories.js
new file mode 100644
index 0000000000..8ec63806c1
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/many-memories.js
@@ -0,0 +1,24 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// No reason to stress-opt this; save some time.
+// Flags: --no-stress-opt
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+// Test that we can generate at least 50 memories of small size.
+// More memories are currently not possible if the trap handler is enabled,
+// because we then reserve 10GB of virtual address space per memory, and we
+// have a limit of 512GB on MIPS64 and 1TB+4GB on other 64-bit systems.
+
+// The number of memories should be increased in this test once we raise that
+// limit or fix the allocation strategy to allow for more memories generally.
+
+const num_memories = 50;
+
+const memories = [];
+while (memories.length < num_memories) {
+ print('Allocating memory #' + memories.length);
+ memories.push(new WebAssembly.Memory({initial: 1, maximum: 1}));
+}
diff --git a/deps/v8/test/mjsunit/wasm/return-calls.js b/deps/v8/test/mjsunit/wasm/return-calls.js
index f7a90d2678..0cf2639d5b 100644
--- a/deps/v8/test/mjsunit/wasm/return-calls.js
+++ b/deps/v8/test/mjsunit/wasm/return-calls.js
@@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --experimental-wasm-return-call --stack-size=64
+// Flags: --expose-wasm --experimental-wasm-return-call
+// Reduce the stack size to test that we are indeed doing return calls (instead
+// of standard calls, which consume stack space).
+// Flags: --stack-size=128
load("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js b/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
index 797edad88a..269ea67400 100644
--- a/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
+++ b/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
@@ -93,7 +93,7 @@ function testGrowInternalAnyFuncTable(table_index) {
assertTraps(kTrapFuncSigMismatch, () => instance.exports.call(size - 2));
function growAndCheck(element, grow_by) {
assertEquals(size, instance.exports.size());
- assertTraps(kTrapFuncInvalid, () => instance.exports.call(size));
+ assertTraps(kTrapTableOutOfBounds, () => instance.exports.call(size));
assertEquals(size, instance.exports.grow(dummy_func(element), grow_by));
for (let i = 0; i < grow_by; ++i) {
assertEquals(element, instance.exports.call(size + i));
diff --git a/deps/v8/test/mjsunit/wasm/table-grow.js b/deps/v8/test/mjsunit/wasm/table-grow.js
index 85b43db0d7..2ed529463c 100644
--- a/deps/v8/test/mjsunit/wasm/table-grow.js
+++ b/deps/v8/test/mjsunit/wasm/table-grow.js
@@ -289,7 +289,7 @@ let id = (() => { // identity exported function
assertInvalidFunction = function(s) {
assertThrows(
() => instances[i].exports.main(s), WebAssembly.RuntimeError,
- kTrapMsgs[kTrapFuncInvalid]);
+ kTrapMsgs[kTrapTableOutOfBounds]);
}
assertInvalidFunction(size);
assertInvalidFunction(size + 1);
diff --git a/deps/v8/test/mjsunit/wasm/trap-location.js b/deps/v8/test/mjsunit/wasm/trap-location.js
index 91cb0d0721..db5a9390fd 100644
--- a/deps/v8/test/mjsunit/wasm/trap-location.js
+++ b/deps/v8/test/mjsunit/wasm/trap-location.js
@@ -32,7 +32,7 @@ function testTrapLocations(instance, expected_stack_length) {
testWasmTrap(0, kTrapDivByZero, 14);
testWasmTrap(1, kTrapMemOutOfBounds, 15);
testWasmTrap(2, kTrapUnreachable, 28);
- testWasmTrap(3, kTrapFuncInvalid, 32);
+ testWasmTrap(3, kTrapTableOutOfBounds, 32);
}
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index b914ee91e3..79e70ff792 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -75,8 +75,10 @@ let kLocalNamesCode = 2;
let kWasmFunctionTypeForm = 0x60;
let kWasmAnyFunctionTypeForm = 0x70;
-let kHasMaximumFlag = 1;
-let kSharedHasMaximumFlag = 3;
+let kLimitsNoMaximum = 0;
+let kLimitsHasMaximum = 1;
+let kLimitsSharedNoMaximum = 2;
+let kLimitsSharedHasMaximum = 3;
// Segment flags
let kActiveNoIndex = 0;
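
The kLimits* constants above replace the old ad-hoc flag values with all four
spellings of the limits byte from the binary format. A minimal sketch of how a
builder might pick the byte (a hypothetical helper that mirrors the
memory-section emission further down in this patch):

  function limitsFlag(hasMaximum, isShared) {
    // Bit 0 means "has maximum"; shared memories use the shared encodings.
    if (isShared) {
      return hasMaximum ? kLimitsSharedHasMaximum : kLimitsSharedNoMaximum;
    }
    return hasMaximum ? kLimitsHasMaximum : kLimitsNoMaximum;
  }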
@@ -197,204 +199,226 @@ function makeSig_r_xx(r, x) {
}
// Opcodes
-let kExprUnreachable = 0x00;
-let kExprNop = 0x01;
-let kExprBlock = 0x02;
-let kExprLoop = 0x03;
-let kExprIf = 0x04;
-let kExprElse = 0x05;
-let kExprTry = 0x06;
-let kExprCatch = 0x07;
-let kExprThrow = 0x08;
-let kExprRethrow = 0x09;
-let kExprBrOnExn = 0x0a;
-let kExprEnd = 0x0b;
-let kExprBr = 0x0c;
-let kExprBrIf = 0x0d;
-let kExprBrTable = 0x0e;
-let kExprReturn = 0x0f;
-let kExprCallFunction = 0x10;
-let kExprCallIndirect = 0x11;
-let kExprReturnCall = 0x12;
-let kExprReturnCallIndirect = 0x13;
-let kExprCallRef = 0x14;
-let kExprReturnCallRef = 0x15;
-let kExprDrop = 0x1a;
-let kExprSelect = 0x1b;
-let kExprSelectWithType = 0x1c;
-let kExprLocalGet = 0x20;
-let kExprLocalSet = 0x21;
-let kExprLocalTee = 0x22;
-let kExprGlobalGet = 0x23;
-let kExprGlobalSet = 0x24;
-let kExprTableGet = 0x25;
-let kExprTableSet = 0x26;
-let kExprI32LoadMem = 0x28;
-let kExprI64LoadMem = 0x29;
-let kExprF32LoadMem = 0x2a;
-let kExprF64LoadMem = 0x2b;
-let kExprI32LoadMem8S = 0x2c;
-let kExprI32LoadMem8U = 0x2d;
-let kExprI32LoadMem16S = 0x2e;
-let kExprI32LoadMem16U = 0x2f;
-let kExprI64LoadMem8S = 0x30;
-let kExprI64LoadMem8U = 0x31;
-let kExprI64LoadMem16S = 0x32;
-let kExprI64LoadMem16U = 0x33;
-let kExprI64LoadMem32S = 0x34;
-let kExprI64LoadMem32U = 0x35;
-let kExprI32StoreMem = 0x36;
-let kExprI64StoreMem = 0x37;
-let kExprF32StoreMem = 0x38;
-let kExprF64StoreMem = 0x39;
-let kExprI32StoreMem8 = 0x3a;
-let kExprI32StoreMem16 = 0x3b;
-let kExprI64StoreMem8 = 0x3c;
-let kExprI64StoreMem16 = 0x3d;
-let kExprI64StoreMem32 = 0x3e;
-let kExprMemorySize = 0x3f;
-let kExprMemoryGrow = 0x40;
-let kExprI32Const = 0x41;
-let kExprI64Const = 0x42;
-let kExprF32Const = 0x43;
-let kExprF64Const = 0x44;
-let kExprI32Eqz = 0x45;
-let kExprI32Eq = 0x46;
-let kExprI32Ne = 0x47;
-let kExprI32LtS = 0x48;
-let kExprI32LtU = 0x49;
-let kExprI32GtS = 0x4a;
-let kExprI32GtU = 0x4b;
-let kExprI32LeS = 0x4c;
-let kExprI32LeU = 0x4d;
-let kExprI32GeS = 0x4e;
-let kExprI32GeU = 0x4f;
-let kExprI64Eqz = 0x50;
-let kExprI64Eq = 0x51;
-let kExprI64Ne = 0x52;
-let kExprI64LtS = 0x53;
-let kExprI64LtU = 0x54;
-let kExprI64GtS = 0x55;
-let kExprI64GtU = 0x56;
-let kExprI64LeS = 0x57;
-let kExprI64LeU = 0x58;
-let kExprI64GeS = 0x59;
-let kExprI64GeU = 0x5a;
-let kExprF32Eq = 0x5b;
-let kExprF32Ne = 0x5c;
-let kExprF32Lt = 0x5d;
-let kExprF32Gt = 0x5e;
-let kExprF32Le = 0x5f;
-let kExprF32Ge = 0x60;
-let kExprF64Eq = 0x61;
-let kExprF64Ne = 0x62;
-let kExprF64Lt = 0x63;
-let kExprF64Gt = 0x64;
-let kExprF64Le = 0x65;
-let kExprF64Ge = 0x66;
-let kExprI32Clz = 0x67;
-let kExprI32Ctz = 0x68;
-let kExprI32Popcnt = 0x69;
-let kExprI32Add = 0x6a;
-let kExprI32Sub = 0x6b;
-let kExprI32Mul = 0x6c;
-let kExprI32DivS = 0x6d;
-let kExprI32DivU = 0x6e;
-let kExprI32RemS = 0x6f;
-let kExprI32RemU = 0x70;
-let kExprI32And = 0x71;
-let kExprI32Ior = 0x72;
-let kExprI32Xor = 0x73;
-let kExprI32Shl = 0x74;
-let kExprI32ShrS = 0x75;
-let kExprI32ShrU = 0x76;
-let kExprI32Rol = 0x77;
-let kExprI32Ror = 0x78;
-let kExprI64Clz = 0x79;
-let kExprI64Ctz = 0x7a;
-let kExprI64Popcnt = 0x7b;
-let kExprI64Add = 0x7c;
-let kExprI64Sub = 0x7d;
-let kExprI64Mul = 0x7e;
-let kExprI64DivS = 0x7f;
-let kExprI64DivU = 0x80;
-let kExprI64RemS = 0x81;
-let kExprI64RemU = 0x82;
-let kExprI64And = 0x83;
-let kExprI64Ior = 0x84;
-let kExprI64Xor = 0x85;
-let kExprI64Shl = 0x86;
-let kExprI64ShrS = 0x87;
-let kExprI64ShrU = 0x88;
-let kExprI64Rol = 0x89;
-let kExprI64Ror = 0x8a;
-let kExprF32Abs = 0x8b;
-let kExprF32Neg = 0x8c;
-let kExprF32Ceil = 0x8d;
-let kExprF32Floor = 0x8e;
-let kExprF32Trunc = 0x8f;
-let kExprF32NearestInt = 0x90;
-let kExprF32Sqrt = 0x91;
-let kExprF32Add = 0x92;
-let kExprF32Sub = 0x93;
-let kExprF32Mul = 0x94;
-let kExprF32Div = 0x95;
-let kExprF32Min = 0x96;
-let kExprF32Max = 0x97;
-let kExprF32CopySign = 0x98;
-let kExprF64Abs = 0x99;
-let kExprF64Neg = 0x9a;
-let kExprF64Ceil = 0x9b;
-let kExprF64Floor = 0x9c;
-let kExprF64Trunc = 0x9d;
-let kExprF64NearestInt = 0x9e;
-let kExprF64Sqrt = 0x9f;
-let kExprF64Add = 0xa0;
-let kExprF64Sub = 0xa1;
-let kExprF64Mul = 0xa2;
-let kExprF64Div = 0xa3;
-let kExprF64Min = 0xa4;
-let kExprF64Max = 0xa5;
-let kExprF64CopySign = 0xa6;
-let kExprI32ConvertI64 = 0xa7;
-let kExprI32SConvertF32 = 0xa8;
-let kExprI32UConvertF32 = 0xa9;
-let kExprI32SConvertF64 = 0xaa;
-let kExprI32UConvertF64 = 0xab;
-let kExprI64SConvertI32 = 0xac;
-let kExprI64UConvertI32 = 0xad;
-let kExprI64SConvertF32 = 0xae;
-let kExprI64UConvertF32 = 0xaf;
-let kExprI64SConvertF64 = 0xb0;
-let kExprI64UConvertF64 = 0xb1;
-let kExprF32SConvertI32 = 0xb2;
-let kExprF32UConvertI32 = 0xb3;
-let kExprF32SConvertI64 = 0xb4;
-let kExprF32UConvertI64 = 0xb5;
-let kExprF32ConvertF64 = 0xb6;
-let kExprF64SConvertI32 = 0xb7;
-let kExprF64UConvertI32 = 0xb8;
-let kExprF64SConvertI64 = 0xb9;
-let kExprF64UConvertI64 = 0xba;
-let kExprF64ConvertF32 = 0xbb;
-let kExprI32ReinterpretF32 = 0xbc;
-let kExprI64ReinterpretF64 = 0xbd;
-let kExprF32ReinterpretI32 = 0xbe;
-let kExprF64ReinterpretI64 = 0xbf;
-let kExprI32SExtendI8 = 0xc0;
-let kExprI32SExtendI16 = 0xc1;
-let kExprI64SExtendI8 = 0xc2;
-let kExprI64SExtendI16 = 0xc3;
-let kExprI64SExtendI32 = 0xc4;
-let kExprRefNull = 0xd0;
-let kExprRefIsNull = 0xd1;
-let kExprRefFunc = 0xd2;
+const kWasmOpcodes = {
+ 'Unreachable': 0x00,
+ 'Nop': 0x01,
+ 'Block': 0x02,
+ 'Loop': 0x03,
+ 'If': 0x04,
+ 'Else': 0x05,
+ 'Try': 0x06,
+ 'Catch': 0x07,
+ 'Throw': 0x08,
+ 'Rethrow': 0x09,
+ 'BrOnExn': 0x0a,
+ 'End': 0x0b,
+ 'Br': 0x0c,
+ 'BrIf': 0x0d,
+ 'BrTable': 0x0e,
+ 'Return': 0x0f,
+ 'CallFunction': 0x10,
+ 'CallIndirect': 0x11,
+ 'ReturnCall': 0x12,
+ 'ReturnCallIndirect': 0x13,
+ 'CallRef': 0x14,
+ 'ReturnCallRef': 0x15,
+ 'Drop': 0x1a,
+ 'Select': 0x1b,
+ 'SelectWithType': 0x1c,
+ 'LocalGet': 0x20,
+ 'LocalSet': 0x21,
+ 'LocalTee': 0x22,
+ 'GlobalGet': 0x23,
+ 'GlobalSet': 0x24,
+ 'TableGet': 0x25,
+ 'TableSet': 0x26,
+ 'I32LoadMem': 0x28,
+ 'I64LoadMem': 0x29,
+ 'F32LoadMem': 0x2a,
+ 'F64LoadMem': 0x2b,
+ 'I32LoadMem8S': 0x2c,
+ 'I32LoadMem8U': 0x2d,
+ 'I32LoadMem16S': 0x2e,
+ 'I32LoadMem16U': 0x2f,
+ 'I64LoadMem8S': 0x30,
+ 'I64LoadMem8U': 0x31,
+ 'I64LoadMem16S': 0x32,
+ 'I64LoadMem16U': 0x33,
+ 'I64LoadMem32S': 0x34,
+ 'I64LoadMem32U': 0x35,
+ 'I32StoreMem': 0x36,
+ 'I64StoreMem': 0x37,
+ 'F32StoreMem': 0x38,
+ 'F64StoreMem': 0x39,
+ 'I32StoreMem8': 0x3a,
+ 'I32StoreMem16': 0x3b,
+ 'I64StoreMem8': 0x3c,
+ 'I64StoreMem16': 0x3d,
+ 'I64StoreMem32': 0x3e,
+ 'MemorySize': 0x3f,
+ 'MemoryGrow': 0x40,
+ 'I32Const': 0x41,
+ 'I64Const': 0x42,
+ 'F32Const': 0x43,
+ 'F64Const': 0x44,
+ 'I32Eqz': 0x45,
+ 'I32Eq': 0x46,
+ 'I32Ne': 0x47,
+ 'I32LtS': 0x48,
+ 'I32LtU': 0x49,
+ 'I32GtS': 0x4a,
+ 'I32GtU': 0x4b,
+ 'I32LeS': 0x4c,
+ 'I32LeU': 0x4d,
+ 'I32GeS': 0x4e,
+ 'I32GeU': 0x4f,
+ 'I64Eqz': 0x50,
+ 'I64Eq': 0x51,
+ 'I64Ne': 0x52,
+ 'I64LtS': 0x53,
+ 'I64LtU': 0x54,
+ 'I64GtS': 0x55,
+ 'I64GtU': 0x56,
+ 'I64LeS': 0x57,
+ 'I64LeU': 0x58,
+ 'I64GeS': 0x59,
+ 'I64GeU': 0x5a,
+ 'F32Eq': 0x5b,
+ 'F32Ne': 0x5c,
+ 'F32Lt': 0x5d,
+ 'F32Gt': 0x5e,
+ 'F32Le': 0x5f,
+ 'F32Ge': 0x60,
+ 'F64Eq': 0x61,
+ 'F64Ne': 0x62,
+ 'F64Lt': 0x63,
+ 'F64Gt': 0x64,
+ 'F64Le': 0x65,
+ 'F64Ge': 0x66,
+ 'I32Clz': 0x67,
+ 'I32Ctz': 0x68,
+ 'I32Popcnt': 0x69,
+ 'I32Add': 0x6a,
+ 'I32Sub': 0x6b,
+ 'I32Mul': 0x6c,
+ 'I32DivS': 0x6d,
+ 'I32DivU': 0x6e,
+ 'I32RemS': 0x6f,
+ 'I32RemU': 0x70,
+ 'I32And': 0x71,
+ 'I32Ior': 0x72,
+ 'I32Xor': 0x73,
+ 'I32Shl': 0x74,
+ 'I32ShrS': 0x75,
+ 'I32ShrU': 0x76,
+ 'I32Rol': 0x77,
+ 'I32Ror': 0x78,
+ 'I64Clz': 0x79,
+ 'I64Ctz': 0x7a,
+ 'I64Popcnt': 0x7b,
+ 'I64Add': 0x7c,
+ 'I64Sub': 0x7d,
+ 'I64Mul': 0x7e,
+ 'I64DivS': 0x7f,
+ 'I64DivU': 0x80,
+ 'I64RemS': 0x81,
+ 'I64RemU': 0x82,
+ 'I64And': 0x83,
+ 'I64Ior': 0x84,
+ 'I64Xor': 0x85,
+ 'I64Shl': 0x86,
+ 'I64ShrS': 0x87,
+ 'I64ShrU': 0x88,
+ 'I64Rol': 0x89,
+ 'I64Ror': 0x8a,
+ 'F32Abs': 0x8b,
+ 'F32Neg': 0x8c,
+ 'F32Ceil': 0x8d,
+ 'F32Floor': 0x8e,
+ 'F32Trunc': 0x8f,
+ 'F32NearestInt': 0x90,
+ 'F32Sqrt': 0x91,
+ 'F32Add': 0x92,
+ 'F32Sub': 0x93,
+ 'F32Mul': 0x94,
+ 'F32Div': 0x95,
+ 'F32Min': 0x96,
+ 'F32Max': 0x97,
+ 'F32CopySign': 0x98,
+ 'F64Abs': 0x99,
+ 'F64Neg': 0x9a,
+ 'F64Ceil': 0x9b,
+ 'F64Floor': 0x9c,
+ 'F64Trunc': 0x9d,
+ 'F64NearestInt': 0x9e,
+ 'F64Sqrt': 0x9f,
+ 'F64Add': 0xa0,
+ 'F64Sub': 0xa1,
+ 'F64Mul': 0xa2,
+ 'F64Div': 0xa3,
+ 'F64Min': 0xa4,
+ 'F64Max': 0xa5,
+ 'F64CopySign': 0xa6,
+ 'I32ConvertI64': 0xa7,
+ 'I32SConvertF32': 0xa8,
+ 'I32UConvertF32': 0xa9,
+ 'I32SConvertF64': 0xaa,
+ 'I32UConvertF64': 0xab,
+ 'I64SConvertI32': 0xac,
+ 'I64UConvertI32': 0xad,
+ 'I64SConvertF32': 0xae,
+ 'I64UConvertF32': 0xaf,
+ 'I64SConvertF64': 0xb0,
+ 'I64UConvertF64': 0xb1,
+ 'F32SConvertI32': 0xb2,
+ 'F32UConvertI32': 0xb3,
+ 'F32SConvertI64': 0xb4,
+ 'F32UConvertI64': 0xb5,
+ 'F32ConvertF64': 0xb6,
+ 'F64SConvertI32': 0xb7,
+ 'F64UConvertI32': 0xb8,
+ 'F64SConvertI64': 0xb9,
+ 'F64UConvertI64': 0xba,
+ 'F64ConvertF32': 0xbb,
+ 'I32ReinterpretF32': 0xbc,
+ 'I64ReinterpretF64': 0xbd,
+ 'F32ReinterpretI32': 0xbe,
+ 'F64ReinterpretI64': 0xbf,
+ 'I32SExtendI8': 0xc0,
+ 'I32SExtendI16': 0xc1,
+ 'I64SExtendI8': 0xc2,
+ 'I64SExtendI16': 0xc3,
+ 'I64SExtendI32': 0xc4,
+ 'RefNull': 0xd0,
+ 'RefIsNull': 0xd1,
+ 'RefFunc': 0xd2
+};
+
+function defineWasmOpcode(name, value) {
+ if (globalThis.kWasmOpcodeNames === undefined) {
+ globalThis.kWasmOpcodeNames = {};
+ }
+ Object.defineProperty(globalThis, name, {value: value});
+ if (globalThis.kWasmOpcodeNames[value] !== undefined) {
+ throw new Error(`Duplicate wasm opcode: ${value}. Previous name: ${
+ globalThis.kWasmOpcodeNames[value]}, new name: ${name}`);
+ }
+ globalThis.kWasmOpcodeNames[value] = name;
+}
+for (let name in kWasmOpcodes) {
+ defineWasmOpcode(`kExpr${name}`, kWasmOpcodes[name]);
+}
// Prefix opcodes
-let kGCPrefix = 0xfb;
-let kNumericPrefix = 0xfc;
-let kSimdPrefix = 0xfd;
-let kAtomicPrefix = 0xfe;
+const kPrefixOpcodes = {
+ 'GC': 0xfb,
+ 'Numeric': 0xfc,
+ 'Simd': 0xfd,
+ 'Atomic': 0xfe
+};
+for (let prefix in kPrefixOpcodes) {
+ defineWasmOpcode(`k${prefix}Prefix`, kPrefixOpcodes[prefix]);
+}
// GC opcodes
let kExprRttCanon = 0x30;
@@ -402,6 +426,14 @@ let kExprRefCast = 0x41;
let kExprI31New = 0x20;
// Numeric opcodes.
+let kExprI32SConvertSatF32 = 0x00;
+let kExprI32UConvertSatF32 = 0x01;
+let kExprI32SConvertSatF64 = 0x02;
+let kExprI32UConvertSatF64 = 0x03;
+let kExprI64SConvertSatF32 = 0x04;
+let kExprI64UConvertSatF32 = 0x05;
+let kExprI64SConvertSatF64 = 0x06;
+let kExprI64UConvertSatF64 = 0x07;
let kExprMemoryInit = 0x08;
let kExprDataDrop = 0x09;
let kExprMemoryCopy = 0x0a;
@@ -572,11 +604,11 @@ let kExprI8x16Shl = 0x6b;
let kExprI8x16ShrS = 0x6c;
let kExprI8x16ShrU = 0x6d;
let kExprI8x16Add = 0x6e;
-let kExprI8x16AddSaturateS = 0x6f;
-let kExprI8x16AddSaturateU = 0x70;
+let kExprI8x16AddSatS = 0x6f;
+let kExprI8x16AddSatU = 0x70;
let kExprI8x16Sub = 0x71;
-let kExprI8x16SubSaturateS = 0x72;
-let kExprI8x16SubSaturateU = 0x73;
+let kExprI8x16SubSatS = 0x72;
+let kExprI8x16SubSatU = 0x73;
let kExprI8x16MinS = 0x76;
let kExprI8x16MinU = 0x77;
let kExprI8x16MaxS = 0x78;
@@ -596,11 +628,11 @@ let kExprI16x8Shl = 0x8b;
let kExprI16x8ShrS = 0x8c;
let kExprI16x8ShrU = 0x8d;
let kExprI16x8Add = 0x8e;
-let kExprI16x8AddSaturateS = 0x8f;
-let kExprI16x8AddSaturateU = 0x90;
+let kExprI16x8AddSatS = 0x8f;
+let kExprI16x8AddSatU = 0x90;
let kExprI16x8Sub = 0x91;
-let kExprI16x8SubSaturateS = 0x92;
-let kExprI16x8SubSaturateU = 0x93;
+let kExprI16x8SubSatS = 0x92;
+let kExprI16x8SubSatU = 0x93;
let kExprI16x8Mul = 0x95;
let kExprI16x8MinS = 0x96;
let kExprI16x8MinU = 0x97;
@@ -670,15 +702,13 @@ let kTrapDivByZero = 2;
let kTrapDivUnrepresentable = 3;
let kTrapRemByZero = 4;
let kTrapFloatUnrepresentable = 5;
-let kTrapFuncInvalid = 6;
+let kTrapTableOutOfBounds = 6;
let kTrapFuncSigMismatch = 7;
-let kTrapTypeError = 8;
-let kTrapUnalignedAccess = 9;
-let kTrapDataSegmentDropped = 10;
-let kTrapElemSegmentDropped = 11;
-let kTrapTableOutOfBounds = 12;
-let kTrapBrOnExnNull = 13;
-let kTrapRethrowNull = 14;
+let kTrapUnalignedAccess = 8;
+let kTrapDataSegmentDropped = 9;
+let kTrapElemSegmentDropped = 10;
+let kTrapBrOnExnNull = 11;
+let kTrapRethrowNull = 12;
let kTrapMsgs = [
"unreachable",
@@ -687,13 +717,11 @@ let kTrapMsgs = [
"divide result unrepresentable",
"remainder by zero",
"float unrepresentable in integer range",
- "invalid index into function table",
+ "table index is out of bounds",
"function signature mismatch",
- "wasm function signature contains illegal type",
"operation does not support unaligned accesses",
"data segment has been dropped",
"element segment has been dropped",
- "table access out of bounds",
"br_on_exn on null value",
"rethrowing null value"
];
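
The trap ids double as indices into kTrapMsgs, so the renumbering above keeps
the two in sync. A one-line check (a sketch relying only on the constants and
table above):

  assertEquals('table index is out of bounds', kTrapMsgs[kTrapTableOutOfBounds]);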
@@ -948,8 +976,8 @@ class WasmModuleBuilder {
return this;
}
- addMemory(min, max, exp, shared) {
- this.memory = {min: min, max: max, exp: exp, shared: shared};
+ addMemory(min, max, exported, shared) {
+ this.memory = {min: min, max: max, exported: exported, shared: shared};
return this;
}
@@ -1124,13 +1152,13 @@ class WasmModuleBuilder {
return this;
}
- addPassiveElementSegment(array, is_import = false) {
+ addPassiveElementSegment(array) {
this.element_segments.push(
{array: array, is_active: false, is_declarative: false});
return this;
}
- addDeclarativeElementSegment(array, is_import = false) {
+ addDeclarativeElementSegment(array) {
this.element_segments.push(
{array: array, is_active: false, is_declarative: true});
return this;
@@ -1266,12 +1294,9 @@ class WasmModuleBuilder {
section.emit_u8(1); // one memory entry
const has_max = wasm.memory.max !== undefined;
const is_shared = wasm.memory.shared !== undefined;
- // Emit flags (bit 0: reszeable max, bit 1: shared memory)
- if (is_shared) {
- section.emit_u8(has_max ? kSharedHasMaximumFlag : 2);
- } else {
- section.emit_u8(has_max ? kHasMaximumFlag : 0);
- }
+ section.emit_u8(is_shared
+ ? (has_max ? kLimitsSharedHasMaximum : kLimitsSharedNoMaximum)
+ : (has_max ? kLimitsHasMaximum : kLimitsNoMaximum));
section.emit_u32v(wasm.memory.min);
if (has_max) section.emit_u32v(wasm.memory.max);
});
@@ -1356,7 +1381,7 @@ class WasmModuleBuilder {
}
// Add export table.
- var mem_export = (wasm.memory !== undefined && wasm.memory.exp);
+ var mem_export = (wasm.memory !== undefined && wasm.memory.exported);
var exports_count = wasm.exports.length + (mem_export ? 1 : 0);
if (exports_count > 0) {
if (debug) print("emitting exports @ " + binary.length);
@@ -1666,3 +1691,7 @@ function wasmS128Const(f) {
// Write in little-endian order at offset 0.
return [kSimdPrefix, kExprS128Const, ...f];
}
+
+function getOpcodeName(opcode) {
+ return globalThis.kWasmOpcodeNames?.[opcode] ?? 'unknown';
+}
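
Together, the opcode table, defineWasmOpcode() and getOpcodeName() above keep
the old kExpr* globals working while adding a reverse lookup for debug output.
A small usage sketch (assuming wasm-module-builder.js has been loaded):

  assertEquals(0x6a, kExprI32Add);                   // generated global
  assertEquals('kExprI32Add', getOpcodeName(0x6a));  // value -> name
  assertEquals('kGCPrefix', getOpcodeName(0xfb));    // prefixes registered too
  assertEquals('unknown', getOpcodeName(0xff));      // unregistered value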
diff --git a/deps/v8/test/test262/local-tests/test/intl402/NumberFormat/default-currency-maximum-fraction-digits.js b/deps/v8/test/test262/local-tests/test/intl402/NumberFormat/default-currency-maximum-fraction-digits.js
deleted file mode 100644
index 9ca6ffe2b9..0000000000
--- a/deps/v8/test/test262/local-tests/test/intl402/NumberFormat/default-currency-maximum-fraction-digits.js
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// This code is governed by the license found in the LICENSE file.
-
-/*---
-esid: ECMA-402 #sec-setnfdigitoptions
-description: >
- When a currency is used in Intl.NumberFormat and minimumFractionDigits is
- not provided, maximumFractionDigits should be range-checked against it.
-include: [assert.js]
----*/
-
-assert.throws(RangeError,
- () => new Intl.NumberFormat('en', {
- style: 'currency',
- currency: 'USD',
- maximumFractionDigits: 1
- }));
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index f27bd83a45..3016baf87f 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -66,8 +66,6 @@
'language/expressions/postfix-increment/S11.3.1_A5_T1': [FAIL],
'language/expressions/postfix-increment/S11.3.1_A5_T2': [FAIL],
'language/expressions/postfix-increment/S11.3.1_A5_T3': [FAIL],
- 'language/expressions/postfix-increment/S11.3.1_A5_T4': [FAIL],
- 'language/expressions/postfix-increment/S11.3.1_A5_T5': [FAIL],
'language/expressions/postfix-decrement/S11.3.2_A5_*': [FAIL],
'language/expressions/prefix-decrement/S11.4.5_A5_*': [FAIL],
'language/expressions/prefix-increment/S11.4.4_A5_*': [FAIL],
@@ -79,34 +77,14 @@
'language/computed-property-names/class/static/method-symbol': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=4895
- 'built-ins/TypedArrayConstructors/internals/DefineOwnProperty/detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/DefineOwnProperty/BigInt/detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/DefineOwnProperty/detached-buffer-realm': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/DefineOwnProperty/BigInt/detached-buffer-realm': [FAIL],
'built-ins/TypedArrayConstructors/internals/DefineOwnProperty/tonumber-value-detached-buffer': [FAIL],
'built-ins/TypedArrayConstructors/internals/DefineOwnProperty/BigInt/tonumber-value-detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/Get/detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/Get/BigInt/detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/Get/detached-buffer-realm': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/Get/BigInt/detached-buffer-realm': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/Get/infinity-detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/Get/BigInt/infinity-detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/GetOwnProperty/detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/GetOwnProperty/BigInt/detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/GetOwnProperty/detached-buffer-realm': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/GetOwnProperty/BigInt/detached-buffer-realm': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/GetOwnProperty/enumerate-detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/GetOwnProperty/BigInt/enumerate-detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/HasProperty/detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/HasProperty/BigInt/detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/HasProperty/detached-buffer-realm': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/HasProperty/BigInt/detached-buffer-realm': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/HasProperty/infinity-with-detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/HasProperty/BigInt/infinity-with-detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/Set/detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/Set/BigInt/detached-buffer': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/Set/detached-buffer-realm': [FAIL],
- 'built-ins/TypedArrayConstructors/internals/Set/BigInt/detached-buffer-realm': [FAIL],
+ 'built-ins/TypedArrayConstructors/internals/DefineOwnProperty/BigInt/key-is-numericindex': [FAIL],
+ 'built-ins/TypedArrayConstructors/internals/DefineOwnProperty/BigInt/key-is-numericindex-desc-configurable': [FAIL],
+ 'built-ins/TypedArrayConstructors/internals/DefineOwnProperty/key-is-numericindex': [FAIL],
+ 'built-ins/TypedArrayConstructors/internals/DefineOwnProperty/key-is-numericindex-desc-configurable': [FAIL],
+ 'built-ins/TypedArrayConstructors/internals/GetOwnProperty/BigInt/index-prop-desc': [FAIL],
+ 'built-ins/TypedArrayConstructors/internals/GetOwnProperty/index-prop-desc': [FAIL],
'built-ins/TypedArrayConstructors/internals/Set/tonumber-value-detached-buffer': [FAIL],
'built-ins/TypedArrayConstructors/internals/Set/BigInt/tonumber-value-detached-buffer': [FAIL],
# Some TypedArray methods throw due to the same bug, from Get
@@ -129,10 +107,11 @@
'built-ins/TypedArray/prototype/some/callbackfn-detachbuffer': [FAIL],
'built-ins/TypedArray/prototype/some/BigInt/callbackfn-detachbuffer': [FAIL],
# DataView functions should also throw on detached buffers
- 'built-ins/ArrayBuffer/prototype/byteLength/detached-buffer': [FAIL],
'built-ins/DataView/detached-buffer': [FAIL],
'built-ins/DataView/prototype/byteLength/detached-buffer': [FAIL],
'built-ins/DataView/prototype/byteOffset/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/byteLength/instance-has-detached-buffer': [FAIL],
+ 'built-ins/DataView/custom-proto-access-detaches-buffer': [FAIL],
# copyWithin should also throw on detached buffers
'built-ins/TypedArray/prototype/copyWithin/coerced-values-end-detached-prototype': [FAIL],
'built-ins/TypedArray/prototype/copyWithin/coerced-values-start-detached': [FAIL],
@@ -534,9 +513,6 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=7472
'intl402/NumberFormat/currency-digits': [FAIL],
- # http://crbug/v8/10844
- 'intl402/NumberFormat/dft-currency-mnfd-range-check-mxfd': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=7831
'language/statements/generators/generator-created-after-decl-inst': [FAIL],
'language/expressions/generators/generator-created-after-decl-inst': [FAIL],
@@ -595,10 +571,6 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=9818
'built-ins/AsyncFunction/proto-from-ctor-realm': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=10111
- # super() should evaluate arguments before checking IsConstructable
- 'language/expressions/super/call-proto-not-ctor': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=10381
'built-ins/Array/prototype/concat/arg-length-near-integer-limit': [FAIL],
@@ -638,6 +610,44 @@
# http://crbug/v8/10905
'language/identifier-resolution/assign-to-global-undefined': [FAIL],
+ # http://crbug/v8/10961
+ 'built-ins/Array/prototype/item/index-argument-tointeger': [FAIL],
+ 'built-ins/Array/prototype/item/index-non-numeric-argument-tointeger': [FAIL],
+ 'built-ins/Array/prototype/item/index-non-numeric-argument-tointeger-invalid': [FAIL],
+ 'built-ins/Array/prototype/item/length': [FAIL],
+ 'built-ins/Array/prototype/item/name': [FAIL],
+ 'built-ins/Array/prototype/item/prop-desc': [FAIL],
+ 'built-ins/Array/prototype/item/return-abrupt-from-this': [FAIL],
+ 'built-ins/Array/prototype/item/returns-item': [FAIL],
+ 'built-ins/Array/prototype/item/returns-item-relative-index': [FAIL],
+ 'built-ins/Array/prototype/item/returns-undefined-for-holes-in-sparse-arrays': [FAIL],
+ 'built-ins/Array/prototype/item/returns-undefined-for-out-of-range-index': [FAIL],
+ 'built-ins/String/prototype/item/index-argument-tointeger': [FAIL],
+ 'built-ins/String/prototype/item/index-non-numeric-argument-tointeger': [FAIL],
+ 'built-ins/String/prototype/item/index-non-numeric-argument-tointeger-invalid': [FAIL],
+ 'built-ins/String/prototype/item/length': [FAIL],
+ 'built-ins/String/prototype/item/name': [FAIL],
+ 'built-ins/String/prototype/item/prop-desc': [FAIL],
+ 'built-ins/String/prototype/item/return-abrupt-from-this': [FAIL],
+ 'built-ins/String/prototype/item/returns-code-unit': [FAIL],
+ 'built-ins/String/prototype/item/returns-item': [FAIL],
+ 'built-ins/String/prototype/item/returns-item-relative-index': [FAIL],
+ 'built-ins/String/prototype/item/returns-undefined-for-out-of-range-index': [FAIL],
+ 'built-ins/TypedArray/prototype/item/index-argument-tointeger': [FAIL],
+ 'built-ins/TypedArray/prototype/item/index-non-numeric-argument-tointeger': [FAIL],
+ 'built-ins/TypedArray/prototype/item/index-non-numeric-argument-tointeger-invalid': [FAIL],
+ 'built-ins/TypedArray/prototype/item/length': [FAIL],
+ 'built-ins/TypedArray/prototype/item/name': [FAIL],
+ 'built-ins/TypedArray/prototype/item/prop-desc': [FAIL],
+ 'built-ins/TypedArray/prototype/item/return-abrupt-from-this': [FAIL],
+ 'built-ins/TypedArray/prototype/item/returns-item': [FAIL],
+ 'built-ins/TypedArray/prototype/item/returns-item-relative-index': [FAIL],
+ 'built-ins/TypedArray/prototype/item/returns-undefined-for-holes-in-sparse-arrays': [FAIL],
+ 'built-ins/TypedArray/prototype/item/returns-undefined-for-out-of-range-index': [FAIL],
+
+ # http://crbug/v8/11039
+ 'intl402/Locale/reject-duplicate-variants-in-tlang': [FAIL],
+
######################## NEEDS INVESTIGATION ###########################
# https://bugs.chromium.org/p/v8/issues/detail?id=7833
@@ -653,6 +663,11 @@
# https://github.com/tc39/ecma262/pull/889
'annexB/language/function-code/block-decl-func-skip-arguments': [FAIL],
+ # Non-simple assignment targets are runtime errors instead of syntax errors
+ # for web compat. https://crbug.com/358346
+ 'language/expressions/assignmenttargettype/direct-callexpression-arguments': [FAIL],
+ 'language/expressions/assignmenttargettype/parenthesized-callexpression-arguments': [FAIL],
+
############################ INVALID TESTS #############################
# Test makes unjustified assumptions about the number of calls to SortCompare.
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index c88396c3f6..34cf4bab30 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -49,8 +49,6 @@ FEATURE_FLAGS = {
'Intl.DateTimeFormat-quarter': '--harmony-intl-dateformat-quarter',
'String.prototype.replaceAll': '--harmony_string_replaceall',
'Symbol.prototype.description': '--harmony-symbol-description',
- 'export-star-as-namespace-from-module': '--harmony-namespace-exports',
- 'Promise.allSettled': '--harmony-promise-all-settled',
'FinalizationRegistry': '--harmony-weak-refs-with-cleanup-some',
'WeakRef': '--harmony-weak-refs-with-cleanup-some',
'host-gc-required': '--expose-gc-as=v8GC',
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index c239c3825d..290661ce5b 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -80,9 +80,12 @@ v8_source_set("cppgc_unittests_sources") {
testonly = true
sources = [
+ "heap/cppgc/compactor-unittest.cc",
"heap/cppgc/concurrent-marking-unittest.cc",
"heap/cppgc/concurrent-sweeper-unittest.cc",
+ "heap/cppgc/cross-thread-persistent-unittest.cc",
"heap/cppgc/custom-spaces-unittest.cc",
+ "heap/cppgc/ephemeron-pair-unittest.cc",
"heap/cppgc/finalizer-trait-unittest.cc",
"heap/cppgc/free-list-unittest.cc",
"heap/cppgc/garbage-collected-unittest.cc",
@@ -99,9 +102,10 @@ v8_source_set("cppgc_unittests_sources") {
"heap/cppgc/marking-visitor-unittest.cc",
"heap/cppgc/member-unittest.cc",
"heap/cppgc/minor-gc-unittest.cc",
+ "heap/cppgc/name-trait-unittest.cc",
"heap/cppgc/object-start-bitmap-unittest.cc",
"heap/cppgc/page-memory-unittest.cc",
- "heap/cppgc/persistent-unittest.cc",
+ "heap/cppgc/persistent-family-unittest.cc",
"heap/cppgc/prefinalizer-unittest.cc",
"heap/cppgc/source-location-unittest.cc",
"heap/cppgc/stack-unittest.cc",
@@ -112,6 +116,7 @@ v8_source_set("cppgc_unittests_sources") {
"heap/cppgc/tests.cc",
"heap/cppgc/tests.h",
"heap/cppgc/visitor-unittest.cc",
+ "heap/cppgc/weak-container-unittest.cc",
"heap/cppgc/write-barrier-unittest.cc",
]
@@ -282,7 +287,6 @@ v8_source_set("unittests_sources") {
"heap/heap-utils.h",
"heap/index-generator-unittest.cc",
"heap/item-parallel-job-unittest.cc",
- "heap/js-member-unittest.cc",
"heap/list-unittest.cc",
"heap/local-factory-unittest.cc",
"heap/local-heap-unittest.cc",
@@ -294,7 +298,11 @@ v8_source_set("unittests_sources") {
"heap/safepoint-unittest.cc",
"heap/slot-set-unittest.cc",
"heap/spaces-unittest.cc",
+ "heap/traced-reference-unittest.cc",
+ "heap/unified-heap-snapshot-unittest.cc",
"heap/unified-heap-unittest.cc",
+ "heap/unified-heap-utils.cc",
+ "heap/unified-heap-utils.h",
"heap/unmapper-unittest.cc",
"heap/worklist-unittest.cc",
"interpreter/bytecode-array-builder-unittest.cc",
diff --git a/deps/v8/test/unittests/api/isolate-unittest.cc b/deps/v8/test/unittests/api/isolate-unittest.cc
index 392577dcaa..52c7af0386 100644
--- a/deps/v8/test/unittests/api/isolate-unittest.cc
+++ b/deps/v8/test/unittests/api/isolate-unittest.cc
@@ -25,6 +25,8 @@ class MemoryPressureTask : public v8::Task {
MemoryPressureTask(Isolate* isolate, base::Semaphore* semaphore)
: isolate_(isolate), semaphore_(semaphore) {}
~MemoryPressureTask() override = default;
+ MemoryPressureTask(const MemoryPressureTask&) = delete;
+ MemoryPressureTask& operator=(const MemoryPressureTask&) = delete;
// v8::Task implementation.
void Run() override {
@@ -35,8 +37,6 @@ class MemoryPressureTask : public v8::Task {
private:
Isolate* isolate_;
base::Semaphore* semaphore_;
-
- DISALLOW_COPY_AND_ASSIGN(MemoryPressureTask);
};
} // namespace
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc
index 0cb70cac00..392337b210 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc
@@ -155,7 +155,7 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
tasm.GetCode(nullptr, &desc);
if (FLAG_print_code) {
Handle<Code> code =
- Factory::CodeBuilder(isolate(), desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING).Build();
StdoutStream os;
code->Print(os);
}
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
index 4e2987f6f4..5ccb706d50 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
@@ -161,7 +161,7 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
tasm.GetCode(nullptr, &desc);
if (FLAG_print_code) {
Handle<Code> code =
- Factory::CodeBuilder(isolate(), desc, CodeKind::STUB).Build();
+ Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING).Build();
StdoutStream os;
code->Print(os);
}
diff --git a/deps/v8/test/unittests/base/functional-unittest.cc b/deps/v8/test/unittests/base/functional-unittest.cc
index 43b3fe6ebb..efcabb75a2 100644
--- a/deps/v8/test/unittests/base/functional-unittest.cc
+++ b/deps/v8/test/unittests/base/functional-unittest.cc
@@ -45,13 +45,13 @@ class FunctionalTest : public ::testing::Test {
FunctionalTest()
: rng_(GetRandomSeedFromFlag(::v8::internal::FLAG_random_seed)) {}
~FunctionalTest() override = default;
+ FunctionalTest(const FunctionalTest&) = delete;
+ FunctionalTest& operator=(const FunctionalTest&) = delete;
RandomNumberGenerator* rng() { return &rng_; }
private:
RandomNumberGenerator rng_;
-
- DISALLOW_COPY_AND_ASSIGN(FunctionalTest);
};
using FunctionalTypes =
diff --git a/deps/v8/test/unittests/base/platform/platform-unittest.cc b/deps/v8/test/unittests/base/platform/platform-unittest.cc
index b447778b49..192b51cf9c 100644
--- a/deps/v8/test/unittests/base/platform/platform-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/platform-unittest.cc
@@ -91,9 +91,12 @@ TEST(StackTest, GetCurrentStackPosition) {
TEST(StackTest, StackVariableInBounds) {
void* dummy;
- ASSERT_GT(Stack::GetStackStart(), Stack::GetCurrentStackPosition());
- EXPECT_GT(Stack::GetStackStart(), Stack::GetStackSlot(&dummy));
- EXPECT_LT(Stack::GetCurrentStackPosition(), Stack::GetStackSlot(&dummy));
+ ASSERT_GT(static_cast<void*>(Stack::GetStackStart()),
+ Stack::GetCurrentStackPosition());
+ EXPECT_GT(static_cast<void*>(Stack::GetStackStart()),
+ Stack::GetRealStackAddressForSlot(&dummy));
+ EXPECT_LT(static_cast<void*>(Stack::GetCurrentStackPosition()),
+ Stack::GetRealStackAddressForSlot(&dummy));
}
} // namespace base
diff --git a/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
index b566696917..4aa4aaba2b 100644
--- a/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
+++ b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
@@ -23,8 +23,9 @@ namespace internal {
CodeStubAssemblerTestState::CodeStubAssemblerTestState(
CodeStubAssemblerTest* test)
: compiler::CodeAssemblerState(
- test->isolate(), test->zone(), VoidDescriptor{}, CodeKind::STUB,
- "test", PoisoningMitigationLevel::kPoisonCriticalOnly) {}
+ test->isolate(), test->zone(), VoidDescriptor{},
+ CodeKind::FOR_TESTING, "test",
+ PoisoningMitigationLevel::kPoisonCriticalOnly) {}
TARGET_TEST_F(CodeStubAssemblerTest, SmiTag) {
CodeStubAssemblerTestState state(this);
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index 36752d7d5c..837b89a667 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -29,10 +29,15 @@ namespace internal {
class CompilerDispatcherTestFlags {
public:
+ CompilerDispatcherTestFlags(const CompilerDispatcherTestFlags&) = delete;
+ CompilerDispatcherTestFlags& operator=(const CompilerDispatcherTestFlags&) =
+ delete;
static void SetFlagsForTest() {
CHECK_NULL(save_flags_);
save_flags_ = new SaveFlags();
FLAG_single_threaded = true;
+ // TODO(leszeks): Support background finalization in compiler dispatcher.
+ FLAG_finalize_streaming_on_background = false;
FlagList::EnforceFlagImplications();
FLAG_compiler_dispatcher = true;
}
@@ -45,8 +50,6 @@ class CompilerDispatcherTestFlags {
private:
static SaveFlags* save_flags_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilerDispatcherTestFlags);
};
SaveFlags* CompilerDispatcherTestFlags::save_flags_ = nullptr;
@@ -55,6 +58,8 @@ class CompilerDispatcherTest : public TestWithNativeContext {
public:
CompilerDispatcherTest() = default;
~CompilerDispatcherTest() override = default;
+ CompilerDispatcherTest(const CompilerDispatcherTest&) = delete;
+ CompilerDispatcherTest& operator=(const CompilerDispatcherTest&) = delete;
static void SetUpTestCase() {
CompilerDispatcherTestFlags::SetFlagsForTest();
@@ -100,9 +105,6 @@ class CompilerDispatcherTest : public TestWithNativeContext {
return dispatcher->Enqueue(outer_parse_info.get(), function_name,
function_literal);
}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CompilerDispatcherTest);
};
namespace {
@@ -121,6 +123,8 @@ class MockPlatform : public v8::Platform {
EXPECT_TRUE(worker_tasks_.empty());
EXPECT_TRUE(idle_task_ == nullptr);
}
+ MockPlatform(const MockPlatform&) = delete;
+ MockPlatform& operator=(const MockPlatform&) = delete;
int NumberOfWorkerThreads() override { return 1; }
@@ -251,6 +255,8 @@ class MockPlatform : public v8::Platform {
std::vector<std::unique_ptr<Task>> tasks, bool signal)
: platform_(platform), tasks_(std::move(tasks)), signal_(signal) {}
~TaskWrapper() override = default;
+ TaskWrapper(const TaskWrapper&) = delete;
+ TaskWrapper& operator=(const TaskWrapper&) = delete;
void Run() override {
for (auto& task : tasks_) {
@@ -265,8 +271,6 @@ class MockPlatform : public v8::Platform {
MockPlatform* platform_;
std::vector<std::unique_ptr<Task>> tasks_;
bool signal_;
-
- DISALLOW_COPY_AND_ASSIGN(TaskWrapper);
};
class MockForegroundTaskRunner final : public TaskRunner {
@@ -317,8 +321,6 @@ class MockPlatform : public v8::Platform {
base::Semaphore sem_;
v8::TracingController* tracing_controller_;
-
- DISALLOW_COPY_AND_ASSIGN(MockPlatform);
};
} // namespace
diff --git a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
index 4264f546da..0ee9aad0d4 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
@@ -10,7 +10,9 @@
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/execution/isolate.h"
+#include "src/execution/local-isolate.h"
#include "src/handles/handles.h"
+#include "src/heap/local-heap.h"
#include "src/objects/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "test/unittests/test-helpers.h"
@@ -31,10 +33,12 @@ class BlockingCompilationJob : public OptimizedCompilationJob {
State::kReadyToExecute),
shared_(function->shared(), isolate),
zone_(isolate->allocator(), ZONE_NAME),
- info_(&zone_, isolate, shared_, function, CodeKind::OPTIMIZED_FUNCTION),
+ info_(&zone_, isolate, shared_, function, CodeKind::TURBOFAN),
blocking_(false),
semaphore_(0) {}
~BlockingCompilationJob() override = default;
+ BlockingCompilationJob(const BlockingCompilationJob&) = delete;
+ BlockingCompilationJob& operator=(const BlockingCompilationJob&) = delete;
bool IsBlocking() const { return blocking_.Value(); }
void Signal() { semaphore_.Signal(); }
@@ -42,7 +46,8 @@ class BlockingCompilationJob : public OptimizedCompilationJob {
// OptimiziedCompilationJob implementation.
Status PrepareJobImpl(Isolate* isolate) override { UNREACHABLE(); }
- Status ExecuteJobImpl(RuntimeCallStats* stats) override {
+ Status ExecuteJobImpl(RuntimeCallStats* stats,
+ LocalIsolate* local_isolate) override {
blocking_.SetValue(true);
semaphore_.Wait();
blocking_.SetValue(false);
@@ -57,8 +62,6 @@ class BlockingCompilationJob : public OptimizedCompilationJob {
OptimizedCompilationInfo info_;
base::AtomicValue<bool> blocking_;
base::Semaphore semaphore_;
-
- DISALLOW_COPY_AND_ASSIGN(BlockingCompilationJob);
};
} // namespace
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
index dc0caba633..f7be8bbfb7 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
@@ -47,7 +47,7 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
InstructionSelector selector(
test_->zone(), node_count, &linkage, &sequence, schedule,
&source_position_table, nullptr,
- InstructionSelector::kEnableSwitchJumpTable, &tick_counter,
+ InstructionSelector::kEnableSwitchJumpTable, &tick_counter, nullptr,
&max_unoptimized_frame_height, &max_pushed_argument_count,
source_position_mode, features, InstructionSelector::kDisableScheduling,
InstructionSelector::kEnableRootsRelativeAddressing,
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc
index f21f103835..01ec8a0fe8 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.cc
@@ -14,7 +14,7 @@ namespace compiler {
namespace {
constexpr int kMaxNumAllocatable =
- Max(Register::kNumRegisters, DoubleRegister::kNumRegisters);
+ std::max(Register::kNumRegisters, DoubleRegister::kNumRegisters);
static std::array<int, kMaxNumAllocatable> kAllocatableCodes =
base::make_array<kMaxNumAllocatable>(
[](size_t i) { return static_cast<int>(i); });
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h
index b2acbe8245..763c63bfd9 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h
+++ b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h
@@ -148,6 +148,8 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
}
InstructionSequenceTest();
+ InstructionSequenceTest(const InstructionSequenceTest&) = delete;
+ InstructionSequenceTest& operator=(const InstructionSequenceTest&) = delete;
void SetNumRegs(int num_general_registers, int num_double_registers);
int GetNumRegs(MachineRepresentation rep);
@@ -280,8 +282,6 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
LoopBlocks loop_blocks_;
InstructionBlock* current_block_;
bool block_returns_;
-
- DISALLOW_COPY_AND_ASSIGN(InstructionSequenceTest);
};
} // namespace compiler
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
index c6fe8948bc..2c89252d24 100644
--- a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -24,6 +24,8 @@ class BytecodeAnalysisTest : public TestWithIsolateAndZone {
public:
BytecodeAnalysisTest() = default;
~BytecodeAnalysisTest() override = default;
+ BytecodeAnalysisTest(const BytecodeAnalysisTest&) = delete;
+ BytecodeAnalysisTest& operator=(const BytecodeAnalysisTest&) = delete;
static void SetUpTestCase() {
CHECK_NULL(save_flags_);
@@ -83,8 +85,6 @@ class BytecodeAnalysisTest : public TestWithIsolateAndZone {
private:
static SaveFlags* save_flags_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeAnalysisTest);
};
SaveFlags* BytecodeAnalysisTest::save_flags_ = nullptr;
diff --git a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
index f280b75530..f01b1adddc 100644
--- a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
@@ -85,9 +85,10 @@ TEST_F(EffectControlLinearizerTest, SimpleLoad) {
schedule.AddReturn(start, ret);
// Run the state effect introducer.
- LinearizeEffectControl(
- jsgraph(), &schedule, zone(), source_positions(), node_origins(),
- MaskArrayIndexEnable::kDoNotMaskArrayIndex, MaintainSchedule::kDiscard);
+ LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
+ node_origins(),
+ MaskArrayIndexEnable::kDoNotMaskArrayIndex,
+ MaintainSchedule::kDiscard, broker());
EXPECT_THAT(load,
IsLoadField(AccessBuilder::ForHeapNumberValue(), heap_number,
@@ -147,9 +148,10 @@ TEST_F(EffectControlLinearizerTest, DiamondLoad) {
schedule.AddReturn(mblock, ret);
// Run the state effect introducer.
- LinearizeEffectControl(
- jsgraph(), &schedule, zone(), source_positions(), node_origins(),
- MaskArrayIndexEnable::kDoNotMaskArrayIndex, MaintainSchedule::kDiscard);
+ LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
+ node_origins(),
+ MaskArrayIndexEnable::kDoNotMaskArrayIndex,
+ MaintainSchedule::kDiscard, broker());
// The effect input to the return should be an effect phi with the
// newly introduced effectful change operators.
@@ -214,9 +216,10 @@ TEST_F(EffectControlLinearizerTest, LoopLoad) {
schedule.AddReturn(rblock, ret);
// Run the state effect introducer.
- LinearizeEffectControl(
- jsgraph(), &schedule, zone(), source_positions(), node_origins(),
- MaskArrayIndexEnable::kDoNotMaskArrayIndex, MaintainSchedule::kDiscard);
+ LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
+ node_origins(),
+ MaskArrayIndexEnable::kDoNotMaskArrayIndex,
+ MaintainSchedule::kDiscard, broker());
ASSERT_THAT(ret, IsReturn(load, load, if_true));
EXPECT_THAT(load, IsLoadField(AccessBuilder::ForHeapNumberValue(),
@@ -277,9 +280,10 @@ TEST_F(EffectControlLinearizerTest, CloneBranch) {
schedule.AddNode(mblock, merge);
schedule.AddNode(mblock, graph()->end());
- LinearizeEffectControl(
- jsgraph(), &schedule, zone(), source_positions(), node_origins(),
- MaskArrayIndexEnable::kDoNotMaskArrayIndex, MaintainSchedule::kDiscard);
+ LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
+ node_origins(),
+ MaskArrayIndexEnable::kDoNotMaskArrayIndex,
+ MaintainSchedule::kDiscard, broker());
Capture<Node *> branch1_capture, branch2_capture;
EXPECT_THAT(
@@ -335,9 +339,10 @@ TEST_F(EffectControlLinearizerTest, UnreachableThenBranch) {
ASSERT_THAT(end()->op()->ControlInputCount(), 2);
// Run the state effect linearizer, maintaining the schedule.
- LinearizeEffectControl(
- jsgraph(), &schedule, zone(), source_positions(), node_origins(),
- MaskArrayIndexEnable::kDoNotMaskArrayIndex, MaintainSchedule::kMaintain);
+ LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
+ node_origins(),
+ MaskArrayIndexEnable::kDoNotMaskArrayIndex,
+ MaintainSchedule::kMaintain, broker());
ASSERT_THAT(end(), IsEnd(IsThrow()));
}
@@ -387,9 +392,10 @@ TEST_F(EffectControlLinearizerTest, UnreachableThenDiamond) {
ASSERT_THAT(end()->op()->ControlInputCount(), 1);
// Run the state effect linearizer, maintaining the schedule.
- LinearizeEffectControl(
- jsgraph(), &schedule, zone(), source_positions(), node_origins(),
- MaskArrayIndexEnable::kDoNotMaskArrayIndex, MaintainSchedule::kMaintain);
+ LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
+ node_origins(),
+ MaskArrayIndexEnable::kDoNotMaskArrayIndex,
+ MaintainSchedule::kMaintain, broker());
ASSERT_THAT(end(), IsEnd(IsThrow()));
}
@@ -444,9 +450,10 @@ TEST_F(EffectControlLinearizerTest, UnreachableThenLoop) {
ASSERT_THAT(end()->op()->ControlInputCount(), 1);
// Run the state effect linearizer, maintaining the schedule.
- LinearizeEffectControl(
- jsgraph(), &schedule, zone(), source_positions(), node_origins(),
- MaskArrayIndexEnable::kDoNotMaskArrayIndex, MaintainSchedule::kMaintain);
+ LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
+ node_origins(),
+ MaskArrayIndexEnable::kDoNotMaskArrayIndex,
+ MaintainSchedule::kMaintain, broker());
ASSERT_THAT(end(), IsEnd(IsThrow()));
}
@@ -497,9 +504,10 @@ TEST_F(EffectControlLinearizerTest, UnreachableInChangedBlockThenBranch) {
ASSERT_THAT(end()->op()->ControlInputCount(), 2);
// Run the state effect linearizer, maintaining the schedule.
- LinearizeEffectControl(
- jsgraph(), &schedule, zone(), source_positions(), node_origins(),
- MaskArrayIndexEnable::kDoNotMaskArrayIndex, MaintainSchedule::kMaintain);
+ LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
+ node_origins(),
+ MaskArrayIndexEnable::kDoNotMaskArrayIndex,
+ MaintainSchedule::kMaintain, broker());
ASSERT_THAT(end(), IsEnd(IsThrow()));
}
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index ba6d3f299e..1a153eff9a 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -836,6 +836,56 @@ TEST_F(InstructionSelectorTest, Word32Clz) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+// SIMD.
+
+TEST_F(InstructionSelectorTest, SIMDSplatZero) {
+  // Test the optimization for splats of constant 0:
+  // {i8x16,i16x8,i32x4,i64x2}.splat(const(0)) -> v128.zero().
+  // Optimizations for f32x4.splat and f64x2.splat are not implemented since
+  // they don't improve the codegen as much (same number of instructions).
+ {
+ StreamBuilder m(this, MachineType::Simd128());
+ Node* const splat =
+ m.I64x2SplatI32Pair(m.Int32Constant(0), m.Int32Constant(0));
+ m.Return(splat);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32S128Zero, s[0]->arch_opcode());
+ ASSERT_EQ(0U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Simd128());
+ Node* const splat = m.I32x4Splat(m.Int32Constant(0));
+ m.Return(splat);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32S128Zero, s[0]->arch_opcode());
+ ASSERT_EQ(0U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Simd128());
+ Node* const splat = m.I16x8Splat(m.Int32Constant(0));
+ m.Return(splat);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32S128Zero, s[0]->arch_opcode());
+ ASSERT_EQ(0U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Simd128());
+ Node* const splat = m.I8x16Splat(m.Int32Constant(0));
+ m.Return(splat);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32S128Zero, s[0]->arch_opcode());
+ ASSERT_EQ(0U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/ppc/OWNERS b/deps/v8/test/unittests/compiler/ppc/OWNERS
index 6edd45a6ef..02c2cd757c 100644
--- a/deps/v8/test/unittests/compiler/ppc/OWNERS
+++ b/deps/v8/test/unittests/compiler/ppc/OWNERS
@@ -2,3 +2,4 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
+vasili.skurydzin@ibm.com
diff --git a/deps/v8/test/unittests/compiler/regalloc/mid-tier-register-allocator-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/mid-tier-register-allocator-unittest.cc
index 01b91017a6..7d20ec2ad4 100644
--- a/deps/v8/test/unittests/compiler/regalloc/mid-tier-register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/mid-tier-register-allocator-unittest.cc
@@ -12,6 +12,67 @@ namespace compiler {
namespace {
+// We can't just use the size of the moves collection, because redundant
+// moves need to be discounted.
+int GetMoveCount(const ParallelMove& moves) {
+ int move_count = 0;
+ for (auto move : moves) {
+ if (move->IsEliminated() || move->IsRedundant()) continue;
+ ++move_count;
+ }
+ return move_count;
+}
+
+bool AreOperandsOfSameType(
+ const AllocatedOperand& op,
+ const InstructionSequenceTest::TestOperand& test_op) {
+ bool test_op_is_reg =
+ (test_op.type_ ==
+ InstructionSequenceTest::TestOperandType::kFixedRegister ||
+ test_op.type_ == InstructionSequenceTest::TestOperandType::kRegister);
+
+ return (op.IsRegister() && test_op_is_reg) ||
+ (op.IsStackSlot() && !test_op_is_reg);
+}
+
+bool AllocatedOperandMatches(
+ const AllocatedOperand& op,
+ const InstructionSequenceTest::TestOperand& test_op) {
+ return AreOperandsOfSameType(op, test_op) &&
+ ((op.IsRegister() ? op.GetRegister().code() : op.index()) ==
+ test_op.value_ ||
+ test_op.value_ == InstructionSequenceTest::kNoValue);
+}
+
+int GetParallelMoveCount(int instr_index, Instruction::GapPosition gap_pos,
+ const InstructionSequence* sequence) {
+ const ParallelMove* moves =
+ sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
+ if (moves == nullptr) return 0;
+ return GetMoveCount(*moves);
+}
+
+bool IsParallelMovePresent(int instr_index, Instruction::GapPosition gap_pos,
+ const InstructionSequence* sequence,
+ const InstructionSequenceTest::TestOperand& src,
+ const InstructionSequenceTest::TestOperand& dest) {
+ const ParallelMove* moves =
+ sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
+ EXPECT_NE(nullptr, moves);
+
+ bool found_match = false;
+ for (auto move : *moves) {
+ if (move->IsEliminated() || move->IsRedundant()) continue;
+ if (AllocatedOperandMatches(AllocatedOperand::cast(move->source()), src) &&
+ AllocatedOperandMatches(AllocatedOperand::cast(move->destination()),
+ dest)) {
+ found_match = true;
+ break;
+ }
+ }
+ return found_match;
+}
+
class MidTierRegisterAllocatorTest : public InstructionSequenceTest {
public:
void Allocate() {
@@ -608,6 +669,99 @@ TEST_F(MidTierRegisterAllocatorTest, DiamondWithCallSecondBlock) {
Allocate();
}
+TEST_F(MidTierRegisterAllocatorTest, SingleDeferredBlockSpill) {
+ StartBlock(); // B0
+ auto var = EmitOI(Reg(0));
+ EndBlock(Branch(Reg(var), 1, 2));
+
+ StartBlock(); // B1
+ EndBlock(Jump(2));
+
+ StartBlock(true); // B2
+ EmitCall(Slot(-1), Slot(var));
+ EndBlock();
+
+ StartBlock(); // B3
+ EmitNop();
+ EndBlock();
+
+ StartBlock(); // B4
+ Return(Reg(var, 0));
+ EndBlock();
+
+ Allocate();
+
+ const int var_def_index = 1;
+ const int call_index = 3;
+
+ // We should have no parallel moves at the "var_def_index" position.
+ EXPECT_EQ(
+ 0, GetParallelMoveCount(var_def_index, Instruction::START, sequence()));
+
+ // The spill should be performed at the position "call_index".
+ EXPECT_TRUE(IsParallelMovePresent(call_index, Instruction::START, sequence(),
+ Reg(0), Slot(0)));
+}
+
+TEST_F(MidTierRegisterAllocatorTest, ValidMultipleDeferredBlockSpills) {
+ StartBlock(); // B0
+ auto var1 = EmitOI(Reg(0));
+ auto var2 = EmitOI(Reg(1));
+ auto var3 = EmitOI(Reg(2));
+ EndBlock(Branch(Reg(var1, 0), 1, 2));
+
+ StartBlock(true); // B1
+ EmitCall(Slot(-2), Slot(var1));
+ EndBlock(Jump(5));
+
+ StartBlock(); // B2
+ EmitNop();
+ EndBlock();
+
+ StartBlock(); // B3
+ EmitNop();
+ EndBlock(Branch(Reg(var2, 0), 1, 2));
+
+ StartBlock(true); // B4
+ EmitCall(Slot(-1), Slot(var2));
+ EndBlock(Jump(2));
+
+ StartBlock(); // B5
+ EmitNop();
+ EndBlock();
+
+ StartBlock(); // B6
+ Return(Reg(var3, 2));
+ EndBlock();
+
+ const int def_of_v2 = 2;
+ const int start_of_b1 = 4;
+ const int start_of_b4 = 10;
+ const int end_of_b1 = 5;
+ const int end_of_b4 = 11;
+ const int start_of_b6 = 14;
+
+ Allocate();
+
+ const int var3_reg = 2;
+ const int var3_slot = 2;
+
+ EXPECT_FALSE(IsParallelMovePresent(def_of_v2, Instruction::START, sequence(),
+ Reg(var3_reg), Slot()));
+ EXPECT_TRUE(IsParallelMovePresent(start_of_b1, Instruction::START, sequence(),
+ Reg(var3_reg), Slot(var3_slot)));
+ EXPECT_TRUE(IsParallelMovePresent(end_of_b1, Instruction::END, sequence(),
+ Slot(var3_slot), Reg()));
+
+ EXPECT_TRUE(IsParallelMovePresent(start_of_b4, Instruction::START, sequence(),
+ Reg(var3_reg), Slot(var3_slot)));
+ EXPECT_TRUE(IsParallelMovePresent(end_of_b4, Instruction::END, sequence(),
+ Slot(var3_slot), Reg()));
+
+ EXPECT_EQ(0,
+ GetParallelMoveCount(start_of_b6, Instruction::START, sequence()));
+}
+
namespace {
enum class ParameterType { kFixedSlot, kSlot, kRegister, kFixedRegister };
diff --git a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
index 8a5a9eda91..6387f814e1 100644
--- a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
@@ -47,9 +47,11 @@ class SimplifiedLoweringTest : public GraphTest {
typer.Run();
}
+ Linkage* linkage = zone()->New<Linkage>(Linkage::GetJSCallDescriptor(
+ zone(), false, num_parameters_ + 1, CallDescriptor::kCanUseRoots));
SimplifiedLowering lowering(
jsgraph(), broker(), zone(), source_positions(), node_origins(),
- PoisoningMitigationLevel::kDontPoison, tick_counter());
+ PoisoningMitigationLevel::kDontPoison, tick_counter(), linkage);
lowering.LowerAllNodes();
}
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index d94ae71fcb..fc04f419a0 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -90,8 +90,14 @@ static const LoadWithToInt64Extension kLoadWithToInt64Extensions[] = {
{MachineType::Uint16(), kX64Movzxwq},
{MachineType::Int32(), kX64Movsxlq}};
-} // namespace
-
+// The parameterized tests that use the following type are intentionally part
+// of the anonymous namespace. The issue here is that the type parameter is
+// using a type that is in the anonymous namespace, but the class generated by
+// TEST_P is not. This will cause GCC to generate a -Wsubobject-linkage warning.
+//
+// In this case there will only be single translation unit and the warning
+// about subobject-linkage can be avoided by placing the class generated
+// by TEST_P in the anoynmous namespace as well.
using InstructionSelectorChangeInt32ToInt64Test =
InstructionSelectorTestWithParam<LoadWithToInt64Extension>;
@@ -104,6 +110,8 @@ TEST_P(InstructionSelectorChangeInt32ToInt64Test, ChangeInt32ToInt64WithLoad) {
EXPECT_EQ(extension.expected_opcode, s[0]->arch_opcode());
}
+} // namespace
+
INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorChangeInt32ToInt64Test,
::testing::ValuesIn(kLoadWithToInt64Extensions));
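For readers unfamiliar with the warning the comment above describes, here is a rough standalone sketch of the failure mode (hypothetical names; assumes GCC with -Wsubobject-linkage enabled and gtest):

#include "testing/gtest/include/gtest/gtest.h"

namespace {
struct Param {};  // Param has internal linkage.
}  // namespace

// This fixture has external linkage, but its TestWithParam<Param> base drags
// in the internal-linkage Param as a subobject, so GCC warns here and on the
// class that TEST_P generates from it. Declaring the fixture and the TEST_P
// inside the anonymous namespace, as the hunks in this file do, avoids it.
class ParamFixture : public ::testing::TestWithParam<Param> {};
TEST_P(ParamFixture, Smoke) { SUCCEED(); }
INSTANTIATE_TEST_SUITE_P(All, ParamFixture, ::testing::Values(Param{}));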
@@ -138,8 +146,14 @@ static const MemoryAccess kMemoryAccesses[] = {
{MachineType::Float32(), kX64Movss, kX64Movss},
{MachineType::Float64(), kX64Movsd, kX64Movsd}};
-} // namespace
-
+// The parameterized tests that use the following type are intentionally part
+// of the anonymous namespace. The issue here is that the type parameter
+// uses a type that is in the anonymous namespace, but the class generated by
+// TEST_P is not. This will cause GCC to generate a -Wsubobject-linkage warning.
+//
+// In this case there will only be a single translation unit, and the warning
+// about subobject-linkage can be avoided by placing the class generated
+// by TEST_P in the anonymous namespace as well.
using InstructionSelectorMemoryAccessTest =
InstructionSelectorTestWithParam<MemoryAccess>;
@@ -170,6 +184,8 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
EXPECT_EQ(0U, s[0]->OutputCount());
}
+} // namespace
+
INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessTest,
::testing::ValuesIn(kMemoryAccesses));
@@ -215,8 +231,14 @@ const BinaryOperation kWord32BinaryOperations[] = {
{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual"},
{&RawMachineAssembler::Uint32Mod, "Uint32Mod"}};
-} // namespace
-
+// The parameterized tests that use the following type are intentionally part
+// of the anonymous namespace. The issue here is that the type parameter
+// uses a type that is in the anonymous namespace, but the class generated by
+// TEST_P is not. This will cause GCC to generate a -Wsubobject-linkage warning.
+//
+// In this case there will only be a single translation unit, and the warning
+// about subobject-linkage can be avoided by placing the class generated
+// by TEST_P in the anonymous namespace as well.
using InstructionSelectorChangeUint32ToUint64Test =
InstructionSelectorTestWithParam<BinaryOperation>;
@@ -231,6 +253,8 @@ TEST_P(InstructionSelectorChangeUint32ToUint64Test, ChangeUint32ToUint64) {
ASSERT_EQ(1U, s.size());
}
+} // namespace
+
INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorChangeUint32ToUint64Test,
::testing::ValuesIn(kWord32BinaryOperations));
@@ -294,8 +318,14 @@ const MachInst2 kCanElideChangeUint32ToUint64[] = {
MachineType::Uint32()},
};
-} // namespace
-
+// The parameterized tests that use the following type are intentionally part
+// of the anonymous namespace. The issue here is that the type parameter
+// uses a type that is in the anonymous namespace, but the class generated by
+// TEST_P is not. This will cause GCC to generate a -Wsubobject-linkage warning.
+//
+// In this case there will only be a single translation unit, and the warning
+// about subobject-linkage can be avoided by placing the class generated
+// by TEST_P in the anonymous namespace as well.
using InstructionSelectorElidedChangeUint32ToUint64Test =
InstructionSelectorTestWithParam<MachInst2>;
@@ -313,6 +343,8 @@ TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
+} // namespace
+
INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorElidedChangeUint32ToUint64Test,
::testing::ValuesIn(kCanElideChangeUint32ToUint64));
@@ -1587,6 +1619,112 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
}
}
+TEST_F(InstructionSelectorTest, Float32BinopArithmeticWithLoad) {
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* add = m.Float32Add(
+ p0, m.Load(MachineType::Float32(), p1, m.Int32Constant(127)));
+ Node* sub = m.Float32Sub(
+ add, m.Load(MachineType::Float32(), p1, m.Int32Constant(127)));
+ Node* ret = m.Float32Mul(
+ m.Load(MachineType::Float32(), p2, m.Int32Constant(127)), sub);
+ m.Return(ret);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(3U, s.size());
+ EXPECT_EQ(kAVXFloat32Add, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(kAVXFloat32Sub, s[1]->arch_opcode());
+ ASSERT_EQ(3U, s[1]->InputCount());
+ EXPECT_EQ(kAVXFloat32Mul, s[2]->arch_opcode());
+ ASSERT_EQ(3U, s[2]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[2]->InputAt(1)));
+ }
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* add = m.Float32Add(
+ p0, m.Load(MachineType::Float32(), p1, m.Int32Constant(127)));
+ Node* sub = m.Float32Sub(
+ add, m.Load(MachineType::Float32(), p1, m.Int32Constant(127)));
+ Node* ret = m.Float32Mul(
+ m.Load(MachineType::Float32(), p2, m.Int32Constant(127)), sub);
+ m.Return(ret);
+ Stream s = m.Build();
+ ASSERT_EQ(3U, s.size());
+ EXPECT_EQ(kSSEFloat32Add, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(kSSEFloat32Sub, s[1]->arch_opcode());
+ ASSERT_EQ(3U, s[1]->InputCount());
+ EXPECT_EQ(kSSEFloat32Mul, s[2]->arch_opcode());
+ ASSERT_EQ(3U, s[2]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[2]->InputAt(1)));
+ }
+}
+
+TEST_F(InstructionSelectorTest, Float64BinopArithmeticWithLoad) {
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* add = m.Float64Add(
+ p0, m.Load(MachineType::Float64(), p1, m.Int32Constant(127)));
+ Node* sub = m.Float64Sub(
+ add, m.Load(MachineType::Float64(), p1, m.Int32Constant(127)));
+ Node* ret = m.Float64Mul(
+ m.Load(MachineType::Float64(), p2, m.Int32Constant(127)), sub);
+ m.Return(ret);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(3U, s.size());
+ EXPECT_EQ(kAVXFloat64Add, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(kAVXFloat64Sub, s[1]->arch_opcode());
+ ASSERT_EQ(3U, s[1]->InputCount());
+ EXPECT_EQ(kAVXFloat64Mul, s[2]->arch_opcode());
+ ASSERT_EQ(3U, s[2]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[2]->InputAt(1)));
+ }
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* add = m.Float64Add(
+ p0, m.Load(MachineType::Float64(), p1, m.Int32Constant(127)));
+ Node* sub = m.Float64Sub(
+ add, m.Load(MachineType::Float64(), p1, m.Int32Constant(127)));
+ Node* ret = m.Float64Mul(
+ m.Load(MachineType::Float64(), p2, m.Int32Constant(127)), sub);
+ m.Return(ret);
+ Stream s = m.Build();
+ ASSERT_EQ(3U, s.size());
+ EXPECT_EQ(kSSEFloat64Add, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(kSSEFloat64Sub, s[1]->arch_opcode());
+ ASSERT_EQ(3U, s[1]->InputCount());
+ EXPECT_EQ(kSSEFloat64Mul, s[2]->arch_opcode());
+ ASSERT_EQ(3U, s[2]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[2]->InputAt(1)));
+ }
+}
+
// -----------------------------------------------------------------------------
// Miscellaneous.
@@ -1750,6 +1888,53 @@ TEST_F(InstructionSelectorTest, LoadAndWord64ShiftRight32) {
}
}
+TEST_F(InstructionSelectorTest, SIMDSplatZero) {
+ // Test optimization for splat of constant 0.
+ // {i8x16,i16x8,i32x4,i64x2}.splat(const(0)) -> v128.zero().
+ // Optimizations for f32x4.splat and f64x2.splat are not implemented since
+ // they don't improve the codegen as much (same number of instructions).
+ {
+ StreamBuilder m(this, MachineType::Simd128());
+ Node* const splat = m.I64x2Splat(m.Int64Constant(0));
+ m.Return(splat);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64S128Zero, s[0]->arch_opcode());
+ ASSERT_EQ(0U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Simd128());
+ Node* const splat = m.I32x4Splat(m.Int32Constant(0));
+ m.Return(splat);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64S128Zero, s[0]->arch_opcode());
+ ASSERT_EQ(0U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Simd128());
+ Node* const splat = m.I16x8Splat(m.Int32Constant(0));
+ m.Return(splat);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64S128Zero, s[0]->arch_opcode());
+ ASSERT_EQ(0U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Simd128());
+ Node* const splat = m.I8x16Splat(m.Int32Constant(0));
+ m.Return(splat);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64S128Zero, s[0]->arch_opcode());
+ ASSERT_EQ(0U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
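As a rough illustration of why the rewrite pays off for integer lanes only (the instruction sequences below are assumptions about typical x64 lowering, not taken from this patch):

// i32x4.splat(0), naive lowering (assumed):   with kX64S128Zero:
//   xorl   eax, eax                             pxor xmm0, xmm0
//   movd   xmm0, eax
//   pshufd xmm0, xmm0, 0x0
//
// f32x4.splat/f64x2.splat of 0.0 already lower to a comparably short
// sequence, which is why the test comment notes they are left alone.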
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
index a19cad7953..7db9261179 100644
--- a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
+++ b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
@@ -34,6 +34,9 @@ class WithFinalizationRegistryMixin : public TMixin {
public:
WithFinalizationRegistryMixin() = default;
~WithFinalizationRegistryMixin() override = default;
+ WithFinalizationRegistryMixin(const WithFinalizationRegistryMixin&) = delete;
+ WithFinalizationRegistryMixin& operator=(
+ const WithFinalizationRegistryMixin&) = delete;
static void SetUpTestCase() {
CHECK_NULL(save_flags_);
@@ -53,8 +56,6 @@ class WithFinalizationRegistryMixin : public TMixin {
private:
static SaveFlags* save_flags_;
-
- DISALLOW_COPY_AND_ASSIGN(WithFinalizationRegistryMixin);
};
template <typename TMixin>
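The hunk above replaces V8's DISALLOW_COPY_AND_ASSIGN macro with explicitly deleted members; the generic shape of the change (Widget is an illustrative name):

class Widget {
 public:
  Widget() = default;
  // These two lines replace DISALLOW_COPY_AND_ASSIGN(Widget), which the old
  // style hid in the private section.
  Widget(const Widget&) = delete;
  Widget& operator=(const Widget&) = delete;
};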
diff --git a/deps/v8/test/unittests/heap/base/worklist-unittest.cc b/deps/v8/test/unittests/heap/base/worklist-unittest.cc
index ae737a7aa3..fe9be18ecd 100644
--- a/deps/v8/test/unittests/heap/base/worklist-unittest.cc
+++ b/deps/v8/test/unittests/heap/base/worklist-unittest.cc
@@ -110,21 +110,45 @@ TEST(CppgcWorkListTest, LocalPushPop) {
TEST(CppgcWorkListTest, LocalPushStaysPrivate) {
TestWorklist worklist;
- TestWorklist::Local worklist_view1(&worklist);
- TestWorklist::Local worklist_view2(&worklist);
+ TestWorklist::Local worklist_local1(&worklist);
+ TestWorklist::Local worklist_local2(&worklist);
SomeObject dummy;
SomeObject* retrieved = nullptr;
EXPECT_TRUE(worklist.IsEmpty());
EXPECT_EQ(0U, worklist.Size());
- worklist_view1.Push(&dummy);
+ worklist_local1.Push(&dummy);
EXPECT_EQ(0U, worklist.Size());
- EXPECT_FALSE(worklist_view2.Pop(&retrieved));
+ EXPECT_FALSE(worklist_local2.Pop(&retrieved));
EXPECT_EQ(nullptr, retrieved);
- EXPECT_TRUE(worklist_view1.Pop(&retrieved));
+ EXPECT_TRUE(worklist_local1.Pop(&retrieved));
EXPECT_EQ(&dummy, retrieved);
EXPECT_EQ(0U, worklist.Size());
}
+TEST(CppgcWorkListTest, LocalClear) {
+ TestWorklist worklist;
+ TestWorklist::Local worklist_local(&worklist);
+ SomeObject* object;
+ object = reinterpret_cast<SomeObject*>(&object);
+ // Check push segment:
+ EXPECT_TRUE(worklist_local.IsEmpty());
+ worklist_local.Push(object);
+ EXPECT_FALSE(worklist_local.IsEmpty());
+ worklist_local.Clear();
+ EXPECT_TRUE(worklist_local.IsEmpty());
+ // Check pop segment:
+ worklist_local.Push(object);
+ worklist_local.Push(object);
+ EXPECT_FALSE(worklist_local.IsEmpty());
+ worklist_local.Publish();
+ EXPECT_TRUE(worklist_local.IsEmpty());
+ SomeObject* retrieved;
+ worklist_local.Pop(&retrieved);
+ EXPECT_FALSE(worklist_local.IsEmpty());
+ worklist_local.Clear();
+ EXPECT_TRUE(worklist_local.IsEmpty());
+}
+
TEST(CppgcWorkListTest, GlobalUpdateNull) {
TestWorklist worklist;
TestWorklist::Local worklist_local(&worklist);
diff --git a/deps/v8/test/unittests/heap/cppgc/compactor-unittest.cc b/deps/v8/test/unittests/heap/cppgc/compactor-unittest.cc
new file mode 100644
index 0000000000..92ae9dc6b6
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/compactor-unittest.cc
@@ -0,0 +1,250 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/compactor.h"
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/custom-space.h"
+#include "include/cppgc/persistent.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/stats-collector.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+
+class CompactableCustomSpace : public CustomSpace<CompactableCustomSpace> {
+ public:
+ static constexpr size_t kSpaceIndex = 0;
+ static constexpr bool kSupportsCompaction = true;
+};
+
+namespace internal {
+
+namespace {
+
+struct CompactableGCed : public GarbageCollected<CompactableGCed> {
+ public:
+ ~CompactableGCed() { ++g_destructor_callcount; }
+ void Trace(Visitor* visitor) const {
+ visitor->Trace(other);
+ visitor->RegisterMovableReference(other.GetSlotForTesting());
+ }
+ static size_t g_destructor_callcount;
+ Member<CompactableGCed> other;
+ size_t id = 0;
+};
+// static
+size_t CompactableGCed::g_destructor_callcount = 0;
+
+template <int kNumObjects>
+struct CompactableHolder
+ : public GarbageCollected<CompactableHolder<kNumObjects>> {
+ public:
+ explicit CompactableHolder(cppgc::AllocationHandle& allocation_handle) {
+ for (int i = 0; i < kNumObjects; ++i)
+ objects[i] = MakeGarbageCollected<CompactableGCed>(allocation_handle);
+ }
+
+ void Trace(Visitor* visitor) const {
+ for (int i = 0; i < kNumObjects; ++i) {
+ visitor->Trace(objects[i]);
+ visitor->RegisterMovableReference(objects[i].GetSlotForTesting());
+ }
+ }
+ Member<CompactableGCed> objects[kNumObjects];
+};
+
+class CompactorTest : public testing::TestWithPlatform {
+ public:
+ CompactorTest() {
+ Heap::HeapOptions options;
+ options.custom_spaces.emplace_back(
+ std::make_unique<CompactableCustomSpace>());
+ heap_ = Heap::Create(platform_, std::move(options));
+ }
+
+ void StartCompaction() {
+ compactor().EnableForNextGCForTesting();
+ compactor().InitializeIfShouldCompact(
+ GarbageCollector::Config::MarkingType::kIncremental,
+ GarbageCollector::Config::StackState::kNoHeapPointers);
+ EXPECT_TRUE(compactor().IsEnabledForTesting());
+ }
+
+ void CancelCompaction() {
+ bool cancelled = compactor().CancelIfShouldNotCompact(
+ GarbageCollector::Config::MarkingType::kAtomic,
+ GarbageCollector::Config::StackState::kMayContainHeapPointers);
+ EXPECT_TRUE(cancelled);
+ }
+
+ void FinishCompaction() { compactor().CompactSpacesIfEnabled(); }
+
+ void StartGC() {
+ CompactableGCed::g_destructor_callcount = 0u;
+ StartCompaction();
+ heap()->StartIncrementalGarbageCollection(
+ GarbageCollector::Config::PreciseIncrementalConfig());
+ }
+
+ void EndGC() {
+ heap()->marker()->FinishMarking(
+ GarbageCollector::Config::StackState::kNoHeapPointers);
+ FinishCompaction();
+ // Sweeping also verifies the object start bitmap.
+ const Sweeper::SweepingConfig sweeping_config{
+ Sweeper::SweepingConfig::SweepingType::kAtomic,
+ Sweeper::SweepingConfig::CompactableSpaceHandling::kIgnore};
+ heap()->sweeper().Start(sweeping_config);
+ }
+
+ Heap* heap() { return Heap::From(heap_.get()); }
+ cppgc::AllocationHandle& GetAllocationHandle() {
+ return heap_->GetAllocationHandle();
+ }
+ Compactor& compactor() { return heap()->compactor(); }
+
+ private:
+ std::unique_ptr<cppgc::Heap> heap_;
+};
+
+} // namespace
+
+} // namespace internal
+
+template <>
+struct SpaceTrait<internal::CompactableGCed> {
+ using Space = CompactableCustomSpace;
+};
+
+namespace internal {
+
+TEST_F(CompactorTest, NothingToCompact) {
+ StartCompaction();
+ FinishCompaction();
+}
+
+TEST_F(CompactorTest, CancelledNothingToCompact) {
+ StartCompaction();
+ CancelCompaction();
+}
+
+TEST_F(CompactorTest, NonEmptySpaceAllLive) {
+ static constexpr int kNumObjects = 10;
+ Persistent<CompactableHolder<kNumObjects>> holder =
+ MakeGarbageCollected<CompactableHolder<kNumObjects>>(
+ GetAllocationHandle(), GetAllocationHandle());
+ CompactableGCed* references[kNumObjects] = {nullptr};
+ for (int i = 0; i < kNumObjects; ++i) {
+ references[i] = holder->objects[i];
+ }
+ StartGC();
+ EndGC();
+ EXPECT_EQ(0u, CompactableGCed::g_destructor_callcount);
+ for (int i = 0; i < kNumObjects; ++i) {
+ EXPECT_EQ(holder->objects[i], references[i]);
+ }
+}
+
+TEST_F(CompactorTest, NonEmptySpaceAllDead) {
+ static constexpr int kNumObjects = 10;
+ Persistent<CompactableHolder<kNumObjects>> holder =
+ MakeGarbageCollected<CompactableHolder<kNumObjects>>(
+ GetAllocationHandle(), GetAllocationHandle());
+ CompactableGCed::g_destructor_callcount = 0u;
+ StartGC();
+ for (int i = 0; i < kNumObjects; ++i) {
+ holder->objects[i] = nullptr;
+ }
+ EndGC();
+ EXPECT_EQ(10u, CompactableGCed::g_destructor_callcount);
+}
+
+TEST_F(CompactorTest, NonEmptySpaceHalfLive) {
+ static constexpr int kNumObjects = 10;
+ Persistent<CompactableHolder<kNumObjects>> holder =
+ MakeGarbageCollected<CompactableHolder<kNumObjects>>(
+ GetAllocationHandle(), GetAllocationHandle());
+ CompactableGCed* references[kNumObjects] = {nullptr};
+ for (int i = 0; i < kNumObjects; ++i) {
+ references[i] = holder->objects[i];
+ }
+ StartGC();
+ for (int i = 0; i < kNumObjects; i += 2) {
+ holder->objects[i] = nullptr;
+ }
+ EndGC();
+ // Half of the objects were destroyed.
+ EXPECT_EQ(5u, CompactableGCed::g_destructor_callcount);
+ // Remaining objects are compacted.
+ for (int i = 1; i < kNumObjects; i += 2) {
+ EXPECT_EQ(holder->objects[i], references[i / 2]);
+ }
+}
+
+TEST_F(CompactorTest, CompactAcrossPages) {
+ Persistent<CompactableHolder<1>> holder =
+ MakeGarbageCollected<CompactableHolder<1>>(GetAllocationHandle(),
+ GetAllocationHandle());
+ CompactableGCed* reference = holder->objects[0];
+ static constexpr size_t kObjectsPerPage =
+ kPageSize / (sizeof(CompactableGCed) + sizeof(HeapObjectHeader));
+ for (size_t i = 0; i < kObjectsPerPage; ++i) {
+ holder->objects[0] =
+ MakeGarbageCollected<CompactableGCed>(GetAllocationHandle());
+ }
+ // Last allocated object should be on a new page.
+ EXPECT_NE(reference, holder->objects[0]);
+ EXPECT_NE(BasePage::FromInnerAddress(heap(), reference),
+ BasePage::FromInnerAddress(heap(), holder->objects[0].Get()));
+ StartGC();
+ EndGC();
+ // All objects except the one still referenced were destroyed.
+ EXPECT_EQ(kObjectsPerPage, CompactableGCed::g_destructor_callcount);
+ EXPECT_EQ(reference, holder->objects[0]);
+}
+
+TEST_F(CompactorTest, InteriorSlotToPreviousObject) {
+ static constexpr int kNumObjects = 3;
+ Persistent<CompactableHolder<kNumObjects>> holder =
+ MakeGarbageCollected<CompactableHolder<kNumObjects>>(
+ GetAllocationHandle(), GetAllocationHandle());
+ CompactableGCed* references[kNumObjects] = {nullptr};
+ for (int i = 0; i < kNumObjects; ++i) {
+ references[i] = holder->objects[i];
+ }
+ holder->objects[2]->other = holder->objects[1];
+ holder->objects[1] = nullptr;
+ holder->objects[0] = nullptr;
+ StartGC();
+ EndGC();
+ EXPECT_EQ(1u, CompactableGCed::g_destructor_callcount);
+ EXPECT_EQ(references[1], holder->objects[2]);
+ EXPECT_EQ(references[0], holder->objects[2]->other);
+}
+
+TEST_F(CompactorTest, InteriorSlotToNextObject) {
+ static constexpr int kNumObjects = 3;
+ Persistent<CompactableHolder<kNumObjects>> holder =
+ MakeGarbageCollected<CompactableHolder<kNumObjects>>(
+ GetAllocationHandle(), GetAllocationHandle());
+ CompactableGCed* references[kNumObjects] = {nullptr};
+ for (int i = 0; i < kNumObjects; ++i) {
+ references[i] = holder->objects[i];
+ }
+ holder->objects[1]->other = holder->objects[2];
+ holder->objects[2] = nullptr;
+ holder->objects[0] = nullptr;
+ StartGC();
+ EndGC();
+ EXPECT_EQ(1u, CompactableGCed::g_destructor_callcount);
+ EXPECT_EQ(references[0], holder->objects[1]);
+ EXPECT_EQ(references[1], holder->objects[1]->other);
+}
+
+} // namespace internal
+} // namespace cppgc
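The compaction hook this new file exercises is Visitor::RegisterMovableReference: a Trace method registers every slot the compactor may need to rewrite when it moves the referent. Condensed from CompactableGCed::Trace above:

void Trace(cppgc::Visitor* visitor) const {
  visitor->Trace(other);  // Keeps the referent alive as usual.
  // Tells the compactor it may update this slot if the referent moves.
  visitor->RegisterMovableReference(other.GetSlotForTesting());
}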
diff --git a/deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc b/deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc
index b39a545b7b..4da9870221 100644
--- a/deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/concurrent-marking-unittest.cc
@@ -16,51 +16,17 @@
namespace cppgc {
namespace internal {
-#if defined(THREAD_SANITIZER)
-
namespace {
-class GCed : public GarbageCollected<GCed> {
- public:
- void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
-
- Member<GCed> child_;
-};
-
-class GCedWithCallback : public GarbageCollected<GCedWithCallback> {
- public:
- template <typename Callback>
- explicit GCedWithCallback(Callback callback) {
- callback(this);
- }
-
- void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
-
- Member<GCedWithCallback> child_;
-};
-
-class Mixin : public GarbageCollectedMixin {
- public:
- void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
-
- Member<Mixin> child_;
-};
-
-class GCedWithMixin : public GarbageCollected<GCedWithMixin>, public Mixin {
- public:
- void Trace(cppgc::Visitor* visitor) const { Mixin::Trace(visitor); }
-};
-
-template <typename T>
-class GCedHolder : public GarbageCollected<GCedHolder<T>> {
- public:
- void Trace(cppgc::Visitor* visitor) const { visitor->Trace(object_); }
-
- Member<T> object_;
-};
-
class ConcurrentMarkingTest : public testing::TestWithHeap {
public:
+#if defined(THREAD_SANITIZER)
+ // Use more iterations on TSan builds to expose data races.
+ static constexpr int kNumStep = 1000;
+#else
+ static constexpr int kNumStep = 10;
+#endif // defined(THREAD_SANITIZER)
+
using Config = Heap::Config;
static constexpr Config ConcurrentPreciseConfig = {
Config::CollectionType::kMajor, Config::StackState::kNoHeapPointers,
@@ -95,16 +61,52 @@ class ConcurrentMarkingTest : public testing::TestWithHeap {
constexpr ConcurrentMarkingTest::Config
ConcurrentMarkingTest::ConcurrentPreciseConfig;
+template <typename T>
+struct GCedHolder : public GarbageCollected<GCedHolder<T>> {
+ void Trace(cppgc::Visitor* visitor) const { visitor->Trace(object); }
+ Member<T> object;
+};
+
+class GCed : public GarbageCollected<GCed> {
+ public:
+ void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
+
+ Member<GCed> child_;
+};
+
+class GCedWithCallback : public GarbageCollected<GCedWithCallback> {
+ public:
+ template <typename Callback>
+ explicit GCedWithCallback(Callback callback) {
+ callback(this);
+ }
+
+ void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
+
+ Member<GCedWithCallback> child_;
+};
+
+class Mixin : public GarbageCollectedMixin {
+ public:
+ void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
+
+ Member<Mixin> child_;
+};
+
+class GCedWithMixin : public GarbageCollected<GCedWithMixin>, public Mixin {
+ public:
+ void Trace(cppgc::Visitor* visitor) const { Mixin::Trace(visitor); }
+};
+
} // namespace
// The following tests below check for data races during concurrent marking.
TEST_F(ConcurrentMarkingTest, MarkingObjects) {
- static constexpr int kNumStep = 1000;
StartConcurrentGC();
Persistent<GCedHolder<GCed>> root =
MakeGarbageCollected<GCedHolder<GCed>>(GetAllocationHandle());
- Member<GCed>* last_object = &root->object_;
+ Member<GCed>* last_object = &root->object;
for (int i = 0; i < kNumStep; ++i) {
for (int j = 0; j < kNumStep; ++j) {
*last_object = MakeGarbageCollected<GCed>(GetAllocationHandle());
@@ -117,11 +119,10 @@ TEST_F(ConcurrentMarkingTest, MarkingObjects) {
}
TEST_F(ConcurrentMarkingTest, MarkingInConstructionObjects) {
- static constexpr int kNumStep = 1000;
StartConcurrentGC();
Persistent<GCedHolder<GCedWithCallback>> root =
MakeGarbageCollected<GCedHolder<GCedWithCallback>>(GetAllocationHandle());
- Member<GCedWithCallback>* last_object = &root->object_;
+ Member<GCedWithCallback>* last_object = &root->object;
for (int i = 0; i < kNumStep; ++i) {
for (int j = 0; j < kNumStep; ++j) {
MakeGarbageCollected<GCedWithCallback>(
@@ -137,11 +138,10 @@ TEST_F(ConcurrentMarkingTest, MarkingInConstructionObjects) {
}
TEST_F(ConcurrentMarkingTest, MarkingMixinObjects) {
- static constexpr int kNumStep = 1000;
StartConcurrentGC();
Persistent<GCedHolder<Mixin>> root =
MakeGarbageCollected<GCedHolder<Mixin>>(GetAllocationHandle());
- Member<Mixin>* last_object = &root->object_;
+ Member<Mixin>* last_object = &root->object;
for (int i = 0; i < kNumStep; ++i) {
for (int j = 0; j < kNumStep; ++j) {
*last_object = MakeGarbageCollected<GCedWithMixin>(GetAllocationHandle());
@@ -153,7 +153,58 @@ TEST_F(ConcurrentMarkingTest, MarkingMixinObjects) {
FinishGC();
}
-#endif // defined(THREAD_SANITIZER)
+namespace {
+
+struct ConcurrentlyTraceable : public GarbageCollected<ConcurrentlyTraceable> {
+ static size_t trace_counter;
+ void Trace(Visitor*) const { ++trace_counter; }
+};
+size_t ConcurrentlyTraceable::trace_counter = 0;
+
+struct NotConcurrentlyTraceable
+ : public GarbageCollected<NotConcurrentlyTraceable> {
+ static size_t trace_counter;
+ void Trace(Visitor* visitor) const {
+ if (visitor->DeferTraceToMutatorThreadIfConcurrent(
+ this,
+ [](Visitor*, const void*) {
+ ++NotConcurrentlyTraceable::trace_counter;
+ },
+ sizeof(NotConcurrentlyTraceable)))
+ return;
+ ++trace_counter;
+ }
+};
+size_t NotConcurrentlyTraceable::trace_counter = 0;
+
+} // namespace
+
+TEST_F(ConcurrentMarkingTest, ConcurrentlyTraceableObjectIsTracedConcurrently) {
+ Persistent<GCedHolder<ConcurrentlyTraceable>> root =
+ MakeGarbageCollected<GCedHolder<ConcurrentlyTraceable>>(
+ GetAllocationHandle());
+ root->object =
+ MakeGarbageCollected<ConcurrentlyTraceable>(GetAllocationHandle());
+ EXPECT_EQ(0u, ConcurrentlyTraceable::trace_counter);
+ StartConcurrentGC();
+ GetMarkerRef()->WaitForConcurrentMarkingForTesting();
+ EXPECT_NE(0u, ConcurrentlyTraceable::trace_counter);
+ FinishGC();
+}
+
+TEST_F(ConcurrentMarkingTest,
+ NotConcurrentlyTraceableObjectIsNotTracedConcurrently) {
+ Persistent<GCedHolder<NotConcurrentlyTraceable>> root =
+ MakeGarbageCollected<GCedHolder<NotConcurrentlyTraceable>>(
+ GetAllocationHandle());
+ root->object =
+ MakeGarbageCollected<NotConcurrentlyTraceable>(GetAllocationHandle());
+ EXPECT_EQ(0u, NotConcurrentlyTraceable::trace_counter);
+ StartConcurrentGC();
+ GetMarkerRef()->WaitForConcurrentMarkingForTesting();
+ EXPECT_EQ(0u, NotConcurrentlyTraceable::trace_counter);
+ FinishGC();
+}
} // namespace internal
} // namespace cppgc
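The NotConcurrentlyTraceable test above hinges on cppgc::Visitor::DeferTraceToMutatorThreadIfConcurrent, which lets a Trace method opt out of concurrent marking and have a callback re-run on the mutator thread. Its call shape, condensed from the struct above:

void Trace(cppgc::Visitor* visitor) const {
  if (visitor->DeferTraceToMutatorThreadIfConcurrent(
          this,
          [](cppgc::Visitor*, const void* object) {
            // Runs on the mutator thread instead.
          },
          sizeof(*this)))
    return;  // A concurrent marker deferred the work; nothing to do here.
  // Reached when tracing already happens on the mutator thread.
}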
diff --git a/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc b/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
index 3794adce25..b1cdc5d8fc 100644
--- a/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/concurrent-sweeper-unittest.cc
@@ -61,6 +61,8 @@ class NonFinalizable : public GarbageCollected<NonFinalizable<Size>> {
using NormalNonFinalizable = NonFinalizable<32>;
using LargeNonFinalizable = NonFinalizable<kLargeObjectSizeThreshold * 2>;
+} // namespace
+
class ConcurrentSweeperTest : public testing::TestWithHeap {
public:
ConcurrentSweeperTest() { g_destructor_callcount = 0; }
@@ -73,7 +75,16 @@ class ConcurrentSweeperTest : public testing::TestWithHeap {
heap->stats_collector()->NotifyMarkingStarted();
heap->stats_collector()->NotifyMarkingCompleted(0);
Sweeper& sweeper = heap->sweeper();
- sweeper.Start(Sweeper::Config::kIncrementalAndConcurrent);
+ const Sweeper::SweepingConfig sweeping_config{
+ Sweeper::SweepingConfig::SweepingType::kIncrementalAndConcurrent,
+ Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep};
+ sweeper.Start(sweeping_config);
+ }
+
+ void WaitForConcurrentSweeping() {
+ Heap* heap = Heap::From(GetHeap());
+ Sweeper& sweeper = heap->sweeper();
+ sweeper.WaitForConcurrentSweepingForTesting();
}
void FinishSweeping() {
@@ -126,8 +137,6 @@ class ConcurrentSweeperTest : public testing::TestWithHeap {
}
};
-} // namespace
-
TEST_F(ConcurrentSweeperTest, BackgroundSweepOfNormalPage) {
// Non finalizable objects are swept right away.
using GCedType = NormalNonFinalizable;
@@ -145,7 +154,7 @@ TEST_F(ConcurrentSweeperTest, BackgroundSweepOfNormalPage) {
StartSweeping();
// Wait for concurrent sweeping to finish.
- GetPlatform().WaitAllBackgroundTasks();
+ WaitForConcurrentSweeping();
#if !defined(CPPGC_YOUNG_GENERATION)
// Check that the marked object was unmarked.
@@ -184,7 +193,7 @@ TEST_F(ConcurrentSweeperTest, BackgroundSweepOfLargePage) {
StartSweeping();
// Wait for concurrent sweeping to finish.
- GetPlatform().WaitAllBackgroundTasks();
+ WaitForConcurrentSweeping();
#if !defined(CPPGC_YOUNG_GENERATION)
// Check that the marked object was unmarked.
@@ -224,7 +233,7 @@ TEST_F(ConcurrentSweeperTest, DeferredFinalizationOfNormalPage) {
StartSweeping();
// Wait for concurrent sweeping to finish.
- GetPlatform().WaitAllBackgroundTasks();
+ WaitForConcurrentSweeping();
// Check that pages are not returned right away.
for (auto* page : pages) {
@@ -256,7 +265,7 @@ TEST_F(ConcurrentSweeperTest, DeferredFinalizationOfLargePage) {
StartSweeping();
// Wait for concurrent sweeping to finish.
- GetPlatform().WaitAllBackgroundTasks();
+ WaitForConcurrentSweeping();
// Check that the page is not returned to the space.
EXPECT_EQ(space->end(), std::find(space->begin(), space->end(), page));
@@ -302,7 +311,7 @@ TEST_F(ConcurrentSweeperTest, IncrementalSweeping) {
EXPECT_TRUE(marked_large_header.IsMarked());
// Wait for incremental sweeper to finish.
- GetPlatform().WaitAllForegroundTasks();
+ GetPlatform().RunAllForegroundTasks();
EXPECT_EQ(2u, g_destructor_callcount);
#if !defined(CPPGC_YOUNG_GENERATION)
diff --git a/deps/v8/test/unittests/heap/cppgc/cross-thread-persistent-unittest.cc b/deps/v8/test/unittests/heap/cppgc/cross-thread-persistent-unittest.cc
new file mode 100644
index 0000000000..3a9dc91000
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/cross-thread-persistent-unittest.cc
@@ -0,0 +1,101 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/cross-thread-persistent.h"
+
+#include "include/cppgc/allocation.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/platform.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+struct GCed final : GarbageCollected<GCed> {
+ static size_t destructor_call_count;
+ GCed() { destructor_call_count = 0; }
+ ~GCed() { destructor_call_count++; }
+ virtual void Trace(cppgc::Visitor*) const {}
+ int a = 0;
+};
+size_t GCed::destructor_call_count = 0;
+
+class Runner final : public v8::base::Thread {
+ public:
+ template <typename Callback>
+ explicit Runner(Callback callback)
+ : Thread(v8::base::Thread::Options("CrossThreadPersistent Thread")),
+ callback_(callback) {}
+
+ void Run() final { callback_(); }
+
+ private:
+ std::function<void()> callback_;
+};
+
+} // namespace
+
+class CrossThreadPersistentTest : public testing::TestWithHeap {};
+
+TEST_F(CrossThreadPersistentTest, RetainStronglyOnDifferentThread) {
+ subtle::CrossThreadPersistent<GCed> holder =
+ MakeGarbageCollected<GCed>(GetAllocationHandle());
+ {
+ Runner runner([obj = std::move(holder)]() {});
+ EXPECT_FALSE(holder);
+ EXPECT_EQ(0u, GCed::destructor_call_count);
+ PreciseGC();
+ EXPECT_EQ(0u, GCed::destructor_call_count);
+ runner.StartSynchronously();
+ runner.Join();
+ }
+ EXPECT_EQ(0u, GCed::destructor_call_count);
+ PreciseGC();
+ EXPECT_EQ(1u, GCed::destructor_call_count);
+}
+
+TEST_F(CrossThreadPersistentTest, RetainWeaklyOnDifferentThread) {
+ subtle::WeakCrossThreadPersistent<GCed> in =
+ MakeGarbageCollected<GCed>(GetAllocationHandle());
+ // Set up |out| with an object that is always retained to ensure that the
+ // different thread indeed moves back an empty handle.
+ Persistent<GCed> out_holder =
+ MakeGarbageCollected<GCed>(GetAllocationHandle());
+ subtle::WeakCrossThreadPersistent<GCed> out = *out_holder;
+ {
+ Persistent<GCed> temporary_holder = *in;
+ Runner runner([obj = std::move(in), &out]() { out = std::move(obj); });
+ EXPECT_FALSE(in);
+ EXPECT_TRUE(out);
+ EXPECT_EQ(0u, GCed::destructor_call_count);
+ PreciseGC();
+ EXPECT_EQ(0u, GCed::destructor_call_count);
+ temporary_holder.Clear();
+ PreciseGC();
+ EXPECT_EQ(1u, GCed::destructor_call_count);
+ runner.StartSynchronously();
+ runner.Join();
+ }
+ EXPECT_FALSE(out);
+}
+
+TEST_F(CrossThreadPersistentTest, DestroyRacingWithGC) {
+ // Destroy a handle on a different thread while at the same time invoking a
+ // garbage collection on the original thread.
+ subtle::CrossThreadPersistent<GCed> holder =
+ MakeGarbageCollected<GCed>(GetAllocationHandle());
+ Runner runner([&obj = holder]() { obj.Clear(); });
+ EXPECT_TRUE(holder);
+ runner.StartSynchronously();
+ PreciseGC();
+ runner.Join();
+ EXPECT_FALSE(holder);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/custom-spaces-unittest.cc b/deps/v8/test/unittests/heap/cppgc/custom-spaces-unittest.cc
index 24e7367f67..7e73a73178 100644
--- a/deps/v8/test/unittests/heap/cppgc/custom-spaces-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/custom-spaces-unittest.cc
@@ -37,8 +37,9 @@ class TestWithHeapWithCustomSpaces : public testing::TestWithPlatform {
}
void PreciseGC() {
- heap_->ForceGarbageCollectionSlow("TestWithHeapWithCustomSpaces", "Testing",
- cppgc::Heap::StackState::kNoHeapPointers);
+ heap_->ForceGarbageCollectionSlow(
+ ::testing::UnitTest::GetInstance()->current_test_info()->name(),
+ "Testing", cppgc::Heap::StackState::kNoHeapPointers);
}
cppgc::Heap* GetHeap() const { return heap_.get(); }
@@ -140,4 +141,106 @@ TEST_F(TestWithHeapWithCustomSpaces, SweepCustomSpace) {
}
} // namespace internal
+
+// Test custom space compactability.
+
+class CompactableCustomSpace : public CustomSpace<CompactableCustomSpace> {
+ public:
+ static constexpr size_t kSpaceIndex = 0;
+ static constexpr bool kSupportsCompaction = true;
+};
+
+class NotCompactableCustomSpace
+ : public CustomSpace<NotCompactableCustomSpace> {
+ public:
+ static constexpr size_t kSpaceIndex = 1;
+ static constexpr bool kSupportsCompaction = false;
+};
+
+class DefaultCompactableCustomSpace
+ : public CustomSpace<DefaultCompactableCustomSpace> {
+ public:
+ static constexpr size_t kSpaceIndex = 2;
+ // By default, spaces are not compactable.
+};
+
+namespace internal {
+namespace {
+
+class TestWithHeapWithCompactableCustomSpaces
+ : public testing::TestWithPlatform {
+ protected:
+ TestWithHeapWithCompactableCustomSpaces() {
+ Heap::HeapOptions options;
+ options.custom_spaces.emplace_back(
+ std::make_unique<CompactableCustomSpace>());
+ options.custom_spaces.emplace_back(
+ std::make_unique<NotCompactableCustomSpace>());
+ options.custom_spaces.emplace_back(
+ std::make_unique<DefaultCompactableCustomSpace>());
+ heap_ = Heap::Create(platform_, std::move(options));
+ g_destructor_callcount = 0;
+ }
+
+ void PreciseGC() {
+ heap_->ForceGarbageCollectionSlow("TestWithHeapWithCompactableCustomSpaces",
+ "Testing",
+ cppgc::Heap::StackState::kNoHeapPointers);
+ }
+
+ cppgc::Heap* GetHeap() const { return heap_.get(); }
+
+ private:
+ std::unique_ptr<cppgc::Heap> heap_;
+};
+
+class CompactableGCed final : public GarbageCollected<CompactableGCed> {
+ public:
+ void Trace(Visitor*) const {}
+};
+class NotCompactableGCed final : public GarbageCollected<NotCompactableGCed> {
+ public:
+ void Trace(Visitor*) const {}
+};
+class DefaultCompactableGCed final
+ : public GarbageCollected<DefaultCompactableGCed> {
+ public:
+ void Trace(Visitor*) const {}
+};
+
+} // namespace
+} // namespace internal
+
+template <>
+struct SpaceTrait<internal::CompactableGCed> {
+ using Space = CompactableCustomSpace;
+};
+template <>
+struct SpaceTrait<internal::NotCompactableGCed> {
+ using Space = NotCompactableCustomSpace;
+};
+template <>
+struct SpaceTrait<internal::DefaultCompactableGCed> {
+ using Space = DefaultCompactableCustomSpace;
+};
+
+namespace internal {
+
+TEST_F(TestWithHeapWithCompactableCustomSpaces,
+ AllocateOnCompactableCustomSpaces) {
+ auto* compactable =
+ MakeGarbageCollected<CompactableGCed>(GetHeap()->GetAllocationHandle());
+ auto* not_compactable = MakeGarbageCollected<NotCompactableGCed>(
+ GetHeap()->GetAllocationHandle());
+ auto* default_compactable = MakeGarbageCollected<DefaultCompactableGCed>(
+ GetHeap()->GetAllocationHandle());
+ EXPECT_TRUE(NormalPage::FromPayload(compactable)->space()->is_compactable());
+ EXPECT_FALSE(
+ NormalPage::FromPayload(not_compactable)->space()->is_compactable());
+ EXPECT_FALSE(
+ NormalPage::FromPayload(default_compactable)->space()->is_compactable());
+}
+
+} // namespace internal
+
} // namespace cppgc
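The SpaceTrait specializations above are the cppgc pattern for routing a type's allocations to a custom space; the generic shape (MySpace and MyType are illustrative names):

class MySpace : public cppgc::CustomSpace<MySpace> {
 public:
  static constexpr size_t kSpaceIndex = 0;
  static constexpr bool kSupportsCompaction = true;  // Defaults to false.
};

class MyType : public cppgc::GarbageCollected<MyType> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

namespace cppgc {
template <>
struct SpaceTrait<MyType> {  // MyType allocations now land in MySpace.
  using Space = MySpace;
};
}  // namespace cppgc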
diff --git a/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc b/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
new file mode 100644
index 0000000000..1172eedb86
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
@@ -0,0 +1,112 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/ephemeron-pair.h"
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/persistent.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/marking-visitor.h"
+#include "src/heap/cppgc/stats-collector.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+class GCed : public GarbageCollected<GCed> {
+ public:
+ void Trace(cppgc::Visitor*) const {}
+};
+
+class EphemeronHolder : public GarbageCollected<EphemeronHolder> {
+ public:
+ EphemeronHolder(GCed* key, GCed* value) : ephemeron_pair_(key, value) {}
+ void Trace(cppgc::Visitor* visitor) const { visitor->Trace(ephemeron_pair_); }
+
+ private:
+ EphemeronPair<GCed, GCed> ephemeron_pair_;
+};
+
+class EphemeronPairTest : public testing::TestWithHeap {
+ using MarkingConfig = Marker::MarkingConfig;
+
+ static constexpr Marker::MarkingConfig IncrementalPreciseMarkingConfig = {
+ MarkingConfig::CollectionType::kMajor,
+ MarkingConfig::StackState::kNoHeapPointers,
+ MarkingConfig::MarkingType::kIncremental};
+
+ public:
+ void FinishSteps() {
+ while (!SingleStep()) {
+ }
+ }
+
+ void FinishMarking() {
+ marker_->FinishMarking(MarkingConfig::StackState::kNoHeapPointers);
+ // Pretend to finish sweeping, as StatsCollector verifies that Notify*
+ // methods are called in the right order.
+ Heap::From(GetHeap())->stats_collector()->NotifySweepingCompleted();
+ }
+
+ void InitializeMarker(HeapBase& heap, cppgc::Platform* platform) {
+ marker_ = MarkerFactory::CreateAndStartMarking<Marker>(
+ heap, platform, IncrementalPreciseMarkingConfig);
+ }
+
+ Marker* marker() const { return marker_.get(); }
+
+ private:
+ bool SingleStep() {
+ return marker_->IncrementalMarkingStepForTesting(
+ MarkingConfig::StackState::kNoHeapPointers);
+ }
+
+ std::unique_ptr<Marker> marker_;
+};
+
+// static
+constexpr Marker::MarkingConfig
+ EphemeronPairTest::IncrementalPreciseMarkingConfig;
+
+} // namespace
+
+TEST_F(EphemeronPairTest, ValueMarkedWhenKeyIsMarked) {
+ GCed* key = MakeGarbageCollected<GCed>(GetAllocationHandle());
+ GCed* value = MakeGarbageCollected<GCed>(GetAllocationHandle());
+ Persistent<EphemeronHolder> holder =
+ MakeGarbageCollected<EphemeronHolder>(GetAllocationHandle(), key, value);
+ HeapObjectHeader::FromPayload(key).TryMarkAtomic();
+ InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get());
+ FinishMarking();
+ EXPECT_TRUE(HeapObjectHeader::FromPayload(value).IsMarked());
+}
+
+TEST_F(EphemeronPairTest, ValueNotMarkedWhenKeyIsNotMarked) {
+ GCed* key = MakeGarbageCollected<GCed>(GetAllocationHandle());
+ GCed* value = MakeGarbageCollected<GCed>(GetAllocationHandle());
+ Persistent<EphemeronHolder> holder =
+ MakeGarbageCollected<EphemeronHolder>(GetAllocationHandle(), key, value);
+ InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get());
+ FinishMarking();
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(key).IsMarked());
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(value).IsMarked());
+}
+
+TEST_F(EphemeronPairTest, ValueNotMarkedBeforeKey) {
+ GCed* key = MakeGarbageCollected<GCed>(GetAllocationHandle());
+ GCed* value = MakeGarbageCollected<GCed>(GetAllocationHandle());
+ Persistent<EphemeronHolder> holder =
+ MakeGarbageCollected<EphemeronHolder>(GetAllocationHandle(), key, value);
+ InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get());
+ FinishSteps();
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(value).IsMarked());
+ HeapObjectHeader::FromPayload(key).TryMarkAtomic();
+ FinishMarking();
+ EXPECT_TRUE(HeapObjectHeader::FromPayload(value).IsMarked());
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc b/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc
index b45de5cd06..9c48621e10 100644
--- a/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/gc-info-unittest.cc
@@ -14,6 +14,12 @@
namespace cppgc {
namespace internal {
+namespace {
+
+constexpr GCInfo GetEmptyGCInfo() { return {nullptr, nullptr, nullptr, false}; }
+
+} // namespace
+
TEST(GCInfoTableTest, InitialEmpty) {
v8::base::PageAllocator page_allocator;
GCInfoTable table(&page_allocator);
@@ -23,7 +29,7 @@ TEST(GCInfoTableTest, InitialEmpty) {
TEST(GCInfoTableTest, ResizeToMaxIndex) {
v8::base::PageAllocator page_allocator;
GCInfoTable table(&page_allocator);
- GCInfo info = {nullptr, nullptr, false};
+ GCInfo info = GetEmptyGCInfo();
for (GCInfoIndex i = GCInfoTable::kMinIndex; i < GCInfoTable::kMaxIndex;
i++) {
GCInfoIndex index = table.RegisterNewGCInfo(info);
@@ -34,7 +40,7 @@ TEST(GCInfoTableTest, ResizeToMaxIndex) {
TEST(GCInfoTableDeathTest, MoreThanMaxIndexInfos) {
v8::base::PageAllocator page_allocator;
GCInfoTable table(&page_allocator);
- GCInfo info = {nullptr, nullptr, false};
+ GCInfo info = GetEmptyGCInfo();
// Create GCInfoTable::kMaxIndex entries.
for (GCInfoIndex i = GCInfoTable::kMinIndex; i < GCInfoTable::kMaxIndex;
i++) {
@@ -46,7 +52,7 @@ TEST(GCInfoTableDeathTest, MoreThanMaxIndexInfos) {
TEST(GCInfoTableDeathTest, OldTableAreaIsReadOnly) {
v8::base::PageAllocator page_allocator;
GCInfoTable table(&page_allocator);
- GCInfo info = {nullptr, nullptr, false};
+ GCInfo info = GetEmptyGCInfo();
// Use up all slots until limit.
GCInfoIndex limit = table.LimitForTesting();
// Bail out if initial limit is already the maximum because of large committed
@@ -76,7 +82,7 @@ class ThreadRegisteringGCInfoObjects final : public v8::base::Thread {
num_registrations_(num_registrations) {}
void Run() final {
- GCInfo info = {nullptr, nullptr, false};
+ GCInfo info = GetEmptyGCInfo();
for (GCInfoIndex i = 0; i < num_registrations_; i++) {
table_->RegisterNewGCInfo(info);
}
@@ -101,7 +107,7 @@ TEST(GCInfoTableTest, MultiThreadedResizeToMaxIndex) {
v8::base::PageAllocator page_allocator;
GCInfoTable table(&page_allocator);
- GCInfo info = {nullptr, nullptr, false};
+ GCInfo info = GetEmptyGCInfo();
for (size_t i = 0; i < main_thread_initialized; i++) {
table.RegisterNewGCInfo(info);
}
diff --git a/deps/v8/test/unittests/heap/cppgc/gc-invoker-unittest.cc b/deps/v8/test/unittests/heap/cppgc/gc-invoker-unittest.cc
index 9dc1b8d426..319f6e433d 100644
--- a/deps/v8/test/unittests/heap/cppgc/gc-invoker-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/gc-invoker-unittest.cc
@@ -105,7 +105,7 @@ TEST(GCInvokerTest, ConservativeGCIsInvokedAsPreciseGCViaPlatform) {
EXPECT_CALL(gc, epoch).WillRepeatedly(::testing::Return(0));
EXPECT_CALL(gc, CollectGarbage);
invoker.CollectGarbage(GarbageCollector::Config::ConservativeAtomicConfig());
- platform.WaitAllForegroundTasks();
+ platform.RunAllForegroundTasks();
}
TEST(GCInvokerTest, IncrementalGCIsStarted) {
diff --git a/deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc b/deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc
index 71df62a72b..2621af2891 100644
--- a/deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/heap-object-header-unittest.cc
@@ -40,8 +40,7 @@ TEST(HeapObjectHeaderTest, GetGCInfoIndex) {
constexpr size_t kSize = kAllocationGranularity;
HeapObjectHeader header(kSize, kGCInfoIndex);
EXPECT_EQ(kGCInfoIndex, header.GetGCInfoIndex());
- EXPECT_EQ(kGCInfoIndex,
- header.GetGCInfoIndex<HeapObjectHeader::AccessMode::kAtomic>());
+ EXPECT_EQ(kGCInfoIndex, header.GetGCInfoIndex<AccessMode::kAtomic>());
}
TEST(HeapObjectHeaderTest, GetSize) {
@@ -49,7 +48,7 @@ TEST(HeapObjectHeaderTest, GetSize) {
constexpr size_t kSize = kAllocationGranularity * 23;
HeapObjectHeader header(kSize, kGCInfoIndex);
EXPECT_EQ(kSize, header.GetSize());
- EXPECT_EQ(kSize, header.GetSize<HeapObjectHeader::AccessMode::kAtomic>());
+ EXPECT_EQ(kSize, header.GetSize<AccessMode::kAtomic>());
}
TEST(HeapObjectHeaderTest, IsLargeObject) {
@@ -57,13 +56,10 @@ TEST(HeapObjectHeaderTest, IsLargeObject) {
constexpr size_t kSize = kAllocationGranularity * 23;
HeapObjectHeader header(kSize, kGCInfoIndex);
EXPECT_EQ(false, header.IsLargeObject());
- EXPECT_EQ(false,
- header.IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>());
+ EXPECT_EQ(false, header.IsLargeObject<AccessMode::kAtomic>());
HeapObjectHeader large_header(0, kGCInfoIndex + 1);
EXPECT_EQ(true, large_header.IsLargeObject());
- EXPECT_EQ(
- true,
- large_header.IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>());
+ EXPECT_EQ(true, large_header.IsLargeObject<AccessMode::kAtomic>());
}
TEST(HeapObjectHeaderTest, MarkObjectAsFullyConstructed) {
@@ -110,7 +106,7 @@ TEST(HeapObjectHeaderTest, Unmark) {
EXPECT_FALSE(header2.IsMarked());
EXPECT_TRUE(header2.TryMarkAtomic());
EXPECT_TRUE(header2.IsMarked());
- header2.Unmark<HeapObjectHeader::AccessMode::kAtomic>();
+ header2.Unmark<AccessMode::kAtomic>();
// GCInfoIndex shares the same bitfield and should be unaffected by Unmark.
EXPECT_EQ(kGCInfoIndex, header2.GetGCInfoIndex());
EXPECT_FALSE(header2.IsMarked());
@@ -130,7 +126,7 @@ class ConcurrentGCThread final : public v8::base::Thread {
payload_(payload) {}
void Run() final {
- while (header_->IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>()) {
+ while (header_->IsInConstruction<AccessMode::kAtomic>()) {
}
USE(v8::base::AsAtomicPtr(const_cast<size_t*>(&payload_->value))
->load(std::memory_order_relaxed));
diff --git a/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc b/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc
index 0357b24ec0..694d031dda 100644
--- a/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc
@@ -117,5 +117,41 @@ TEST_F(GCHeapTest, ObjectPayloadSize) {
EXPECT_LE(expected_size, Heap::From(GetHeap())->ObjectPayloadSize());
}
+TEST_F(GCHeapTest, AllocateWithAdditionalBytes) {
+ static constexpr size_t kBaseSize = sizeof(HeapObjectHeader) + sizeof(Foo);
+ static constexpr size_t kAdditionalBytes = 10u * kAllocationGranularity;
+ {
+ Foo* object = MakeGarbageCollected<Foo>(GetAllocationHandle());
+ EXPECT_LE(kBaseSize, HeapObjectHeader::FromPayload(object).GetSize());
+ }
+ {
+ Foo* object = MakeGarbageCollected<Foo>(GetAllocationHandle(),
+ AdditionalBytes(kAdditionalBytes));
+ EXPECT_LE(kBaseSize + kAdditionalBytes,
+ HeapObjectHeader::FromPayload(object).GetSize());
+ }
+ {
+ Foo* object = MakeGarbageCollected<Foo>(
+ GetAllocationHandle(),
+ AdditionalBytes(kAdditionalBytes * kAdditionalBytes));
+ EXPECT_LE(kBaseSize + kAdditionalBytes * kAdditionalBytes,
+ HeapObjectHeader::FromPayload(object).GetSize());
+ }
+}
+
+TEST_F(GCHeapTest, AllocatedSizeDependOnAdditionalBytes) {
+ static constexpr size_t kAdditionalBytes = 10u * kAllocationGranularity;
+ Foo* object = MakeGarbageCollected<Foo>(GetAllocationHandle());
+ Foo* object_with_bytes = MakeGarbageCollected<Foo>(
+ GetAllocationHandle(), AdditionalBytes(kAdditionalBytes));
+ Foo* object_with_more_bytes = MakeGarbageCollected<Foo>(
+ GetAllocationHandle(),
+ AdditionalBytes(kAdditionalBytes * kAdditionalBytes));
+ EXPECT_LT(HeapObjectHeader::FromPayload(object).GetSize(),
+ HeapObjectHeader::FromPayload(object_with_bytes).GetSize());
+ EXPECT_LT(HeapObjectHeader::FromPayload(object_with_bytes).GetSize(),
+ HeapObjectHeader::FromPayload(object_with_more_bytes).GetSize());
+}
+
} // namespace internal
} // namespace cppgc
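AdditionalBytes, exercised by the two new heap tests, requests extra trailing storage within the same allocation; the call shape, as used above:

// Allocates Foo plus at least 64 trailing bytes in one object; actual sizes
// are rounded up to the allocation granularity, hence the EXPECT_LE checks.
Foo* object = MakeGarbageCollected<Foo>(GetAllocationHandle(),
                                        AdditionalBytes(64));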
diff --git a/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc b/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
index b879d9b989..2574db151f 100644
--- a/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
@@ -28,7 +28,6 @@ class MarkerTest : public testing::TestWithHeap {
auto* heap = Heap::From(GetHeap());
InitializeMarker(*heap, GetPlatformHandle().get(), config);
marker_->FinishMarking(stack_state);
- marker_->ProcessWeakness();
// Pretend to finish sweeping, as StatsCollector verifies that Notify*
// methods are called in the right order.
heap->stats_collector()->NotifySweepingCompleted();
@@ -234,7 +233,7 @@ TEST_F(MarkerTest, InConstructionObjectIsEventuallyMarkedEmptyStack) {
Member<GCedWithCallback> member(obj);
marker->VisitorForTesting().Trace(member);
});
- EXPECT_TRUE(HeapObjectHeader::FromPayload(object).IsMarked());
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(object).IsMarked());
marker()->FinishMarking(MarkingConfig::StackState::kMayContainHeapPointers);
EXPECT_TRUE(HeapObjectHeader::FromPayload(object).IsMarked());
}
@@ -248,7 +247,7 @@ TEST_F(MarkerTest, InConstructionObjectIsEventuallyMarkedNonEmptyStack) {
GetAllocationHandle(), [marker = marker()](GCedWithCallback* obj) {
Member<GCedWithCallback> member(obj);
marker->VisitorForTesting().Trace(member);
- EXPECT_TRUE(HeapObjectHeader::FromPayload(obj).IsMarked());
+ EXPECT_FALSE(HeapObjectHeader::FromPayload(obj).IsMarked());
marker->FinishMarking(
MarkingConfig::StackState::kMayContainHeapPointers);
EXPECT_TRUE(HeapObjectHeader::FromPayload(obj).IsMarked());
@@ -258,14 +257,20 @@ TEST_F(MarkerTest, InConstructionObjectIsEventuallyMarkedNonEmptyStack) {
TEST_F(MarkerTest, SentinelNotClearedOnWeakPersistentHandling) {
static const Marker::MarkingConfig config = {
MarkingConfig::CollectionType::kMajor,
- MarkingConfig::StackState::kNoHeapPointers};
- InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get(), config);
+ MarkingConfig::StackState::kNoHeapPointers,
+ MarkingConfig::MarkingType::kIncremental};
Persistent<GCed> root = MakeGarbageCollected<GCed>(GetAllocationHandle());
auto* tmp = MakeGarbageCollected<GCed>(GetAllocationHandle());
root->SetWeakChild(tmp);
- marker()->FinishMarking(MarkingConfig::StackState::kNoHeapPointers);
+ InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get(), config);
+ while (!marker()->IncrementalMarkingStepForTesting(
+ MarkingConfig::StackState::kNoHeapPointers)) {
+ }
+ // {root} object must be marked at this point because we do not allow
+ // encountering kSentinelPointer in WeakMember on regular Trace() calls.
+ ASSERT_TRUE(HeapObjectHeader::FromPayload(root.Get()).IsMarked());
root->SetWeakChild(kSentinelPointer);
- marker()->ProcessWeakness();
+ marker()->FinishMarking(MarkingConfig::StackState::kNoHeapPointers);
EXPECT_EQ(kSentinelPointer, root->weak_child());
}
@@ -290,7 +295,6 @@ class IncrementalMarkingTest : public testing::TestWithHeap {
void FinishMarking() {
marker_->FinishMarking(MarkingConfig::StackState::kMayContainHeapPointers);
- marker_->ProcessWeakness();
// Pretend to finish sweeping, as StatsCollector verifies that Notify*
// methods are called in the right order.
Heap::From(GetHeap())->stats_collector()->NotifySweepingCompleted();
@@ -372,7 +376,7 @@ TEST_F(IncrementalMarkingTest, IncrementalStepDuringAllocation) {
holder->member_ = obj;
EXPECT_FALSE(header->IsMarked());
FinishSteps(MarkingConfig::StackState::kMayContainHeapPointers);
- EXPECT_TRUE(header->IsMarked());
+ EXPECT_FALSE(header->IsMarked());
});
FinishSteps(MarkingConfig::StackState::kNoHeapPointers);
EXPECT_TRUE(header->IsMarked());
diff --git a/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc b/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
index fb5ba772da..603a47399b 100644
--- a/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
@@ -23,7 +23,8 @@ class MarkingVerifierTest : public testing::TestWithHeap {
void VerifyMarking(HeapBase& heap, StackState stack_state) {
Heap::From(GetHeap())->object_allocator().ResetLinearAllocationBuffers();
- MarkingVerifier verifier(heap, stack_state);
+ MarkingVerifier verifier(heap);
+ verifier.Run(stack_state);
}
};
@@ -98,6 +99,48 @@ TEST_F(MarkingVerifierTest, DoesntDieOnInConstructionOnObject) {
});
}
+namespace {
+class GCedWithCallbackAndChild final
+ : public GarbageCollected<GCedWithCallbackAndChild> {
+ public:
+ template <typename Callback>
+ GCedWithCallbackAndChild(GCed* gced, Callback callback) : child_(gced) {
+ callback(this);
+ }
+ void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
+
+ private:
+ Member<GCed> child_;
+};
+
+template <typename T>
+struct Holder : public GarbageCollected<Holder<T>> {
+ public:
+ void Trace(cppgc::Visitor* visitor) const { visitor->Trace(object); }
+ Member<T> object = nullptr;
+};
+} // namespace
+
+TEST_F(MarkingVerifierTest, DoesntDieOnInConstructionObjectWithWriteBarrier) {
+ // Regression test: https://crbug.com/v8/10989.
+ // GCedWithCallbackAndChild is marked by write barrier and then discarded by
+ // FlushNotFullyConstructedObjects because it is already marked.
+ Persistent<Holder<GCedWithCallbackAndChild>> persistent =
+ MakeGarbageCollected<Holder<GCedWithCallbackAndChild>>(
+ GetAllocationHandle());
+ GarbageCollector::Config config =
+ GarbageCollector::Config::PreciseIncrementalConfig();
+ Heap::From(GetHeap())->StartIncrementalGarbageCollection(config);
+ MakeGarbageCollected<GCedWithCallbackAndChild>(
+ GetAllocationHandle(), MakeGarbageCollected<GCed>(GetAllocationHandle()),
+ [&persistent](GCedWithCallbackAndChild* obj) {
+ persistent->object = obj;
+ });
+ GetMarkerRef()->IncrementalMarkingStepForTesting(
+ GarbageCollector::Config::StackState::kNoHeapPointers);
+ Heap::From(GetHeap())->FinalizeIncrementalGarbageCollectionIfRunning(config);
+}
+
// Death tests.
namespace {
diff --git a/deps/v8/test/unittests/heap/cppgc/marking-visitor-unittest.cc b/deps/v8/test/unittests/heap/cppgc/marking-visitor-unittest.cc
index 51387712c7..a411c7e63a 100644
--- a/deps/v8/test/unittests/heap/cppgc/marking-visitor-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/marking-visitor-unittest.cc
@@ -44,17 +44,20 @@ class GCedWithMixin : public GarbageCollected<GCedWithMixin>, public Mixin {
void Trace(cppgc::Visitor*) const override {}
};
-class TestMarkingVisitor : public MarkingVisitor {
+class TestMarkingVisitor : public MutatorMarkingVisitor {
public:
explicit TestMarkingVisitor(Marker* marker)
- : MarkingVisitor(marker->heap(), marker->MarkingStateForTesting()) {}
+ : MutatorMarkingVisitor(marker->heap(),
+ marker->MutatorMarkingStateForTesting()) {}
~TestMarkingVisitor() { marking_state_.Publish(); }
+
+ MarkingStateBase& marking_state() { return marking_state_; }
};
} // namespace
TEST_F(MarkingVisitorTest, MarkedBytesAreInitiallyZero) {
- EXPECT_EQ(0u, GetMarker()->MarkingStateForTesting().marked_bytes());
+ EXPECT_EQ(0u, GetMarker()->MutatorMarkingStateForTesting().marked_bytes());
}
// Strong references are marked.
@@ -216,7 +219,10 @@ TEST_F(MarkingVisitorTest, MarkMemberInConstruction) {
Member<GCedWithInConstructionCallback> object(obj);
visitor.Trace(object);
});
- EXPECT_TRUE(HeapObjectHeader::FromPayload(gced).IsMarked());
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(gced);
+ EXPECT_TRUE(visitor.marking_state().not_fully_constructed_worklist().Contains(
+ &header));
+ EXPECT_FALSE(header.IsMarked());
}
TEST_F(MarkingVisitorTest, MarkMemberMixinInConstruction) {
@@ -228,7 +234,10 @@ TEST_F(MarkingVisitorTest, MarkMemberMixinInConstruction) {
Member<MixinWithInConstructionCallback> mixin(obj);
visitor.Trace(mixin);
});
- EXPECT_TRUE(HeapObjectHeader::FromPayload(gced).IsMarked());
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(gced);
+ EXPECT_TRUE(visitor.marking_state().not_fully_constructed_worklist().Contains(
+ &header));
+ EXPECT_FALSE(header.IsMarked());
}
TEST_F(MarkingVisitorTest, DontMarkWeakMemberInConstruction) {
@@ -240,7 +249,11 @@ TEST_F(MarkingVisitorTest, DontMarkWeakMemberInConstruction) {
WeakMember<GCedWithInConstructionCallback> object(obj);
visitor.Trace(object);
});
- EXPECT_FALSE(HeapObjectHeader::FromPayload(gced).IsMarked());
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(gced);
+ EXPECT_FALSE(
+ visitor.marking_state().not_fully_constructed_worklist().Contains(
+ &header));
+ EXPECT_FALSE(header.IsMarked());
}
TEST_F(MarkingVisitorTest, DontMarkWeakMemberMixinInConstruction) {
@@ -252,7 +265,11 @@ TEST_F(MarkingVisitorTest, DontMarkWeakMemberMixinInConstruction) {
WeakMember<MixinWithInConstructionCallback> mixin(obj);
visitor.Trace(mixin);
});
- EXPECT_FALSE(HeapObjectHeader::FromPayload(gced).IsMarked());
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(gced);
+ EXPECT_FALSE(
+ visitor.marking_state().not_fully_constructed_worklist().Contains(
+ &header));
+ EXPECT_FALSE(header.IsMarked());
}
TEST_F(MarkingVisitorTest, MarkPersistentInConstruction) {
@@ -264,7 +281,10 @@ TEST_F(MarkingVisitorTest, MarkPersistentInConstruction) {
Persistent<GCedWithInConstructionCallback> object(obj);
visitor.TraceRootForTesting(object, SourceLocation::Current());
});
- EXPECT_TRUE(HeapObjectHeader::FromPayload(gced).IsMarked());
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(gced);
+ EXPECT_TRUE(visitor.marking_state().not_fully_constructed_worklist().Contains(
+ &header));
+ EXPECT_FALSE(header.IsMarked());
}
TEST_F(MarkingVisitorTest, MarkPersistentMixinInConstruction) {
@@ -276,7 +296,23 @@ TEST_F(MarkingVisitorTest, MarkPersistentMixinInConstruction) {
Persistent<MixinWithInConstructionCallback> mixin(obj);
visitor.TraceRootForTesting(mixin, SourceLocation::Current());
});
- EXPECT_TRUE(HeapObjectHeader::FromPayload(gced).IsMarked());
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(gced);
+ EXPECT_TRUE(visitor.marking_state().not_fully_constructed_worklist().Contains(
+ &header));
+ EXPECT_FALSE(header.IsMarked());
+}
+
+TEST_F(MarkingVisitorTest, StrongTracingMarksWeakMember) {
+ WeakMember<GCed> object(MakeGarbageCollected<GCed>(GetAllocationHandle()));
+ HeapObjectHeader& header = HeapObjectHeader::FromPayload(object);
+
+ TestMarkingVisitor visitor(GetMarker());
+
+ EXPECT_FALSE(header.IsMarked());
+
+ visitor.TraceStrongly(object);
+
+ EXPECT_TRUE(header.IsMarked());
}
} // namespace internal
diff --git a/deps/v8/test/unittests/heap/cppgc/name-trait-unittest.cc b/deps/v8/test/unittests/heap/cppgc/name-trait-unittest.cc
new file mode 100644
index 0000000000..57f100379d
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/name-trait-unittest.cc
@@ -0,0 +1,133 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/name-trait.h"
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/garbage-collected.h"
+#include "src/base/build_config.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+struct NoName : public GarbageCollected<NoName> {
+ virtual void Trace(Visitor*) const {}
+};
+
+struct OtherNoName : public GarbageCollected<OtherNoName> {
+ virtual void Trace(Visitor*) const {}
+};
+
+class ClassWithName final : public GarbageCollected<ClassWithName>,
+ public NameProvider {
+ public:
+ explicit ClassWithName(const char* name) : name_(name) {}
+ virtual void Trace(Visitor*) const {}
+ const char* GetName() const final { return name_; }
+
+ private:
+ const char* name_;
+};
+
+} // namespace
+
+TEST(NameTraitTest, InternalNamesHiddenInOfficialBuild) {
+ // Use a runtime test instead of static_assert to allow local builds but block
+ // enabling the feature accidentally through the waterfall.
+ //
+ // Do not include such type information in official builds to
+ // (a) save binary size on string literals, and
+ // (b) avoid exposing internal types until it has been clarified whether
+ // exposing internals in DevTools is fine.
+#if defined(OFFICIAL_BUILD)
+ EXPECT_TRUE(NameProvider::HideInternalNames());
+#endif
+}
+
+TEST(NameTraitTest, DefaultName) {
+ EXPECT_STREQ(NameProvider::HideInternalNames()
+ ? "InternalNode"
+ : "cppgc::internal::(anonymous namespace)::NoName",
+ NameTrait<NoName>::GetName(nullptr).value);
+ EXPECT_STREQ(NameProvider::HideInternalNames()
+ ? "InternalNode"
+ : "cppgc::internal::(anonymous namespace)::OtherNoName",
+ NameTrait<OtherNoName>::GetName(nullptr).value);
+}
+
+TEST(NameTraitTest, CustomName) {
+ ClassWithName with_name("CustomName");
+ const char* name = NameTrait<ClassWithName>::GetName(&with_name).value;
+ EXPECT_STREQ("CustomName", name);
+}
+
+namespace {
+
+class TraitTester : public NameTraitBase {
+ public:
+ // Expose type signature parser to allow testing various inputs.
+ using NameTraitBase::GetNameFromTypeSignature;
+};
+
+} // namespace
+
+TEST(NameTraitTest, NoTypeAvailable) {
+ HeapObjectName name = TraitTester::GetNameFromTypeSignature(nullptr);
+ EXPECT_STREQ(NameProvider::kNoNameDeducible, name.value);
+ EXPECT_TRUE(name.name_was_hidden);
+}
+
+TEST(NameTraitTest, ParsingPrettyFunction) {
+ // Test assumes that __PRETTY_FUNCTION__ and friends return a string
+ // containing the type as [T = <type>].
+ HeapObjectName name = TraitTester::GetNameFromTypeSignature(
+ "Some signature of a method [T = ClassNameInSignature]");
+ EXPECT_STREQ("ClassNameInSignature", name.value);
+ EXPECT_FALSE(name.name_was_hidden);
+ // While object names are generally leaky, the test needs to be cleaned up
+ // gracefully.
+ delete[] name.value;
+}
+
+class HeapObjectHeaderNameTest : public testing::TestWithHeap {};
+
+TEST_F(HeapObjectHeaderNameTest, LookupNameThroughGCInfo) {
+ auto* no_name = MakeGarbageCollected<NoName>(GetAllocationHandle());
+ auto no_name_tuple = HeapObjectHeader::FromPayload(no_name).GetName();
+ if (NameProvider::HideInternalNames()) {
+ EXPECT_STREQ(NameProvider::kHiddenName, no_name_tuple.value);
+ EXPECT_TRUE(no_name_tuple.name_was_hidden);
+ } else {
+ EXPECT_STREQ("cppgc::internal::(anonymous namespace)::NoName",
+ no_name_tuple.value);
+ EXPECT_FALSE(no_name_tuple.name_was_hidden);
+ }
+
+ auto* other_no_name =
+ MakeGarbageCollected<OtherNoName>(GetAllocationHandle());
+ auto other_no_name_tuple =
+ HeapObjectHeader::FromPayload(other_no_name).GetName();
+ if (NameProvider::HideInternalNames()) {
+ EXPECT_STREQ(NameProvider::kHiddenName, other_no_name_tuple.value);
+ EXPECT_TRUE(other_no_name_tuple.name_was_hidden);
+ } else {
+ EXPECT_STREQ("cppgc::internal::(anonymous namespace)::OtherNoName",
+ other_no_name_tuple.value);
+ EXPECT_FALSE(other_no_name_tuple.name_was_hidden);
+ }
+
+ auto* class_with_name =
+ MakeGarbageCollected<ClassWithName>(GetAllocationHandle(), "CustomName");
+ auto class_with_name_tuple =
+ HeapObjectHeader::FromPayload(class_with_name).GetName();
+ EXPECT_STREQ("CustomName", class_with_name_tuple.value);
+ EXPECT_FALSE(class_with_name_tuple.name_was_hidden);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc b/deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc
index 6781dacd3d..6c8533c2f0 100644
--- a/deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc
@@ -117,9 +117,16 @@ TEST(PageMemoryRegionTest, PlatformUsesGuardPages) {
v8::base::PageAllocator allocator;
#if defined(V8_HOST_ARCH_PPC64) && !defined(_AIX)
EXPECT_FALSE(SupportsCommittingGuardPages(&allocator));
-#else // !V8_HOST_ARCH_PPC64
+#elif defined(V8_HOST_ARCH_ARM64)
+ if (allocator.CommitPageSize() == 4096) {
+ EXPECT_TRUE(SupportsCommittingGuardPages(&allocator));
+ } else {
+ // Arm64 supports both 16k and 64k OS pages.
+ EXPECT_FALSE(SupportsCommittingGuardPages(&allocator));
+ }
+#else // Regular case.
EXPECT_TRUE(SupportsCommittingGuardPages(&allocator));
-#endif // !V8_HOST_ARCH_PPC64
+#endif
}
namespace {
diff --git a/deps/v8/test/unittests/heap/cppgc/persistent-unittest.cc b/deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc
index b832bc9d23..ae6ee23625 100644
--- a/deps/v8/test/unittests/heap/cppgc/persistent-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/persistent-family-unittest.cc
@@ -2,14 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/cppgc/persistent.h"
-
#include <vector>
#include "include/cppgc/allocation.h"
+#include "include/cppgc/cross-thread-persistent.h"
#include "include/cppgc/garbage-collected.h"
#include "include/cppgc/internal/pointer-policies.h"
#include "include/cppgc/member.h"
+#include "include/cppgc/persistent.h"
+#include "include/cppgc/source-location.h"
#include "include/cppgc/type-traits.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/liveness-broker.h"
@@ -33,11 +34,39 @@ struct DerivedGCed : GCed {
};
template <template <typename> class PersistentType>
+struct PersistentRegionTrait;
+
+template <>
+struct PersistentRegionTrait<Persistent> {
+ static PersistentRegion& Get(cppgc::Heap* heap) {
+ return internal::Heap::From(heap)->GetStrongPersistentRegion();
+ }
+};
+
+template <>
+struct PersistentRegionTrait<WeakPersistent> {
+ static PersistentRegion& Get(cppgc::Heap* heap) {
+ return internal::Heap::From(heap)->GetWeakPersistentRegion();
+ }
+};
+
+template <>
+struct PersistentRegionTrait<subtle::CrossThreadPersistent> {
+ static PersistentRegion& Get(cppgc::Heap* heap) {
+ return internal::Heap::From(heap)->GetStrongCrossThreadPersistentRegion();
+ }
+};
+
+template <>
+struct PersistentRegionTrait<subtle::WeakCrossThreadPersistent> {
+ static PersistentRegion& Get(cppgc::Heap* heap) {
+ return internal::Heap::From(heap)->GetWeakCrossThreadPersistentRegion();
+ }
+};
+
+template <template <typename> class PersistentType>
PersistentRegion& GetRegion(cppgc::Heap* heap) {
- auto* heap_impl = internal::Heap::From(heap);
- return IsWeak<PersistentType<GCed>>::value
- ? heap_impl->GetWeakPersistentRegion()
- : heap_impl->GetStrongPersistentRegion();
+ return PersistentRegionTrait<PersistentType>::Get(heap);
}
template <typename T>
@@ -46,6 +75,11 @@ using LocalizedPersistent =
internal::KeepLocationPolicy,
internal::DefaultCheckingPolicy>;
+template <typename T>
+using LocalizedCrossThreadPersistent = internal::BasicCrossThreadPersistent<
+ T, internal::StrongCrossThreadPersistentPolicy,
+ internal::KeepLocationPolicy, internal::DefaultCheckingPolicy>;
+
class RootVisitor final : public VisitorBase {
public:
RootVisitor() = default;
@@ -61,11 +95,12 @@ class RootVisitor final : public VisitorBase {
}
protected:
- void VisitRoot(const void* t, TraceDescriptor desc) final {
+ void VisitRoot(const void* t, TraceDescriptor desc,
+ const SourceLocation&) final {
desc.callback(this, desc.base_object_payload);
}
void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback callback,
- const void* object) final {
+ const void* object, const SourceLocation&) final {
weak_callbacks_.emplace_back(callback, object);
}
@@ -112,6 +147,8 @@ TEST_F(PersistentTest, NullStateCtor) {
auto* heap = GetHeap();
NullStateCtor<Persistent>(heap);
NullStateCtor<WeakPersistent>(heap);
+ NullStateCtor<subtle::CrossThreadPersistent>(heap);
+ NullStateCtor<subtle::WeakCrossThreadPersistent>(heap);
}
template <template <typename> class PersistentType>
@@ -136,6 +173,8 @@ TEST_F(PersistentTest, RawCtor) {
auto* heap = GetHeap();
RawCtor<Persistent>(heap);
RawCtor<WeakPersistent>(heap);
+ RawCtor<subtle::CrossThreadPersistent>(heap);
+ RawCtor<subtle::WeakCrossThreadPersistent>(heap);
}
template <template <typename> class PersistentType>
@@ -191,6 +230,8 @@ TEST_F(PersistentTest, CopyCtor) {
auto* heap = GetHeap();
CopyCtor<Persistent>(heap);
CopyCtor<WeakPersistent>(heap);
+ CopyCtor<subtle::CrossThreadPersistent>(heap);
+ CopyCtor<subtle::WeakCrossThreadPersistent>(heap);
}
template <template <typename> class PersistentType>
@@ -233,6 +274,8 @@ TEST_F(PersistentTest, MoveCtor) {
auto* heap = GetHeap();
MoveCtor<Persistent>(heap);
MoveCtor<WeakPersistent>(heap);
+ MoveCtor<subtle::CrossThreadPersistent>(heap);
+ MoveCtor<subtle::WeakCrossThreadPersistent>(heap);
}
template <template <typename> class PersistentType,
@@ -258,10 +301,16 @@ TEST_F(PersistentTest, MemberCtor) {
MemberCtor<WeakPersistent, Member>(heap);
MemberCtor<WeakPersistent, WeakMember>(heap);
MemberCtor<WeakPersistent, UntracedMember>(heap);
+ MemberCtor<subtle::CrossThreadPersistent, Member>(heap);
+ MemberCtor<subtle::CrossThreadPersistent, WeakMember>(heap);
+ MemberCtor<subtle::CrossThreadPersistent, UntracedMember>(heap);
+ MemberCtor<subtle::WeakCrossThreadPersistent, Member>(heap);
+ MemberCtor<subtle::WeakCrossThreadPersistent, WeakMember>(heap);
+ MemberCtor<subtle::WeakCrossThreadPersistent, UntracedMember>(heap);
}
template <template <typename> class PersistentType>
-void NullStateAssignemnt(cppgc::Heap* heap) {
+void NullStateAssignment(cppgc::Heap* heap) {
EXPECT_EQ(0u, GetRegion<PersistentType>(heap).NodesInUse());
{
PersistentType<GCed> p =
@@ -292,8 +341,10 @@ void NullStateAssignemnt(cppgc::Heap* heap) {
TEST_F(PersistentTest, NullStateAssignemnt) {
auto* heap = GetHeap();
- NullStateAssignemnt<Persistent>(heap);
- NullStateAssignemnt<WeakPersistent>(heap);
+ NullStateAssignment<Persistent>(heap);
+ NullStateAssignment<WeakPersistent>(heap);
+ NullStateAssignment<subtle::CrossThreadPersistent>(heap);
+ NullStateAssignment<subtle::WeakCrossThreadPersistent>(heap);
}
template <template <typename> class PersistentType>
@@ -322,6 +373,8 @@ TEST_F(PersistentTest, RawAssignment) {
auto* heap = GetHeap();
RawAssignment<Persistent>(heap);
RawAssignment<WeakPersistent>(heap);
+ RawAssignment<subtle::CrossThreadPersistent>(heap);
+ RawAssignment<subtle::WeakCrossThreadPersistent>(heap);
}
template <template <typename> class PersistentType>
@@ -384,6 +437,8 @@ TEST_F(PersistentTest, CopyAssignment) {
auto* heap = GetHeap();
CopyAssignment<Persistent>(heap);
CopyAssignment<WeakPersistent>(heap);
+ CopyAssignment<subtle::CrossThreadPersistent>(heap);
+ CopyAssignment<subtle::WeakCrossThreadPersistent>(heap);
}
template <template <typename> class PersistentType>
@@ -444,6 +499,8 @@ TEST_F(PersistentTest, MoveAssignment) {
auto* heap = GetHeap();
MoveAssignment<Persistent>(heap);
MoveAssignment<WeakPersistent>(heap);
+ MoveAssignment<subtle::CrossThreadPersistent>(heap);
+ MoveAssignment<subtle::WeakCrossThreadPersistent>(heap);
}
template <template <typename> class PersistentType,
@@ -470,6 +527,12 @@ TEST_F(PersistentTest, MemberAssignment) {
MemberAssignment<WeakPersistent, Member>(heap);
MemberAssignment<WeakPersistent, WeakMember>(heap);
MemberAssignment<WeakPersistent, UntracedMember>(heap);
+ MemberAssignment<subtle::CrossThreadPersistent, Member>(heap);
+ MemberAssignment<subtle::CrossThreadPersistent, WeakMember>(heap);
+ MemberAssignment<subtle::CrossThreadPersistent, UntracedMember>(heap);
+ MemberAssignment<subtle::WeakCrossThreadPersistent, Member>(heap);
+ MemberAssignment<subtle::WeakCrossThreadPersistent, WeakMember>(heap);
+ MemberAssignment<subtle::WeakCrossThreadPersistent, UntracedMember>(heap);
}
template <template <typename> class PersistentType>
@@ -488,6 +551,8 @@ TEST_F(PersistentTest, Clear) {
auto* heap = GetHeap();
ClearTest<Persistent>(heap);
ClearTest<WeakPersistent>(heap);
+ ClearTest<subtle::CrossThreadPersistent>(heap);
+ ClearTest<subtle::WeakCrossThreadPersistent>(heap);
}
template <template <typename> class PersistentType>
@@ -507,6 +572,8 @@ TEST_F(PersistentTest, Release) {
auto* heap = GetHeap();
ReleaseTest<Persistent>(heap);
ReleaseTest<WeakPersistent>(heap);
+ ReleaseTest<subtle::CrossThreadPersistent>(heap);
+ ReleaseTest<subtle::WeakCrossThreadPersistent>(heap);
}
template <template <typename> class PersistentType1,
@@ -630,6 +697,10 @@ TEST_F(PersistentTest, ClearOnHeapDestruction) {
weak_persistent = MakeGarbageCollected<GCed>(heap->GetAllocationHandle());
const Persistent<GCed> persistent_sentinel(kSentinelPointer);
const WeakPersistent<GCed> weak_persistent_sentinel(kSentinelPointer);
+ const subtle::CrossThreadPersistent<GCed> cross_thread_persistent_sentinel(
+ kSentinelPointer);
+ const subtle::WeakCrossThreadPersistent<GCed>
+ cross_thread_weak_persistent_sentinel(kSentinelPointer);
heap.reset();
EXPECT_EQ(nullptr, persistent);
@@ -651,6 +722,14 @@ TEST_F(PersistentTest, LocalizedPersistent) {
EXPECT_EQ(expected_loc.Line() + 1, actual_loc.Line());
}
{
+ const auto expected_loc = SourceLocation::Current();
+ LocalizedCrossThreadPersistent<GCed> p = gced;
+ const auto actual_loc = p.Location();
+ EXPECT_STREQ(expected_loc.Function(), actual_loc.Function());
+ EXPECT_STREQ(expected_loc.FileName(), actual_loc.FileName());
+ EXPECT_EQ(expected_loc.Line() + 1, actual_loc.Line());
+ }
+ {
// Copy ctor doesn't copy source location.
LocalizedPersistent<GCed> p1 = gced;
LocalizedPersistent<GCed> p2 = p1;
@@ -659,6 +738,14 @@ TEST_F(PersistentTest, LocalizedPersistent) {
EXPECT_EQ(p1.Location().Line() + 1, p2.Location().Line());
}
{
+ // Copy ctor doesn't copy source location.
+ LocalizedCrossThreadPersistent<GCed> p1 = gced;
+ LocalizedCrossThreadPersistent<GCed> p2 = p1;
+ EXPECT_STREQ(p1.Location().Function(), p2.Location().Function());
+ EXPECT_STREQ(p1.Location().FileName(), p2.Location().FileName());
+ EXPECT_EQ(p1.Location().Line() + 1, p2.Location().Line());
+ }
+ {
// Copy assignment doesn't copy source location.
LocalizedPersistent<GCed> p1 = gced;
LocalizedPersistent<GCed> p2;
@@ -668,6 +755,15 @@ TEST_F(PersistentTest, LocalizedPersistent) {
EXPECT_EQ(p1.Location().Line() + 1, p2.Location().Line());
}
{
+ // Copy assignment doesn't copy source location.
+ LocalizedCrossThreadPersistent<GCed> p1 = gced;
+ LocalizedCrossThreadPersistent<GCed> p2;
+ p2 = p1;
+ EXPECT_STREQ(p1.Location().Function(), p2.Location().Function());
+ EXPECT_STREQ(p1.Location().FileName(), p2.Location().FileName());
+ EXPECT_EQ(p1.Location().Line() + 1, p2.Location().Line());
+ }
+ {
// Clearing doesn't clear source location.
LocalizedPersistent<GCed> p1 = gced;
LocalizedPersistent<GCed> p2 = gced;
@@ -677,6 +773,15 @@ TEST_F(PersistentTest, LocalizedPersistent) {
EXPECT_EQ(p1.Location().Line() + 1, p2.Location().Line());
}
{
+ // Clearing doesn't clear source location.
+ LocalizedCrossThreadPersistent<GCed> p1 = gced;
+ LocalizedCrossThreadPersistent<GCed> p2 = gced;
+ p2.Clear();
+ EXPECT_STREQ(p1.Location().Function(), p2.Location().Function());
+ EXPECT_STREQ(p1.Location().FileName(), p2.Location().FileName());
+ EXPECT_EQ(p1.Location().Line() + 1, p2.Location().Line());
+ }
+ {
LocalizedPersistent<GCed> p1 = gced;
const auto expected_loc = p1.Location();
LocalizedPersistent<GCed> p2 = std::move(p1);
@@ -685,6 +790,14 @@ TEST_F(PersistentTest, LocalizedPersistent) {
EXPECT_EQ(expected_loc.Line(), p2.Location().Line());
}
{
+ LocalizedCrossThreadPersistent<GCed> p1 = gced;
+ const auto expected_loc = p1.Location();
+ LocalizedCrossThreadPersistent<GCed> p2 = std::move(p1);
+ EXPECT_STREQ(expected_loc.Function(), p2.Location().Function());
+ EXPECT_STREQ(expected_loc.FileName(), p2.Location().FileName());
+ EXPECT_EQ(expected_loc.Line(), p2.Location().Line());
+ }
+ {
LocalizedPersistent<GCed> p1 = gced;
const auto expected_loc = p1.Location();
LocalizedPersistent<GCed> p2;
@@ -693,8 +806,57 @@ TEST_F(PersistentTest, LocalizedPersistent) {
EXPECT_STREQ(expected_loc.FileName(), p2.Location().FileName());
EXPECT_EQ(expected_loc.Line(), p2.Location().Line());
}
+ {
+ LocalizedCrossThreadPersistent<GCed> p1 = gced;
+ const auto expected_loc = p1.Location();
+ LocalizedCrossThreadPersistent<GCed> p2;
+ p2 = std::move(p1);
+ EXPECT_STREQ(expected_loc.Function(), p2.Location().Function());
+ EXPECT_STREQ(expected_loc.FileName(), p2.Location().FileName());
+ EXPECT_EQ(expected_loc.Line(), p2.Location().Line());
+ }
}
+
#endif
+namespace {
+
+class ExpectingLocationVisitor final : public VisitorBase {
+ public:
+ explicit ExpectingLocationVisitor(const SourceLocation& expected_location)
+ : expected_loc_(expected_location) {}
+
+ protected:
+ void VisitRoot(const void* t, TraceDescriptor desc,
+ const SourceLocation& loc) final {
+ EXPECT_STREQ(expected_loc_.Function(), loc.Function());
+ EXPECT_STREQ(expected_loc_.FileName(), loc.FileName());
+ EXPECT_EQ(expected_loc_.Line(), loc.Line());
+ }
+
+ private:
+ const SourceLocation& expected_loc_;
+};
+
+} // namespace
+
+TEST_F(PersistentTest, PersistentTraceLocation) {
+ GCed* gced = MakeGarbageCollected<GCed>(GetAllocationHandle());
+ {
+#if CPPGC_SUPPORTS_SOURCE_LOCATION
+ // Baseline for creating expected location which has a different line
+ // number.
+ const auto loc = SourceLocation::Current();
+ const auto expected_loc =
+ SourceLocation::Current(loc.Function(), loc.FileName(), loc.Line() + 6);
+#else // !CPPGC_SUPPORTS_SOURCE_LOCATION
+ const SourceLocation expected_loc;
+#endif // !CPPGC_SUPPORTS_SOURCE_LOCATION
+ LocalizedPersistent<GCed> p = gced;
+ ExpectingLocationVisitor visitor(expected_loc);
+ visitor.TraceRootForTesting(p, p.Location());
+ }
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/stack-unittest.cc b/deps/v8/test/unittests/heap/cppgc/stack-unittest.cc
index a4b50f8d30..0fff908b36 100644
--- a/deps/v8/test/unittests/heap/cppgc/stack-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/stack-unittest.cc
@@ -261,6 +261,8 @@ TEST_F(GCStackTest, IteratePointersFindsParameterNesting7) {
EXPECT_TRUE(scanner->found());
}
+// Disabled on MSVC due to a miscompilation; see https://crbug.com/v8/10658.
+#if !defined(_MSC_VER) || defined(__clang__)
TEST_F(GCStackTest, IteratePointersFindsParameterNesting8) {
auto scanner = std::make_unique<StackScanner>();
void* needle = RecursivelyPassOnParameter(8, scanner->needle(), GetStack(),
@@ -268,6 +270,7 @@ TEST_F(GCStackTest, IteratePointersFindsParameterNesting8) {
EXPECT_EQ(scanner->needle(), needle);
EXPECT_TRUE(scanner->found());
}
+#endif // !_MSC_VER || __clang__
// The following test uses inline assembly and has been checked to work on clang
// to verify that the stack-scanning trampoline pushes callee-saved registers.
diff --git a/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc b/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc
index 3591af29a4..8031deac41 100644
--- a/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/sweeper-unittest.cc
@@ -48,7 +48,10 @@ class SweeperTest : public testing::TestWithHeap {
// methods are called in the right order.
heap->stats_collector()->NotifyMarkingStarted();
heap->stats_collector()->NotifyMarkingCompleted(0);
- sweeper.Start(Sweeper::Config::kAtomic);
+ const Sweeper::SweepingConfig sweeping_config{
+ Sweeper::SweepingConfig::SweepingType::kAtomic,
+ Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep};
+ sweeper.Start(sweeping_config);
sweeper.FinishIfRunning();
}
diff --git a/deps/v8/test/unittests/heap/cppgc/test-platform.cc b/deps/v8/test/unittests/heap/cppgc/test-platform.cc
index c649b1e89d..2268d546b3 100644
--- a/deps/v8/test/unittests/heap/cppgc/test-platform.cc
+++ b/deps/v8/test/unittests/heap/cppgc/test-platform.cc
@@ -4,132 +4,29 @@
#include "test/unittests/heap/cppgc/test-platform.h"
+#include "include/libplatform/libplatform.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
-#include "src/heap/cppgc/default-job.h"
namespace cppgc {
namespace internal {
namespace testing {
-namespace {
-class TestJobThread final : public v8::base::Thread {
- public:
- using id = uint8_t;
-
- explicit TestJobThread(TestJob* job) : Thread(Options("job")), job_(job) {}
-
- void Run() final;
-
- static size_t GetMaxSupportedConcurrency() { return 4u; }
-
- private:
- TestJob* const job_;
-};
-} // namespace
-
-// Default implementation of Jobs based on std::thread.
-class TestJob final : public DefaultJobImpl<TestJobThread> {
- public:
- explicit TestJob(Key key, std::unique_ptr<cppgc::JobTask> job_task)
- : DefaultJobImpl(key, std::move(job_task)) {}
-
- std::shared_ptr<TestJobThread> CreateThread(DefaultJobImpl* job) final {
- std::shared_ptr<TestJobThread> thread =
- std::make_shared<TestJobThread>(this);
- const bool thread_started = thread->Start();
- USE(thread_started);
- DCHECK(thread_started);
- return thread;
- }
-};
-
-void TestJobThread::Run() {
- DCHECK_NOT_NULL(job_);
- job_->RunJobTask();
-}
-
-void TestTaskRunner::PostTask(std::unique_ptr<cppgc::Task> task) {
- tasks_.push_back(std::move(task));
-}
-
-void TestTaskRunner::PostNonNestableTask(std::unique_ptr<cppgc::Task> task) {
- PostTask(std::move(task));
-}
-
-void TestTaskRunner::PostDelayedTask(std::unique_ptr<cppgc::Task> task,
- double) {
- PostTask(std::move(task));
-}
-
-void TestTaskRunner::PostNonNestableDelayedTask(
- std::unique_ptr<cppgc::Task> task, double) {
- PostTask(std::move(task));
-}
-
-void TestTaskRunner::PostIdleTask(std::unique_ptr<cppgc::IdleTask> task) {
- idle_tasks_.push_back(std::move(task));
-}
-
-bool TestTaskRunner::RunSingleTask() {
- if (!tasks_.size()) return false;
-
- tasks_.back()->Run();
- tasks_.pop_back();
-
- return true;
-}
-
-bool TestTaskRunner::RunSingleIdleTask(double deadline_in_seconds) {
- if (!idle_tasks_.size()) return false;
-
- idle_tasks_.back()->Run(deadline_in_seconds);
- idle_tasks_.pop_back();
-
- return true;
-}
-
-void TestTaskRunner::RunUntilIdle() {
- for (auto& task : tasks_) {
- task->Run();
- }
- tasks_.clear();
-
- for (auto& task : idle_tasks_) {
- task->Run(std::numeric_limits<double>::infinity());
- }
- idle_tasks_.clear();
-}
-
TestPlatform::TestPlatform()
- : foreground_task_runner_(std::make_unique<TestTaskRunner>()) {}
-
-TestPlatform::~TestPlatform() V8_NOEXCEPT { WaitAllBackgroundTasks(); }
+ : DefaultPlatform(0, DefaultPlatform::IdleTaskSupport::kEnabled) {}
std::unique_ptr<cppgc::JobHandle> TestPlatform::PostJob(
- cppgc::TaskPriority, std::unique_ptr<cppgc::JobTask> job_task) {
- if (AreBackgroundTasksDisabled()) return {};
-
- std::shared_ptr<TestJob> job =
- DefaultJobFactory<TestJob>::Create(std::move(job_task));
- jobs_.push_back(job);
- return std::make_unique<TestJob::JobHandle>(std::move(job));
-}
-
-double TestPlatform::MonotonicallyIncreasingTime() {
- return v8::base::TimeTicks::HighResolutionNow().ToInternalValue() /
- static_cast<double>(v8::base::Time::kMicrosecondsPerSecond);
-}
-
-void TestPlatform::WaitAllForegroundTasks() {
- foreground_task_runner_->RunUntilIdle();
+ cppgc::TaskPriority priority, std::unique_ptr<cppgc::JobTask> job_task) {
+ if (AreBackgroundTasksDisabled()) return nullptr;
+ return v8_platform_->PostJob(priority, std::move(job_task));
}
-void TestPlatform::WaitAllBackgroundTasks() {
- for (auto& job : jobs_) {
- job->Join();
+void TestPlatform::RunAllForegroundTasks() {
+ v8::platform::PumpMessageLoop(v8_platform_.get(), kNoIsolate);
+ if (GetForegroundTaskRunner()->IdleTasksEnabled()) {
+ v8::platform::RunIdleTasks(v8_platform_.get(), kNoIsolate,
+ std::numeric_limits<double>::max());
}
- jobs_.clear();
}
TestPlatform::DisableBackgroundTasksScope::DisableBackgroundTasksScope(
diff --git a/deps/v8/test/unittests/heap/cppgc/test-platform.h b/deps/v8/test/unittests/heap/cppgc/test-platform.h
index 1faa6efb40..d6a93f45c9 100644
--- a/deps/v8/test/unittests/heap/cppgc/test-platform.h
+++ b/deps/v8/test/unittests/heap/cppgc/test-platform.h
@@ -5,45 +5,14 @@
#ifndef V8_UNITTESTS_HEAP_CPPGC_TEST_PLATFORM_H_
#define V8_UNITTESTS_HEAP_CPPGC_TEST_PLATFORM_H_
-#include <memory>
-#include <vector>
-
-#include "include/cppgc/platform.h"
-#include "src/base/page-allocator.h"
-#include "src/base/platform/platform.h"
+#include "include/cppgc/default-platform.h"
+#include "src/base/compiler-specific.h"
namespace cppgc {
namespace internal {
namespace testing {
-class TestJob;
-
-class TestTaskRunner : public cppgc::TaskRunner {
- public:
- void PostTask(std::unique_ptr<cppgc::Task> task) override;
- void PostDelayedTask(std::unique_ptr<cppgc::Task> task, double) override;
-
- bool NonNestableTasksEnabled() const override { return true; }
- void PostNonNestableTask(std::unique_ptr<cppgc::Task> task) override;
-
- bool NonNestableDelayedTasksEnabled() const override { return true; }
- void PostNonNestableDelayedTask(std::unique_ptr<cppgc::Task> task,
- double) override;
-
- bool IdleTasksEnabled() override { return true; }
- void PostIdleTask(std::unique_ptr<cppgc::IdleTask> task) override;
-
- bool RunSingleTask();
- bool RunSingleIdleTask(double duration_in_seconds);
-
- void RunUntilIdle();
-
- private:
- std::vector<std::unique_ptr<cppgc::Task>> tasks_;
- std::vector<std::unique_ptr<cppgc::IdleTask>> idle_tasks_;
-};
-
-class TestPlatform : public Platform {
+class TestPlatform : public DefaultPlatform {
public:
class DisableBackgroundTasksScope {
public:
@@ -55,32 +24,18 @@ class TestPlatform : public Platform {
};
TestPlatform();
- ~TestPlatform() V8_NOEXCEPT override;
- PageAllocator* GetPageAllocator() override { return &page_allocator_; }
-
- std::shared_ptr<cppgc::TaskRunner> GetForegroundTaskRunner() override {
- return foreground_task_runner_;
- }
-
- // TestPlatform does not support job priorities. All jobs would be assigned
- // the same priority regardless of the cppgc::TaskPriority parameter.
std::unique_ptr<cppgc::JobHandle> PostJob(
- cppgc::TaskPriority, std::unique_ptr<cppgc::JobTask> job_task) override;
-
- double MonotonicallyIncreasingTime() override;
+ cppgc::TaskPriority priority,
+ std::unique_ptr<cppgc::JobTask> job_task) final;
- void WaitAllForegroundTasks();
- void WaitAllBackgroundTasks();
+ void RunAllForegroundTasks();
private:
bool AreBackgroundTasksDisabled() const {
return disabled_background_tasks_ > 0;
}
- v8::base::PageAllocator page_allocator_;
- std::shared_ptr<TestTaskRunner> foreground_task_runner_;
- std::vector<std::shared_ptr<TestJob>> jobs_;
size_t disabled_background_tasks_ = 0;
};
diff --git a/deps/v8/test/unittests/heap/cppgc/tests.h b/deps/v8/test/unittests/heap/cppgc/tests.h
index 175116d985..ac445c3370 100644
--- a/deps/v8/test/unittests/heap/cppgc/tests.h
+++ b/deps/v8/test/unittests/heap/cppgc/tests.h
@@ -33,8 +33,9 @@ class TestWithHeap : public TestWithPlatform {
TestWithHeap();
void PreciseGC() {
- heap_->ForceGarbageCollectionSlow("TestWithHeap", "Testing",
- cppgc::Heap::StackState::kNoHeapPointers);
+ heap_->ForceGarbageCollectionSlow(
+ ::testing::UnitTest::GetInstance()->current_test_info()->name(),
+ "Testing", cppgc::Heap::StackState::kNoHeapPointers);
}
cppgc::Heap* GetHeap() const { return heap_.get(); }
diff --git a/deps/v8/test/unittests/heap/cppgc/weak-container-unittest.cc b/deps/v8/test/unittests/heap/cppgc/weak-container-unittest.cc
new file mode 100644
index 0000000000..d21f4249b3
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/weak-container-unittest.cc
@@ -0,0 +1,184 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/allocation.h"
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/marking-visitor.h"
+#include "src/heap/cppgc/stats-collector.h"
+#include "test/unittests/heap/cppgc/tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+class WeakContainerTest : public testing::TestWithHeap {
+ public:
+ using Config = Marker::MarkingConfig;
+
+ void StartMarking() {
+ Config config = {Config::CollectionType::kMajor,
+ Config::StackState::kNoHeapPointers,
+ Config::MarkingType::kIncremental};
+ GetMarkerRef() = MarkerFactory::CreateAndStartMarking<Marker>(
+ Heap::From(GetHeap())->AsBase(), GetPlatformHandle().get(), config);
+ }
+
+ void FinishMarking(Config::StackState stack_state) {
+ GetMarkerRef()->FinishMarking(stack_state);
+ Heap::From(GetHeap())->stats_collector()->NotifySweepingCompleted();
+ }
+};
+
+class TraceableGCed : public GarbageCollected<TraceableGCed> {
+ public:
+ void Trace(cppgc::Visitor*) const { n_trace_calls++; }
+ static size_t n_trace_calls;
+};
+size_t TraceableGCed::n_trace_calls = 0u;
+
+class NonTraceableGCed : public GarbageCollected<NonTraceableGCed> {
+ public:
+ void Trace(cppgc::Visitor*) const { n_trace_calls++; }
+ static size_t n_trace_calls;
+};
+size_t NonTraceableGCed::n_trace_calls = 0u;
+
+void EmptyWeakCallback(const LivenessBroker&, const void*) {}
+
+template <typename T>
+V8_NOINLINE T access(volatile const T& t) {
+ return t;
+}
+
+} // namespace
+
+} // namespace internal
+
+template <>
+struct TraceTrait<internal::TraceableGCed>
+ : public internal::TraceTraitBase<internal::TraceableGCed> {
+ static TraceDescriptor GetWeakTraceDescriptor(const void* self) {
+ return {self, Trace};
+ }
+};
+
+template <>
+struct TraceTrait<internal::NonTraceableGCed>
+ : public internal::TraceTraitBase<internal::NonTraceableGCed> {
+ static TraceDescriptor GetWeakTraceDescriptor(const void* self) {
+ return {self, nullptr};
+ }
+};
+
+namespace internal {
+
+TEST_F(WeakContainerTest, TraceableGCedTraced) {
+ TraceableGCed* obj =
+ MakeGarbageCollected<TraceableGCed>(GetAllocationHandle());
+ TraceableGCed::n_trace_calls = 0u;
+ StartMarking();
+ GetMarkerRef()->VisitorForTesting().TraceWeakContainer(obj, EmptyWeakCallback,
+ nullptr);
+ FinishMarking(Config::StackState::kNoHeapPointers);
+ EXPECT_NE(0u, TraceableGCed::n_trace_calls);
+ access(obj);
+}
+
+TEST_F(WeakContainerTest, NonTraceableGCedNotTraced) {
+ NonTraceableGCed* obj =
+ MakeGarbageCollected<NonTraceableGCed>(GetAllocationHandle());
+ NonTraceableGCed::n_trace_calls = 0u;
+ StartMarking();
+ GetMarkerRef()->VisitorForTesting().TraceWeakContainer(obj, EmptyWeakCallback,
+ nullptr);
+ FinishMarking(Config::StackState::kNoHeapPointers);
+ EXPECT_EQ(0u, NonTraceableGCed::n_trace_calls);
+ access(obj);
+}
+
+TEST_F(WeakContainerTest, NonTraceableGCedTracedConservatively) {
+ NonTraceableGCed* obj =
+ MakeGarbageCollected<NonTraceableGCed>(GetAllocationHandle());
+ NonTraceableGCed::n_trace_calls = 0u;
+ StartMarking();
+ GetMarkerRef()->VisitorForTesting().TraceWeakContainer(obj, EmptyWeakCallback,
+ nullptr);
+ FinishMarking(Config::StackState::kMayContainHeapPointers);
+ EXPECT_NE(0u, NonTraceableGCed::n_trace_calls);
+ access(obj);
+}
+
+TEST_F(WeakContainerTest, ConservativeGCTracesWeakContainer) {
+ size_t trace_count_without_conservative;
+ {
+ TraceableGCed* obj =
+ MakeGarbageCollected<TraceableGCed>(GetAllocationHandle());
+ TraceableGCed::n_trace_calls = 0u;
+ StartMarking();
+ GetMarkerRef()->VisitorForTesting().TraceWeakContainer(
+ obj, EmptyWeakCallback, nullptr);
+ FinishMarking(Config::StackState::kNoHeapPointers);
+ trace_count_without_conservative = TraceableGCed::n_trace_calls;
+ access(obj);
+ }
+ {
+ TraceableGCed* obj =
+ MakeGarbageCollected<TraceableGCed>(GetAllocationHandle());
+ TraceableGCed::n_trace_calls = 0u;
+ StartMarking();
+ GetMarkerRef()->VisitorForTesting().TraceWeakContainer(
+ obj, EmptyWeakCallback, nullptr);
+ FinishMarking(Config::StackState::kMayContainHeapPointers);
+ EXPECT_LT(trace_count_without_conservative, TraceableGCed::n_trace_calls);
+ access(obj);
+ }
+}
+
+TEST_F(WeakContainerTest, ConservativeGCTracesWeakContainerOnce) {
+ NonTraceableGCed* obj =
+ MakeGarbageCollected<NonTraceableGCed>(GetAllocationHandle());
+ NonTraceableGCed* copy_obj = obj;
+ USE(copy_obj);
+ NonTraceableGCed* another_copy_obj = obj;
+ USE(another_copy_obj);
+ NonTraceableGCed::n_trace_calls = 0u;
+ StartMarking();
+ GetMarkerRef()->VisitorForTesting().TraceWeakContainer(obj, EmptyWeakCallback,
+ nullptr);
+ FinishMarking(Config::StackState::kMayContainHeapPointers);
+ EXPECT_EQ(1u, NonTraceableGCed::n_trace_calls);
+ access(obj);
+}
+
+namespace {
+
+struct WeakCallback {
+ static void callback(const LivenessBroker&, const void* data) {
+ n_callback_called++;
+ obj = data;
+ }
+ static size_t n_callback_called;
+ static const void* obj;
+};
+size_t WeakCallback::n_callback_called = 0u;
+const void* WeakCallback::obj = nullptr;
+
+} // namespace
+
+TEST_F(WeakContainerTest, WeakContainerWeakCallbackCalled) {
+ TraceableGCed* obj =
+ MakeGarbageCollected<TraceableGCed>(GetAllocationHandle());
+ WeakCallback::n_callback_called = 0u;
+ WeakCallback::obj = nullptr;
+ StartMarking();
+ GetMarkerRef()->VisitorForTesting().TraceWeakContainer(
+ obj, WeakCallback::callback, obj);
+ FinishMarking(Config::StackState::kMayContainHeapPointers);
+ EXPECT_NE(0u, WeakCallback::n_callback_called);
+ EXPECT_EQ(obj, WeakCallback::obj);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/write-barrier-unittest.cc b/deps/v8/test/unittests/heap/cppgc/write-barrier-unittest.cc
index b06083d1ef..5673d47c8c 100644
--- a/deps/v8/test/unittests/heap/cppgc/write-barrier-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/write-barrier-unittest.cc
@@ -43,9 +43,10 @@ class ExpectWriteBarrierFires final : private IncrementalMarkingScope {
ExpectWriteBarrierFires(MarkerBase* marker,
std::initializer_list<void*> objects)
: IncrementalMarkingScope(marker),
- marking_worklist_(marker->MarkingStateForTesting().marking_worklist()),
+ marking_worklist_(
+ marker->MutatorMarkingStateForTesting().marking_worklist()),
write_barrier_worklist_(
- marker->MarkingStateForTesting().write_barrier_worklist()),
+ marker->MutatorMarkingStateForTesting().write_barrier_worklist()),
objects_(objects) {
EXPECT_TRUE(marking_worklist_.IsGlobalEmpty());
EXPECT_TRUE(write_barrier_worklist_.IsGlobalEmpty());
@@ -92,9 +93,10 @@ class ExpectNoWriteBarrierFires final : private IncrementalMarkingScope {
ExpectNoWriteBarrierFires(MarkerBase* marker,
std::initializer_list<void*> objects)
: IncrementalMarkingScope(marker),
- marking_worklist_(marker->MarkingStateForTesting().marking_worklist()),
+ marking_worklist_(
+ marker->MutatorMarkingStateForTesting().marking_worklist()),
write_barrier_worklist_(
- marker->MarkingStateForTesting().write_barrier_worklist()) {
+ marker->MutatorMarkingStateForTesting().write_barrier_worklist()) {
EXPECT_TRUE(marking_worklist_.IsGlobalEmpty());
EXPECT_TRUE(write_barrier_worklist_.IsGlobalEmpty());
for (void* object : objects) {
diff --git a/deps/v8/test/unittests/heap/heap-unittest.cc b/deps/v8/test/unittests/heap/heap-unittest.cc
index 34774b3a35..2446e14682 100644
--- a/deps/v8/test/unittests/heap/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-unittest.cc
@@ -19,8 +19,7 @@
namespace v8 {
namespace internal {
-using HeapTest = TestWithIsolate;
-using HeapWithPointerCompressionTest = TestWithIsolateAndPointerCompression;
+using HeapTest = TestWithContext;
TEST(Heap, YoungGenerationSizeFromOldGenerationSize) {
const size_t MB = static_cast<size_t>(i::MB);
@@ -136,8 +135,8 @@ TEST_F(HeapTest, ExternalLimitStaysAboveDefaultForExplicitHandling) {
EXPECT_GE(heap->external_memory_limit(), kExternalAllocationSoftLimit);
}
-#if V8_TARGET_ARCH_64_BIT
-TEST_F(HeapWithPointerCompressionTest, HeapLayout) {
+#ifdef V8_COMPRESS_POINTERS
+TEST_F(HeapTest, HeapLayout) {
// Produce some garbage.
RunJS(
"let ar = [];"
@@ -163,7 +162,7 @@ TEST_F(HeapWithPointerCompressionTest, HeapLayout) {
EXPECT_TRUE(heap_reservation.contains(address, size));
}
}
-#endif // V8_TARGET_ARCH_64_BIT
+#endif // V8_COMPRESS_POINTERS
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/heap-utils.h b/deps/v8/test/unittests/heap/heap-utils.h
index f231e11c6d..7474370aad 100644
--- a/deps/v8/test/unittests/heap/heap-utils.h
+++ b/deps/v8/test/unittests/heap/heap-utils.h
@@ -17,15 +17,14 @@ template <typename TMixin>
class WithHeapInternals : public TMixin {
public:
WithHeapInternals() = default;
+ WithHeapInternals(const WithHeapInternals&) = delete;
+ WithHeapInternals& operator=(const WithHeapInternals&) = delete;
void CollectGarbage(i::AllocationSpace space) {
heap()->CollectGarbage(space, i::GarbageCollectionReason::kTesting);
}
Heap* heap() const { return this->i_isolate()->heap(); }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(WithHeapInternals);
};
using TestWithHeapInternals = //
diff --git a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
index be3ca20938..7883283766 100644
--- a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
+++ b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
@@ -13,12 +13,13 @@ namespace internal {
class ItemParallelJobTest : public TestWithIsolate {
public:
ItemParallelJobTest() : parallel_job_semaphore_(0) {}
+ ItemParallelJobTest(const ItemParallelJobTest&) = delete;
+ ItemParallelJobTest& operator=(const ItemParallelJobTest&) = delete;
base::Semaphore* parallel_job_semaphore() { return &parallel_job_semaphore_; }
private:
base::Semaphore parallel_job_semaphore_;
- DISALLOW_COPY_AND_ASSIGN(ItemParallelJobTest);
};
namespace {
diff --git a/deps/v8/test/unittests/heap/js-member-unittest.cc b/deps/v8/test/unittests/heap/js-member-unittest.cc
deleted file mode 100644
index 430395950e..0000000000
--- a/deps/v8/test/unittests/heap/js-member-unittest.cc
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "include/v8-cppgc.h"
-#include "test/unittests/test-utils.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace v8 {
-namespace internal {
-
-using JSMemberTest = TestWithIsolate;
-
-TEST_F(JSMemberTest, ResetFromLocal) {
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
- v8::JSMember<v8::Object> member;
- {
- v8::HandleScope handles(v8_isolate());
- v8::Local<v8::Object> local =
- v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
- EXPECT_TRUE(member.IsEmpty());
- EXPECT_NE(member, local);
- member.Set(v8_isolate(), local);
- EXPECT_FALSE(member.IsEmpty());
- EXPECT_EQ(member, local);
- }
-}
-
-TEST_F(JSMemberTest, ConstructFromLocal) {
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
- {
- v8::HandleScope handles(v8_isolate());
- v8::Local<v8::Object> local =
- v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
- v8::JSMember<v8::Object> member(v8_isolate(), local);
- EXPECT_FALSE(member.IsEmpty());
- EXPECT_EQ(member, local);
- }
-}
-
-TEST_F(JSMemberTest, Reset) {
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
- {
- v8::HandleScope handles(v8_isolate());
- v8::Local<v8::Object> local =
- v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
- v8::JSMember<v8::Object> member(v8_isolate(), local);
- EXPECT_FALSE(member.IsEmpty());
- EXPECT_EQ(member, local);
- member.Reset();
- EXPECT_TRUE(member.IsEmpty());
- EXPECT_NE(member, local);
- }
-}
-
-TEST_F(JSMemberTest, Copy) {
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
- {
- v8::HandleScope handles(v8_isolate());
- v8::Local<v8::Object> local =
- v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
- v8::JSMember<v8::Object> member(v8_isolate(), local);
- v8::JSMember<v8::Object> member_copy1(member);
- v8::JSMember<v8::Object> member_copy2 = member;
- EXPECT_EQ(member, local);
- EXPECT_EQ(member_copy1, local);
- EXPECT_EQ(member_copy2, local);
- }
-}
-
-TEST_F(JSMemberTest, CopyHeterogenous) {
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
- {
- v8::HandleScope handles(v8_isolate());
- v8::Local<v8::Object> local =
- v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
- v8::JSMember<v8::Object> member(v8_isolate(), local);
- v8::JSMember<v8::Value> member_copy1(member);
- v8::JSMember<v8::Value> member_copy2 = member;
- EXPECT_EQ(member, local);
- EXPECT_EQ(member_copy1, local);
- EXPECT_EQ(member_copy2, local);
- }
-}
-
-TEST_F(JSMemberTest, Move) {
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
- {
- v8::HandleScope handles(v8_isolate());
- v8::Local<v8::Object> local =
- v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
- v8::JSMember<v8::Object> member(v8_isolate(), local);
- v8::JSMember<v8::Object> member_moved1(std::move(member));
- v8::JSMember<v8::Object> member_moved2 = std::move(member_moved1);
- EXPECT_TRUE(member.IsEmpty());
- EXPECT_TRUE(member_moved1.IsEmpty());
- EXPECT_EQ(member_moved2, local);
- }
-}
-
-TEST_F(JSMemberTest, MoveHeterogenous) {
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
- {
- v8::HandleScope handles(v8_isolate());
- v8::Local<v8::Object> local =
- v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
- v8::JSMember<v8::Object> member1(v8_isolate(), local);
- v8::JSMember<v8::Value> member_moved1(std::move(member1));
- v8::JSMember<v8::Object> member2(v8_isolate(), local);
- v8::JSMember<v8::Object> member_moved2 = std::move(member2);
- EXPECT_TRUE(member1.IsEmpty());
- EXPECT_EQ(member_moved1, local);
- EXPECT_TRUE(member2.IsEmpty());
- EXPECT_EQ(member_moved2, local);
- }
-}
-
-TEST_F(JSMemberTest, Equality) {
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
- {
- v8::HandleScope handles(v8_isolate());
- v8::Local<v8::Object> local1 =
- v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
- v8::JSMember<v8::Object> member1(v8_isolate(), local1);
- v8::JSMember<v8::Object> member2(v8_isolate(), local1);
- EXPECT_EQ(member1, member2);
- EXPECT_EQ(member2, member1);
- v8::Local<v8::Object> local2 =
- v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
- v8::JSMember<v8::Object> member3(v8_isolate(), local2);
- EXPECT_NE(member2, member3);
- EXPECT_NE(member3, member2);
- }
-}
-
-TEST_F(JSMemberTest, EqualityHeterogenous) {
- v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
- v8::Context::Scope context_scope(context);
- {
- v8::HandleScope handles(v8_isolate());
- v8::Local<v8::Object> local1 =
- v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
- v8::JSMember<v8::Object> member1(v8_isolate(), local1);
- v8::JSMember<v8::Value> member2(v8_isolate(), local1);
- EXPECT_EQ(member1, member2);
- EXPECT_EQ(member2, member1);
- v8::Local<v8::Object> local2 =
- v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
- v8::JSMember<v8::Object> member3(v8_isolate(), local2);
- EXPECT_NE(member2, member3);
- EXPECT_NE(member3, member2);
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/heap/local-factory-unittest.cc b/deps/v8/test/unittests/heap/local-factory-unittest.cc
index a1d750f033..a5b9646d3c 100644
--- a/deps/v8/test/unittests/heap/local-factory-unittest.cc
+++ b/deps/v8/test/unittests/heap/local-factory-unittest.cc
@@ -62,7 +62,8 @@ class LocalFactoryTest : public TestWithIsolateAndZone {
isolate(), true, construct_language_mode(FLAG_use_strict),
REPLMode::kNo),
&state_),
- local_isolate_(isolate()) {
+ local_isolate_(isolate(), ThreadKind::kMain),
+ unparked_scope_(local_isolate_.heap()) {
FLAG_concurrent_allocation = true;
}
@@ -114,6 +115,7 @@ class LocalFactoryTest : public TestWithIsolateAndZone {
UnoptimizedCompileState state_;
ParseInfo parse_info_;
LocalIsolate local_isolate_;
+ UnparkedScope unparked_scope_;
Handle<String> source_string_;
Handle<Script> script_;
};
diff --git a/deps/v8/test/unittests/heap/local-heap-unittest.cc b/deps/v8/test/unittests/heap/local-heap-unittest.cc
index f44075da81..06dfede895 100644
--- a/deps/v8/test/unittests/heap/local-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/local-heap-unittest.cc
@@ -19,7 +19,7 @@ TEST_F(LocalHeapTest, Initialize) {
CHECK(!heap->safepoint()->ContainsAnyLocalHeap());
{
- LocalHeap lh(heap);
+ LocalHeap lh(heap, ThreadKind::kMain);
CHECK(heap->safepoint()->ContainsLocalHeap(&lh));
}
@@ -32,14 +32,14 @@ TEST_F(LocalHeapTest, Current) {
CHECK_NULL(LocalHeap::Current());
{
- LocalHeap lh(heap);
+ LocalHeap lh(heap, ThreadKind::kMain);
CHECK_EQ(&lh, LocalHeap::Current());
}
CHECK_NULL(LocalHeap::Current());
{
- LocalHeap lh(heap);
+ LocalHeap lh(heap, ThreadKind::kMain);
CHECK_EQ(&lh, LocalHeap::Current());
}
@@ -56,7 +56,7 @@ class BackgroundThread final : public v8::base::Thread {
void Run() override {
CHECK_NULL(LocalHeap::Current());
{
- LocalHeap lh(heap_);
+ LocalHeap lh(heap_, ThreadKind::kBackground);
CHECK_EQ(&lh, LocalHeap::Current());
}
CHECK_NULL(LocalHeap::Current());
@@ -70,7 +70,7 @@ TEST_F(LocalHeapTest, CurrentBackground) {
Heap* heap = i_isolate()->heap();
CHECK_NULL(LocalHeap::Current());
{
- LocalHeap lh(heap);
+ LocalHeap lh(heap, ThreadKind::kMain);
auto thread = std::make_unique<BackgroundThread>(heap);
CHECK(thread->Start());
CHECK_EQ(&lh, LocalHeap::Current());
diff --git a/deps/v8/test/unittests/heap/safepoint-unittest.cc b/deps/v8/test/unittests/heap/safepoint-unittest.cc
index 214d15277c..4ae34adc22 100644
--- a/deps/v8/test/unittests/heap/safepoint-unittest.cc
+++ b/deps/v8/test/unittests/heap/safepoint-unittest.cc
@@ -40,10 +40,9 @@ class ParkedThread final : public v8::base::Thread {
mutex_(mutex) {}
void Run() override {
- LocalHeap local_heap(heap_);
+ LocalHeap local_heap(heap_, ThreadKind::kBackground);
if (mutex_) {
- ParkedScope scope(&local_heap);
base::MutexGuard guard(mutex_);
}
}
@@ -99,7 +98,8 @@ class RunningThread final : public v8::base::Thread {
counter_(counter) {}
void Run() override {
- LocalHeap local_heap(heap_);
+ LocalHeap local_heap(heap_, ThreadKind::kBackground);
+ UnparkedScope unparked_scope(&local_heap);
for (int i = 0; i < kRuns; i++) {
counter_->fetch_add(1);
@@ -147,7 +147,8 @@ TEST_F(SafepointTest, StopRunningThreads) {
TEST_F(SafepointTest, SkipLocalHeapOfThisThread) {
EnsureFlagLocalHeapsEnabled();
Heap* heap = i_isolate()->heap();
- LocalHeap local_heap(heap);
+ LocalHeap local_heap(heap, ThreadKind::kMain);
+ UnparkedScope unparked_scope(&local_heap);
{
SafepointScope scope(heap);
local_heap.Safepoint();
diff --git a/deps/v8/test/unittests/heap/traced-reference-unittest.cc b/deps/v8/test/unittests/heap/traced-reference-unittest.cc
new file mode 100644
index 0000000000..b47262ec57
--- /dev/null
+++ b/deps/v8/test/unittests/heap/traced-reference-unittest.cc
@@ -0,0 +1,204 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8-cppgc.h"
+#include "src/heap/cppgc/visitor.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+using TracedReferenceTest = TestWithIsolate;
+
+TEST_F(TracedReferenceTest, ResetFromLocal) {
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+ v8::TracedReference<v8::Object> ref;
+ {
+ v8::HandleScope handles(v8_isolate());
+ v8::Local<v8::Object> local =
+ v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
+ EXPECT_TRUE(ref.IsEmpty());
+ EXPECT_NE(ref, local);
+ ref.Reset(v8_isolate(), local);
+ EXPECT_FALSE(ref.IsEmpty());
+ EXPECT_EQ(ref, local);
+ }
+}
+
+TEST_F(TracedReferenceTest, ConstructFromLocal) {
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+ {
+ v8::HandleScope handles(v8_isolate());
+ v8::Local<v8::Object> local =
+ v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
+ v8::TracedReference<v8::Object> ref(v8_isolate(), local);
+ EXPECT_FALSE(ref.IsEmpty());
+ EXPECT_EQ(ref, local);
+ }
+}
+
+TEST_F(TracedReferenceTest, Reset) {
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+ {
+ v8::HandleScope handles(v8_isolate());
+ v8::Local<v8::Object> local =
+ v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
+ v8::TracedReference<v8::Object> ref(v8_isolate(), local);
+ EXPECT_FALSE(ref.IsEmpty());
+ EXPECT_EQ(ref, local);
+ ref.Reset();
+ EXPECT_TRUE(ref.IsEmpty());
+ EXPECT_NE(ref, local);
+ }
+}
+
+TEST_F(TracedReferenceTest, Copy) {
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+ {
+ v8::HandleScope handles(v8_isolate());
+ v8::Local<v8::Object> local =
+ v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
+ v8::TracedReference<v8::Object> ref(v8_isolate(), local);
+ v8::TracedReference<v8::Object> ref_copy1(ref);
+ v8::TracedReference<v8::Object> ref_copy2 = ref;
+ EXPECT_EQ(ref, local);
+ EXPECT_EQ(ref_copy1, local);
+ EXPECT_EQ(ref_copy2, local);
+ }
+}
+
+TEST_F(TracedReferenceTest, CopyHeterogenous) {
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+ {
+ v8::HandleScope handles(v8_isolate());
+ v8::Local<v8::Object> local =
+ v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
+ v8::TracedReference<v8::Object> ref(v8_isolate(), local);
+ v8::TracedReference<v8::Value> ref_copy1(ref);
+ v8::TracedReference<v8::Value> ref_copy2 = ref;
+ EXPECT_EQ(ref, local);
+ EXPECT_EQ(ref_copy1, local);
+ EXPECT_EQ(ref_copy2, local);
+ }
+}
+
+TEST_F(TracedReferenceTest, Move) {
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+ {
+ v8::HandleScope handles(v8_isolate());
+ v8::Local<v8::Object> local =
+ v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
+ v8::TracedReference<v8::Object> ref(v8_isolate(), local);
+ v8::TracedReference<v8::Object> ref_moved1(std::move(ref));
+ v8::TracedReference<v8::Object> ref_moved2 = std::move(ref_moved1);
+ EXPECT_TRUE(ref.IsEmpty());
+ EXPECT_TRUE(ref_moved1.IsEmpty());
+ EXPECT_EQ(ref_moved2, local);
+ }
+}
+
+TEST_F(TracedReferenceTest, MoveHeterogenous) {
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+ {
+ v8::HandleScope handles(v8_isolate());
+ v8::Local<v8::Object> local =
+ v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
+ v8::TracedReference<v8::Object> ref1(v8_isolate(), local);
+ v8::TracedReference<v8::Value> ref_moved1(std::move(ref1));
+ v8::TracedReference<v8::Object> ref2(v8_isolate(), local);
+ v8::TracedReference<v8::Object> ref_moved2 = std::move(ref2);
+ EXPECT_TRUE(ref1.IsEmpty());
+ EXPECT_EQ(ref_moved1, local);
+ EXPECT_TRUE(ref2.IsEmpty());
+ EXPECT_EQ(ref_moved2, local);
+ }
+}
+
+TEST_F(TracedReferenceTest, Equality) {
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+ {
+ v8::HandleScope handles(v8_isolate());
+ v8::Local<v8::Object> local1 =
+ v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
+ v8::TracedReference<v8::Object> ref1(v8_isolate(), local1);
+ v8::TracedReference<v8::Object> ref2(v8_isolate(), local1);
+ EXPECT_EQ(ref1, ref2);
+ EXPECT_EQ(ref2, ref1);
+ v8::Local<v8::Object> local2 =
+ v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
+ v8::TracedReference<v8::Object> ref3(v8_isolate(), local2);
+ EXPECT_NE(ref2, ref3);
+ EXPECT_NE(ref3, ref2);
+ }
+}
+
+TEST_F(TracedReferenceTest, EqualityHeterogenous) {
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+ {
+ v8::HandleScope handles(v8_isolate());
+ v8::Local<v8::Object> local1 =
+ v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
+ v8::TracedReference<v8::Object> ref1(v8_isolate(), local1);
+ v8::TracedReference<v8::Value> ref2(v8_isolate(), local1);
+ EXPECT_EQ(ref1, ref2);
+ EXPECT_EQ(ref2, ref1);
+ v8::Local<v8::Object> local2 =
+ v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
+ v8::TracedReference<v8::Object> ref3(v8_isolate(), local2);
+ EXPECT_NE(ref2, ref3);
+ EXPECT_NE(ref3, ref2);
+ }
+}
+
+namespace {
+
+// Must be used on the stack.
+class JSVisitorForTesting final : public JSVisitor {
+ public:
+ explicit JSVisitorForTesting(v8::Local<v8::Object> expected_object)
+ : JSVisitor(cppgc::internal::VisitorFactory::CreateKey()),
+ expected_object_(expected_object) {}
+
+ void Visit(const TracedReferenceBase& ref) final {
+ EXPECT_EQ(ref, expected_object_);
+ visit_count_++;
+ }
+
+ size_t visit_count() const { return visit_count_; }
+
+ private:
+ v8::Local<v8::Object> expected_object_;
+ size_t visit_count_ = 0;
+};
+
+} // namespace
+
+TEST_F(TracedReferenceTest, TracedReferenceTrace) {
+ v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
+ v8::Context::Scope context_scope(context);
+ {
+ v8::HandleScope handles(v8_isolate());
+ v8::Local<v8::Object> local =
+ v8::Local<v8::Object>::New(v8_isolate(), v8::Object::New(v8_isolate()));
+ v8::TracedReference<v8::Object> js_member(v8_isolate(), local);
+ JSVisitorForTesting visitor(local);
+ // Cast to cppgc::Visitor to ensure that we dispatch through the base
+ // visitor and use traits.
+ static_cast<cppgc::Visitor&>(visitor).Trace(js_member);
+ EXPECT_EQ(1u, visitor.visit_count());
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc b/deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc
new file mode 100644
index 0000000000..0f8363d069
--- /dev/null
+++ b/deps/v8/test/unittests/heap/unified-heap-snapshot-unittest.cc
@@ -0,0 +1,491 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstring>
+
+#include "include/cppgc/allocation.h"
+#include "include/cppgc/cross-thread-persistent.h"
+#include "include/cppgc/garbage-collected.h"
+#include "include/cppgc/name-provider.h"
+#include "include/cppgc/persistent.h"
+#include "include/cppgc/platform.h"
+#include "include/v8-cppgc.h"
+#include "include/v8-profiler.h"
+#include "src/api/api-inl.h"
+#include "src/heap/cppgc-js/cpp-heap.h"
+#include "src/heap/cppgc/object-allocator.h"
+#include "src/objects/objects-inl.h"
+#include "src/profiler/heap-snapshot-generator-inl.h"
+#include "src/profiler/heap-snapshot-generator.h"
+#include "test/unittests/heap/heap-utils.h"
+#include "test/unittests/heap/unified-heap-utils.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+class UnifiedHeapSnapshotTest : public UnifiedHeapTest {
+ public:
+ const v8::HeapSnapshot* TakeHeapSnapshot() {
+ v8::HeapProfiler* heap_profiler = v8_isolate()->GetHeapProfiler();
+ return heap_profiler->TakeHeapSnapshot();
+ }
+};
+
+bool IsValidSnapshot(const v8::HeapSnapshot* snapshot, int depth = 3) {
+ const HeapSnapshot* heap_snapshot =
+ reinterpret_cast<const HeapSnapshot*>(snapshot);
+ std::unordered_set<const HeapEntry*> visited;
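+  // First pass: record every entry that is the target of at least one edge,
+  // i.e., every entry that has a retainer.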
+ for (const HeapGraphEdge& edge : heap_snapshot->edges()) {
+ visited.insert(edge.to());
+ }
+ size_t unretained_entries_count = 0;
+ for (const HeapEntry& entry : heap_snapshot->entries()) {
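+    // Id 1 is reserved for the synthetic root entry, which legitimately has
+    // no retainer.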
+ if (visited.find(&entry) == visited.end() && entry.id() != 1) {
+ entry.Print("entry with no retainer", "", depth, 0);
+ ++unretained_entries_count;
+ }
+ }
+ return unretained_entries_count == 0;
+}
+
+bool ContainsRetainingPath(const v8::HeapSnapshot& snapshot,
+ const std::vector<std::string> retaining_path,
+ bool debug_retaining_path = false) {
+ const HeapSnapshot& heap_snapshot =
+ reinterpret_cast<const HeapSnapshot&>(snapshot);
+ std::vector<HeapEntry*> haystack = {heap_snapshot.root()};
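+  // Walk the expected path level by level: `haystack` holds all entries that
+  // match the path up to the current element; their children matching the
+  // next name form the haystack for the following level.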
+ for (size_t i = 0; i < retaining_path.size(); ++i) {
+ const std::string& needle = retaining_path[i];
+ std::vector<HeapEntry*> new_haystack;
+ for (HeapEntry* parent : haystack) {
+ for (int j = 0; j < parent->children_count(); j++) {
+ HeapEntry* child = parent->child(j)->to();
+ if (0 == strcmp(child->name(), needle.c_str())) {
+ new_haystack.push_back(child);
+ }
+ }
+ }
+ if (new_haystack.empty()) {
+ if (debug_retaining_path) {
+ fprintf(stderr,
+ "#\n# Could not find object with name '%s'\n#\n# Path:\n",
+ needle.c_str());
+ for (size_t j = 0; j < retaining_path.size(); ++j) {
+ fprintf(stderr, "# - '%s'%s\n", retaining_path[j].c_str(),
+ i == j ? "\t<--- not found" : "");
+ }
+ fprintf(stderr, "#\n");
+ }
+ return false;
+ }
+ std::swap(haystack, new_haystack);
+ }
+ return true;
+}
+
+class BaseWithoutName : public cppgc::GarbageCollected<BaseWithoutName> {
+ public:
+ static constexpr const char kExpectedName[] =
+ "v8::internal::(anonymous namespace)::BaseWithoutName";
+
+ virtual void Trace(cppgc::Visitor* v) const {
+ v->Trace(next);
+ v->Trace(next2);
+ }
+ cppgc::Member<BaseWithoutName> next;
+ cppgc::Member<BaseWithoutName> next2;
+};
+// static
+constexpr const char BaseWithoutName::kExpectedName[];
+
+class GCed final : public BaseWithoutName, public cppgc::NameProvider {
+ public:
+ static constexpr const char kExpectedName[] = "GCed";
+
+ void Trace(cppgc::Visitor* v) const final { BaseWithoutName::Trace(v); }
+ const char* GetName() const final { return "GCed"; }
+};
+// static
+constexpr const char GCed::kExpectedName[];
+
+constexpr const char kExpectedCppRootsName[] = "C++ roots";
+constexpr const char kExpectedCppCrossThreadRootsName[] =
+ "C++ cross-thread roots";
+
+template <typename T>
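+// Types that implement cppgc::NameProvider are always named. Other internal
+// types are only named in builds that expose internal names; otherwise they
+// fall back to the generic hidden name.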
+constexpr const char* GetExpectedName() {
+ if (std::is_base_of<cppgc::NameProvider, T>::value ||
+ !cppgc::NameProvider::HideInternalNames()) {
+ return T::kExpectedName;
+ } else {
+ return cppgc::NameProvider::kHiddenName;
+ }
+}
+
+} // namespace
+
+TEST_F(UnifiedHeapSnapshotTest, EmptySnapshot) {
+ const v8::HeapSnapshot* snapshot = TakeHeapSnapshot();
+ EXPECT_TRUE(IsValidSnapshot(snapshot));
+}
+
+TEST_F(UnifiedHeapSnapshotTest, RetainedByCppRoot) {
+ cppgc::Persistent<GCed> gced =
+ cppgc::MakeGarbageCollected<GCed>(allocation_handle());
+ const v8::HeapSnapshot* snapshot = TakeHeapSnapshot();
+ EXPECT_TRUE(IsValidSnapshot(snapshot));
+ EXPECT_TRUE(
+ ContainsRetainingPath(*snapshot, {
+ kExpectedCppRootsName, // NOLINT
+ GetExpectedName<GCed>() // NOLINT
+ }));
+}
+
+TEST_F(UnifiedHeapSnapshotTest, RetainedByCppCrossThreadRoot) {
+ cppgc::subtle::CrossThreadPersistent<GCed> gced =
+ cppgc::MakeGarbageCollected<GCed>(allocation_handle());
+ const v8::HeapSnapshot* snapshot = TakeHeapSnapshot();
+ EXPECT_TRUE(IsValidSnapshot(snapshot));
+ EXPECT_TRUE(ContainsRetainingPath(
+ *snapshot, {
+ kExpectedCppCrossThreadRootsName, // NOLINT
+ GetExpectedName<GCed>() // NOLINT
+ }));
+}
+
+TEST_F(UnifiedHeapSnapshotTest, RetainingUnnamedType) {
+ cppgc::Persistent<BaseWithoutName> base_without_name =
+ cppgc::MakeGarbageCollected<BaseWithoutName>(allocation_handle());
+ const v8::HeapSnapshot* snapshot = TakeHeapSnapshot();
+ EXPECT_TRUE(IsValidSnapshot(snapshot));
+ if (cppgc::NameProvider::HideInternalNames()) {
+ EXPECT_FALSE(ContainsRetainingPath(
+ *snapshot, {kExpectedCppRootsName, cppgc::NameProvider::kHiddenName}));
+ } else {
+ EXPECT_TRUE(ContainsRetainingPath(
+ *snapshot, {
+ kExpectedCppRootsName, // NOLINT
+ GetExpectedName<BaseWithoutName>() // NOLINT
+ }));
+ }
+}
+
+TEST_F(UnifiedHeapSnapshotTest, RetainingNamedThroughUnnamed) {
+ cppgc::Persistent<BaseWithoutName> base_without_name =
+ cppgc::MakeGarbageCollected<BaseWithoutName>(allocation_handle());
+ base_without_name->next =
+ cppgc::MakeGarbageCollected<GCed>(allocation_handle());
+ const v8::HeapSnapshot* snapshot = TakeHeapSnapshot();
+ EXPECT_TRUE(IsValidSnapshot(snapshot));
+ EXPECT_TRUE(ContainsRetainingPath(
+ *snapshot, {
+ kExpectedCppRootsName, // NOLINT
+ GetExpectedName<BaseWithoutName>(), // NOLINT
+ GetExpectedName<GCed>() // NOLINT
+ }));
+}
+
+TEST_F(UnifiedHeapSnapshotTest, PendingCallStack) {
+ // Test ensures that the algorithm handles references into the current call
+ // stack.
+ //
+ // Graph:
+ // Persistent -> BaseWithoutName (2) <-> BaseWithoutName (1) -> GCed (3)
+ //
+  // Visitation order is (1)->(2)->(3), which is a corner case: when following
+  // the back reference from (2) to (1), the object in (1) has already been
+  // visited and will only later be marked as visible.
+ auto* first =
+ cppgc::MakeGarbageCollected<BaseWithoutName>(allocation_handle());
+ auto* second =
+ cppgc::MakeGarbageCollected<BaseWithoutName>(allocation_handle());
+ first->next = second;
+ first->next->next = first;
+ auto* third = cppgc::MakeGarbageCollected<GCed>(allocation_handle());
+ first->next2 = third;
+
+ cppgc::Persistent<BaseWithoutName> holder(second);
+ const v8::HeapSnapshot* snapshot = TakeHeapSnapshot();
+ EXPECT_TRUE(IsValidSnapshot(snapshot));
+ EXPECT_TRUE(
+ ContainsRetainingPath(*snapshot,
+ {
+ kExpectedCppRootsName, // NOLINT
+ GetExpectedName<BaseWithoutName>(), // NOLINT
+ GetExpectedName<BaseWithoutName>(), // NOLINT
+ GetExpectedName<GCed>() // NOLINT
+ }));
+}
+
+TEST_F(UnifiedHeapSnapshotTest, ReferenceToFinishedSCC) {
+  // Test ensures that the algorithm handles a reference into an already
+  // finished SCC that is marked as hidden, whereas the current SCC would
+  // resolve to visible.
+ //
+ // Graph:
+ // Persistent -> BaseWithoutName (1)
+ // Persistent -> BaseWithoutName (2)
+ // + <-> BaseWithoutName (3) -> BaseWithoutName (1)
+ // + -> GCed (4)
+ //
+  // Visitation order is (1)->(2)->(3)->(1), which is a corner case: (3) would
+  // set a dependency on (1), which is hidden. Instead, (3) should set a
+  // dependency on (2), as (1) resolves to hidden whereas (2) resolves to
+  // visible. The test ensures that resolved hidden dependencies are ignored.
+ cppgc::Persistent<BaseWithoutName> hidden_holder(
+ cppgc::MakeGarbageCollected<BaseWithoutName>(allocation_handle()));
+ auto* first =
+ cppgc::MakeGarbageCollected<BaseWithoutName>(allocation_handle());
+ auto* second =
+ cppgc::MakeGarbageCollected<BaseWithoutName>(allocation_handle());
+ first->next = second;
+ second->next = *hidden_holder;
+ second->next2 = first;
+ first->next2 = cppgc::MakeGarbageCollected<GCed>(allocation_handle());
+ cppgc::Persistent<BaseWithoutName> holder(first);
+ const v8::HeapSnapshot* snapshot = TakeHeapSnapshot();
+ EXPECT_TRUE(IsValidSnapshot(snapshot));
+ EXPECT_TRUE(
+ ContainsRetainingPath(*snapshot,
+ {
+ kExpectedCppRootsName, // NOLINT
+ GetExpectedName<BaseWithoutName>(), // NOLINT
+ GetExpectedName<BaseWithoutName>(), // NOLINT
+ GetExpectedName<BaseWithoutName>(), // NOLINT
+ GetExpectedName<GCed>() // NOLINT
+ }));
+}
+
+namespace {
+
+class GCedWithJSRef : public cppgc::GarbageCollected<GCedWithJSRef> {
+ public:
+ static constexpr const char kExpectedName[] =
+ "v8::internal::(anonymous namespace)::GCedWithJSRef";
+
+ virtual void Trace(cppgc::Visitor* v) const { v->Trace(v8_object_); }
+
+ void SetV8Object(v8::Isolate* isolate, v8::Local<v8::Object> object) {
+ v8_object_.Reset(isolate, object);
+ }
+
+ void SetWrapperClassId(uint16_t class_id) {
+ v8_object_.SetWrapperClassId(class_id);
+ }
+
+ uint16_t WrapperClassId() const { return v8_object_.WrapperClassId(); }
+
+ TracedReference<v8::Object>& wrapper() { return v8_object_; }
+
+ private:
+ TracedReference<v8::Object> v8_object_;
+};
+constexpr const char GCedWithJSRef::kExpectedName[];
+
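+// Stack-allocated helper that bundles the handle scope, a fresh context, and
+// the context scope needed to run JS from a test body.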
+class JsTestingScope {
+ public:
+ explicit JsTestingScope(v8::Isolate* isolate)
+ : isolate_(isolate),
+ handle_scope_(isolate),
+ context_(v8::Context::New(isolate)),
+ context_scope_(context_) {}
+
+ v8::Isolate* isolate() const { return isolate_; }
+ v8::Local<v8::Context> context() const { return context_; }
+
+ private:
+ v8::Isolate* isolate_;
+ v8::HandleScope handle_scope_;
+ v8::Local<v8::Context> context_;
+ v8::Context::Scope context_scope_;
+};
+
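+// Allocates a cppgc-managed object and a JS wrapper pointing back at it, then
+// wires the C++ side to the wrapper through its TracedReference.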
+cppgc::Persistent<GCedWithJSRef> SetupWrapperWrappablePair(
+ JsTestingScope& testing_scope, cppgc::AllocationHandle& allocation_handle,
+ const char* name) {
+ cppgc::Persistent<GCedWithJSRef> gc_w_js_ref =
+ cppgc::MakeGarbageCollected<GCedWithJSRef>(allocation_handle);
+ v8::Local<v8::Object> wrapper_object = WrapperHelper::CreateWrapper(
+ testing_scope.context(), gc_w_js_ref.Get(), name);
+ gc_w_js_ref->SetV8Object(testing_scope.isolate(), wrapper_object);
+ return std::move(gc_w_js_ref);
+}
+
+} // namespace
+
+TEST_F(UnifiedHeapSnapshotTest, JSReferenceForcesVisibleObject) {
+ // Test ensures that a C++->JS reference forces an object to be visible in the
+ // snapshot.
+ JsTestingScope testing_scope(v8_isolate());
+ cppgc::Persistent<GCedWithJSRef> gc_w_js_ref = SetupWrapperWrappablePair(
+ testing_scope, allocation_handle(), "LeafJSObject");
+ const v8::HeapSnapshot* snapshot = TakeHeapSnapshot();
+ EXPECT_TRUE(IsValidSnapshot(snapshot));
+ EXPECT_TRUE(
+ ContainsRetainingPath(*snapshot,
+ {
+ kExpectedCppRootsName, // NOLINT
+ GetExpectedName<GCedWithJSRef>(), // NOLINT
+ "LeafJSObject" // NOLINT
+ }));
+}
+
+TEST_F(UnifiedHeapSnapshotTest, MergedWrapperNode) {
+ // Test ensures that the snapshot sets a wrapper node for C++->JS references
+ // that have a class id set and that object nodes are merged into the C++
+ // node, i.e., the directly reachable JS object is merged into the C++ object.
+ JsTestingScope testing_scope(v8_isolate());
+ cppgc::Persistent<GCedWithJSRef> gc_w_js_ref = SetupWrapperWrappablePair(
+ testing_scope, allocation_handle(), "MergedObject");
+ gc_w_js_ref->SetWrapperClassId(1); // Any class id will do.
+ v8::Local<v8::Object> next_object = WrapperHelper::CreateWrapper(
+ testing_scope.context(), nullptr, "NextObject");
+ v8::Local<v8::Object> wrapper_object =
+ gc_w_js_ref->wrapper().Get(v8_isolate());
+  // Chain another object to `wrapper_object`. Since `wrapper_object` should be
+  // merged into `GCedWithJSRef`, the additional object must show up as a
+  // direct child of `GCedWithJSRef`.
+ wrapper_object
+ ->Set(testing_scope.context(),
+ v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), "link")
+ .ToLocalChecked(),
+ next_object)
+ .ToChecked();
+ const v8::HeapSnapshot* snapshot = TakeHeapSnapshot();
+ EXPECT_TRUE(IsValidSnapshot(snapshot));
+ EXPECT_TRUE(
+ ContainsRetainingPath(*snapshot,
+ {
+ kExpectedCppRootsName, // NOLINT
+ GetExpectedName<GCedWithJSRef>(), // NOLINT
+ // MergedObject is merged into GCedWithJSRef.
+ "NextObject" // NOLINT
+ }));
+}
+
+namespace {
+
+constexpr uint16_t kClassIdForAttachedState = 0xAAAA;
+
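+// Reports wrappers carrying the sentinel class id as attached and all other
+// wrappers as detached, while counting how often the profiler invokes the
+// callback.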
+class DetachednessHandler {
+ public:
+ static size_t callback_count;
+
+ static v8::EmbedderGraph::Node::Detachedness GetDetachedness(
+ v8::Isolate* isolate, const v8::Local<v8::Value>& v8_value,
+ uint16_t class_id, void* data) {
+ callback_count++;
+ return class_id == kClassIdForAttachedState
+ ? v8::EmbedderGraph::Node::Detachedness::kAttached
+ : v8::EmbedderGraph::Node::Detachedness::kDetached;
+ }
+
+ static void Reset() { callback_count = 0; }
+};
+// static
+size_t DetachednessHandler::callback_count = 0;
+
+template <typename Callback>
+void ForEachEntryWithName(const v8::HeapSnapshot* snapshot, const char* needle,
+ Callback callback) {
+ const HeapSnapshot* heap_snapshot =
+ reinterpret_cast<const HeapSnapshot*>(snapshot);
+ for (const HeapEntry& entry : heap_snapshot->entries()) {
+ if (strcmp(entry.name(), needle) == 0) {
+ callback(entry);
+ }
+ }
+}
+
+constexpr uint8_t kExpectedDetachedValueForUnknown =
+ static_cast<uint8_t>(v8::EmbedderGraph::Node::Detachedness::kUnknown);
+constexpr uint8_t kExpectedDetachedValueForAttached =
+ static_cast<uint8_t>(v8::EmbedderGraph::Node::Detachedness::kAttached);
+constexpr uint8_t kExpectedDetachedValueForDetached =
+ static_cast<uint8_t>(v8::EmbedderGraph::Node::Detachedness::kDetached);
+
+} // namespace
+
+TEST_F(UnifiedHeapSnapshotTest, NoTriggerForClassIdZero) {
+ // Test ensures that objects with JS references that have no class id set do
+ // not have their detachedness state queried.
+ JsTestingScope testing_scope(v8_isolate());
+ cppgc::Persistent<GCedWithJSRef> gc_w_js_ref = SetupWrapperWrappablePair(
+ testing_scope, allocation_handle(), "MergedObject");
+ DetachednessHandler::Reset();
+ v8_isolate()->GetHeapProfiler()->SetGetDetachednessCallback(
+ DetachednessHandler::GetDetachedness, nullptr);
+ gc_w_js_ref->SetWrapperClassId(0);
+ EXPECT_EQ(0u, gc_w_js_ref->WrapperClassId());
+ const v8::HeapSnapshot* snapshot = TakeHeapSnapshot();
+ EXPECT_EQ(0u, DetachednessHandler::callback_count);
+ EXPECT_TRUE(IsValidSnapshot(snapshot));
+ EXPECT_TRUE(
+ ContainsRetainingPath(*snapshot,
+ {
+ kExpectedCppRootsName, // NOLINT
+ GetExpectedName<GCedWithJSRef>(), // NOLINT
+ }));
+ ForEachEntryWithName(
+ snapshot, GetExpectedName<GCedWithJSRef>(), [](const HeapEntry& entry) {
+ EXPECT_EQ(kExpectedDetachedValueForUnknown, entry.detachedness());
+ });
+}
+
+TEST_F(UnifiedHeapSnapshotTest, TriggerDetachednessCallbackSettingAttached) {
+ // Test ensures that objects with JS references that have a non-zero class id
+ // set do have their detachedness state queried and set (attached version).
+ JsTestingScope testing_scope(v8_isolate());
+ cppgc::Persistent<GCedWithJSRef> gc_w_js_ref = SetupWrapperWrappablePair(
+ testing_scope, allocation_handle(), "MergedObject");
+ DetachednessHandler::Reset();
+ v8_isolate()->GetHeapProfiler()->SetGetDetachednessCallback(
+ DetachednessHandler::GetDetachedness, nullptr);
+ gc_w_js_ref->SetWrapperClassId(kClassIdForAttachedState);
+ EXPECT_NE(0u, gc_w_js_ref->WrapperClassId());
+ const v8::HeapSnapshot* snapshot = TakeHeapSnapshot();
+ EXPECT_EQ(1u, DetachednessHandler::callback_count);
+ EXPECT_TRUE(IsValidSnapshot(snapshot));
+ EXPECT_TRUE(
+ ContainsRetainingPath(*snapshot,
+ {
+ kExpectedCppRootsName, // NOLINT
+ GetExpectedName<GCedWithJSRef>(), // NOLINT
+ }));
+ ForEachEntryWithName(
+ snapshot, GetExpectedName<GCedWithJSRef>(), [](const HeapEntry& entry) {
+ EXPECT_EQ(kExpectedDetachedValueForAttached, entry.detachedness());
+ });
+}
+
+TEST_F(UnifiedHeapSnapshotTest, TriggerDetachednessCallbackSettingDetached) {
+ // Test ensures that objects with JS references that have a non-zero class id
+ // set do have their detachedness state queried and set (detached version).
+ JsTestingScope testing_scope(v8_isolate());
+ cppgc::Persistent<GCedWithJSRef> gc_w_js_ref = SetupWrapperWrappablePair(
+ testing_scope, allocation_handle(), "MergedObject");
+ DetachednessHandler::Reset();
+ v8_isolate()->GetHeapProfiler()->SetGetDetachednessCallback(
+ DetachednessHandler::GetDetachedness, nullptr);
+ gc_w_js_ref->SetWrapperClassId(kClassIdForAttachedState - 1);
+ EXPECT_NE(0u, gc_w_js_ref->WrapperClassId());
+ const v8::HeapSnapshot* snapshot = TakeHeapSnapshot();
+ EXPECT_EQ(1u, DetachednessHandler::callback_count);
+ EXPECT_TRUE(IsValidSnapshot(snapshot));
+ EXPECT_TRUE(
+ ContainsRetainingPath(*snapshot,
+ {
+ kExpectedCppRootsName, // NOLINT
+ GetExpectedName<GCedWithJSRef>(), // NOLINT
+ }));
+ ForEachEntryWithName(
+ snapshot, GetExpectedName<GCedWithJSRef>(), [](const HeapEntry& entry) {
+ EXPECT_EQ(kExpectedDetachedValueForDetached, entry.detachedness());
+ });
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/heap/unified-heap-unittest.cc b/deps/v8/test/unittests/heap/unified-heap-unittest.cc
index 200ba3404e..ca72a80c64 100644
--- a/deps/v8/test/unittests/heap/unified-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/unified-heap-unittest.cc
@@ -9,63 +9,13 @@
#include "src/heap/cppgc-js/cpp-heap.h"
#include "src/objects/objects-inl.h"
#include "test/unittests/heap/heap-utils.h"
+#include "test/unittests/heap/unified-heap-utils.h"
namespace v8 {
namespace internal {
namespace {
-v8::Local<v8::Object> ConstructTraceableJSApiObject(
- v8::Local<v8::Context> context, void* object) {
- v8::EscapableHandleScope scope(context->GetIsolate());
- v8::Local<v8::FunctionTemplate> function_t =
- v8::FunctionTemplate::New(context->GetIsolate());
- v8::Local<v8::ObjectTemplate> instance_t = function_t->InstanceTemplate();
- instance_t->SetInternalFieldCount(2);
- v8::Local<v8::Function> function =
- function_t->GetFunction(context).ToLocalChecked();
- v8::Local<v8::Object> instance =
- function->NewInstance(context).ToLocalChecked();
- instance->SetAlignedPointerInInternalField(0, object);
- instance->SetAlignedPointerInInternalField(1, object);
- CHECK(!instance.IsEmpty());
- i::Handle<i::JSReceiver> js_obj = v8::Utils::OpenHandle(*instance);
- CHECK_EQ(i::JS_API_OBJECT_TYPE, js_obj->map().instance_type());
- return scope.Escape(instance);
-}
-
-void ResetWrappableConnection(v8::Local<v8::Object> api_object) {
- api_object->SetAlignedPointerInInternalField(0, nullptr);
- api_object->SetAlignedPointerInInternalField(1, nullptr);
-}
-
-class UnifiedHeapTest : public TestWithHeapInternals {
- public:
- UnifiedHeapTest()
- : saved_incremental_marking_wrappers_(FLAG_incremental_marking_wrappers) {
- FLAG_incremental_marking_wrappers = false;
- cppgc::InitializeProcess(V8::GetCurrentPlatform()->GetPageAllocator());
- cpp_heap_ = std::make_unique<CppHeap>(v8_isolate(), 0);
- heap()->SetEmbedderHeapTracer(&cpp_heap());
- }
-
- ~UnifiedHeapTest() {
- heap()->SetEmbedderHeapTracer(nullptr);
- FLAG_incremental_marking_wrappers = saved_incremental_marking_wrappers_;
- cppgc::ShutdownProcess();
- }
-
- CppHeap& cpp_heap() const { return *cpp_heap_.get(); }
-
- cppgc::AllocationHandle& allocation_handle() {
- return cpp_heap().object_allocator();
- }
-
- private:
- std::unique_ptr<CppHeap> cpp_heap_;
- bool saved_incremental_marking_wrappers_;
-};
-
class Wrappable final : public cppgc::GarbageCollected<Wrappable> {
public:
static size_t destructor_callcount;
@@ -79,20 +29,22 @@ size_t Wrappable::destructor_callcount = 0;
} // namespace
-TEST_F(UnifiedHeapTest, OnlyGC) { CollectGarbage(OLD_SPACE); }
+TEST_F(UnifiedHeapTest, OnlyGC) { CollectGarbageWithEmbedderStack(); }
TEST_F(UnifiedHeapTest, FindingV8ToBlinkReference) {
v8::HandleScope scope(v8_isolate());
v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
v8::Context::Scope context_scope(context);
- v8::Local<v8::Object> api_object = ConstructTraceableJSApiObject(
+ v8::Local<v8::Object> api_object = WrapperHelper::CreateWrapper(
context, cppgc::MakeGarbageCollected<Wrappable>(allocation_handle()));
EXPECT_FALSE(api_object.IsEmpty());
EXPECT_EQ(0u, Wrappable::destructor_callcount);
- CollectGarbage(OLD_SPACE);
+ CollectGarbageWithoutEmbedderStack();
EXPECT_EQ(0u, Wrappable::destructor_callcount);
- ResetWrappableConnection(api_object);
- CollectGarbage(OLD_SPACE);
+ WrapperHelper::ResetWrappableConnection(api_object);
+ CollectGarbageWithoutEmbedderStack();
+ // Calling CollectGarbage twice to force the first GC to finish sweeping.
+ CollectGarbageWithoutEmbedderStack();
EXPECT_EQ(1u, Wrappable::destructor_callcount);
}
diff --git a/deps/v8/test/unittests/heap/unified-heap-utils.cc b/deps/v8/test/unittests/heap/unified-heap-utils.cc
new file mode 100644
index 0000000000..905e09b07a
--- /dev/null
+++ b/deps/v8/test/unittests/heap/unified-heap-utils.cc
@@ -0,0 +1,81 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/heap/unified-heap-utils.h"
+
+#include "include/cppgc/platform.h"
+#include "src/api/api-inl.h"
+#include "src/heap/cppgc-js/cpp-heap.h"
+#include "src/objects/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+UnifiedHeapTest::UnifiedHeapTest()
+ : saved_incremental_marking_wrappers_(FLAG_incremental_marking_wrappers) {
+ FLAG_incremental_marking_wrappers = false;
+ cppgc::InitializeProcess(V8::GetCurrentPlatform()->GetPageAllocator());
+ cpp_heap_ = std::make_unique<CppHeap>(
+ v8_isolate(), std::vector<std::unique_ptr<cppgc::CustomSpaceBase>>());
+ heap()->SetEmbedderHeapTracer(&cpp_heap());
+}
+
+UnifiedHeapTest::~UnifiedHeapTest() {
+ heap()->SetEmbedderHeapTracer(nullptr);
+ FLAG_incremental_marking_wrappers = saved_incremental_marking_wrappers_;
+ cppgc::ShutdownProcess();
+}
+
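+// Runs a GC while telling the embedder tracer that the native stack may still
+// contain heap pointers, so objects reachable only from the stack are treated
+// as live.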
+void UnifiedHeapTest::CollectGarbageWithEmbedderStack() {
+ heap()->SetEmbedderStackStateForNextFinalization(
+ EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
+ CollectGarbage(OLD_SPACE);
+}
+
+void UnifiedHeapTest::CollectGarbageWithoutEmbedderStack() {
+ heap()->SetEmbedderStackStateForNextFinalization(
+ EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
+ CollectGarbage(OLD_SPACE);
+}
+
+CppHeap& UnifiedHeapTest::cpp_heap() const { return *cpp_heap_.get(); }
+
+cppgc::AllocationHandle& UnifiedHeapTest::allocation_handle() {
+ return cpp_heap().object_allocator();
+}
+
+// static
+v8::Local<v8::Object> WrapperHelper::CreateWrapper(
+ v8::Local<v8::Context> context, void* wrappable_object,
+ const char* class_name) {
+ v8::EscapableHandleScope scope(context->GetIsolate());
+ v8::Local<v8::FunctionTemplate> function_t =
+ v8::FunctionTemplate::New(context->GetIsolate());
+ if (strlen(class_name) != 0) {
+ function_t->SetClassName(
+ v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), class_name)
+ .ToLocalChecked());
+ }
+ v8::Local<v8::ObjectTemplate> instance_t = function_t->InstanceTemplate();
+ instance_t->SetInternalFieldCount(2);
+ v8::Local<v8::Function> function =
+ function_t->GetFunction(context).ToLocalChecked();
+ v8::Local<v8::Object> instance =
+ function->NewInstance(context).ToLocalChecked();
+ instance->SetAlignedPointerInInternalField(0, wrappable_object);
+ instance->SetAlignedPointerInInternalField(1, wrappable_object);
+ CHECK(!instance.IsEmpty());
+ i::Handle<i::JSReceiver> js_obj = v8::Utils::OpenHandle(*instance);
+ CHECK_EQ(i::JS_API_OBJECT_TYPE, js_obj->map().instance_type());
+ return scope.Escape(instance);
+}
+
+// static
+void WrapperHelper::ResetWrappableConnection(v8::Local<v8::Object> api_object) {
+ api_object->SetAlignedPointerInInternalField(0, nullptr);
+ api_object->SetAlignedPointerInInternalField(1, nullptr);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/heap/unified-heap-utils.h b/deps/v8/test/unittests/heap/unified-heap-utils.h
new file mode 100644
index 0000000000..e11ab11c42
--- /dev/null
+++ b/deps/v8/test/unittests/heap/unified-heap-utils.h
@@ -0,0 +1,50 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_HEAP_UNIFIED_HEAP_UTILS_H_
+#define V8_UNITTESTS_HEAP_UNIFIED_HEAP_UTILS_H_
+
+#include "include/cppgc/heap.h"
+#include "include/v8.h"
+#include "test/unittests/heap/heap-utils.h"
+
+namespace v8 {
+namespace internal {
+
+class CppHeap;
+
+class UnifiedHeapTest : public TestWithHeapInternals {
+ public:
+ UnifiedHeapTest();
+ ~UnifiedHeapTest() override;
+
+ void CollectGarbageWithEmbedderStack();
+ void CollectGarbageWithoutEmbedderStack();
+
+ CppHeap& cpp_heap() const;
+ cppgc::AllocationHandle& allocation_handle();
+
+ private:
+ std::unique_ptr<CppHeap> cpp_heap_;
+ bool saved_incremental_marking_wrappers_;
+};
+
+class WrapperHelper {
+ public:
+  // Sets up a V8 API object so that it points back to a C++ object. The
+  // setup is recognized by the GC, and the reference is followed both for
+  // liveness analysis (marking) and by tooling (snapshots).
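+  //
+  // Typical use in a test body (sketch; names are illustrative):
+  //   v8::Local<v8::Object> wrapper = WrapperHelper::CreateWrapper(
+  //       context, wrappable, "MyClass");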
+ static v8::Local<v8::Object> CreateWrapper(v8::Local<v8::Context> context,
+ void* wrappable_object,
+ const char* class_name = "");
+
+ // Resets the connection of a wrapper (JS) to its wrappable (C++), meaning
+  // that the wrappable object is no longer kept alive by the wrapper object.
+ static void ResetWrappableConnection(v8::Local<v8::Object> api_object);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNITTESTS_HEAP_UNIFIED_HEAP_UTILS_H_
diff --git a/deps/v8/test/unittests/heap/unmapper-unittest.cc b/deps/v8/test/unittests/heap/unmapper-unittest.cc
index a919945d3f..cbabfe4773 100644
--- a/deps/v8/test/unittests/heap/unmapper-unittest.cc
+++ b/deps/v8/test/unittests/heap/unmapper-unittest.cc
@@ -227,6 +227,8 @@ class SequentialUnmapperTest : public TestWithIsolate {
public:
SequentialUnmapperTest() = default;
~SequentialUnmapperTest() override = default;
+ SequentialUnmapperTest(const SequentialUnmapperTest&) = delete;
+ SequentialUnmapperTest& operator=(const SequentialUnmapperTest&) = delete;
static void SetUpTestCase() {
CHECK_NULL(tracking_page_allocator_);
@@ -264,8 +266,6 @@ class SequentialUnmapperTest : public TestWithIsolate {
static TrackingPageAllocator* tracking_page_allocator_;
static v8::PageAllocator* old_page_allocator_;
static bool old_flag_;
-
- DISALLOW_COPY_AND_ASSIGN(SequentialUnmapperTest);
};
TrackingPageAllocator* SequentialUnmapperTest::tracking_page_allocator_ =
@@ -290,18 +290,15 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kNoAccess);
unmapper()->TearDown();
- if (i_isolate()->isolate_allocation_mode() ==
- IsolateAllocationMode::kInV8Heap) {
- // In this mode Isolate uses bounded page allocator which allocates pages
- // inside prereserved region. Thus these pages are kept reserved until
- // the Isolate dies.
- tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
- PageAllocator::kNoAccess);
- } else {
- CHECK_EQ(IsolateAllocationMode::kInCppHeap,
- i_isolate()->isolate_allocation_mode());
- tracking_page_allocator()->CheckIsFree(page->address(), page_size);
- }
+#ifdef V8_COMPRESS_POINTERS
+ // In this mode Isolate uses bounded page allocator which allocates pages
+ // inside prereserved region. Thus these pages are kept reserved until
+ // the Isolate dies.
+ tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
+ PageAllocator::kNoAccess);
+#else
+ tracking_page_allocator()->CheckIsFree(page->address(), page_size);
+#endif // V8_COMPRESS_POINTERS
}
// See v8:5945.
@@ -319,18 +316,15 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
unmapper()->TearDown();
- if (i_isolate()->isolate_allocation_mode() ==
- IsolateAllocationMode::kInV8Heap) {
- // In this mode Isolate uses bounded page allocator which allocates pages
- // inside prereserved region. Thus these pages are kept reserved until
- // the Isolate dies.
- tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
- PageAllocator::kNoAccess);
- } else {
- CHECK_EQ(IsolateAllocationMode::kInCppHeap,
- i_isolate()->isolate_allocation_mode());
- tracking_page_allocator()->CheckIsFree(page->address(), page_size);
- }
+#ifdef V8_COMPRESS_POINTERS
+ // In this mode Isolate uses bounded page allocator which allocates pages
+ // inside prereserved region. Thus these pages are kept reserved until
+ // the Isolate dies.
+ tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
+ PageAllocator::kNoAccess);
+#else
+ tracking_page_allocator()->CheckIsFree(page->address(), page_size);
+#endif // V8_COMPRESS_POINTERS
}
} // namespace internal
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 7b1150b499..9da64339d5 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -277,6 +277,9 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Emit GetSuperConstructor.
builder.GetSuperConstructor(reg);
+ // Constructor check for GetSuperConstructor.
+ builder.ThrowIfNotSuperConstructor(reg);
+
// Hole checks.
builder.ThrowReferenceErrorIfHole(name)
.ThrowSuperAlreadyCalledIfNotHole()
@@ -532,11 +535,7 @@ TEST_F(BytecodeArrayBuilderTest, Parameters) {
Register receiver(builder.Receiver());
Register param8(builder.Parameter(8));
-#ifdef V8_REVERSE_JSARGS
CHECK_EQ(receiver.index() - param8.index(), 9);
-#else
- CHECK_EQ(param8.index() - receiver.index(), 9);
-#endif
}
TEST_F(BytecodeArrayBuilderTest, Constants) {
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
index ad25ab06cb..0592a64362 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
@@ -142,8 +142,8 @@ TEST_F(BytecodeArrayWriterUnittest, SimpleExample) {
Handle<BytecodeArray> bytecode_array =
writer()->ToBytecodeArray(isolate(), 0, 0, factory()->empty_byte_array());
- bytecode_array->set_synchronized_source_position_table(
- *writer()->ToSourcePositionTable(isolate()));
+ bytecode_array->set_source_position_table(
+ *writer()->ToSourcePositionTable(isolate()), kReleaseStore);
CHECK_EQ(bytecodes()->size(), arraysize(expected_bytes));
PositionTableEntry expected_positions[] = {{0, 55, true}, {8, 70, true}};
@@ -229,8 +229,8 @@ TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
Handle<BytecodeArray> bytecode_array =
writer()->ToBytecodeArray(isolate(), 0, 0, factory()->empty_byte_array());
- bytecode_array->set_synchronized_source_position_table(
- *writer()->ToSourcePositionTable(isolate()));
+ bytecode_array->set_source_position_table(
+ *writer()->ToSourcePositionTable(isolate()), kReleaseStore);
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
@@ -278,8 +278,8 @@ TEST_F(BytecodeArrayWriterUnittest, ElideNoneffectfulBytecodes) {
Handle<BytecodeArray> bytecode_array =
writer()->ToBytecodeArray(isolate(), 0, 0, factory()->empty_byte_array());
- bytecode_array->set_synchronized_source_position_table(
- *writer()->ToSourcePositionTable(isolate()));
+ bytecode_array->set_source_position_table(
+ *writer()->ToSourcePositionTable(isolate()), kReleaseStore);
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
@@ -346,8 +346,8 @@ TEST_F(BytecodeArrayWriterUnittest, DeadcodeElimination) {
Handle<BytecodeArray> bytecode_array =
writer()->ToBytecodeArray(isolate(), 0, 0, factory()->empty_byte_array());
- bytecode_array->set_synchronized_source_position_table(
- *writer()->ToSourcePositionTable(isolate()));
+ bytecode_array->set_source_position_table(
+ *writer()->ToSourcePositionTable(isolate()), kReleaseStore);
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index 19ebc7078b..6ec3999968 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -419,7 +419,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
LoadSensitivity::kCritical));
}
{
- Node* index = m.Parameter(2);
+ Node* index = m.UntypedParameter(2);
TNode<Object> load_constant =
m.LoadConstantPoolEntry(m.ReinterpretCast<IntPtrT>(index));
Matcher<Node*> constant_pool_matcher = m.IsLoadFromObject(
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
index 3abc6ac24e..828af4ade4 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
@@ -38,6 +38,9 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
OperandScale operand_scale = OperandScale::kSingle)
: InterpreterAssembler(state, bytecode, operand_scale) {}
~InterpreterAssemblerForTest();
+ InterpreterAssemblerForTest(const InterpreterAssemblerForTest&) = delete;
+ InterpreterAssemblerForTest& operator=(const InterpreterAssemblerForTest&) =
+ delete;
Matcher<compiler::Node*> IsLoad(
const Matcher<compiler::LoadRepresentation>& rep_matcher,
@@ -83,9 +86,6 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
Matcher<compiler::Node*> IsLoadRegisterOperand(int offset,
OperandSize operand_size);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(InterpreterAssemblerForTest);
};
};
diff --git a/deps/v8/test/unittests/objects/value-serializer-unittest.cc b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
index 19ba7f91ad..82afd6fd1f 100644
--- a/deps/v8/test/unittests/objects/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
@@ -26,6 +26,10 @@ using ::testing::Invoke;
using ::testing::Return;
class ValueSerializerTest : public TestWithIsolate {
+ public:
+ ValueSerializerTest(const ValueSerializerTest&) = delete;
+ ValueSerializerTest& operator=(const ValueSerializerTest&) = delete;
+
protected:
ValueSerializerTest()
: serialization_context_(Context::New(isolate())),
@@ -266,8 +270,6 @@ class ValueSerializerTest : public TestWithIsolate {
Local<Context> deserialization_context_;
Local<FunctionTemplate> host_object_constructor_template_;
i::Isolate* isolate_;
-
- DISALLOW_COPY_AND_ASSIGN(ValueSerializerTest);
};
TEST_F(ValueSerializerTest, DecodeInvalid) {
@@ -1487,7 +1489,26 @@ TEST_F(ValueSerializerTest, DecodeRegExpDotAll) {
ExpectScriptTrue("result.toString() === '/foo/gimsuy'");
InvalidDecodeTest(
- {0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0x7F});
+ {0xFF, 0x09, 0x3F, 0x00, 0x52, 0x03, 0x66, 0x6F, 0x6F, 0xFF});
+}
+
+TEST_F(ValueSerializerTest, DecodeLinearRegExp) {
+ bool flag_was_enabled = i::FLAG_enable_experimental_regexp_engine;
+
+ // The last byte encodes the regexp flags.
+ std::vector<uint8_t> regexp_encoding = {0xFF, 0x09, 0x3F, 0x00, 0x52,
+ 0x03, 0x66, 0x6F, 0x6F, 0x6D};
+
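+  // 0x6D sets the global, linear, multiline, dotAll, and sticky flag bits
+  // ('glmsy'); the linear flag only decodes when the experimental engine is
+  // enabled.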
+ i::FLAG_enable_experimental_regexp_engine = true;
+ Local<Value> value = DecodeTest(regexp_encoding);
+ ASSERT_TRUE(value->IsRegExp());
+ ExpectScriptTrue("Object.getPrototypeOf(result) === RegExp.prototype");
+ ExpectScriptTrue("result.toString() === '/foo/glmsy'");
+
+ i::FLAG_enable_experimental_regexp_engine = false;
+ InvalidDecodeTest(regexp_encoding);
+
+ i::FLAG_enable_experimental_regexp_engine = flag_was_enabled;
}
TEST_F(ValueSerializerTest, RoundTripMap) {
diff --git a/deps/v8/test/unittests/parser/preparser-unittest.cc b/deps/v8/test/unittests/parser/preparser-unittest.cc
index 13676af82b..00153472df 100644
--- a/deps/v8/test/unittests/parser/preparser-unittest.cc
+++ b/deps/v8/test/unittests/parser/preparser-unittest.cc
@@ -14,9 +14,8 @@ namespace internal {
class PreParserTest : public TestWithNativeContext {
public:
PreParserTest() = default;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(PreParserTest);
+ PreParserTest(const PreParserTest&) = delete;
+ PreParserTest& operator=(const PreParserTest&) = delete;
};
TEST_F(PreParserTest, LazyFunctionLength) {
diff --git a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
index e0d6c3d7ed..323e429139 100644
--- a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
+++ b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
@@ -29,12 +29,17 @@ class BackgroundCompileTaskTest : public TestWithNativeContext {
public:
BackgroundCompileTaskTest() : allocator_(isolate()->allocator()) {}
~BackgroundCompileTaskTest() override = default;
+ BackgroundCompileTaskTest(const BackgroundCompileTaskTest&) = delete;
+ BackgroundCompileTaskTest& operator=(const BackgroundCompileTaskTest&) =
+ delete;
AccountingAllocator* allocator() { return allocator_; }
static void SetUpTestCase() {
CHECK_NULL(save_flags_);
save_flags_ = new SaveFlags();
+ // TODO(leszeks): Support background finalization in compiler dispatcher.
+ FLAG_finalize_streaming_on_background = false;
TestWithNativeContext::SetUpTestCase();
}
@@ -85,8 +90,6 @@ class BackgroundCompileTaskTest : public TestWithNativeContext {
private:
AccountingAllocator* allocator_;
static SaveFlags* save_flags_;
-
- DISALLOW_COPY_AND_ASSIGN(BackgroundCompileTaskTest);
};
SaveFlags* BackgroundCompileTaskTest::save_flags_ = nullptr;
@@ -171,6 +174,8 @@ class CompileTask : public Task {
CompileTask(BackgroundCompileTask* task, base::Semaphore* semaphore)
: task_(task), semaphore_(semaphore) {}
~CompileTask() override = default;
+ CompileTask(const CompileTask&) = delete;
+ CompileTask& operator=(const CompileTask&) = delete;
void Run() override {
task_->Run();
@@ -180,7 +185,6 @@ class CompileTask : public Task {
private:
BackgroundCompileTask* task_;
base::Semaphore* semaphore_;
- DISALLOW_COPY_AND_ASSIGN(CompileTask);
};
TEST_F(BackgroundCompileTaskTest, CompileOnBackgroundThread) {
diff --git a/deps/v8/test/unittests/test-helpers.h b/deps/v8/test/unittests/test-helpers.h
index 5cd1beb761..13aacd4398 100644
--- a/deps/v8/test/unittests/test-helpers.h
+++ b/deps/v8/test/unittests/test-helpers.h
@@ -28,6 +28,8 @@ class ScriptResource : public v8::String::ExternalOneByteStringResource {
ScriptResource(const char* data, size_t length)
: data_(data), length_(length) {}
~ScriptResource() override = default;
+ ScriptResource(const ScriptResource&) = delete;
+ ScriptResource& operator=(const ScriptResource&) = delete;
const char* data() const override { return data_; }
size_t length() const override { return length_; }
@@ -35,8 +37,6 @@ class ScriptResource : public v8::String::ExternalOneByteStringResource {
private:
const char* data_;
size_t length_;
-
- DISALLOW_COPY_AND_ASSIGN(ScriptResource);
};
Handle<String> CreateSource(
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index 86e2854596..80a773ed7e 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -22,8 +22,7 @@ namespace {
CounterMap* kCurrentCounterMap = nullptr;
} // namespace
-IsolateWrapper::IsolateWrapper(CountersMode counters_mode,
- PointerCompressionMode pointer_compression_mode)
+IsolateWrapper::IsolateWrapper(CountersMode counters_mode)
: array_buffer_allocator_(
v8::ArrayBuffer::Allocator::NewDefaultAllocator()) {
CHECK_NULL(kCurrentCounterMap);
@@ -47,13 +46,7 @@ IsolateWrapper::IsolateWrapper(CountersMode counters_mode,
};
}
- if (pointer_compression_mode == kEnforcePointerCompression) {
- isolate_ = reinterpret_cast<v8::Isolate*>(
- i::Isolate::New(i::IsolateAllocationMode::kInV8Heap));
- v8::Isolate::Initialize(isolate(), create_params);
- } else {
- isolate_ = v8::Isolate::New(create_params);
- }
+ isolate_ = v8::Isolate::New(create_params);
CHECK_NOT_NULL(isolate());
}
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index ed81f2f382..d1301b8848 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -27,21 +27,13 @@ using CounterMap = std::map<std::string, int>;
enum CountersMode { kNoCounters, kEnableCounters };
-// When PointerCompressionMode is kEnforcePointerCompression, the Isolate is
-// created with pointer compression force enabled. When it's
-// kDefaultPointerCompression then the Isolate is created with the default
-// pointer compression state for the current build.
-enum PointerCompressionMode {
- kDefaultPointerCompression,
- kEnforcePointerCompression
-};
-
// RAII-like Isolate instance wrapper.
class IsolateWrapper final {
public:
- explicit IsolateWrapper(CountersMode counters_mode,
- PointerCompressionMode pointer_compression_mode);
+ explicit IsolateWrapper(CountersMode counters_mode);
~IsolateWrapper();
+ IsolateWrapper(const IsolateWrapper&) = delete;
+ IsolateWrapper& operator=(const IsolateWrapper&) = delete;
v8::Isolate* isolate() const { return isolate_; }
@@ -49,20 +41,15 @@ class IsolateWrapper final {
std::unique_ptr<v8::ArrayBuffer::Allocator> array_buffer_allocator_;
std::unique_ptr<CounterMap> counter_map_;
v8::Isolate* isolate_;
-
- DISALLOW_COPY_AND_ASSIGN(IsolateWrapper);
};
//
// A set of mixins from which the test fixtures will be constructed.
//
-template <typename TMixin, CountersMode kCountersMode = kNoCounters,
- PointerCompressionMode kPointerCompressionMode =
- kDefaultPointerCompression>
+template <typename TMixin, CountersMode kCountersMode = kNoCounters>
class WithIsolateMixin : public TMixin {
public:
- WithIsolateMixin()
- : isolate_wrapper_(kCountersMode, kPointerCompressionMode) {}
+ WithIsolateMixin() : isolate_wrapper_(kCountersMode) {}
v8::Isolate* v8_isolate() const { return isolate_wrapper_.isolate(); }
@@ -70,15 +57,13 @@ class WithIsolateMixin : public TMixin {
v8::IsolateWrapper isolate_wrapper_;
};
-template <typename TMixin, CountersMode kCountersMode = kNoCounters>
-using WithPointerCompressionIsolateMixin =
- WithIsolateMixin<TMixin, kCountersMode, kEnforcePointerCompression>;
-
template <typename TMixin>
class WithIsolateScopeMixin : public TMixin {
public:
WithIsolateScopeMixin()
: isolate_scope_(this->v8_isolate()), handle_scope_(this->v8_isolate()) {}
+ WithIsolateScopeMixin(const WithIsolateScopeMixin&) = delete;
+ WithIsolateScopeMixin& operator=(const WithIsolateScopeMixin&) = delete;
v8::Isolate* isolate() const { return this->v8_isolate(); }
@@ -89,8 +74,6 @@ class WithIsolateScopeMixin : public TMixin {
private:
v8::Isolate::Scope isolate_scope_;
v8::HandleScope handle_scope_;
-
- DISALLOW_COPY_AND_ASSIGN(WithIsolateScopeMixin);
};
template <typename TMixin>
@@ -98,6 +81,8 @@ class WithContextMixin : public TMixin {
public:
WithContextMixin()
: context_(Context::New(this->v8_isolate())), context_scope_(context_) {}
+ WithContextMixin(const WithContextMixin&) = delete;
+ WithContextMixin& operator=(const WithContextMixin&) = delete;
const Local<Context>& context() const { return v8_context(); }
const Local<Context>& v8_context() const { return context_; }
@@ -133,8 +118,6 @@ class WithContextMixin : public TMixin {
v8::Local<v8::Context> context_;
v8::Context::Scope context_scope_;
-
- DISALLOW_COPY_AND_ASSIGN(WithContextMixin);
};
// Use v8::internal::TestWithIsolate if you are testing internals,
@@ -152,12 +135,6 @@ using TestWithContext = //
WithIsolateMixin< //
::testing::Test>>>;
-using TestWithIsolateAndPointerCompression = //
- WithContextMixin< //
- WithIsolateScopeMixin< //
- WithPointerCompressionIsolateMixin< //
- ::testing::Test>>>;
-
namespace internal {
// Forward declarations.
@@ -167,6 +144,8 @@ template <typename TMixin>
class WithInternalIsolateMixin : public TMixin {
public:
WithInternalIsolateMixin() = default;
+ WithInternalIsolateMixin(const WithInternalIsolateMixin&) = delete;
+ WithInternalIsolateMixin& operator=(const WithInternalIsolateMixin&) = delete;
Factory* factory() const { return isolate()->factory(); }
Isolate* isolate() const { return TMixin::i_isolate(); }
@@ -197,9 +176,6 @@ class WithInternalIsolateMixin : public TMixin {
base::RandomNumberGenerator* random_number_generator() const {
return isolate()->random_number_generator();
}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(WithInternalIsolateMixin);
};
template <typename TMixin>
@@ -207,14 +183,14 @@ class WithZoneMixin : public TMixin {
public:
explicit WithZoneMixin(bool support_zone_compression = false)
: zone_(&allocator_, ZONE_NAME, support_zone_compression) {}
+ WithZoneMixin(const WithZoneMixin&) = delete;
+ WithZoneMixin& operator=(const WithZoneMixin&) = delete;
Zone* zone() { return &zone_; }
private:
v8::internal::AccountingAllocator allocator_;
Zone zone_;
-
- DISALLOW_COPY_AND_ASSIGN(WithZoneMixin);
};
using TestWithIsolate = //
@@ -258,13 +234,13 @@ class SaveFlags {
public:
SaveFlags();
~SaveFlags();
+ SaveFlags(const SaveFlags&) = delete;
+ SaveFlags& operator=(const SaveFlags&) = delete;
private:
#define FLAG_MODE_APPLY(ftype, ctype, nam, def, cmt) ctype SAVED_##nam;
#include "src/flags/flag-definitions.h" // NOLINT
#undef FLAG_MODE_APPLY
-
- DISALLOW_COPY_AND_ASSIGN(SaveFlags);
};
// For GTest.
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index 96dd893db2..283bb9606c 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -58,8 +58,6 @@
# BUG(992783).
'Torque.ConditionalFields': [SKIP],
'Torque.UsingUnderscorePrefixedIdentifierError': [SKIP],
- # BUG(10658).
- 'GCStackTest.IteratePointersFindsParameterNesting8': [SKIP],
}], # system == windows and arch == x64 and mode == release
['tsan == True', {
@@ -78,5 +76,4 @@
['variant == stress_snapshot', {
'*': [SKIP], # only relevant for mjsunit tests.
}],
-
]
diff --git a/deps/v8/test/unittests/wasm/DIR_METADATA b/deps/v8/test/unittests/wasm/DIR_METADATA
new file mode 100644
index 0000000000..3b428d9660
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>WebAssembly"
+} \ No newline at end of file
diff --git a/deps/v8/test/unittests/wasm/OWNERS b/deps/v8/test/unittests/wasm/OWNERS
index 0b1c176e04..a89e5f1056 100644
--- a/deps/v8/test/unittests/wasm/OWNERS
+++ b/deps/v8/test/unittests/wasm/OWNERS
@@ -1,3 +1 @@
file:../../../src/wasm/OWNERS
-
-# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/test/unittests/wasm/decoder-unittest.cc b/deps/v8/test/unittests/wasm/decoder-unittest.cc
index c2c0c87aa6..d693eef172 100644
--- a/deps/v8/test/unittests/wasm/decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/decoder-unittest.cc
@@ -20,54 +20,54 @@ class DecoderTest : public TestWithZone {
Decoder decoder;
};
-#define CHECK_UINT32V_INLINE(expected, expected_length, ...) \
- do { \
- const byte data[] = {__VA_ARGS__}; \
- decoder.Reset(data, data + sizeof(data)); \
- unsigned length; \
- EXPECT_EQ( \
- static_cast<uint32_t>(expected), \
- decoder.read_u32v<Decoder::kValidate>(decoder.start(), &length)); \
- EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
- EXPECT_EQ(data, decoder.pc()); \
- EXPECT_TRUE(decoder.ok()); \
- EXPECT_EQ(static_cast<uint32_t>(expected), decoder.consume_u32v()); \
- EXPECT_EQ(data + expected_length, decoder.pc()); \
+#define CHECK_UINT32V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ unsigned length; \
+ EXPECT_EQ(static_cast<uint32_t>(expected), \
+ decoder.read_u32v<Decoder::kFullValidation>(decoder.start(), \
+ &length)); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+ EXPECT_EQ(data, decoder.pc()); \
+ EXPECT_TRUE(decoder.ok()); \
+ EXPECT_EQ(static_cast<uint32_t>(expected), decoder.consume_u32v()); \
+ EXPECT_EQ(data + expected_length, decoder.pc()); \
} while (false)
-#define CHECK_INT32V_INLINE(expected, expected_length, ...) \
- do { \
- const byte data[] = {__VA_ARGS__}; \
- decoder.Reset(data, data + sizeof(data)); \
- unsigned length; \
- EXPECT_EQ(expected, decoder.read_i32v<Decoder::kValidate>(decoder.start(), \
- &length)); \
- EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
- EXPECT_EQ(data, decoder.pc()); \
- EXPECT_TRUE(decoder.ok()); \
- EXPECT_EQ(expected, decoder.consume_i32v()); \
- EXPECT_EQ(data + expected_length, decoder.pc()); \
+#define CHECK_INT32V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ unsigned length; \
+ EXPECT_EQ(expected, decoder.read_i32v<Decoder::kFullValidation>( \
+ decoder.start(), &length)); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+ EXPECT_EQ(data, decoder.pc()); \
+ EXPECT_TRUE(decoder.ok()); \
+ EXPECT_EQ(expected, decoder.consume_i32v()); \
+ EXPECT_EQ(data + expected_length, decoder.pc()); \
} while (false)
-#define CHECK_UINT64V_INLINE(expected, expected_length, ...) \
- do { \
- const byte data[] = {__VA_ARGS__}; \
- decoder.Reset(data, data + sizeof(data)); \
- unsigned length; \
- EXPECT_EQ( \
- static_cast<uint64_t>(expected), \
- decoder.read_u64v<Decoder::kValidate>(decoder.start(), &length)); \
- EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+#define CHECK_UINT64V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ unsigned length; \
+ EXPECT_EQ(static_cast<uint64_t>(expected), \
+ decoder.read_u64v<Decoder::kFullValidation>(decoder.start(), \
+ &length)); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
} while (false)
-#define CHECK_INT64V_INLINE(expected, expected_length, ...) \
- do { \
- const byte data[] = {__VA_ARGS__}; \
- decoder.Reset(data, data + sizeof(data)); \
- unsigned length; \
- EXPECT_EQ(expected, decoder.read_i64v<Decoder::kValidate>(decoder.start(), \
- &length)); \
- EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+#define CHECK_INT64V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ unsigned length; \
+ EXPECT_EQ(expected, decoder.read_i64v<Decoder::kFullValidation>( \
+ decoder.start(), &length)); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
} while (false)
TEST_F(DecoderTest, ReadU32v_OneByte) {
@@ -379,8 +379,7 @@ TEST_F(DecoderTest, ReadU32v_off_end1) {
static const byte data[] = {U32V_1(11)};
unsigned length = 0;
decoder.Reset(data, data);
- decoder.read_u32v<Decoder::kValidate>(decoder.start(), &length);
- EXPECT_EQ(0u, length);
+ decoder.read_u32v<Decoder::kFullValidation>(decoder.start(), &length);
EXPECT_FALSE(decoder.ok());
}
@@ -389,8 +388,7 @@ TEST_F(DecoderTest, ReadU32v_off_end2) {
for (size_t i = 0; i < sizeof(data); i++) {
unsigned length = 0;
decoder.Reset(data, data + i);
- decoder.read_u32v<Decoder::kValidate>(decoder.start(), &length);
- EXPECT_EQ(i, length);
+ decoder.read_u32v<Decoder::kFullValidation>(decoder.start(), &length);
EXPECT_FALSE(decoder.ok());
}
}
@@ -400,8 +398,7 @@ TEST_F(DecoderTest, ReadU32v_off_end3) {
for (size_t i = 0; i < sizeof(data); i++) {
unsigned length = 0;
decoder.Reset(data, data + i);
- decoder.read_u32v<Decoder::kValidate>(decoder.start(), &length);
- EXPECT_EQ(i, length);
+ decoder.read_u32v<Decoder::kFullValidation>(decoder.start(), &length);
EXPECT_FALSE(decoder.ok());
}
}
@@ -411,8 +408,7 @@ TEST_F(DecoderTest, ReadU32v_off_end4) {
for (size_t i = 0; i < sizeof(data); i++) {
unsigned length = 0;
decoder.Reset(data, data + i);
- decoder.read_u32v<Decoder::kValidate>(decoder.start(), &length);
- EXPECT_EQ(i, length);
+ decoder.read_u32v<Decoder::kFullValidation>(decoder.start(), &length);
EXPECT_FALSE(decoder.ok());
}
}
@@ -422,8 +418,7 @@ TEST_F(DecoderTest, ReadU32v_off_end5) {
for (size_t i = 0; i < sizeof(data); i++) {
unsigned length = 0;
decoder.Reset(data, data + i);
- decoder.read_u32v<Decoder::kValidate>(decoder.start(), &length);
- EXPECT_EQ(i, length);
+ decoder.read_u32v<Decoder::kFullValidation>(decoder.start(), &length);
EXPECT_FALSE(decoder.ok());
}
}
@@ -434,8 +429,7 @@ TEST_F(DecoderTest, ReadU32v_extra_bits) {
data[4] = static_cast<byte>(i << 4);
unsigned length = 0;
decoder.Reset(data, data + sizeof(data));
- decoder.read_u32v<Decoder::kValidate>(decoder.start(), &length);
- EXPECT_EQ(5u, length);
+ decoder.read_u32v<Decoder::kFullValidation>(decoder.start(), &length);
EXPECT_FALSE(decoder.ok());
}
}
@@ -445,7 +439,7 @@ TEST_F(DecoderTest, ReadI32v_extra_bits_negative) {
unsigned length = 0;
byte data[] = {0xFF, 0xFF, 0xFF, 0xFF, 0x7F};
decoder.Reset(data, data + sizeof(data));
- decoder.read_i32v<Decoder::kValidate>(decoder.start(), &length);
+ decoder.read_i32v<Decoder::kFullValidation>(decoder.start(), &length);
EXPECT_EQ(5u, length);
EXPECT_TRUE(decoder.ok());
}
@@ -455,8 +449,7 @@ TEST_F(DecoderTest, ReadI32v_extra_bits_positive) {
unsigned length = 0;
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x77};
decoder.Reset(data, data + sizeof(data));
- decoder.read_i32v<Decoder::kValidate>(decoder.start(), &length);
- EXPECT_EQ(5u, length);
+ decoder.read_i32v<Decoder::kFullValidation>(decoder.start(), &length);
EXPECT_FALSE(decoder.ok());
}
@@ -491,7 +484,8 @@ TEST_F(DecoderTest, ReadU32v_Bits) {
for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
unsigned rlen;
- uint32_t result = decoder.read_u32v<Decoder::kValidate>(data, &rlen);
+ uint32_t result =
+ decoder.read_u32v<Decoder::kFullValidation>(data, &rlen);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
} else {
@@ -547,7 +541,8 @@ TEST_F(DecoderTest, ReadU64v_PowerOf2) {
for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
unsigned length;
- uint64_t result = decoder.read_u64v<Decoder::kValidate>(data, &length);
+ uint64_t result =
+ decoder.read_u64v<Decoder::kFullValidation>(data, &length);
if (limit <= index) {
EXPECT_FALSE(decoder.ok());
} else {
@@ -588,7 +583,8 @@ TEST_F(DecoderTest, ReadU64v_Bits) {
for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
unsigned rlen;
- uint64_t result = decoder.read_u64v<Decoder::kValidate>(data, &rlen);
+ uint64_t result =
+ decoder.read_u64v<Decoder::kFullValidation>(data, &rlen);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
} else {
@@ -630,7 +626,8 @@ TEST_F(DecoderTest, ReadI64v_Bits) {
for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
unsigned rlen;
- int64_t result = decoder.read_i64v<Decoder::kValidate>(data, &rlen);
+ int64_t result =
+ decoder.read_i64v<Decoder::kFullValidation>(data, &rlen);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
} else {
@@ -649,8 +646,7 @@ TEST_F(DecoderTest, ReadU64v_extra_bits) {
data[9] = static_cast<byte>(i << 1);
unsigned length = 0;
decoder.Reset(data, data + sizeof(data));
- decoder.read_u64v<Decoder::kValidate>(decoder.start(), &length);
- EXPECT_EQ(10u, length);
+ decoder.read_u64v<Decoder::kFullValidation>(decoder.start(), &length);
EXPECT_FALSE(decoder.ok());
}
}
@@ -660,7 +656,7 @@ TEST_F(DecoderTest, ReadI64v_extra_bits_negative) {
unsigned length = 0;
byte data[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F};
decoder.Reset(data, data + sizeof(data));
- decoder.read_i64v<Decoder::kValidate>(decoder.start(), &length);
+ decoder.read_i64v<Decoder::kFullValidation>(decoder.start(), &length);
EXPECT_EQ(10u, length);
EXPECT_TRUE(decoder.ok());
}
@@ -670,8 +666,7 @@ TEST_F(DecoderTest, ReadI64v_extra_bits_positive) {
unsigned length = 0;
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x77};
decoder.Reset(data, data + sizeof(data));
- decoder.read_i64v<Decoder::kValidate>(decoder.start(), &length);
- EXPECT_EQ(10u, length);
+ decoder.read_i64v<Decoder::kFullValidation>(decoder.start(), &length);
EXPECT_FALSE(decoder.ok());
}
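A note on the validation exercised above: WebAssembly integers are LEB128-encoded, so a u32 occupies at most five bytes and the fifth byte may carry only four significant bits (4 * 7 + 4 = 32). The kFullValidation tests reject both truncated buffers and such "extra bits". A minimal standalone sketch of that check (an illustration only, not V8's actual read_u32v):

    #include <cstddef>
    #include <cstdint>

    // Decodes an unsigned 32-bit LEB128 value from at most five bytes.
    // Returns false on a truncated buffer or if the fifth byte sets bits
    // beyond the 32-bit value width (the "extra bits" case).
    bool ReadU32LEB(const uint8_t* data, size_t size, uint32_t* value,
                    unsigned* length) {
      uint32_t result = 0;
      for (unsigned i = 0; i < 5 && i < size; ++i) {
        uint8_t b = data[i];
        result |= static_cast<uint32_t>(b & 0x7F) << (7 * i);
        if ((b & 0x80) == 0) {  // last byte of this varint
          if (i == 4 && (b & 0xF0) != 0) return false;
          *length = i + 1;
          *value = result;
          return true;
        }
      }
      return false;  // ran off the end, or more than five bytes
    }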
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index 0237a7d2ae..312fdacb3e 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -68,6 +68,8 @@ constexpr size_t kMaxByteSizedLeb128 = 127;
using F = std::pair<ValueType, bool>;
+enum MemoryType { kMemory32, kMemory64 };
+
// A helper for tests that require a module environment for functions,
// globals, or memories.
class TestModuleBuilder {
@@ -88,13 +90,15 @@ class TestModuleBuilder {
return static_cast<byte>(mod.types.size() - 1);
}
byte AddFunction(const FunctionSig* sig, bool declared = true) {
- mod.functions.push_back({sig, // sig
- 0, // func_index
- 0, // sig_index
- {0, 0}, // code
- false, // import
- false, // export
- declared}); // declared
+ byte sig_index = AddSignature(sig);
+ mod.functions.push_back(
+ {sig, // sig
+ static_cast<uint32_t>(mod.functions.size()), // func_index
+ sig_index, // sig_index
+ {0, 0}, // code
+ false, // import
+ false, // export
+ declared}); // declared
CHECK_LE(mod.functions.size(), kMaxByteSizedLeb128);
return static_cast<byte>(mod.functions.size() - 1);
}
@@ -137,8 +141,9 @@ class TestModuleBuilder {
return static_cast<byte>(mod.type_kinds.size() - 1);
}
- void InitializeMemory() {
+ void InitializeMemory(MemoryType mem_type = kMemory32) {
mod.has_memory = true;
+ mod.is_memory64 = mem_type == kMemory64;
mod.initial_pages = 1;
mod.maximum_pages = 100;
}
@@ -750,6 +755,14 @@ TEST_F(FunctionBodyDecoderTest, BlockBrBinop) {
WASM_I32V_1(2))});
}
+TEST_F(FunctionBodyDecoderTest, VoidBlockTypeVariants) {
+ // Valid kVoidCode encoded in 2 bytes.
+ ExpectValidates(sigs.v_v(), {kExprBlock, kVoidCode | 0x80, 0x7F, kExprEnd});
+ // Invalid code, whose last 7 bits coincide with kVoidCode.
+ ExpectFailure(sigs.v_v(), {kExprBlock, kVoidCode | 0x80, 0x45, kExprEnd},
+ kAppendEnd, "Invalid block type");
+}
+
TEST_F(FunctionBodyDecoderTest, If_empty1) {
ExpectValidates(sigs.v_v(), {WASM_ZERO, WASM_IF_OP, kExprEnd});
}
@@ -1068,6 +1081,79 @@ TEST_F(FunctionBodyDecoderTest, Unreachable_select2) {
{WASM_SELECT(WASM_UNREACHABLE, WASM_ZERO, WASM_F32(0.0))});
}
+TEST_F(FunctionBodyDecoderTest, UnreachableRefTypes) {
+ WASM_FEATURE_SCOPE(reftypes);
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ WASM_FEATURE_SCOPE(return_call);
+
+ byte function_index = builder.AddFunction(sigs.i_ii());
+ byte struct_index = builder.AddStruct({F(kWasmI32, true), F(kWasmI64, true)});
+ byte array_index = builder.AddArray(kWasmI32, true);
+
+ ValueType struct_type = ValueType::Ref(struct_index, kNonNullable);
+ FunctionSig sig_v_s(0, 1, &struct_type);
+ byte struct_consumer = builder.AddFunction(&sig_v_s);
+
+ ExpectValidates(sigs.v_v(), {WASM_BLOCK(WASM_UNREACHABLE, kExprBrOnNull, 0)});
+ ExpectValidates(sigs.i_v(), {WASM_UNREACHABLE, kExprRefIsNull});
+ ExpectValidates(sigs.v_v(), {WASM_UNREACHABLE, kExprRefAsNonNull, kExprDrop});
+
+ ExpectValidates(sigs.i_v(), {WASM_UNREACHABLE, kExprCallRef, WASM_I32V(1)});
+ ExpectValidates(sigs.i_v(), {WASM_UNREACHABLE, WASM_REF_FUNC(function_index),
+ kExprCallRef});
+ ExpectValidates(sigs.v_v(), {WASM_UNREACHABLE, kExprReturnCallRef});
+
+ ExpectValidates(sigs.v_v(),
+ {WASM_UNREACHABLE, WASM_GC_OP(kExprStructNewWithRtt),
+ struct_index, kExprCallFunction, struct_consumer});
+ ExpectValidates(sigs.v_v(), {WASM_UNREACHABLE, WASM_RTT_CANON(struct_index),
+ WASM_GC_OP(kExprStructNewWithRtt), struct_index,
+ kExprCallFunction, struct_consumer});
+ ExpectValidates(sigs.v_v(), {WASM_UNREACHABLE, WASM_I64V(42),
+ WASM_RTT_CANON(struct_index),
+ WASM_GC_OP(kExprStructNewWithRtt), struct_index,
+ kExprCallFunction, struct_consumer});
+ ExpectValidates(sigs.v_v(),
+ {WASM_UNREACHABLE, WASM_GC_OP(kExprStructNewDefault),
+ struct_index, kExprDrop});
+ ExpectValidates(sigs.v_v(), {WASM_UNREACHABLE, WASM_RTT_CANON(struct_index),
+ WASM_GC_OP(kExprStructNewDefault), struct_index,
+ kExprCallFunction, struct_consumer});
+
+ ExpectValidates(sigs.v_v(),
+ {WASM_UNREACHABLE, WASM_GC_OP(kExprArrayNewWithRtt),
+ array_index, kExprDrop});
+ ExpectValidates(sigs.v_v(),
+ {WASM_UNREACHABLE, WASM_RTT_CANON(array_index),
+ WASM_GC_OP(kExprArrayNewWithRtt), array_index, kExprDrop});
+ ExpectValidates(sigs.v_v(),
+ {WASM_UNREACHABLE, WASM_I32V(42), WASM_RTT_CANON(array_index),
+ WASM_GC_OP(kExprArrayNewWithRtt), array_index, kExprDrop});
+ ExpectValidates(sigs.v_v(),
+ {WASM_UNREACHABLE, WASM_GC_OP(kExprArrayNewDefault),
+ array_index, kExprDrop});
+ ExpectValidates(sigs.v_v(),
+ {WASM_UNREACHABLE, WASM_RTT_CANON(array_index),
+ WASM_GC_OP(kExprArrayNewDefault), array_index, kExprDrop});
+
+ ExpectValidates(sigs.i_v(), {WASM_UNREACHABLE, WASM_GC_OP(kExprRefTest),
+ struct_index, struct_index});
+ ExpectValidates(sigs.i_v(),
+ {WASM_UNREACHABLE, WASM_RTT_CANON(struct_index),
+ WASM_GC_OP(kExprRefTest), struct_index, struct_index});
+
+ ExpectValidates(sigs.v_v(), {WASM_UNREACHABLE, WASM_GC_OP(kExprRefCast),
+ struct_index, struct_index, kExprDrop});
+ ExpectValidates(sigs.v_v(), {WASM_UNREACHABLE, WASM_RTT_CANON(struct_index),
+ WASM_GC_OP(kExprRefCast), struct_index,
+ struct_index, kExprDrop});
+
+ ExpectValidates(sigs.v_v(),
+ {WASM_UNREACHABLE, WASM_GC_OP(kExprRttSub), array_index,
+ WASM_GC_OP(kExprRttSub), array_index, kExprDrop});
+}
+
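For context: after an unconditional trap the Wasm operand stack becomes polymorphic, so the decoder can no longer match concrete value types, yet it must still decode and validate each instruction's immediates (type indices, RTTs, callee signatures). Every ExpectValidates case above checks exactly that for the reference-type and GC opcodes.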
TEST_F(FunctionBodyDecoderTest, If1) {
ExpectValidates(sigs.i_i(), {WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_I32V_1(9),
WASM_I32V_1(8))});
@@ -4258,15 +4344,15 @@ class BranchTableIteratorTest : public TestWithZone {
BranchTableIteratorTest() : TestWithZone() {}
void CheckBrTableSize(const byte* start, const byte* end) {
Decoder decoder(start, end);
- BranchTableImmediate<Decoder::kValidate> operand(&decoder, start + 1);
- BranchTableIterator<Decoder::kValidate> iterator(&decoder, operand);
+ BranchTableImmediate<Decoder::kFullValidation> operand(&decoder, start + 1);
+ BranchTableIterator<Decoder::kFullValidation> iterator(&decoder, operand);
EXPECT_EQ(end - start - 1u, iterator.length());
EXPECT_OK(decoder);
}
void CheckBrTableError(const byte* start, const byte* end) {
Decoder decoder(start, end);
- BranchTableImmediate<Decoder::kValidate> operand(&decoder, start + 1);
- BranchTableIterator<Decoder::kValidate> iterator(&decoder, operand);
+ BranchTableImmediate<Decoder::kFullValidation> operand(&decoder, start + 1);
+ BranchTableIterator<Decoder::kFullValidation> iterator(&decoder, operand);
iterator.length();
EXPECT_FALSE(decoder.ok());
}
@@ -4360,10 +4446,10 @@ class WasmOpcodeLengthTest : public TestWithZone {
void ExpectFailure(Bytes... bytes) {
const byte code[] = {bytes..., 0, 0, 0, 0, 0, 0, 0, 0};
WasmFeatures no_features = WasmFeatures::None();
- WasmDecoder<Decoder::kValidate> decoder(this->zone(), nullptr, no_features,
- &no_features, nullptr, code,
- code + sizeof(code), 0);
- WasmDecoder<Decoder::kValidate>::OpcodeLength(&decoder, code);
+ WasmDecoder<Decoder::kFullValidation> decoder(
+ this->zone(), nullptr, no_features, &no_features, nullptr, code,
+ code + sizeof(code), 0);
+ WasmDecoder<Decoder::kFullValidation>::OpcodeLength(&decoder, code);
EXPECT_EQ(decoder.failed(), true);
}
};
@@ -4508,6 +4594,93 @@ TEST_F(WasmOpcodeLengthTest, IllegalRefIndices) {
ExpectFailure(kExprBlock, kOptRefCode, U32V_4(0x01000000));
}
+TEST_F(WasmOpcodeLengthTest, PrefixedOpcodesLEB) {
+ // kExprI31New with a 4-byte LEB-encoded opcode.
+ ExpectLength(5, 0xfb, 0xa0, 0x80, 0x80, 0x00);
+
+ // kExprI8x16Splat with a 3-byte LEB-encoded opcode.
+ ExpectLength(4, 0xfd, 0x8f, 0x80, 0x00);
+
+ // kExprI32SConvertSatF32 with a 4-byte LEB-encoded opcode.
+ ExpectLength(5, 0xfc, 0x80, 0x80, 0x80, 0x00);
+
+ // kExprAtomicNotify with a 2-byte LEB-encoded opcode, and two i32
+ // immediates for the memarg.
+ ExpectLength(5, 0xfe, 0x80, 0x00, 0x00, 0x00);
+}
+
+class TypeReaderTest : public TestWithZone {
+ public:
+ ValueType DecodeValueType(const byte* start, const byte* end) {
+ Decoder decoder(start, end);
+ uint32_t length;
+ return value_type_reader::read_value_type<Decoder::kFullValidation>(
+ &decoder, start, &length, enabled_features_);
+ }
+
+ HeapType DecodeHeapType(const byte* start, const byte* end) {
+ Decoder decoder(start, end);
+ uint32_t length;
+ return value_type_reader::read_heap_type<Decoder::kFullValidation>(
+ &decoder, start, &length, enabled_features_);
+ }
+
+ // This variable is modified by WASM_FEATURE_SCOPE.
+ WasmFeatures enabled_features_;
+};
+
+TEST_F(TypeReaderTest, HeapTypeDecodingTest) {
+ WASM_FEATURE_SCOPE(gc);
+ WASM_FEATURE_SCOPE(reftypes);
+ WASM_FEATURE_SCOPE(typed_funcref);
+
+ HeapType heap_func = HeapType(HeapType::kFunc);
+ HeapType heap_bottom = HeapType(HeapType::kBottom);
+
+ // 1- to 5-byte representation of kFuncRefCode.
+ {
+ const byte data[] = {kFuncRefCode};
+ HeapType result = DecodeHeapType(data, data + sizeof(data));
+ EXPECT_TRUE(result == heap_func);
+ }
+ {
+ const byte data[] = {kFuncRefCode | 0x80, 0x7F};
+ HeapType result = DecodeHeapType(data, data + sizeof(data));
+ EXPECT_EQ(result, heap_func);
+ }
+ {
+ const byte data[] = {kFuncRefCode | 0x80, 0xFF, 0x7F};
+ HeapType result = DecodeHeapType(data, data + sizeof(data));
+ EXPECT_EQ(result, heap_func);
+ }
+ {
+ const byte data[] = {kFuncRefCode | 0x80, 0xFF, 0xFF, 0x7F};
+ HeapType result = DecodeHeapType(data, data + sizeof(data));
+ EXPECT_EQ(result, heap_func);
+ }
+ {
+ const byte data[] = {kFuncRefCode | 0x80, 0xFF, 0xFF, 0xFF, 0x7F};
+ HeapType result = DecodeHeapType(data, data + sizeof(data));
+ EXPECT_EQ(result, heap_func);
+ }
+
+ {
+ // Some negative number.
+ const byte data[] = {0xB4, 0x7F};
+ HeapType result = DecodeHeapType(data, data + sizeof(data));
+ EXPECT_EQ(result, heap_bottom);
+ }
+
+ {
+ // This differs from kFuncRefCode by one bit outside the 1-byte LEB128
+ // range. This should therefore NOT be decoded as HeapType::kFunc and
+ // instead fail.
+ const byte data[] = {kFuncRefCode | 0x80, 0x6F};
+ HeapType result = DecodeHeapType(data, data + sizeof(data));
+ EXPECT_EQ(result, heap_bottom);
+ }
+}
+
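The redundant encodings accepted above follow from heap type codes being signed LEB128 values: kFuncRefCode (0x70) is the shortest encoding of -0x10, and each trailing 0x7F continuation byte contributes only sign bits. The rejected {kFuncRefCode | 0x80, 0x6F} variant sign-extends to a different negative number, hence kBottom. A sketch of the sign-extension step (illustrative only, not V8's read_heap_type):

    #include <cstddef>
    #include <cstdint>

    // Decodes a signed 32-bit LEB128 value; e.g. {0x70} and {0xF0, 0x7F}
    // both yield -0x10.
    bool ReadI32LEB(const uint8_t* data, size_t size, int32_t* value,
                    unsigned* length) {
      uint32_t result = 0;
      unsigned shift = 0, i = 0;
      uint8_t b;
      do {
        if (i >= size || i >= 5) return false;
        b = data[i++];
        result |= static_cast<uint32_t>(b & 0x7F) << shift;
        shift += 7;
      } while (b & 0x80);
      if (shift < 32 && (b & 0x40) != 0) {
        result |= ~uint32_t{0} << shift;  // sign-extend from bit 6
      }
      *length = i;
      *value = static_cast<int32_t>(result);
      return true;
    }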
using TypesOfLocals = ZoneVector<ValueType>;
class LocalDeclDecoderTest : public TestWithZone {
@@ -4726,6 +4899,31 @@ TEST_F(BytecodeIteratorTest, WithLocalDecls) {
EXPECT_FALSE(iter.has_next());
}
+/*******************************************************************************
+ * Memory64 tests
+ ******************************************************************************/
+
+TEST_F(FunctionBodyDecoderTest, IndexTypesOn32BitMemory) {
+ builder.InitializeMemory(kMemory32);
+ ExpectValidates(sigs.i_v(), {WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)});
+ ExpectFailure(sigs.i_v(), {WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO64)});
+ ExpectValidates(sigs.v_v(),
+ {WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO, WASM_ZERO)});
+ ExpectFailure(sigs.v_v(),
+ {WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO64, WASM_ZERO)});
+}
+
+TEST_F(FunctionBodyDecoderTest, IndexTypesOn64BitMemory) {
+ builder.InitializeMemory(kMemory64);
+ ExpectFailure(sigs.i_v(), {WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)});
+ ExpectValidates(sigs.i_v(),
+ {WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO64)});
+ ExpectFailure(sigs.v_v(),
+ {WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO, WASM_ZERO)});
+ ExpectValidates(sigs.v_v(), {WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO64,
+ WASM_ZERO)});
+}
+
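These cases pin down the memory64 addressing rule: once is_memory64 is set, the index operand of loads and stores is typed i64 rather than i32, so the same MachineType::Int32() access validates only with WASM_ZERO64, and vice versa on a 32-bit memory.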
#undef B1
#undef B2
#undef B3
diff --git a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
index 601de59c57..d1531c4e35 100644
--- a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
+++ b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
@@ -88,19 +88,20 @@ TEST_F(LEBHelperTest, sizeof_i32v) {
}
}
-#define DECLARE_ENCODE_DECODE_CHECKER(ctype, name) \
- static void CheckEncodeDecode_##name(ctype val) { \
- static const int kSize = 16; \
- static byte buffer[kSize]; \
- byte* ptr = buffer; \
- LEBHelper::write_##name(&ptr, val); \
- EXPECT_EQ(LEBHelper::sizeof_##name(val), \
- static_cast<size_t>(ptr - buffer)); \
- Decoder decoder(buffer, buffer + kSize); \
- unsigned length = 0; \
- ctype result = decoder.read_##name<Decoder::kNoValidate>(buffer, &length); \
- EXPECT_EQ(val, result); \
- EXPECT_EQ(LEBHelper::sizeof_##name(val), static_cast<size_t>(length)); \
+#define DECLARE_ENCODE_DECODE_CHECKER(ctype, name) \
+ static void CheckEncodeDecode_##name(ctype val) { \
+ static const int kSize = 16; \
+ static byte buffer[kSize]; \
+ byte* ptr = buffer; \
+ LEBHelper::write_##name(&ptr, val); \
+ EXPECT_EQ(LEBHelper::sizeof_##name(val), \
+ static_cast<size_t>(ptr - buffer)); \
+ Decoder decoder(buffer, buffer + kSize); \
+ unsigned length = 0; \
+ ctype result = \
+ decoder.read_##name<Decoder::kNoValidation>(buffer, &length); \
+ EXPECT_EQ(val, result); \
+ EXPECT_EQ(LEBHelper::sizeof_##name(val), static_cast<size_t>(length)); \
}
DECLARE_ENCODE_DECODE_CHECKER(int32_t, i32v)
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 49528b8aea..a7635dcb59 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -1264,6 +1264,41 @@ TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
EXPECT_OFF_END_FAILURE(data, 1);
}
+TEST_F(WasmModuleVerifyTest, CanonicalTypeIds) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ WASM_FEATURE_SCOPE(reftypes);
+
+ static const byte data[] = {
+ SECTION(Type, // --
+ ENTRY_COUNT(5), // --
+ WASM_STRUCT_DEF( // Struct definition
+ FIELD_COUNT(1), // --
+ STRUCT_FIELD(kI32Code, true)), // --
+ SIG_ENTRY_x_x(kI32Code, kF32Code), // f32 -> i32
+ SIG_ENTRY_x_x(kI32Code, kF64Code), // f64 -> i32
+ SIG_ENTRY_x_x(kI32Code, kF32Code), // f32 -> i32 (again)
+ WASM_ARRAY_DEF(kI32Code, true)) // Array definition
+ };
+
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_OK(result);
+ const WasmModule* module = result.value().get();
+
+ EXPECT_EQ(5u, module->types.size());
+ EXPECT_EQ(5u, module->type_kinds.size());
+ EXPECT_EQ(5u, module->canonicalized_type_ids.size());
+ EXPECT_EQ(2u, module->signature_map.size());
+
+ // No canonicalization for structs.
+ EXPECT_EQ(0u, module->canonicalized_type_ids[0]);
+ EXPECT_EQ(0u, module->canonicalized_type_ids[1]);
+ EXPECT_EQ(1u, module->canonicalized_type_ids[2]);
+ EXPECT_EQ(0u, module->canonicalized_type_ids[3]);
+ // No canonicalization for arrays.
+ EXPECT_EQ(0u, module->canonicalized_type_ids[4]);
+}
+
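Reading the expectations above: structs (entry 0) and arrays (entry 4) are not canonicalized, so their slots stay 0; the two identical f32 -> i32 signatures (entries 1 and 3) share canonical id 0, the distinct f64 -> i32 signature (entry 2) gets id 1, and the signature_map therefore holds exactly two entries.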
TEST_F(WasmModuleVerifyTest, DataSegmentWithImmutableImportedGlobal) {
// Import 2 globals so that we can initialize data with a global index != 0.
const byte data[] = {
@@ -1420,6 +1455,15 @@ TEST_F(WasmModuleVerifyTest, MaxMaximumMemorySize) {
}
}
+TEST_F(WasmModuleVerifyTest, InvalidMemoryLimits) {
+ {
+ const byte kInvalidLimits = 0x15;
+ const byte data[] = {
+ SECTION(Memory, ENTRY_COUNT(1), kInvalidLimits, 0, 10)};
+ EXPECT_FAILURE_WITH_MSG(data, "invalid memory limits flags 0x15");
+ }
+}
+
TEST_F(WasmModuleVerifyTest, DataSegment_wrong_init_type) {
const byte data[] = {
SECTION(Memory, ENTRY_COUNT(1), kWithMaximum, 28, 28),
diff --git a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
index 9c72b68e19..478ee45aee 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
@@ -80,6 +80,8 @@ class TrapHandlerTest : public TestWithIsolate,
public ::testing::WithParamInterface<TrapHandlerStyle> {
protected:
void SetUp() override {
+ InstallFallbackHandler();
+ SetupTrapHandler(GetParam());
backing_store_ = BackingStore::AllocateWasmMemory(i_isolate(), 1, 1,
SharedFlag::kNotShared);
CHECK(backing_store_);
@@ -92,7 +94,9 @@ class TrapHandlerTest : public TestWithIsolate,
GetRandomMmapAddr());
InitRecoveryCode();
+ }
+ void InstallFallbackHandler() {
#if V8_OS_LINUX || V8_OS_MACOSX || V8_OS_FREEBSD
// Set up a signal handler to recover from the expected crash.
struct sigaction action;
@@ -196,13 +200,13 @@ class TrapHandlerTest : public TestWithIsolate,
}
#endif
- public:
void SetupTrapHandler(TrapHandlerStyle style) {
bool use_default_handler = style == kDefault;
g_use_as_first_chance_handler = !use_default_handler;
CHECK(v8::V8::EnableWebAssemblyTrapHandler(use_default_handler));
}
+ public:
void GenerateSetThreadInWasmFlagCode(MacroAssembler* masm) {
masm->Move(scratch,
i_isolate()->thread_local_top()->thread_in_wasm_flag_address_,
@@ -281,7 +285,6 @@ TEST_P(TrapHandlerTest, TestTrapHandlerRecovery) {
CodeDesc desc;
masm.GetCode(nullptr, &desc);
- SetupTrapHandler(GetParam());
trap_handler::ProtectedInstructionData protected_instruction{crash_offset,
recovery_offset};
trap_handler::RegisterHandlerData(reinterpret_cast<Address>(desc.buffer),
@@ -313,8 +316,6 @@ TEST_P(TrapHandlerTest, TestReleaseHandlerData) {
reinterpret_cast<Address>(desc.buffer), desc.instr_size, 1,
&protected_instruction);
- SetupTrapHandler(GetParam());
-
ExecuteBuffer();
// Deregister from the trap handler. The trap handler should not do the
@@ -344,8 +345,6 @@ TEST_P(TrapHandlerTest, TestNoThreadInWasmFlag) {
trap_handler::RegisterHandlerData(reinterpret_cast<Address>(desc.buffer),
desc.instr_size, 1, &protected_instruction);
- SetupTrapHandler(GetParam());
-
ExecuteExpectCrash(buffer_.get());
}
@@ -372,8 +371,6 @@ TEST_P(TrapHandlerTest, TestCrashInWasmNoProtectedInstruction) {
trap_handler::RegisterHandlerData(reinterpret_cast<Address>(desc.buffer),
desc.instr_size, 1, &protected_instruction);
- SetupTrapHandler(GetParam());
-
ExecuteExpectCrash(buffer_.get());
}
@@ -400,8 +397,6 @@ TEST_P(TrapHandlerTest, TestCrashInWasmWrongCrashType) {
trap_handler::RegisterHandlerData(reinterpret_cast<Address>(desc.buffer),
desc.instr_size, 1, &protected_instruction);
- SetupTrapHandler(GetParam());
-
#if V8_OS_POSIX
// On Posix, the V8 default trap handler does not register for SIGFPE,
// therefore the thread-in-wasm flag is never reset in this test. We
@@ -461,8 +456,6 @@ TEST_P(TrapHandlerTest, TestCrashInOtherThread) {
trap_handler::RegisterHandlerData(reinterpret_cast<Address>(desc.buffer),
desc.instr_size, 1, &protected_instruction);
- SetupTrapHandler(GetParam());
-
CodeRunner runner(this, buffer_.get());
CHECK(!GetThreadInWasmFlag());
// Set the thread-in-wasm flag manually in this thread.
diff --git a/deps/v8/test/wasm-api-tests/traps.cc b/deps/v8/test/wasm-api-tests/traps.cc
index 4060832893..b4165e3093 100644
--- a/deps/v8/test/wasm-api-tests/traps.cc
+++ b/deps/v8/test/wasm-api-tests/traps.cc
@@ -40,10 +40,13 @@ TEST_F(WasmCapiTest, Traps) {
uint32_t callback_index = builder()->AddImport(CStrVector("callback"), &sig);
byte code[] = {WASM_CALL_FUNCTION0(callback_index)};
AddExportedFunction(CStrVector("callback"), code, sizeof(code), &sig);
- // The first constant is a 4-byte dummy so that the {unreachable} trap
- // has a more interesting offset.
- byte code2[] = {WASM_I32V_3(0), WASM_UNREACHABLE, WASM_I32V_1(1)};
+
+ byte code2[] = {WASM_CALL_FUNCTION0(3)};
AddExportedFunction(CStrVector("unreachable"), code2, sizeof(code2), &sig);
+ // The first constant is a 4-byte dummy so that the {unreachable} trap
+ // has a more interesting offset. This is called by code2.
+ byte code3[] = {WASM_I32V_3(0), WASM_UNREACHABLE, WASM_I32V_1(1)};
+ AddFunction(code3, sizeof(code3), &sig);
own<FuncType> func_type =
FuncType::make(ownvec<ValType>::make(),
@@ -65,8 +68,10 @@ TEST_F(WasmCapiTest, Traps) {
ASSERT_TRUE(result.ok());
const WasmFunction* func1 = &result.value()->functions[1];
const WasmFunction* func2 = &result.value()->functions[2];
+ const WasmFunction* func3 = &result.value()->functions[3];
const uint32_t func1_offset = func1->code.offset();
const uint32_t func2_offset = func2->code.offset();
+ const uint32_t func3_offset = func3->code.offset();
Func* cpp_trapping_func = GetExportedFunction(0);
own<Trap> cpp_trap = cpp_trapping_func->call();
@@ -91,15 +96,22 @@ TEST_F(WasmCapiTest, Traps) {
ExpectMessage("Uncaught RuntimeError: unreachable", wasm_trap->message());
frame = wasm_trap->origin();
EXPECT_TRUE(frame->instance()->same(instance()));
- EXPECT_EQ(2u, frame->func_index());
+ EXPECT_EQ(3u, frame->func_index());
EXPECT_EQ(5u, frame->func_offset());
- EXPECT_EQ(func2_offset + frame->func_offset(), frame->module_offset());
+ EXPECT_EQ(func3_offset + frame->func_offset(), frame->module_offset());
trace = wasm_trap->trace();
- EXPECT_EQ(1u, trace.size());
+ EXPECT_EQ(2u, trace.size());
+
frame.reset(trace[0].release());
EXPECT_TRUE(frame->instance()->same(instance()));
- EXPECT_EQ(2u, frame->func_index());
+ EXPECT_EQ(3u, frame->func_index());
EXPECT_EQ(5u, frame->func_offset());
+ EXPECT_EQ(func3_offset + frame->func_offset(), frame->module_offset());
+
+ frame.reset(trace[1].release());
+ EXPECT_TRUE(frame->instance()->same(instance()));
+ EXPECT_EQ(2u, frame->func_index());
+ EXPECT_EQ(1u, frame->func_offset());
EXPECT_EQ(func2_offset + frame->func_offset(), frame->module_offset());
}
diff --git a/deps/v8/test/wasm-api-tests/wasm-api-test.h b/deps/v8/test/wasm-api-tests/wasm-api-test.h
index f566d398ca..b74b927622 100644
--- a/deps/v8/test/wasm-api-tests/wasm-api-test.h
+++ b/deps/v8/test/wasm-api-tests/wasm-api-test.h
@@ -79,6 +79,12 @@ class WasmCapiTest : public ::testing::Test {
builder()->AddExport(name, fun);
}
+ void AddFunction(byte code[], size_t code_size, FunctionSig* sig) {
+ WasmFunctionBuilder* fun = builder()->AddFunction(sig);
+ fun->EmitCode(code, static_cast<uint32_t>(code_size));
+ fun->Emit(kExprEnd);
+ }
+
Func* GetExportedFunction(size_t index) {
DCHECK_GT(exports_.size(), index);
Extern* exported = exports_[index].get();
diff --git a/deps/v8/test/wasm-js/tests.tar.gz.sha1 b/deps/v8/test/wasm-js/tests.tar.gz.sha1
index a3ee7c7f95..c96c9a8ac2 100644
--- a/deps/v8/test/wasm-js/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-js/tests.tar.gz.sha1
@@ -1 +1 @@
-a7c9db6002250f90f4b316649b2915791ff389a8 \ No newline at end of file
+331d0b45c9b1fe5872b5b27a7eff2c889cecb495 \ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/DIR_METADATA b/deps/v8/test/wasm-spec-tests/DIR_METADATA
new file mode 100644
index 0000000000..3b428d9660
--- /dev/null
+++ b/deps/v8/test/wasm-spec-tests/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Blink>JavaScript>WebAssembly"
+} \ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/OWNERS b/deps/v8/test/wasm-spec-tests/OWNERS
index 95216bba4d..32941e6257 100644
--- a/deps/v8/test/wasm-spec-tests/OWNERS
+++ b/deps/v8/test/wasm-spec-tests/OWNERS
@@ -1,3 +1 @@
file:../../src/wasm/OWNERS
-
-# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index a2b0da9da5..291812527d 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-d4b9b276a7d0608e3a9bb2d62e6d2b877d05da0c \ No newline at end of file
+a58223f93f7c7489abd950f322f970caac3c2cad \ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
index 88f3c9a9f3..d3b37e8aa0 100644
--- a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
@@ -7,15 +7,14 @@
'skip-stack-guard-page': [PASS, ['((arch == ppc or arch == ppc64 or arch == s390 or arch == s390x) and simulator_run)', SKIP]],
# TODO(wasm) Investigate failing spec tests after update.
'binary': [FAIL],
- 'proposals/js-types/binary-leb128': [FAIL],
'proposals/bulk-memory-operations/binary': [FAIL],
- 'proposals/bulk-memory-operations/binary-leb128': [FAIL],
+ # TODO(v8:10994): Failing spec test after update.
+ 'proposals/reference-types/br_table': [FAIL],
# TODO(v8:9144): The MVP behavior when bounds-checking segments changed in
# the bulk-memory proposal. Since we've enabled bulk-memory by default, we
# need to update to use its testsuite.
'linking': [FAIL],
- 'binary-leb128': [FAIL],
'elem': [FAIL],
'data': [FAIL],
@@ -36,13 +35,6 @@
# This test requires the reftypes flag to be disabled.
'proposals/bulk-memory-operations/imports': [FAIL],
-
- # SIMD test cases
- # Scalar lowering is incomplete, we skip these and selectively enable as
- # we finish the implementation, see v8:10507.
- 'proposals/simd/simd_conversions' : [PASS, FAIL],
- 'proposals/simd/simd_lane' : [PASS, FAIL],
-
}], # ALWAYS
['arch == arm and not simulator_run', {
@@ -53,6 +45,7 @@
# This test only has 1 problematic use of f32x4.min and f32x4.div, consider
# removing it from upstream, then we can run this test.
'proposals/simd/simd_splat' : [PASS, FAIL],
+ 'proposals/simd/simd_f32x4_pmin_pmax' : [PASS, FAIL],
}], # arch == arm and not simulator_run
['arch == mipsel or arch == mips64el or arch == mips or arch == mips64', {
diff --git a/deps/v8/testing/gtest/BUILD.gn b/deps/v8/testing/gtest/BUILD.gn
index fe7ba6d483..6636b2d9ac 100644
--- a/deps/v8/testing/gtest/BUILD.gn
+++ b/deps/v8/testing/gtest/BUILD.gn
@@ -4,9 +4,9 @@
import("//build_overrides/gtest.gni")
if (is_ios) {
+ import("//build/buildflag_header.gni")
import("//build/config/coverage/coverage.gni")
import("//build/config/ios/ios_sdk.gni")
- import("//build/buildflag_header.gni")
}
config("gtest_direct_config") {
@@ -40,9 +40,7 @@ static_library("gtest") {
# Android. https://codereview.chromium.org/2852613002/#ps20001
"empty.cc",
]
- public_deps = [
- "//third_party/googletest:gtest",
- ]
+ public_deps = [ "//third_party/googletest:gtest" ]
public_configs = [ ":gtest_direct_config" ]
@@ -58,9 +56,6 @@ static_library("gtest") {
}
if ((is_mac || is_ios) && gtest_include_objc_support) {
- if (is_ios) {
- set_sources_assignment_filter([])
- }
sources += [
"../gtest_mac.h",
"../gtest_mac.mm",
@@ -68,7 +63,6 @@ static_library("gtest") {
if (gtest_include_platform_test) {
sources += [ "../platform_test_mac.mm" ]
}
- set_sources_assignment_filter(sources_assignment_filter)
}
if (is_ios && gtest_include_ios_coverage) {
@@ -76,9 +70,7 @@ static_library("gtest") {
"../coverage_util_ios.h",
"../coverage_util_ios.mm",
]
- deps = [
- ":ios_enable_coverage",
- ]
+ deps = [ ":ios_enable_coverage" ]
}
}
@@ -87,9 +79,7 @@ static_library("gtest") {
# into //third_party/googletest.
source_set("gtest_main") {
testonly = true
- deps = [
- "//third_party/googletest:gtest_main",
- ]
+ deps = [ "//third_party/googletest:gtest_main" ]
}
if (is_ios) {
diff --git a/deps/v8/third_party/markupsafe/DIR_METADATA b/deps/v8/third_party/markupsafe/DIR_METADATA
new file mode 100644
index 0000000000..14b5edb5d6
--- /dev/null
+++ b/deps/v8/third_party/markupsafe/DIR_METADATA
@@ -0,0 +1,3 @@
+monorail {
+ component: "Internals"
+}
diff --git a/deps/v8/third_party/markupsafe/OWNERS b/deps/v8/third_party/markupsafe/OWNERS
index 6a57e5dc3f..8edbdf893c 100644
--- a/deps/v8/third_party/markupsafe/OWNERS
+++ b/deps/v8/third_party/markupsafe/OWNERS
@@ -1,5 +1,3 @@
timloh@chromium.org
haraken@chromium.org
nbarth@chromium.org
-
-# COMPONENT: Internals
diff --git a/deps/v8/third_party/zlib/deflate.c b/deps/v8/third_party/zlib/deflate.c
index 8bf93e5248..fc7ae45905 100644
--- a/deps/v8/third_party/zlib/deflate.c
+++ b/deps/v8/third_party/zlib/deflate.c
@@ -321,6 +321,9 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
s->window = (Bytef *) ZALLOC(strm,
s->w_size + window_padding,
2*sizeof(Byte));
+ /* Avoid use of uninitialized values in the window, see crbug.com/1137613 and
+ * crbug.com/1144420 */
+ zmemzero(s->window, (s->w_size + window_padding) * (2 * sizeof(Byte)));
s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
/* Avoid use of uninitialized value, see:
* https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11360
diff --git a/deps/v8/third_party/zlib/google/compression_utils.cc b/deps/v8/third_party/zlib/google/compression_utils.cc
index d6ee2b61f7..781c805d0a 100644
--- a/deps/v8/third_party/zlib/google/compression_utils.cc
+++ b/deps/v8/third_party/zlib/google/compression_utils.cc
@@ -7,14 +7,13 @@
#include "base/bit_cast.h"
#include "base/check_op.h"
#include "base/process/memory.h"
-#include "base/strings/string_piece.h"
#include "base/sys_byteorder.h"
#include "third_party/zlib/google/compression_utils_portable.h"
namespace compression {
-bool GzipCompress(base::StringPiece input,
+bool GzipCompress(base::span<const char> input,
char* output_buffer,
size_t output_buffer_size,
size_t* compressed_size,
@@ -35,7 +34,11 @@ bool GzipCompress(base::StringPiece input,
return true;
}
-bool GzipCompress(base::StringPiece input, std::string* output) {
+bool GzipCompress(base::span<const char> input, std::string* output) {
+ return GzipCompress(base::as_bytes(input), output);
+}
+
+bool GzipCompress(base::span<const uint8_t> input, std::string* output) {
// Not using std::vector<> because allocation failures are recoverable,
// which is hidden by std::vector<>.
static_assert(sizeof(Bytef) == 1, "");
@@ -87,30 +90,44 @@ bool GzipUncompress(const std::string& input, std::string* output) {
return false;
}
-bool GzipUncompress(base::StringPiece input, base::StringPiece output) {
+bool GzipUncompress(base::span<const char> input,
+ base::span<const char> output) {
+ return GzipUncompress(base::as_bytes(input), base::as_bytes(output));
+}
+
+bool GzipUncompress(base::span<const uint8_t> input,
+ base::span<const uint8_t> output) {
uLongf uncompressed_size = GetUncompressedSize(input);
if (uncompressed_size > output.size())
return false;
return zlib_internal::GzipUncompressHelper(
bit_cast<Bytef*>(output.data()), &uncompressed_size,
bit_cast<const Bytef*>(input.data()),
- static_cast<uLongf>(input.length())) == Z_OK;
+ static_cast<uLongf>(input.size())) == Z_OK;
}
-bool GzipUncompress(base::StringPiece input, std::string* output) {
+bool GzipUncompress(base::span<const char> input, std::string* output) {
+ return GzipUncompress(base::as_bytes(input), output);
+}
+
+bool GzipUncompress(base::span<const uint8_t> input, std::string* output) {
// Disallow in-place usage, i.e., |input| using |*output| as underlying data.
- DCHECK_NE(input.data(), output->data());
+ DCHECK_NE(reinterpret_cast<const char*>(input.data()), output->data());
uLongf uncompressed_size = GetUncompressedSize(input);
output->resize(uncompressed_size);
return zlib_internal::GzipUncompressHelper(
bit_cast<Bytef*>(output->data()), &uncompressed_size,
bit_cast<const Bytef*>(input.data()),
- static_cast<uLongf>(input.length())) == Z_OK;
+ static_cast<uLongf>(input.size())) == Z_OK;
+}
+
+uint32_t GetUncompressedSize(base::span<const char> compressed_data) {
+ return GetUncompressedSize(base::as_bytes(compressed_data));
}
-uint32_t GetUncompressedSize(base::StringPiece compressed_data) {
+uint32_t GetUncompressedSize(base::span<const uint8_t> compressed_data) {
return zlib_internal::GetGzipUncompressedSize(
- bit_cast<Bytef*>(compressed_data.data()), compressed_data.length());
+ bit_cast<Bytef*>(compressed_data.data()), compressed_data.size());
}
} // namespace compression
diff --git a/deps/v8/third_party/zlib/google/compression_utils.h b/deps/v8/third_party/zlib/google/compression_utils.h
index 516220719f..cca47be1ef 100644
--- a/deps/v8/third_party/zlib/google/compression_utils.h
+++ b/deps/v8/third_party/zlib/google/compression_utils.h
@@ -7,7 +7,7 @@
#include <string>
-#include "base/strings/string_piece.h"
+#include "base/containers/span.h"
namespace compression {
@@ -18,7 +18,7 @@ namespace compression {
// |malloc_fn| and |free_fn| are pointers to malloc() and free()-like functions,
// or nullptr to use the standard ones.
// Returns true for success.
-bool GzipCompress(base::StringPiece input,
+bool GzipCompress(base::span<const char> input,
char* output_buffer,
size_t output_buffer_size,
size_t* compressed_size,
@@ -29,27 +29,41 @@ bool GzipCompress(base::StringPiece input,
// |input| and |output| are allowed to point to the same string (in-place
// operation).
// Returns true for success.
-bool GzipCompress(base::StringPiece input, std::string* output);
+bool GzipCompress(base::span<const char> input, std::string* output);
+
+// Like the above method, but using uint8_t instead.
+bool GzipCompress(base::span<const uint8_t> input, std::string* output);
// Uncompresses the data in |input| using gzip, storing the result in |output|.
// |input| and |output| are allowed to be the same string (in-place operation).
// Returns true for success.
bool GzipUncompress(const std::string& input, std::string* output);
-// Like the above method, but uses base::StringPiece to avoid allocations if
+// Like the above method, but uses base::span to avoid allocations if
// needed. |output|'s size must be at least as large as the return value from
// GetUncompressedSize.
// Returns true for success.
-bool GzipUncompress(base::StringPiece input, base::StringPiece output);
+bool GzipUncompress(base::span<const char> input,
+ base::span<const char> output);
+
+// Like the above method, but using uint8_t instead.
+bool GzipUncompress(base::span<const uint8_t> input,
+ base::span<const uint8_t> output);
// Uncompresses the data in |input| using gzip, and writes the results to
// |output|, which must NOT be the underlying string of |input|, and is resized
// if necessary.
// Returns true for success.
-bool GzipUncompress(base::StringPiece input, std::string* output);
+bool GzipUncompress(base::span<const char> input, std::string* output);
+
+// Like the above method, but using uint8_t instead.
+bool GzipUncompress(base::span<const uint8_t> input, std::string* output);
// Returns the uncompressed size from GZIP-compressed |compressed_data|.
-uint32_t GetUncompressedSize(base::StringPiece compressed_data);
+uint32_t GetUncompressedSize(base::span<const char> compressed_data);
+
+// Like the above method, but using uint8_t instead.
+uint32_t GetUncompressedSize(base::span<const uint8_t> compressed_data);
} // namespace compression
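A hedged usage sketch of the widened API (assumes a Chromium-style build; base::make_span and the helper function below are illustrative, not part of this change):

    #include <cstdint>
    #include <string>
    #include <vector>

    #include "base/containers/span.h"
    #include "third_party/zlib/google/compression_utils.h"

    // Round-trips a byte buffer through the new uint8_t overloads.
    bool RoundTripsLosslessly(const std::vector<uint8_t>& bytes) {
      std::string compressed;
      if (!compression::GzipCompress(base::make_span(bytes), &compressed))
        return false;
      std::string restored;
      if (!compression::GzipUncompress(compressed, &restored)) return false;
      return restored == std::string(bytes.begin(), bytes.end());
    }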
diff --git a/deps/v8/third_party/zlib/google/compression_utils_unittest.cc b/deps/v8/third_party/zlib/google/compression_utils_unittest.cc
index 398984bb2e..31c3226f0f 100644
--- a/deps/v8/third_party/zlib/google/compression_utils_unittest.cc
+++ b/deps/v8/third_party/zlib/google/compression_utils_unittest.cc
@@ -54,13 +54,9 @@ TEST(CompressionUtilsTest, GzipUncompression) {
EXPECT_EQ(golden_data, uncompressed_data);
}
-TEST(CompressionUtilsTest, GzipUncompressionFromStringPieceToString) {
- base::StringPiece compressed_data(
- reinterpret_cast<const char*>(kCompressedData),
- base::size(kCompressedData));
-
+TEST(CompressionUtilsTest, GzipUncompressionFromSpanToString) {
std::string uncompressed_data;
- EXPECT_TRUE(GzipUncompress(compressed_data, &uncompressed_data));
+ EXPECT_TRUE(GzipUncompress(kCompressedData, &uncompressed_data));
std::string golden_data(reinterpret_cast<const char*>(kData),
base::size(kData));
diff --git a/deps/v8/third_party/zlib/patches/0007-zero-init-deflate-window.patch b/deps/v8/third_party/zlib/patches/0007-zero-init-deflate-window.patch
new file mode 100644
index 0000000000..9dbbf53a8c
--- /dev/null
+++ b/deps/v8/third_party/zlib/patches/0007-zero-init-deflate-window.patch
@@ -0,0 +1,40 @@
+From 92537ee19784e0e545f06d89b7d89ab532a18cff Mon Sep 17 00:00:00 2001
+From: Hans Wennborg <hans@chromium.org>
+Date: Tue, 3 Nov 2020 15:54:09 +0100
+Subject: [PATCH] [zlib] Zero-initialize the window used for deflation
+
+Otherwise MSan complains about use-of-uninitialized values in the
+window.
+This happens in both regular deflate's longest_match and deflate_rle.
+
+Before crrev.com/822755 we used to suppress those reports, but it seems
+better to fix it properly. That will also allow us to catch other
+potential issues with MSan in these functions.
+
+The instances of this that we've seen only reproduce with
+fill_window_sse(), not with the regular fill_window() function. Since
+the former doesn't exist in upstream zlib, I'm not planning to send this
+patch upstream.
+
+Bug: 1137613, 1144420
+---
+ third_party/zlib/deflate.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/third_party/zlib/deflate.c b/third_party/zlib/deflate.c
+index 8bf93e524875..fc7ae45905ff 100644
+--- a/third_party/zlib/deflate.c
++++ b/third_party/zlib/deflate.c
+@@ -321,6 +321,9 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
+ s->window = (Bytef *) ZALLOC(strm,
+ s->w_size + window_padding,
+ 2*sizeof(Byte));
++ /* Avoid use of uninitialized values in the window, see crbug.com/1137613 and
++ * crbug.com/1144420 */
++ zmemzero(s->window, (s->w_size + window_padding) * (2 * sizeof(Byte)));
+ s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
+ /* Avoid use of uninitialized value, see:
+ * https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11360
+--
+2.29.1.341.ge80a0c044ae-goog
+
diff --git a/deps/v8/tools/android-sync.sh b/deps/v8/tools/android-sync.sh
index 709bbb7f7d..66d7aed78d 100755
--- a/deps/v8/tools/android-sync.sh
+++ b/deps/v8/tools/android-sync.sh
@@ -92,15 +92,16 @@ sync_file "$OUTDIR/$ARCH_MODE/snapshot_blob.bin"
sync_file "$OUTDIR/$ARCH_MODE/unittests"
echo ""
echo -n "sync to $ANDROID_V8/tools"
-sync_file tools/consarray.js
-sync_file tools/codemap.js
-sync_file tools/csvparser.js
-sync_file tools/profile.js
-sync_file tools/splaytree.js
-sync_file tools/profile_view.js
-sync_file tools/logreader.js
-sync_file tools/arguments.js
-sync_file tools/tickprocessor.js
+sync_file tools/arguments.mjs
+sync_file tools/codemap.mjs
+sync_file tools/consarray.mjs
+sync_file tools/csvparser.mjs
+sync_file tools/dumpcpp.mjs
+sync_file tools/logreader.mjs
+sync_file tools/profile.mjs
+sync_file tools/profile_view.mjs
+sync_file tools/splaytree.mjs
+sync_file tools/tickprocessor.mjs
echo ""
sync_dir test/intl
sync_dir test/message
diff --git a/deps/v8/tools/arguments.mjs b/deps/v8/tools/arguments.mjs
index 232ca6badb..4e607b7ee9 100644
--- a/deps/v8/tools/arguments.mjs
+++ b/deps/v8/tools/arguments.mjs
@@ -27,37 +27,37 @@ export class BaseArgumentsProcessor {
'Default log file name is "' +
this.result_.logFileName + '".\n');
print('Options:');
- for (var arg in this.argsDispatch_) {
- var synonyms = [arg];
- var dispatch = this.argsDispatch_[arg];
- for (var synArg in this.argsDispatch_) {
+ for (const arg in this.argsDispatch_) {
+ const synonyms = [arg];
+ const dispatch = this.argsDispatch_[arg];
+ for (const synArg in this.argsDispatch_) {
if (arg !== synArg && dispatch === this.argsDispatch_[synArg]) {
synonyms.push(synArg);
delete this.argsDispatch_[synArg];
}
}
- print(' ' + synonyms.join(', ').padEnd(20) + " " + dispatch[2]);
+ print(` ${synonyms.join(', ').padEnd(20)} ${dispatch[2]}`);
}
quit(2);
}
parse() {
while (this.args_.length) {
- var arg = this.args_.shift();
+ let arg = this.args_.shift();
if (arg.charAt(0) != '-') {
this.result_.logFileName = arg;
continue;
}
- var userValue = null;
- var eqPos = arg.indexOf('=');
+ let userValue = null;
+ const eqPos = arg.indexOf('=');
if (eqPos != -1) {
userValue = arg.substr(eqPos + 1);
arg = arg.substr(0, eqPos);
}
if (arg in this.argsDispatch_) {
- var dispatch = this.argsDispatch_[arg];
- var property = dispatch[0];
- var defaultValue = dispatch[1];
+ const dispatch = this.argsDispatch_[arg];
+ const property = dispatch[0];
+ const defaultValue = dispatch[1];
if (typeof defaultValue == "function") {
userValue = defaultValue(userValue);
} else if (userValue == null) {
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/DIR_METADATA b/deps/v8/tools/clusterfuzz/js_fuzzer/DIR_METADATA
new file mode 100644
index 0000000000..9fc13203d7
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Infra>Client>V8"
+} \ No newline at end of file
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/OWNERS b/deps/v8/tools/clusterfuzz/js_fuzzer/OWNERS
index 76eb9caeff..bc18f69af5 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/OWNERS
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/OWNERS
@@ -5,5 +5,3 @@ file:../../../INFRA_OWNERS
msarms@chromium.org
mslekova@chromium.org
ochang@chromium.org
-
-# COMPONENT: Infra>Client>V8
diff --git a/deps/v8/tools/codemap.mjs b/deps/v8/tools/codemap.mjs
index fd0fc808ca..245b6ba42e 100644
--- a/deps/v8/tools/codemap.mjs
+++ b/deps/v8/tools/codemap.mjs
@@ -32,252 +32,232 @@ import { SplayTree } from "./splaytree.mjs";
*
* @constructor
*/
-export function CodeMap() {
+export class CodeMap {
/**
* Dynamic code entries. Used for JIT compiled code.
*/
- this.dynamics_ = new SplayTree();
+ dynamics_ = new SplayTree();
/**
* Name generator for entries having duplicate names.
*/
- this.dynamicsNameGen_ = new CodeMap.NameGenerator();
+ dynamicsNameGen_ = new NameGenerator();
/**
* Static code entries. Used for statically compiled code.
*/
- this.statics_ = new SplayTree();
+ statics_ = new SplayTree();
/**
* Libraries entries. Used for the whole static code libraries.
*/
- this.libraries_ = new SplayTree();
+ libraries_ = new SplayTree();
/**
* Map of memory pages occupied with static code.
*/
- this.pages_ = [];
-};
+ pages_ = [];
-/**
- * The number of alignment bits in a page address.
- */
-CodeMap.PAGE_ALIGNMENT = 12;
-
-
-/**
- * Page size in bytes.
- */
-CodeMap.PAGE_SIZE =
- 1 << CodeMap.PAGE_ALIGNMENT;
-
-
-/**
- * Adds a dynamic (i.e. moveable and discardable) code entry.
- *
- * @param {number} start The starting address.
- * @param {CodeMap.CodeEntry} codeEntry Code entry object.
- */
-CodeMap.prototype.addCode = function(start, codeEntry) {
- this.deleteAllCoveredNodes_(this.dynamics_, start, start + codeEntry.size);
- this.dynamics_.insert(start, codeEntry);
-};
-
-
-/**
- * Moves a dynamic code entry. Throws an exception if there is no dynamic
- * code entry with the specified starting address.
- *
- * @param {number} from The starting address of the entry being moved.
- * @param {number} to The destination address.
- */
-CodeMap.prototype.moveCode = function(from, to) {
- var removedNode = this.dynamics_.remove(from);
- this.deleteAllCoveredNodes_(this.dynamics_, to, to + removedNode.value.size);
- this.dynamics_.insert(to, removedNode.value);
-};
-
-
-/**
- * Discards a dynamic code entry. Throws an exception if there is no dynamic
- * code entry with the specified starting address.
- *
- * @param {number} start The starting address of the entry being deleted.
- */
-CodeMap.prototype.deleteCode = function(start) {
- var removedNode = this.dynamics_.remove(start);
-};
-
-
-/**
- * Adds a library entry.
- *
- * @param {number} start The starting address.
- * @param {CodeMap.CodeEntry} codeEntry Code entry object.
- */
-CodeMap.prototype.addLibrary = function(
- start, codeEntry) {
- this.markPages_(start, start + codeEntry.size);
- this.libraries_.insert(start, codeEntry);
-};
+ /**
+ * The number of alignment bits in a page address.
+ */
+ static PAGE_ALIGNMENT = 12;
-/**
- * Adds a static code entry.
- *
- * @param {number} start The starting address.
- * @param {CodeMap.CodeEntry} codeEntry Code entry object.
- */
-CodeMap.prototype.addStaticCode = function(
- start, codeEntry) {
- this.statics_.insert(start, codeEntry);
-};
+ /**
+ * Page size in bytes.
+ */
+ static PAGE_SIZE = 1 << CodeMap.PAGE_ALIGNMENT;
-/**
- * @private
- */
-CodeMap.prototype.markPages_ = function(start, end) {
- for (var addr = start; addr <= end;
- addr += CodeMap.PAGE_SIZE) {
- this.pages_[(addr / CodeMap.PAGE_SIZE)|0] = 1;
+ /**
+ * Adds a dynamic (i.e. moveable and discardable) code entry.
+ *
+ * @param {number} start The starting address.
+ * @param {CodeMap.CodeEntry} codeEntry Code entry object.
+ */
+ addCode(start, codeEntry) {
+ this.deleteAllCoveredNodes_(this.dynamics_, start, start + codeEntry.size);
+ this.dynamics_.insert(start, codeEntry);
}
-};
-
-/**
- * @private
- */
-CodeMap.prototype.deleteAllCoveredNodes_ = function(tree, start, end) {
- var to_delete = [];
- var addr = end - 1;
- while (addr >= start) {
- var node = tree.findGreatestLessThan(addr);
- if (!node) break;
- var start2 = node.key, end2 = start2 + node.value.size;
- if (start2 < end && start < end2) to_delete.push(start2);
- addr = start2 - 1;
+ /**
+ * Moves a dynamic code entry. Throws an exception if there is no dynamic
+ * code entry with the specified starting address.
+ *
+ * @param {number} from The starting address of the entry being moved.
+ * @param {number} to The destination address.
+ */
+ moveCode(from, to) {
+ const removedNode = this.dynamics_.remove(from);
+ this.deleteAllCoveredNodes_(this.dynamics_, to, to + removedNode.value.size);
+ this.dynamics_.insert(to, removedNode.value);
}
- for (var i = 0, l = to_delete.length; i < l; ++i) tree.remove(to_delete[i]);
-};
-
-
-/**
- * @private
- */
-CodeMap.prototype.isAddressBelongsTo_ = function(addr, node) {
- return addr >= node.key && addr < (node.key + node.value.size);
-};
+ /**
+ * Discards a dynamic code entry. Throws an exception if there is no dynamic
+ * code entry with the specified starting address.
+ *
+ * @param {number} start The starting address of the entry being deleted.
+ */
+ deleteCode(start) {
+ const removedNode = this.dynamics_.remove(start);
+ }
-/**
- * @private
- */
-CodeMap.prototype.findInTree_ = function(tree, addr) {
- var node = tree.findGreatestLessThan(addr);
- return node && this.isAddressBelongsTo_(addr, node) ? node : null;
-};
+ /**
+ * Adds a library entry.
+ *
+ * @param {number} start The starting address.
+ * @param {CodeMap.CodeEntry} codeEntry Code entry object.
+ */
+ addLibrary(start, codeEntry) {
+ this.markPages_(start, start + codeEntry.size);
+ this.libraries_.insert(start, codeEntry);
+ }
+ /**
+ * Adds a static code entry.
+ *
+ * @param {number} start The starting address.
+ * @param {CodeMap.CodeEntry} codeEntry Code entry object.
+ */
+ addStaticCode(start, codeEntry) {
+ this.statics_.insert(start, codeEntry);
+ }
-/**
- * Finds a code entry that contains the specified address. Both static and
- * dynamic code entries are considered. Returns the code entry and the offset
- * within the entry.
- *
- * @param {number} addr Address.
- */
-CodeMap.prototype.findAddress = function(addr) {
- var pageAddr = (addr / CodeMap.PAGE_SIZE)|0;
- if (pageAddr in this.pages_) {
- // Static code entries can contain "holes" of unnamed code.
- // In this case, the whole library is assigned to this address.
- var result = this.findInTree_(this.statics_, addr);
- if (!result) {
- result = this.findInTree_(this.libraries_, addr);
- if (!result) return null;
+ /**
+ * @private
+ */
+ markPages_(start, end) {
+ for (let addr = start; addr <= end;
+ addr += CodeMap.PAGE_SIZE) {
+ this.pages_[(addr / CodeMap.PAGE_SIZE)|0] = 1;
}
- return { entry : result.value, offset : addr - result.key };
}
- var min = this.dynamics_.findMin();
- var max = this.dynamics_.findMax();
- if (max != null && addr < (max.key + max.value.size) && addr >= min.key) {
- var dynaEntry = this.findInTree_(this.dynamics_, addr);
- if (dynaEntry == null) return null;
- // Dedupe entry name.
- var entry = dynaEntry.value;
- if (!entry.nameUpdated_) {
- entry.name = this.dynamicsNameGen_.getName(entry.name);
- entry.nameUpdated_ = true;
+
+ /**
+ * @private
+ */
+ deleteAllCoveredNodes_(tree, start, end) {
+ const to_delete = [];
+ let addr = end - 1;
+ while (addr >= start) {
+ const node = tree.findGreatestLessThan(addr);
+ if (!node) break;
+ const start2 = node.key, end2 = start2 + node.value.size;
+ if (start2 < end && start < end2) to_delete.push(start2);
+ addr = start2 - 1;
}
- return { entry : entry, offset : addr - dynaEntry.key };
+ for (let i = 0, l = to_delete.length; i < l; ++i) tree.remove(to_delete[i]);
}
- return null;
-};
-
-
-/**
- * Finds a code entry that contains the specified address. Both static and
- * dynamic code entries are considered.
- *
- * @param {number} addr Address.
- */
-CodeMap.prototype.findEntry = function(addr) {
- var result = this.findAddress(addr);
- return result ? result.entry : null;
-};
-
-
-/**
- * Returns a dynamic code entry using its starting address.
- *
- * @param {number} addr Address.
- */
-CodeMap.prototype.findDynamicEntryByStartAddress =
- function(addr) {
- var node = this.dynamics_.find(addr);
- return node ? node.value : null;
-};
+ /**
+ * @private
+ */
+ isAddressBelongsTo_(addr, node) {
+ return addr >= node.key && addr < (node.key + node.value.size);
+ }
-/**
- * Returns an array of all dynamic code entries.
- */
-CodeMap.prototype.getAllDynamicEntries = function() {
- return this.dynamics_.exportValues();
-};
+ /**
+ * @private
+ */
+ findInTree_(tree, addr) {
+ const node = tree.findGreatestLessThan(addr);
+ return node && this.isAddressBelongsTo_(addr, node) ? node : null;
+ }
+ /**
+ * Finds a code entry that contains the specified address. Both static and
+ * dynamic code entries are considered. Returns the code entry and the offset
+ * within the entry.
+ *
+ * @param {number} addr Address.
+ */
+ findAddress(addr) {
+ const pageAddr = (addr / CodeMap.PAGE_SIZE)|0;
+ if (pageAddr in this.pages_) {
+ // Static code entries can contain "holes" of unnamed code.
+ // In this case, the whole library is assigned to this address.
+ let result = this.findInTree_(this.statics_, addr);
+ if (!result) {
+ result = this.findInTree_(this.libraries_, addr);
+ if (!result) return null;
+ }
+ return { entry : result.value, offset : addr - result.key };
+ }
+ const min = this.dynamics_.findMin();
+ const max = this.dynamics_.findMax();
+ if (max != null && addr < (max.key + max.value.size) && addr >= min.key) {
+ const dynaEntry = this.findInTree_(this.dynamics_, addr);
+ if (dynaEntry == null) return null;
+ // Dedupe entry name.
+ const entry = dynaEntry.value;
+ if (!entry.nameUpdated_) {
+ entry.name = this.dynamicsNameGen_.getName(entry.name);
+ entry.nameUpdated_ = true;
+ }
+ return { entry, offset : addr - dynaEntry.key };
+ }
+ return null;
+ }
-/**
- * Returns an array of pairs of all dynamic code entries and their addresses.
- */
-CodeMap.prototype.getAllDynamicEntriesWithAddresses = function() {
- return this.dynamics_.exportKeysAndValues();
-};
+ /**
+ * Finds a code entry that contains the specified address. Both static and
+ * dynamic code entries are considered.
+ *
+ * @param {number} addr Address.
+ */
+ findEntry(addr) {
+ const result = this.findAddress(addr);
+ return result ? result.entry : null;
+ }
+ /**
+ * Returns a dynamic code entry using its starting address.
+ *
+ * @param {number} addr Address.
+ */
+ findDynamicEntryByStartAddress(addr) {
+ const node = this.dynamics_.find(addr);
+ return node ? node.value : null;
+ }
-/**
- * Returns an array of all static code entries.
- */
-CodeMap.prototype.getAllStaticEntries = function() {
- return this.statics_.exportValues();
-};
+ /**
+ * Returns an array of all dynamic code entries.
+ */
+ getAllDynamicEntries() {
+ return this.dynamics_.exportValues();
+ }
+ /**
+ * Returns an array of pairs of all dynamic code entries and their addresses.
+ */
+ getAllDynamicEntriesWithAddresses() {
+ return this.dynamics_.exportKeysAndValues();
+ }
-/**
- * Returns an array of pairs of all static code entries and their addresses.
- */
-CodeMap.prototype.getAllStaticEntriesWithAddresses = function() {
- return this.statics_.exportKeysAndValues();
-};
+ /**
+ * Returns an array of all static code entries.
+ */
+ getAllStaticEntries() {
+ return this.statics_.exportValues();
+ }
+ /**
+ * Returns an array of pairs of all static code entries and their addresses.
+ */
+ getAllStaticEntriesWithAddresses() {
+ return this.statics_.exportKeysAndValues();
+ }
-/**
- * Returns an array of all libraries entries.
- */
-CodeMap.prototype.getAllLibrariesEntries = function() {
- return this.libraries_.exportValues();
-};
+ /**
+ * Returns an array of all libraries entries.
+ */
+ getAllLibrariesEntries() {
+ return this.libraries_.exportValues();
+ }
+}
/**
@@ -288,34 +268,31 @@ CodeMap.prototype.getAllLibrariesEntries = function() {
* @param {string} opt_type Code entry type, e.g. SHARED_LIB, CPP.
* @constructor
*/
-CodeMap.CodeEntry = function(size, opt_name, opt_type) {
- this.size = size;
- this.name = opt_name || '';
- this.type = opt_type || '';
- this.nameUpdated_ = false;
-};
-
-
-CodeMap.CodeEntry.prototype.getName = function() {
- return this.name;
-};
-
-
-CodeMap.CodeEntry.prototype.toString = function() {
- return this.name + ': ' + this.size.toString(16);
-};
-
-
-CodeMap.NameGenerator = function() {
- this.knownNames_ = {};
-};
+export class CodeEntry {
+ constructor(size, opt_name, opt_type) {
+ this.size = size;
+ this.name = opt_name || '';
+ this.type = opt_type || '';
+ this.nameUpdated_ = false;
+ }
+ getName() {
+ return this.name;
+ }
-CodeMap.NameGenerator.prototype.getName = function(name) {
- if (!(name in this.knownNames_)) {
- this.knownNames_[name] = 0;
- return name;
+ toString() {
+ return this.name + ': ' + this.size.toString(16);
}
- var count = ++this.knownNames_[name];
- return name + ' {' + count + '}';
-};
+}
+
+class NameGenerator {
+ knownNames_ = { __proto__:null }
+ getName(name) {
+ if (!(name in this.knownNames_)) {
+ this.knownNames_[name] = 0;
+ return name;
+ }
+ const count = ++this.knownNames_[name];
+ return name + ' {' + count + '}';
+ };
+}
diff --git a/deps/v8/tools/consarray.mjs b/deps/v8/tools/consarray.mjs
index 450e18f663..1dc2afe886 100644
--- a/deps/v8/tools/consarray.mjs
+++ b/deps/v8/tools/consarray.mjs
@@ -72,7 +72,7 @@ ConsArray.prototype.atEnd = function() {
* Returns the current item, moves to the next one.
*/
ConsArray.prototype.next = function() {
- var result = this.currCell_.data[this.currCellPos_++];
+ const result = this.currCell_.data[this.currCellPos_++];
if (this.currCellPos_ >= this.currCell_.data.length) {
this.currCell_ = this.currCell_.next;
this.currCellPos_ = 0;
diff --git a/deps/v8/tools/csvparser.mjs b/deps/v8/tools/csvparser.mjs
index 03356d8209..e027d47384 100644
--- a/deps/v8/tools/csvparser.mjs
+++ b/deps/v8/tools/csvparser.mjs
@@ -84,9 +84,9 @@ export class CsvParser {
* @param {string} line Input line.
*/
parseLine(line) {
- var pos = 0;
- var endPos = line.length;
- var fields = [];
+ let pos = 0;
+ const endPos = line.length;
+ const fields = [];
if (endPos == 0) return fields;
let nextPos = 0;
while(nextPos !== -1) {
diff --git a/deps/v8/tools/debug_helper/get-object-properties.cc b/deps/v8/tools/debug_helper/get-object-properties.cc
index c920857976..181c58dbf0 100644
--- a/deps/v8/tools/debug_helper/get-object-properties.cc
+++ b/deps/v8/tools/debug_helper/get-object-properties.cc
@@ -329,9 +329,10 @@ class ReadStringVisitor : public TqObjectVisitor {
ExternalPointer_t resource_data =
GetOrFinish(object->GetResourceDataValue(accessor_));
#ifdef V8_COMPRESS_POINTERS
- uintptr_t data_address = static_cast<uintptr_t>(DecodeExternalPointer(
- Isolate::FromRoot(GetIsolateRoot(heap_addresses_.any_heap_pointer)),
- resource_data));
+ uintptr_t data_address = static_cast<uintptr_t>(
+ DecodeExternalPointer(GetIsolateForPtrComprFromOnHeapAddress(
+ heap_addresses_.any_heap_pointer),
+ resource_data, kExternalStringResourceDataTag));
#else
uintptr_t data_address = static_cast<uintptr_t>(resource_data);
#endif // V8_COMPRESS_POINTERS
diff --git a/deps/v8/tools/dumpcpp-driver.mjs b/deps/v8/tools/dumpcpp-driver.mjs
index fafa85e67e..8f575d07cb 100644
--- a/deps/v8/tools/dumpcpp-driver.mjs
+++ b/deps/v8/tools/dumpcpp-driver.mjs
@@ -11,7 +11,7 @@ import {
// Dump C++ symbols of shared library if possible
function processArguments(args) {
- var processor = new ArgumentsProcessor(args);
+ const processor = new ArgumentsProcessor(args);
if (processor.parse()) {
return processor.result();
} else {
@@ -25,26 +25,26 @@ function initSourceMapSupport() {
// Overwrite the load function to load scripts synchronously.
SourceMap.load = function(sourceMapURL) {
- var content = readFile(sourceMapURL);
- var sourceMapObject = (JSON.parse(content));
+ const content = readFile(sourceMapURL);
+ const sourceMapObject = (JSON.parse(content));
return new SourceMap(sourceMapURL, sourceMapObject);
};
}
-var entriesProviders = {
+const entriesProviders = {
'unix': UnixCppEntriesProvider,
'windows': WindowsCppEntriesProvider,
'mac': MacCppEntriesProvider
};
-var params = processArguments(arguments);
-var sourceMap = null;
+const params = processArguments(arguments);
+let sourceMap = null;
if (params.sourceMap) {
initSourceMapSupport();
sourceMap = SourceMap.load(params.sourceMap);
}
-var cppProcessor = new CppProcessor(
+const cppProcessor = new CppProcessor(
new (entriesProviders[params.platform])(params.nm, params.targetRootFS,
params.apkEmbeddedLibrary),
params.timedRange, params.pairwiseTimedRange);
diff --git a/deps/v8/tools/dumpcpp.mjs b/deps/v8/tools/dumpcpp.mjs
index 9142cad114..be2dd996e4 100644
--- a/deps/v8/tools/dumpcpp.mjs
+++ b/deps/v8/tools/dumpcpp.mjs
@@ -3,7 +3,7 @@
// found in the LICENSE file.
import { LogReader, parseString } from "./logreader.mjs";
-import { CodeMap } from "./codemap.mjs";
+import { CodeMap, CodeEntry } from "./codemap.mjs";
export {
ArgumentsProcessor, UnixCppEntriesProvider,
WindowsCppEntriesProvider, MacCppEntriesProvider,
@@ -11,57 +11,58 @@ export {
import { inherits } from "./tickprocessor.mjs";
-export function CppProcessor(cppEntriesProvider, timedRange, pairwiseTimedRange) {
- LogReader.call(this, {
- 'shared-library': { parsers: [parseString, parseInt, parseInt, parseInt],
+export class CppProcessor extends LogReader {
+ constructor(cppEntriesProvider, timedRange, pairwiseTimedRange) {
+ super({}, timedRange, pairwiseTimedRange);
+ this.dispatchTable_ = {
+ 'shared-library': {
+ parsers: [parseString, parseInt, parseInt, parseInt],
processor: this.processSharedLibrary }
- }, timedRange, pairwiseTimedRange);
+ };
+ this.cppEntriesProvider_ = cppEntriesProvider;
+ this.codeMap_ = new CodeMap();
+ this.lastLogFileName_ = null;
+ }
- this.cppEntriesProvider_ = cppEntriesProvider;
- this.codeMap_ = new CodeMap();
- this.lastLogFileName_ = null;
-}
-inherits(CppProcessor, LogReader);
+ /**
+ * @override
+ */
+ printError(str) {
+ print(str);
+  }
-/**
- * @override
- */
-CppProcessor.prototype.printError = function(str) {
- print(str);
-};
+ processLogFile(fileName) {
+ this.lastLogFileName_ = fileName;
+ let line;
+ while (line = readline()) {
+ this.processLogLine(line);
+ }
+  }
-CppProcessor.prototype.processLogFile = function(fileName) {
- this.lastLogFileName_ = fileName;
- var line;
- while (line = readline()) {
- this.processLogLine(line);
- }
-};
+ processLogFileInTest(fileName) {
+    // Hack: hard-code the file name to avoid dealing with platform specifics.
+ this.lastLogFileName_ = 'v8.log';
+ const contents = readFile(fileName);
+ this.processLogChunk(contents);
+  }
-CppProcessor.prototype.processLogFileInTest = function(fileName) {
- // Hack file name to avoid dealing with platform specifics.
- this.lastLogFileName_ = 'v8.log';
- var contents = readFile(fileName);
- this.processLogChunk(contents);
-};
+ processSharedLibrary(name, startAddr, endAddr, aslrSlide) {
+ const self = this;
+ const libFuncs = this.cppEntriesProvider_.parseVmSymbols(
+ name, startAddr, endAddr, aslrSlide, function(fName, fStart, fEnd) {
+ const entry = new CodeEntry(fEnd - fStart, fName, 'CPP');
+ self.codeMap_.addStaticCode(fStart, entry);
+ });
+  }
-CppProcessor.prototype.processSharedLibrary = function(
- name, startAddr, endAddr, aslrSlide) {
- var self = this;
- var libFuncs = this.cppEntriesProvider_.parseVmSymbols(
- name, startAddr, endAddr, aslrSlide, function(fName, fStart, fEnd) {
- var entry = new CodeMap.CodeEntry(fEnd - fStart, fName, 'CPP');
- self.codeMap_.addStaticCode(fStart, entry);
- });
-};
-
-CppProcessor.prototype.dumpCppSymbols = function() {
- var staticEntries = this.codeMap_.getAllStaticEntriesWithAddresses();
- var total = staticEntries.length;
- for (var i = 0; i < total; ++i) {
- var entry = staticEntries[i];
- var printValues = ['cpp', '0x' + entry[0].toString(16), entry[1].size,
- '"' + entry[1].name + '"'];
- print(printValues.join(','));
+ dumpCppSymbols() {
+ const staticEntries = this.codeMap_.getAllStaticEntriesWithAddresses();
+ const total = staticEntries.length;
+ for (let i = 0; i < total; ++i) {
+ const entry = staticEntries[i];
+ const printValues = ['cpp', `0x${entry[0].toString(16)}`, entry[1].size,
+ `"${entry[1].name}"`];
+ print(printValues.join(','));
+ }
}
-};
+}
diff --git a/deps/v8/tools/gcmole/gcmole-test.cc b/deps/v8/tools/gcmole/gcmole-test.cc
index 92f7a9eda8..8512d7ab4c 100644
--- a/deps/v8/tools/gcmole/gcmole-test.cc
+++ b/deps/v8/tools/gcmole/gcmole-test.cc
@@ -216,5 +216,18 @@ void TestNestedDeadVarAnalysis(Isolate* isolate) {
raw_obj.Print();
}
+// Test that putting a guard in the middle of the function doesn't
+// mistakenly cover the whole scope of the raw variable.
+void TestGuardedDeadVarAnalysisMidFunction(Isolate* isolate) {
+ JSObject raw_obj = *isolate->factory()->NewJSObjectWithNullProto();
+
+ CauseGCRaw(raw_obj, isolate);
+
+  // Guard that keeps the rest of the function from triggering a GC.
+ DisallowHeapAllocation no_gc;
+ // Should cause warning.
+ raw_obj.Print();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha1 b/deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha1
index a8ea9424af..84b3657c6c 100644
--- a/deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha1
+++ b/deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha1
@@ -1 +1 @@
-7736018d2ca616f7c1477102cc169ee579ee6003
+7e31d257a711b1a77823633e4f19152c3e0718f4
diff --git a/deps/v8/tools/gcmole/gcmole.cc b/deps/v8/tools/gcmole/gcmole.cc
index 3c5fb5ce96..7b32f6c7fd 100644
--- a/deps/v8/tools/gcmole/gcmole.cc
+++ b/deps/v8/tools/gcmole/gcmole.cc
@@ -1067,6 +1067,8 @@ class FunctionAnalyzer {
if (callee != NULL) {
if (KnownToCauseGC(ctx_, callee)) {
out.setGC();
+ scopes_.back().SetGCCauseLocation(
+ clang::FullSourceLoc(call->getExprLoc(), sm_));
}
// Support for virtual methods that might be GC suspects.
@@ -1081,6 +1083,8 @@ class FunctionAnalyzer {
if (target != NULL) {
if (KnownToCauseGC(ctx_, target)) {
out.setGC();
+ scopes_.back().SetGCCauseLocation(
+ clang::FullSourceLoc(call->getExprLoc(), sm_));
}
} else {
// According to the documentation, {getDevirtualizedMethod} might
@@ -1089,6 +1093,8 @@ class FunctionAnalyzer {
// to increase coverage.
if (SuspectedToCauseGC(ctx_, method)) {
out.setGC();
+ scopes_.back().SetGCCauseLocation(
+ clang::FullSourceLoc(call->getExprLoc(), sm_));
}
}
}
@@ -1244,7 +1250,7 @@ class FunctionAnalyzer {
}
DECL_VISIT_STMT(CompoundStmt) {
- scopes_.push_back(GCGuard(stmt, false));
+ scopes_.push_back(GCScope());
Environment out = env;
clang::CompoundStmt::body_iterator end = stmt->body_end();
for (clang::CompoundStmt::body_iterator s = stmt->body_begin();
@@ -1422,7 +1428,8 @@ class FunctionAnalyzer {
out = out.Define(var->getNameAsString());
}
if (IsGCGuard(var->getType())) {
- scopes_.back().has_guard = true;
+ scopes_.back().guard_location =
+ clang::FullSourceLoc(decl->getLocation(), sm_);
}
return out;
@@ -1477,7 +1484,7 @@ class FunctionAnalyzer {
bool HasActiveGuard() {
for (auto s : scopes_) {
- if (s.has_guard) return true;
+ if (s.IsBeforeGCCause()) return true;
}
return false;
}
@@ -1503,14 +1510,26 @@ class FunctionAnalyzer {
Block* block_;
- struct GCGuard {
- clang::CompoundStmt* stmt = NULL;
- bool has_guard = false;
+ struct GCScope {
+ clang::FullSourceLoc guard_location;
+ clang::FullSourceLoc gccause_location;
- GCGuard(clang::CompoundStmt* stmt_, bool has_guard_)
- : stmt(stmt_), has_guard(has_guard_) {}
+    // We're only interested in guards that are declared before any further
+    // GC-causing calls (see TestGuardedDeadVarAnalysisMidFunction for an example).
+ bool IsBeforeGCCause() {
+ if (!guard_location.isValid()) return false;
+ if (!gccause_location.isValid()) return true;
+ return guard_location.isBeforeInTranslationUnitThan(gccause_location);
+ }
+
+ // After we set the first GC cause in the scope, we don't need the later
+ // ones.
+ void SetGCCauseLocation(clang::FullSourceLoc gccause_location_) {
+ if (gccause_location.isValid()) return;
+ gccause_location = gccause_location_;
+ }
};
- std::vector<GCGuard> scopes_;
+ std::vector<GCScope> scopes_;
};
class ProblemsFinder : public clang::ASTConsumer,
diff --git a/deps/v8/tools/gcmole/test-expectations.txt b/deps/v8/tools/gcmole/test-expectations.txt
index 780cea9181..f6c04e4a6c 100644
--- a/deps/v8/tools/gcmole/test-expectations.txt
+++ b/deps/v8/tools/gcmole/test-expectations.txt
@@ -32,4 +32,7 @@ tools/gcmole/gcmole-test.cc:193:3: warning: Possibly dead variable.
tools/gcmole/gcmole-test.cc:216:3: warning: Possibly dead variable.
raw_obj.Print();
^
-11 warnings generated.
+tools/gcmole/gcmole-test.cc:229:3: warning: Possibly dead variable.
+ raw_obj.Print();
+ ^
+12 warnings generated.
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 07883bb71b..4e9facd781 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -101,6 +101,16 @@ consts_misc = [
{ 'name': 'OddballOther', 'value': 'Oddball::kOther' },
{ 'name': 'OddballException', 'value': 'Oddball::kException' },
+ { 'name': 'ContextRegister', 'value': 'kContextRegister.code()' },
+ { 'name': 'ReturnRegister0', 'value': 'kReturnRegister0.code()' },
+ { 'name': 'JSFunctionRegister', 'value': 'kJSFunctionRegister.code()' },
+ { 'name': 'InterpreterBytecodeOffsetRegister',
+ 'value': 'kInterpreterBytecodeOffsetRegister.code()' },
+ { 'name': 'InterpreterBytecodeArrayRegister',
+ 'value': 'kInterpreterBytecodeArrayRegister.code()' },
+ { 'name': 'RuntimeCallFunctionRegister',
+ 'value': 'kRuntimeCallFunctionRegister.code()' },
+
{ 'name': 'prop_kind_Data',
'value': 'kData' },
{ 'name': 'prop_kind_Accessor',
@@ -263,8 +273,6 @@ extras_accessors = [
'UncompiledData, inferred_name, String, kInferredNameOffset',
'UncompiledData, start_position, int32_t, kStartPositionOffset',
'UncompiledData, end_position, int32_t, kEndPositionOffset',
- 'Script, name, Object, kNameOffset',
- 'Script, line_ends, Object, kLineEndsOffset',
'SharedFunctionInfo, raw_function_token_offset, int16_t, kFunctionTokenOffsetOffset',
'SharedFunctionInfo, internal_formal_parameter_count, uint16_t, kFormalParameterCountOffset',
'SharedFunctionInfo, flags, int, kFlagsOffset',
@@ -279,7 +287,6 @@ extras_accessors = [
'SlicedString, offset, SMI, kOffsetOffset',
'ThinString, actual, String, kActualOffset',
'Symbol, name, Object, kDescriptionOffset',
- 'FixedArrayBase, length, SMI, kLengthOffset',
];
#
@@ -310,6 +317,7 @@ header = '''
*/
#include "src/init/v8.h"
+#include "src/codegen/register-arch.h"
#include "src/execution/frames.h"
#include "src/execution/frames-inl.h" /* for architecture-specific frame constants */
#include "src/objects/contexts.h"
@@ -325,7 +333,7 @@ extern "C" {
/* stack frame constants */
#define FRAME_CONST(value, klass) \
- int v8dbg_frametype_##klass = StackFrame::value;
+ V8_EXPORT int v8dbg_frametype_##klass = StackFrame::value;
STACK_FRAME_TYPE_LIST(FRAME_CONST)
@@ -622,7 +630,7 @@ def load_fields_from_file(filename):
#
prefixes = [ 'ACCESSORS', 'ACCESSORS2', 'ACCESSORS_GCSAFE',
'SMI_ACCESSORS', 'ACCESSORS_TO_SMI',
- 'SYNCHRONIZED_ACCESSORS', 'WEAK_ACCESSORS' ];
+ 'RELEASE_ACQUIRE_ACCESSORS', 'WEAK_ACCESSORS' ];
prefixes += ([ prefix + "_CHECKED" for prefix in prefixes ] +
[ prefix + "_CHECKED2" for prefix in prefixes ])
current = '';
@@ -670,13 +678,18 @@ def load_fields_from_file(filename):
# Emit a block of constants.
#
def emit_set(out, consts):
+ lines = set() # To remove duplicates.
+
# Fix up overzealous parses. This could be done inside the
# parsers but as there are several, it's easiest to do it here.
ws = re.compile('\s+')
for const in consts:
name = ws.sub('', const['name'])
value = ws.sub('', str(const['value'])) # Can be a number.
- out.write('int v8dbg_%s = %s;\n' % (name, value))
+ lines.add('V8_EXPORT int v8dbg_%s = %s;\n' % (name, value))
+
+ for line in lines:
+ out.write(line);
out.write('\n');
#
diff --git a/deps/v8/tools/ic-explorer.html b/deps/v8/tools/ic-explorer.html
deleted file mode 100644
index 61d94d65d1..0000000000
--- a/deps/v8/tools/ic-explorer.html
+++ /dev/null
@@ -1,389 +0,0 @@
-<!DOCTYPE html>
-<html>
-<!--
-Copyright 2016 the V8 project authors. All rights reserved. Use of this source
-code is governed by a BSD-style license that can be found in the LICENSE file.
--->
-
-<head>
- <meta charset="utf-8">
- <title>V8 IC explorer</title>
- <style>
- html {
- font-family: monospace;
- }
-
- .entry-details {}
-
- .entry-details TD {}
-
- .details {
- width: 0.1em;
- }
-
- .details span {
- padding: 0 0.4em 0 0.4em;
- background-color: black;
- color: white;
- border-radius: 25px;
- text-align: center;
- cursor: -webkit-zoom-in;
- }
-
- .count {
- text-align: right;
- width: 5em;
- }
-
- .percentage {
- text-align: right;
- width: 5em;
- }
-
- .key {
- padding-left: 1em;
- }
-
- .drilldown-group-title {
- font-weight: bold;
- padding: 0.5em 0 0.2em 0;
- }
- </style>
- <script type="module" src="./ic-processor.js"></script>
-
- <script>
- "use strict"
- let entries = [];
-
- let properties = ['type', 'category', 'functionName', 'filePosition',
- 'state', 'key', 'map', 'reason', 'file',
- ];
-
- // For compatibility with console scripts:
- print = console.log;
-
- class CustomIcProcessor extends IcProcessor {
- constructor() {
- super();
- this.entries = [];
- }
-
- functionName(pc) {
- let entry = this.profile_.findEntry(pc);
- return this.formatName(entry);
- }
-
- processPropertyIC(
- type, pc, time, line, column, old_state, new_state, map, key, modifier,
- slow_reason) {
- let fnName = this.functionName(pc);
- this.entries.push(new Entry(
- type, fnName, time, line, column, key, old_state, new_state, map,
- slow_reason));
- }
- };
-
-
- class Entry {
- constructor(
- type, fn_file, time, line, column, key, oldState, newState, map, reason,
- additional) {
- this.time = time;
- this.type = type;
- this.category = 'other';
- if (this.type.indexOf('Store') !== -1) {
- this.category = 'Store';
- } else if (this.type.indexOf('Load') !== -1) {
- this.category = 'Load';
- }
- let parts = fn_file.split(' ');
- this.functionName = parts[0];
- this.file = parts[1];
- let position = line + ':' + column;
- this.filePosition = this.file + ':' + position;
- this.oldState = oldState;
- this.newState = newState;
- this.state = this.oldState + ' → ' + this.newState;
- this.key = key;
- this.map = map.toString(16);
- this.reason = reason;
- this.additional = additional;
- }
-
- parseMapProperties(parts, offset) {
- let next = parts[++offset];
- if (!next.startsWith('dict')) return offset;
- this.propertiesMode = next.substr(5) == '0' ? 'fast' : 'slow';
- this.numberOfOwnProperties = parts[++offset].substr(4);
- next = parts[++offset];
- this.instanceType = next.substr(5, next.length - 6);
- return offset;
- }
-
- parsePositionAndFile(parts, start) {
- // find the position of 'at' in the parts array.
- let offset = start;
- for (let i = start + 1; i < parts.length; i++) {
- offset++;
- if (parts[i] == 'at') break;
- }
- if (parts[offset] !== 'at') return -1;
- this.position = parts.slice(start, offset).join(' ');
- offset += 1;
- this.isNative = parts[offset] == 'native'
- offset += this.isNative ? 1 : 0;
- this.file = parts[offset];
- return offset;
- }
-}
-
- function loadFile() {
- let files = document.getElementById("uploadInput").files;
-
- let file = files[0];
- let reader = new FileReader();
-
- reader.onload = function(evt) {
- let icProcessor = new CustomIcProcessor();
- icProcessor.processString(this.result);
- entries = icProcessor.entries;
-
- document.getElementById("count").innerHTML = entries.length;
- updateTable();
- }
- reader.readAsText(file);
- initGroupKeySelect();
- }
-
-
- class Group {
- constructor(property, key, entry) {
- this.property = property;
- this.key = key;
- this.count = 1;
- this.entries = [entry];
- this.percentage = undefined;
- this.groups = undefined;
- }
-
- add(entry) {
- this.count++;
- this.entries.push(entry)
- }
-
- createSubGroups() {
- this.groups = {};
- for (let i = 0; i < properties.length; i++) {
- let subProperty = properties[i];
- if (this.property == subProperty) continue;
- this.groups[subProperty] = groupBy(this.entries, subProperty);
- }
- }
- }
-
- function groupBy(entries, property) {
- let accumulator = Object.create(null);
- let length = entries.length;
- for (let i = 0; i < length; i++) {
- let entry = entries[i];
- let key = entry[property];
- if (accumulator[key] == undefined) {
- accumulator[key] = new Group(property, key, entry)
- } else {
- let group = accumulator[key];
- if (group.entries == undefined) console.log([group, entry]);
- group.add(entry)
- }
- }
- let result = []
- for (let key in accumulator) {
- let group = accumulator[key];
- group.percentage = Math.round(group.count / length * 100 * 100) / 100;
- result.push(group);
- }
- result.sort((a, b) => {
- return b.count - a.count
- });
- return result;
- }
-
-
-
- function escapeHtml(unsafe) {
- if (!unsafe) return "";
- return unsafe.toString()
- .replace(/&/g, "&amp;")
- .replace(/</g, "&lt;")
- .replace(/>/g, "&gt;")
- .replace(/"/g, "&quot;")
- .replace(/'/g, "&#039;");
- }
-
- function processValue(unsafe) {
- if (!unsafe) return "";
- if (!unsafe.startsWith("http")) return escapeHtml(unsafe);
- let a = document.createElement("a");
- a.href = unsafe;
- a.textContent = unsafe;
- return a;
- }
-
- function updateTable() {
- let select = document.getElementById("group-key");
- let key = select.options[select.selectedIndex].text;
- let tableBody = document.getElementById("table-body");
- removeAllChildren(tableBody);
- let groups = groupBy(entries, key, true);
- display(groups, tableBody);
- }
-
- function selecedOption(node) {
- return node.options[node.selectedIndex]
- }
-
- function removeAllChildren(node) {
- while (node.firstChild) {
- node.removeChild(node.firstChild);
- }
- }
-
- function display(entries, parent) {
- let fragment = document.createDocumentFragment();
-
- function td(tr, content, className) {
- let node = document.createElement("td");
- if (typeof content == "object") {
- node.appendChild(content);
- } else {
- node.innerHTML = content;
- }
- node.className = className
- tr.appendChild(node);
- return node
- }
-
- let max = Math.min(1000, entries.length)
- for (let i = 0; i < max; i++) {
- let entry = entries[i];
- let tr = document.createElement("tr");
- tr.entry = entry;
- td(tr, '<span onclick="toggleDetails(this)">&#8505;</a>', 'details');
- td(tr, entry.percentage + "%", 'percentage');
- td(tr, entry.count, 'count');
- td(tr, processValue(entry.key), 'key');
- fragment.appendChild(tr);
- }
- let omitted = entries.length - max;
- if (omitted > 0) {
- let tr = document.createElement("tr");
- let tdNode = td(tr, 'Omitted ' + omitted + " entries.");
- tdNode.colSpan = 4;
- fragment.appendChild(tr);
- }
- parent.appendChild(fragment);
- }
-
- function displayDrilldown(entry, previousSibling) {
- let tr = document.createElement('tr');
- tr.className = "entry-details";
- tr.style.display = "none";
- // indent by one td.
- tr.appendChild(document.createElement("td"));
- let td = document.createElement("td");
- td.colSpan = 3;
- for (let key in entry.groups) {
- td.appendChild(displayDrilldownGroup(entry, key));
- }
- tr.appendChild(td);
- // Append the new TR after previousSibling.
- previousSibling.parentNode.insertBefore(tr, previousSibling.nextSibling)
- }
-
- function displayDrilldownGroup(entry, key) {
- let max = 20;
- let group = entry.groups[key];
- let div = document.createElement("div")
- div.className = 'drilldown-group-title'
- div.textContent = key + ' [top ' + max + ' out of ' + group.length + ']';
- let table = document.createElement("table");
- display(group.slice(0, max), table, false)
- div.appendChild(table);
- return div;
- }
-
- function toggleDetails(node) {
- let tr = node.parentNode.parentNode;
- let entry = tr.entry;
-
-    // Create subgroups in-place if they don't exist yet.
- if (entry.groups === undefined) {
- entry.createSubGroups();
- displayDrilldown(entry, tr);
- }
- let details = tr.nextSibling;
- let display = details.style.display;
- if (display != "none") {
- display = "none";
- } else {
- display = "table-row"
- };
- details.style.display = display;
- }
-
- function initGroupKeySelect() {
- let select = document.getElementById("group-key");
- select.options.length = 0;
- for (let i in properties) {
- let option = document.createElement("option");
- option.text = properties[i];
- select.add(option);
- }
- }
-
- function handleOnLoad() {
- document.querySelector("#uploadInput").focus();
- }
- </script>
-</head>
-
-<body onload="handleOnLoad()">
- <h1>
- <span style="color: #00FF00">I</span>
- <span style="color: #FF00FF">C</span>
- <span style="color: #00FFFF">E</span>
- </h1> Your IC-Explorer.
-
- <div id="legend" style="padding-right: 200px">
- <div style="float:right; border-style: solid; border-width: 1px; padding:20px">
- 0 uninitialized<br>
- X no feedback<br>
- 1 monomorphic<br>
- ^ recompute handler<br>
- P polymorphic<br>
- N megamorphic<br>
- G generic
- </div>
- </div>
-
- <h2>Usage</h2> Run your script with <code>--trace_ic</code> and upload <code>v8.log</code> on this page:<br/>
- <code>/path/to/d8 --trace_ic your_script.js</code>
- <h2>Data</h2>
- <form name="fileForm">
- <p>
- <input id="uploadInput" type="file" name="files" onchange="loadFile();"> trace entries: <span id="count">0</span>
- </p>
- </form>
- <h2>Result</h2>
- <p>
- Group-Key:
- <select id="group-key" onchange="updateTable()"></select>
- </p>
- <p>
- <table id="table" width="100%">
- <tbody id="table-body">
- </tbody>
- </table>
- </p>
-</body>
-
-</html>
diff --git a/deps/v8/tools/ic-processor-driver.mjs b/deps/v8/tools/ic-processor-driver.mjs
index 779837aa3f..ef6d83e1e4 100644
--- a/deps/v8/tools/ic-processor-driver.mjs
+++ b/deps/v8/tools/ic-processor-driver.mjs
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import { IcProcessor, ArgumentsProcessor, readFile } from "./ic-processor.mjs";
+import { Processor } from "./system-analyzer/processor.mjs";
import { WebInspector } from "./sourcemap.mjs";
+import { BaseArgumentsProcessor } from "./arguments.mjs";
function processArguments(args) {
- var processor = new ArgumentsProcessor(args);
+ const processor = new ArgumentsProcessor(args);
if (processor.parse()) {
return processor.result();
} else {
@@ -14,23 +15,80 @@ function processArguments(args) {
}
}
+/**
+ * A thin wrapper around shell's 'read' function showing a file name on error.
+ */
+export function readFile(fileName) {
+ try {
+ return read(fileName);
+ } catch (e) {
+ print(fileName + ': ' + (e.message || e));
+ throw e;
+ }
+}
+
function initSourceMapSupport() {
// Pull dev tools source maps into our name space.
SourceMap = WebInspector.SourceMap;
// Overwrite the load function to load scripts synchronously.
SourceMap.load = function(sourceMapURL) {
- var content = readFile(sourceMapURL);
- var sourceMapObject = (JSON.parse(content));
+ const content = readFile(sourceMapURL);
+ const sourceMapObject = (JSON.parse(content));
return new SourceMap(sourceMapURL, sourceMapObject);
};
}
-var params = processArguments(arguments);
-var sourceMap = null;
+class ArgumentsProcessor extends BaseArgumentsProcessor {
+ getArgsDispatch() {
+ return {
+ '--range': ['range', 'auto,auto',
+ 'Specify the range limit as [start],[end]'],
+ '--source-map': ['sourceMap', null,
+ 'Specify the source map that should be used for output']
+ };
+ }
+ getDefaultResults() {
+ return {
+ logFileName: 'v8.log',
+ range: 'auto,auto',
+ };
+ }
+}
+
+const params = processArguments(arguments);
+let sourceMap = null;
if (params.sourceMap) {
initSourceMapSupport();
sourceMap = SourceMap.load(params.sourceMap);
}
-var icProcessor = new IcProcessor();
-icProcessor.processLogFile(params.logFileName);
+const processor = new Processor();
+processor.processLogFile(params.logFileName);
+
+const typeAccumulator = new Map();
+
+const accumulator = {
+ __proto__: null,
+ LoadGlobalIC: 0,
+ StoreGlobalIC: 0,
+ LoadIC: 0,
+ StoreIC: 0,
+ KeyedLoadIC: 0,
+ KeyedStoreIC: 0,
+ StoreInArrayLiteralIC: 0,
+}
+for (const ic of processor.icTimeline.all) {
+ print(
+ ic.type + ' (' + ic.oldState + '->' + ic.newState + ic.modifier + ') at ' +
+ ic.filePosition + ' ' + ic.key +
+ ' (map 0x' + ic.map.toString(16) + ')' +
+ (ic.reason ? ` ${ic.reason}` : '') + ' time: ' + ic.time);
+ accumulator[ic.type]++;
+}
+
+print("========================================");
+for (const key of Object.keys(accumulator)) {
+ print(key + ": " + accumulator[key]);
+}
+
+
diff --git a/deps/v8/tools/ic-processor.mjs b/deps/v8/tools/ic-processor.mjs
deleted file mode 100644
index 7f6fb03a61..0000000000
--- a/deps/v8/tools/ic-processor.mjs
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-import { LogReader, parseString, parseVarArgs } from "./logreader.mjs";
-import { BaseArgumentsProcessor } from "./arguments.mjs";
-import { Profile } from "./profile.mjs";
-
-function inherits(childCtor, parentCtor) {
- childCtor.prototype.__proto__ = parentCtor.prototype;
-};
-
-/**
- * A thin wrapper around shell's 'read' function showing a file name on error.
- */
-export function readFile(fileName) {
- try {
- return read(fileName);
- } catch (e) {
- print(fileName + ': ' + (e.message || e));
- throw e;
- }
-}
-
-/**
- * Parser for dynamic code optimization state.
- */
-function parseState(s) {
- switch (s) {
- case "": return Profile.CodeState.COMPILED;
- case "~": return Profile.CodeState.OPTIMIZABLE;
- case "*": return Profile.CodeState.OPTIMIZED;
- }
- throw new Error("unknown code state: " + s);
-}
-
-
-export function IcProcessor() {
- var propertyICParser = [
- parseInt, parseInt, parseInt, parseInt, parseString, parseString,
- parseInt, parseString, parseString, parseString];
- LogReader.call(this, {
- 'code-creation': {
- parsers: [parseString, parseInt, parseInt, parseInt, parseInt,
- parseString, parseVarArgs],
- processor: this.processCodeCreation },
- 'code-move': { parsers: [parseInt, parseInt],
- processor: this.processCodeMove },
- 'code-delete': { parsers: [parseInt],
- processor: this.processCodeDelete },
- 'sfi-move': { parsers: [parseInt, parseInt],
- processor: this.processFunctionMove },
- 'LoadGlobalIC': {
- parsers : propertyICParser,
- processor: this.processPropertyIC.bind(this, "LoadGlobalIC") },
- 'StoreGlobalIC': {
- parsers : propertyICParser,
- processor: this.processPropertyIC.bind(this, "StoreGlobalIC") },
- 'LoadIC': {
- parsers : propertyICParser,
- processor: this.processPropertyIC.bind(this, "LoadIC") },
- 'StoreIC': {
- parsers : propertyICParser,
- processor: this.processPropertyIC.bind(this, "StoreIC") },
- 'KeyedLoadIC': {
- parsers : propertyICParser,
- processor: this.processPropertyIC.bind(this, "KeyedLoadIC") },
- 'KeyedStoreIC': {
- parsers : propertyICParser,
- processor: this.processPropertyIC.bind(this, "KeyedStoreIC") },
- 'StoreInArrayLiteralIC': {
- parsers : propertyICParser,
- processor: this.processPropertyIC.bind(this, "StoreInArrayLiteralIC") },
- });
- this.profile_ = new Profile();
-
- this.LoadGlobalIC = 0;
- this.StoreGlobalIC = 0;
- this.LoadIC = 0;
- this.StoreIC = 0;
- this.KeyedLoadIC = 0;
- this.KeyedStoreIC = 0;
- this.StoreInArrayLiteralIC = 0;
-}
-inherits(IcProcessor, LogReader);
-
-/**
- * @override
- */
-IcProcessor.prototype.printError = function(str) {
- print(str);
-};
-
-IcProcessor.prototype.processString = function(string) {
- var end = string.length;
- var current = 0;
- var next = 0;
- var line;
- var i = 0;
- var entry;
- while (current < end) {
- next = string.indexOf("\n", current);
- if (next === -1) break;
- i++;
- line = string.substring(current, next);
- current = next + 1;
- this.processLogLine(line);
- }
-}
-
-IcProcessor.prototype.processLogFile = function(fileName) {
- this.collectEntries = true
- this.lastLogFileName_ = fileName;
- var line;
- while (line = readline()) {
- this.processLogLine(line);
- }
- print();
- print("=====================");
- print("LoadGlobal: " + this.LoadGlobalIC);
- print("StoreGlobal: " + this.StoreGlobalIC);
- print("Load: " + this.LoadIC);
- print("Store: " + this.StoreIC);
- print("KeyedLoad: " + this.KeyedLoadIC);
- print("KeyedStore: " + this.KeyedStoreIC);
- print("StoreInArrayLiteral: " + this.StoreInArrayLiteralIC);
-};
-
-IcProcessor.prototype.addEntry = function(entry) {
- this.entries.push(entry);
-}
-
-IcProcessor.prototype.processCodeCreation = function(
- type, kind, timestamp, start, size, name, maybe_func) {
- if (maybe_func.length) {
- var funcAddr = parseInt(maybe_func[0]);
- var state = parseState(maybe_func[1]);
- this.profile_.addFuncCode(type, name, timestamp, start, size, funcAddr, state);
- } else {
- this.profile_.addCode(type, name, timestamp, start, size);
- }
-};
-
-
-IcProcessor.prototype.processCodeMove = function(from, to) {
- this.profile_.moveCode(from, to);
-};
-
-
-IcProcessor.prototype.processCodeDelete = function(start) {
- this.profile_.deleteCode(start);
-};
-
-
-IcProcessor.prototype.processFunctionMove = function(from, to) {
- this.profile_.moveFunc(from, to);
-};
-
-IcProcessor.prototype.formatName = function(entry) {
- if (!entry) return "<unknown>"
- var name = entry.func.getName();
- var re = /(.*):[0-9]+:[0-9]+$/;
- var array = re.exec(name);
- if (!array) return name;
- return entry.getState() + array[1];
-}
-
-IcProcessor.prototype.processPropertyIC = function (
- type, pc, time, line, column, old_state, new_state, map, name, modifier,
- slow_reason) {
-this[type]++;
-let entry = this.profile_.findEntry(pc);
-print(
- type + ' (' + old_state + '->' + new_state + modifier + ') at ' +
- this.formatName(entry) + ':' + line + ':' + column + ' ' + name +
- ' (map 0x' + map.toString(16) + ')' +
- (slow_reason ? ' ' + slow_reason : '') + 'time: ' + time);
-}
-
-
-
-export class ArgumentsProcessor extends BaseArgumentsProcessor {
- getArgsDispatch() {
- return {
- '--range': ['range', 'auto,auto',
- 'Specify the range limit as [start],[end]'],
- '--source-map': ['sourceMap', null,
- 'Specify the source map that should be used for output']
- };
- }
- getDefaultResults() {
- return {
- logFileName: 'v8.log',
- range: 'auto,auto',
- };
- }
-}
diff --git a/deps/v8/tools/index.html b/deps/v8/tools/index.html
index 5ff63c9e33..93155dfbdf 100644
--- a/deps/v8/tools/index.html
+++ b/deps/v8/tools/index.html
@@ -46,7 +46,6 @@ a:hover, a:active {
text-align: center;
padding: 10px 50px 10px 50px ;
box-shadow: 0 4px 8px 0 rgba(0,0,0,0.2);
- transition: 0.3s;
background-color: #121212;
width: auto;
}
@@ -65,6 +64,10 @@ dd, dt {
 <p>Search through this page to find out about the V8 tools to debug, trace and analyze log files.</p>
<dl class="grid-container">
<div class="card">
+ <dt><a href="./system-analyzer/index.html">System Analyzer</a></dt>
+      <dd>A unified web interface to trace, debug and analyse patterns of how Maps/ICs are created in real-world applications.</dd>
+ </div>
+ <div class="card">
<dt><a href="./callstats.html">Callstats</a></dt>
<dd>Visualize and compare runtime call stats.</dd>
</div>
@@ -73,14 +76,6 @@ dd, dt {
<dd>Visualize heap memory usage.</dd>
</div>
<div class="card">
- <dt><a href="./ic-explorer.html">IC Explorer</a></dt>
- <dd>Analyse inline caches.</dd>
- </div>
- <div class="card">
- <dt><a href="./map-processor.html">Map Processor</a></dt>
- <dd>Analyse Maps and their transition trees.</dd>
- </div>
- <div class="card">
<dt><a href="./parse-processor.html">Parse Processor</a></dt>
<dd>Analyse parse, compile and first-execution.</dd>
</div>
@@ -89,10 +84,6 @@ dd, dt {
<dd>Fancy sampling profile viewer.</dd>
</div>
<div class="card">
- <dt><a href="./system-analyzer/index.html">System Analyzer</a></dt>
- <dd>A unified web interface to trace, debug and analyse patterns of how Maps/ICs are created in the real world applications.</dd>
- </div>
- <div class="card">
<dt><a href="./tick-processor.html">Tick Processor</a></dt>
<dd>Simple sampling profile viewer.</dd>
</div>
diff --git a/deps/v8/tools/linux-tick-processor b/deps/v8/tools/linux-tick-processor
index 8e5100b095..a2ae2b5441 100755
--- a/deps/v8/tools/linux-tick-processor
+++ b/deps/v8/tools/linux-tick-processor
@@ -34,4 +34,4 @@ fi
# nm spits out 'no symbols found' messages to stderr.
cat $log_file | $d8_exec --enable-os-system \
- --module $tools_path/tickprocessor-driver.mjs -- $@ 2>/dev/null
+ --module $tools_path/tickprocessor-driver.mjs -- $@
diff --git a/deps/v8/tools/logreader.mjs b/deps/v8/tools/logreader.mjs
index 75bc7ddade..1bd9a4ba02 100644
--- a/deps/v8/tools/logreader.mjs
+++ b/deps/v8/tools/logreader.mjs
@@ -146,11 +146,11 @@ LogReader.prototype.processLogLine = function(line) {
* @return {Array.<number>} Processed stack.
*/
LogReader.prototype.processStack = function(pc, func, stack) {
- var fullStack = func ? [pc, func] : [pc];
- var prevFrame = pc;
- for (var i = 0, n = stack.length; i < n; ++i) {
- var frame = stack[i];
- var firstChar = frame.charAt(0);
+ const fullStack = func ? [pc, func] : [pc];
+ let prevFrame = pc;
+ for (let i = 0, n = stack.length; i < n; ++i) {
+ const frame = stack[i];
+ const firstChar = frame.charAt(0);
if (firstChar == '+' || firstChar == '-') {
// An offset from the previous frame.
prevFrame += parseInt(frame, 16);
@@ -159,7 +159,7 @@ LogReader.prototype.processStack = function(pc, func, stack) {
} else if (firstChar != 'o') {
fullStack.push(parseInt(frame, 16));
} else {
- this.printError("dropping: " + frame);
+ this.printError(`dropping: ${frame}`);
}
}
return fullStack;
@@ -172,9 +172,7 @@ LogReader.prototype.processStack = function(pc, func, stack) {
* @param {!Object} dispatch Dispatch record.
* @return {boolean} True if dispatch must be skipped.
*/
-LogReader.prototype.skipDispatch = function(dispatch) {
- return false;
-};
+LogReader.prototype.skipDispatch = dispatch => false;
 // Dummy parser values, named for readability.
export const parseString = 'parse-string';
@@ -188,17 +186,17 @@ export const parseVarArgs = 'parse-var-args';
*/
LogReader.prototype.dispatchLogRow_ = function(fields) {
// Obtain the dispatch.
- var command = fields[0];
- var dispatch = this.dispatchTable_[command];
+ const command = fields[0];
+ const dispatch = this.dispatchTable_[command];
if (dispatch === undefined) return;
if (dispatch === null || this.skipDispatch(dispatch)) {
return;
}
// Parse fields.
- var parsedFields = [];
- for (var i = 0; i < dispatch.parsers.length; ++i) {
- var parser = dispatch.parsers[i];
+ const parsedFields = [];
+ for (let i = 0; i < dispatch.parsers.length; ++i) {
+ const parser = dispatch.parsers[i];
if (parser === parseString) {
parsedFields.push(fields[1 + i]);
} else if (typeof parser == 'function') {
@@ -208,7 +206,7 @@ LogReader.prototype.dispatchLogRow_ = function(fields) {
parsedFields.push(fields.slice(1 + i));
break;
} else {
- throw new Error("Invalid log field parser: " + parser);
+ throw new Error(`Invalid log field parser: ${parser}`);
}
}
@@ -224,7 +222,7 @@ LogReader.prototype.dispatchLogRow_ = function(fields) {
* @private
*/
LogReader.prototype.processLog_ = function(lines) {
- for (var i = 0, n = lines.length; i < n; ++i) {
+ for (let i = 0, n = lines.length; i < n; ++i) {
this.processLogLine_(lines[i]);
}
}
@@ -238,10 +236,10 @@ LogReader.prototype.processLog_ = function(lines) {
LogReader.prototype.processLogLine_ = function(line) {
if (line.length > 0) {
try {
- var fields = this.csvParser_.parseLine(line);
+ const fields = this.csvParser_.parseLine(line);
this.dispatchLogRow_(fields);
} catch (e) {
- this.printError('line ' + (this.lineNum_ + 1) + ': ' + (e.message || e) + '\n' + e.stack);
+ this.printError(`line ${this.lineNum_ + 1}: ${e.message || e}\n${e.stack}`);
}
}
this.lineNum_++;
diff --git a/deps/v8/tools/map-processor b/deps/v8/tools/map-processor
deleted file mode 100755
index ceb69970df..0000000000
--- a/deps/v8/tools/map-processor
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-
-# find the name of the log file to process, it must not start with a dash.
-log_file="v8.log"
-for arg in "$@"
-do
- if ! expr "X${arg}" : "^X-" > /dev/null; then
- log_file=${arg}
- fi
-done
-
-tools_path=`cd $(dirname "$0");pwd`
-if [ ! "$D8_PATH" ]; then
- d8_public=`which d8`
- if [ -x "$d8_public" ]; then D8_PATH=$(dirname "$d8_public"); fi
-fi
-[ -n "$D8_PATH" ] || D8_PATH=$tools_path/..
-d8_exec=$D8_PATH/d8
-
-if [ ! -x "$d8_exec" ]; then
- D8_PATH=`pwd`/out/native
- d8_exec=$D8_PATH/d8
-fi
-
-if [ ! -x "$d8_exec" ]; then
- d8_exec=`grep -m 1 -o '".*/d8"' $log_file | sed 's/"//g'`
-fi
-
-if [ ! -x "$d8_exec" ]; then
- echo "d8 shell not found in $D8_PATH"
- echo "Please provide path to d8 as env var in D8_PATH"
- exit 1
-fi
-
-# nm spits out 'no symbols found' messages to stderr.
-cat $log_file | $d8_exec \
- --module $tools_path/map-processor-driver.mjs -- $@ 2>/dev/null
diff --git a/deps/v8/tools/map-processor-driver.mjs b/deps/v8/tools/map-processor-driver.mjs
deleted file mode 100644
index a7a6fefa66..0000000000
--- a/deps/v8/tools/map-processor-driver.mjs
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-import { WebInspector } from "./sourcemap.mjs";
-import {
- MapProcessor, ArgumentsProcessor, readFile
- } from "./map-processor.mjs";
-
-function processArguments(args) {
- var processor = new ArgumentsProcessor(args);
- if (processor.parse()) {
- return processor.result();
- } else {
- processor.printUsageAndExit();
- }
-}
-
-function initSourceMapSupport() {
- // Pull dev tools source maps into our name space.
- SourceMap = WebInspector.SourceMap;
-
- // Overwrite the load function to load scripts synchronously.
- SourceMap.load = function(sourceMapURL) {
- var content = readFile(sourceMapURL);
- var sourceMapObject = (JSON.parse(content));
- return new SourceMap(sourceMapURL, sourceMapObject);
- };
-}
-
-var params = processArguments(arguments);
-var sourceMap = null;
-if (params.sourceMap) {
- initSourceMapSupport();
- sourceMap = SourceMap.load(params.sourceMap);
-}
-var mapProcessor = new MapProcessor();
-mapProcessor.processLogFile(params.logFileName);
diff --git a/deps/v8/tools/map-processor.html b/deps/v8/tools/map-processor.html
deleted file mode 100644
index 16cd224638..0000000000
--- a/deps/v8/tools/map-processor.html
+++ /dev/null
@@ -1,1315 +0,0 @@
-<!DOCTYPE html>
-<html>
- <!--
- Copyright 2017 the V8 project authors. All rights reserved. Use of this source
- code is governed by a BSD-style license that can be found in the LICENSE file.
- -->
-<head>
-<meta charset="utf-8">
-<style>
-html, body {
- font-family: sans-serif;
- padding: 0px;
- margin: 0px;
-}
-h1, h2, h3, section {
- padding-left: 15px;
-}
-
-kbd {
- background-color: #eee;
- border-radius: 3px;
- border: 1px solid black;
- display: inline-block;
- font-size: .9em;
- font-weight: bold;
- padding: 0px 4px 2px 4px;
- white-space: nowrap;
-}
-dl {
- display: grid;
- grid-template-columns: min-content auto;
- grid-gap: 10px;
-}
-dt {
- text-align: right;
- white-space: nowrap;
-}
-dd {
- margin: 0;
-}
-
-#content {
- opacity: 0.0;
- height: 0px;
- transition: all 0.5s ease-in-out;
-}
-
-.success #content {
- height: auto;
- opacity: 1.0;
-}
-
-#fileReader {
- width: 100%;
- height: 100px;
- line-height: 100px;
- text-align: center;
- border: solid 1px #000000;
- border-radius: 5px;
- cursor: pointer;
- transition: all 0.5s ease-in-out;
-}
-
-.failure #fileReader {
- background-color: #FFAAAA;
-}
-
-.success #fileReader {
- height: 20px;
- line-height: 20px;
-}
-
-#fileReader:hover {
- background-color: #e0edfe;
-}
-
-.loading #fileReader {
- cursor: wait;
-}
-
-#fileReader > input {
- display: none;
-}
-
-
-#loader {
- display: none;
-}
-
-.loading #loader {
- display: block;
- position: fixed;
- top: 0px;
- left: 0px;
- width: 100%;
- height: 100%;
- background-color: rgba(255, 255, 255, 0.5);
-}
-
-#spinner {
- position: absolute;
- width: 100px;
- height: 100px;
- top: 40%;
- left: 50%;
- margin-left: -50px;
- border: 30px solid #000;
- border-top: 30px solid #36E;
- border-radius: 50%;
- animation: spin 1s ease-in-out infinite;
-}
-
-@keyframes spin {
- 0% {
- transform: rotate(0deg);
- }
- 100% {
- transform: rotate(360deg);
- }
-}
-
-.colorbox {
- width: 10px;
- height: 10px;
- border: 1px black solid;
-}
-
-#stats {
- display: flex;
- height: 250px;
-}
-
-#stats table {
- flex: 1;
- padding-right: 50px;
- max-height: 250px;
- display: inline-block;
-}
-#stats table td {
- cursor: pointer;
-}
-#stats .transitionTable {
- overflow-y: scroll;
-}
-#stats .transitionTable tr {
- max-width: 200px;
-
-}
-#stats .transitionType {
- text-align: right;
- max-width: 380px;
-}
-#stats .transitionType tr td:nth-child(2) {
- text-align: left;
-}
-#stats table thead td {
- border-bottom: 1px black dotted;
-}
-
-#timeline {
- position: relative;
- height: 300px;
- overflow-y: hidden;
- overflow-x: scroll;
- user-select: none;
-}
-#timelineLabel {
- transform: rotate(90deg);
- transform-origin: left bottom 0;
- position: absolute;
- left: 0;
- width: 250px;
- text-align: center;
- font-size: 10px;
- opacity: 0.5;
-}
-#timelineChunks {
- height: 250px;
- position: absolute;
- margin-right: 100px;
-}
-#timelineCanvas {
- height: 250px;
- position: relative;
- overflow: visible;
- pointer-events: none;
-}
-.chunk {
- width: 6px;
- border: 0px white solid;
- border-width: 0 2px 0 2px;
- position: absolute;
- background-size: 100% 100%;
- image-rendering: pixelated;
- bottom: 0px;
-}
-.timestamp {
- height: 250px;
- width: 100px;
- border-left: 1px black dashed;
- padding-left: 4px;
- position: absolute;
- pointer-events: none;
- font-size: 10px;
- opacity: 0.5;
-}
-#timelineOverview {
- width: 100%;
- height: 50px;
- position: relative;
- margin-top: -50px;
- margin-bottom: 10px;
- background-size: 100% 100%;
- border: 1px black solid;
- border-width: 1px 0 1px 0;
- overflow: hidden;
-}
-#timelineOverviewIndicator {
- height: 100%;
- position: absolute;
- box-shadow: 0px 2px 20px -5px black inset;
- top: 0px;
- cursor: ew-resize;
-}
-#timelineOverviewIndicator .leftMask,
-#timelineOverviewIndicator .rightMask {
- background-color: rgba(200, 200, 200, 0.5);
- width: 10000px;
- height: 100%;
- position: absolute;
- top: 0px;
-}
-#timelineOverviewIndicator .leftMask {
- right: 100%;
-}
-#timelineOverviewIndicator .rightMask {
- left: 100%;
-}
-#mapDetails {
- font-family: monospace;
- white-space: pre;
-}
-#transitionView {
- overflow-x: scroll;
- white-space: nowrap;
- min-height: 50px;
- max-height: 200px;
- padding: 50px 0 0 0;
- margin-top: -25px;
- width: 100%;
-}
-.map {
- width: 20px;
- height: 20px;
- display: inline-block;
- border-radius: 50%;
- background-color: black;
- border: 4px solid white;
- font-size: 10px;
- text-align: center;
- line-height: 18px;
- color: white;
- vertical-align: top;
- margin-top: -13px;
- /* raise z-index */
- position: relative;
- z-index: 2;
- cursor: pointer;
-}
-.map.selected {
- border-color: black;
-}
-.transitions {
- display: inline-block;
- margin-left: -15px;
-}
-.transition {
- min-height: 55px;
- margin: 0 0 -2px 2px;
-}
-/* gray out deprecated transitions */
-.deprecated > .transitionEdge,
-.deprecated > .map {
- opacity: 0.5;
-}
-.deprecated > .transition {
- border-color: rgba(0, 0, 0, 0.5);
-}
-/* Show a border for all but the first transition */
-.transition:nth-of-type(2),
-.transition:nth-last-of-type(n+2) {
- border-left: 2px solid;
- margin-left: 0px;
-}
-/* special case for 2 transitions */
-.transition:nth-last-of-type(1) {
- border-left: none;
-}
-/* topmost transitions are not related */
-#transitionView > .transition {
- border-left: none;
-}
-/* topmost transition edge needs initial offset to be aligned */
-#transitionView > .transition > .transitionEdge {
- margin-left: 13px;
-}
-.transitionEdge {
- height: 2px;
- width: 80px;
- display: inline-block;
- margin: 0 0 2px 0;
- background-color: black;
- vertical-align: top;
- padding-left: 15px;
-}
-.transitionLabel {
- color: black;
- transform: rotate(-15deg);
- transform-origin: top left;
- margin-top: -10px;
- font-size: 10px;
- white-space: normal;
- word-break: break-all;
- background-color: rgba(255,255,255,0.5);
-}
-.black{
- background-color: black;
-}
-.red {
- background-color: red;
-}
-.green {
- background-color: green;
-}
-.yellow {
- background-color: yellow;
- color: black;
-}
-.blue {
- background-color: blue;
-}
-.orange {
- background-color: orange;
-}
-.violet {
- background-color: violet;
- color: black;
-}
-.showSubtransitions {
- width: 0;
- height: 0;
- border-left: 6px solid transparent;
- border-right: 6px solid transparent;
- border-top: 10px solid black;
- cursor: zoom-in;
- margin: 4px 0 0 4px;
-}
-.showSubtransitions.opened {
- border-top: none;
- border-bottom: 10px solid black;
- cursor: zoom-out;
-}
-#tooltip {
- position: absolute;
- width: 10px;
- height: 10px;
- background-color: red;
- pointer-events: none;
- z-index: 100;
- display: none;
-}
-#searchBarInput {
- width: 200px;
-}
-
-</style>
-<script type="module" src="./map-processor.js"></script>
-<script>
-"use strict"
-// =========================================================================
-const kChunkHeight = 250;
-const kChunkWidth = 10;
-
-class State {
- constructor() {
- this._nofChunks = 400;
- this._map = undefined;
- this._timeline = undefined;
- this._chunks = undefined;
- this._view = new View(this);
- this._navigation = new Navigation(this, this.view);
- }
- get timeline() { return this._timeline }
- set timeline(value) {
- this._timeline = value;
- this.updateChunks();
- this.view.updateTimeline();
- this.view.updateStats();
- }
- get chunks() { return this._chunks }
- get nofChunks() { return this._nofChunks }
- set nofChunks(count) {
- this._nofChunks = count;
- this.updateChunks();
- this.view.updateTimeline();
- }
- get view() { return this._view }
- get navigation() { return this._navigation }
- get map() { return this._map }
- set map(value) {
- this._map = value;
- this._navigation.updateUrl();
- this.view.updateMapDetails();
- this.view.redraw();
- }
- updateChunks() {
- this._chunks = this._timeline.chunks(this._nofChunks);
- }
- get entries() {
- if (!this.map) return {};
- return {
- map: this.map.id,
- time: this.map.time
- }
- }
-}
-
-// =========================================================================
-// DOM Helper
-function $(id) {
- return document.getElementById(id)
-}
-
-function removeAllChildren(node) {
- while (node.lastChild) {
- node.removeChild(node.lastChild);
- }
-}
-
-function selectOption(select, match) {
- let options = select.options;
- for (let i = 0; i < options.length; i++) {
- if (match(i, options[i])) {
- select.selectedIndex = i;
- return;
- }
- }
-}
-
-function div(classes) {
- let node = document.createElement('div');
- if (classes !== void 0) {
- if (typeof classes === "string") {
- node.classList.add(classes);
- } else {
- classes.forEach(cls => node.classList.add(cls));
- }
- }
- return node;
-}
-
-function table(className) {
- let node = document.createElement("table")
- if (className) node.classList.add(className)
- return node;
-}
-
-function td(textOrNode) {
- let node = document.createElement("td");
- if (typeof textOrNode === "object") {
- node.appendChild(textOrNode);
- } else {
- node.innerText = textOrNode;
- }
- return node;
-}
-
-
-function tr() {
- return document.createElement("tr");
-}
-
-define(Array.prototype, "histogram", function(mapFn) {
- let histogram = [];
- for (let i = 0; i < this.length; i++) {
- let value = this[i];
- let index = Math.round(mapFn(value))
- let bucket = histogram[index];
- if (bucket !== undefined) {
- bucket.push(value);
- } else {
- histogram[index] = [value];
- }
- }
- for (let i = 0; i < histogram.length; i++) {
- histogram[i] = histogram[i] || [];
- }
- return histogram;
-});
-
-
-// =========================================================================
-// EventHandlers
-function handleSearchBar(){
- let searchBar = $('searchBarInput');
- let searchBarInput = searchBar.value;
- let selectedMap = V8Map.get(searchBarInput);
- //removeAllChildren($('mapIdList'));
- if(selectedMap){
- let map = selectedMap;
- document.state.map = map;
- searchBar.className = "green";
- } else {
- searchBar.className = "red";
- }
-}
-
-function handleBodyLoad() {
- let upload = $('fileReader');
- upload.onclick = (e) => $("file").click();
- upload.ondragover = (e) => e.preventDefault();
- upload.ondrop = (e) => handleLoadFile(e);
- $('file').onchange = (e) => handleLoadFile(e);
- upload.onkeydown = (e) => {
- if (event.key == "Enter") $("file").click();
- };
- upload.focus();
-
- document.state = new State();
- $("transitionView").addEventListener("mousemove", e => {
- let tooltip = $("tooltip");
- tooltip.style.left = e.pageX + "px";
- tooltip.style.top = e.pageY + "px";
- let map = e.target.map;
- if (map) {
- $("tooltipContents").innerText = map.description;
- }
- });
-
- function handleLoadFile(event) {
- // Used for drop and file change.
- event.preventDefault();
- let host = event.dataTransfer ? event.dataTransfer : event.target;
- let file = host.files[0];
- let reader = new FileReader();
- document.body.className = 'loading';
- reader.onload = function(evt) {
- try {
- handleLoadText(this.result);
- document.body.className = 'success';
- } catch(e) {
- document.body.className = 'failure';
- console.error(e);
- }
- }
- // Defer the reading to allow spinner CSS animation.
- setTimeout(() => reader.readAsText(file), 0);
- }
-}
-
-
-function handleLoadText(text) {
- let mapProcessor = new MapProcessor();
- document.state.timeline = mapProcessor.processString(text);
-}
-
-function handleKeyDown(event) {
- let nav = document.state.navigation;
- switch(event.key) {
- case "ArrowUp":
- event.preventDefault();
- if (event.shiftKey) {
- nav.selectPrevEdge();
- } else {
- nav.moveInChunk(-1);
- }
- return false;
- case "ArrowDown":
- event.preventDefault();
- if (event.shiftKey) {
- nav.selectNextEdge();
- } else {
- nav.moveInChunk(1);
- }
- return false;
- case "ArrowLeft":
- nav.moveInChunks(false);
- break;
- case "ArrowRight":
- nav.moveInChunks(true);
- break;
- case "+":
- nav.increaseTimelineResolution();
- break;
- case "-":
- nav.decreaseTimelineResolution();
- break;
- }
-};
-document.onkeydown = handleKeyDown;
-
-function handleTimelineIndicatorMove(event) {
- if (event.buttons == 0) return;
- let timelineTotalWidth = $("timelineCanvas").offsetWidth;
- let factor = $("timelineOverview").offsetWidth / timelineTotalWidth;
- $("timeline").scrollLeft += event.movementX / factor;
-}
-
-// =========================================================================
-
-Object.defineProperty(Edge.prototype, 'getColor', { value:function() {
- return transitionTypeToColor(this.type);
-}});
-
-class Navigation {
- constructor(state, view) {
- this.state = state;
- this.view = view;
- }
- get map() { return this.state.map }
- set map(value) { this.state.map = value }
- get chunks() { return this.state.chunks }
-
- increaseTimelineResolution() {
- this.state.nofChunks *= 1.5;
- }
-
- decreaseTimelineResolution() {
- this.state.nofChunks /= 1.5;
- }
-
- selectNextEdge() {
- if (!this.map) return;
- if (this.map.children.length != 1) return;
- this.map = this.map.children[0].to;
- }
-
- selectPrevEdge() {
- if (!this.map) return;
- if (!this.map.parent()) return;
- this.map = this.map.parent();
- }
-
- selectDefaultMap() {
- this.map = this.chunks[0].at(0);
- }
- moveInChunks(next) {
- if (!this.map) return this.selectDefaultMap();
- let chunkIndex = this.map.chunkIndex(this.chunks);
- let chunk = this.chunks[chunkIndex];
- let index = chunk.indexOf(this.map);
- if (next) {
- chunk = chunk.next(this.chunks);
- } else {
- chunk = chunk.prev(this.chunks);
- }
- if (!chunk) return;
- index = Math.min(index, chunk.size()-1);
- this.map = chunk.at(index);
- }
-
- moveInChunk(delta) {
- if (!this.map) return this.selectDefaultMap();
- let chunkIndex = this.map.chunkIndex(this.chunks)
- let chunk = this.chunks[chunkIndex];
- let index = chunk.indexOf(this.map) + delta;
- let map;
- if (index < 0) {
- map = chunk.prev(this.chunks).last();
- } else if (index >= chunk.size()) {
- map = chunk.next(this.chunks).first()
- } else {
- map = chunk.at(index);
- }
- this.map = map;
- }
-
- updateUrl() {
- let entries = this.state.entries;
- let params = new URLSearchParams(entries);
- window.history.pushState(entries, "", "?" + params.toString());
- }
-}
-
-class View {
- constructor(state) {
- this.state = state;
- setInterval(this.updateOverviewWindow, 50);
- this.backgroundCanvas = document.createElement("canvas");
- this.transitionView = new TransitionView(state, $("transitionView"));
- this.statsView = new StatsView(state, $("stats"));
- this.isLocked = false;
- }
- get chunks() { return this.state.chunks }
- get timeline() { return this.state.timeline }
- get map() { return this.state.map }
-
- updateStats() {
- this.statsView.update();
- }
-
- updateMapDetails() {
- let details = "";
- if (this.map) {
- details += "ID: " + this.map.id;
- details += "\nSource location: " + this.map.filePosition;
- details += "\n" + this.map.description;
- }
- $("mapDetails").innerText = details;
- this.transitionView.showMap(this.map);
- }
-
- updateTimeline() {
- let chunksNode = $("timelineChunks");
- removeAllChildren(chunksNode);
- let chunks = this.chunks;
- let max = chunks.max(each => each.size());
- let start = this.timeline.startTime;
- let end = this.timeline.endTime;
- let duration = end - start;
- const timeToPixel = chunks.length * kChunkWidth / duration;
- let addTimestamp = (time, name) => {
- let timeNode = div("timestamp");
- timeNode.innerText = name;
- timeNode.style.left = ((time-start) * timeToPixel) + "px";
- chunksNode.appendChild(timeNode);
- };
- let backgroundTodo = [];
- for (let i = 0; i < chunks.length; i++) {
- let chunk = chunks[i];
- let height = (chunk.size() / max * kChunkHeight);
- chunk.height = height;
- if (chunk.isEmpty()) continue;
- let node = div();
- node.className = "chunk";
- node.style.left = (i * kChunkWidth) + "px";
- node.style.height = height + "px";
- node.chunk = chunk;
- node.addEventListener("mousemove", e => this.handleChunkMouseMove(e));
- node.addEventListener("click", e => this.handleChunkClick(e));
- node.addEventListener("dblclick", e => this.handleChunkDoubleClick(e));
- backgroundTodo.push([chunk, node])
- chunksNode.appendChild(node);
- chunk.markers.forEach(marker => addTimestamp(marker.time, marker.name));
- }
-
- this.asyncSetTimelineChunkBackground(backgroundTodo)
-
- // Put a time marker roughly every 20 chunks.
- let expected = duration / chunks.length * 20;
- let interval = (10 ** Math.floor(Math.log10(expected)));
- let correction = Math.log10(expected / interval);
- correction = (correction < 0.33) ? 1 : (correction < 0.75) ? 2.5 : 5;
- interval *= correction;
-
- let time = start;
- while (time < end) {
- addTimestamp(time, ((time-start) / 1000) + " ms");
- time += interval;
- }
- this.drawOverview();
- this.redraw();
- }
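// Hedged sketch (not in the original): the tick-interval rounding used in
// updateTimeline above, extracted as a standalone helper. It snaps a raw time
// step to 1x, 2.5x, or 5x a power of ten so axis labels stay readable.
function niceInterval(rawStep) {
  const base = 10 ** Math.floor(Math.log10(rawStep)); // power of ten below rawStep
  const ratio = Math.log10(rawStep / base);           // 0 <= ratio < 1
  return base * ((ratio < 0.33) ? 1 : (ratio < 0.75) ? 2.5 : 5);
}
// Example: niceInterval(370) === 250 (base 100, ratio ~0.57 -> 2.5x).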
-
- handleChunkMouseMove(event) {
- if (this.isLocked) return false;
- let chunk = event.target.chunk;
- if (!chunk) return;
- // topmost map (at chunk.height) == map #0.
- let relativeIndex =
- Math.round(event.layerY / event.target.offsetHeight * chunk.size());
- let map = chunk.at(relativeIndex);
- this.state.map = map;
- }
-
- handleChunkClick(event) {
- this.isLocked = !this.isLocked;
- }
-
- handleChunkDoubleClick(event) {
- this.isLocked = true;
- let chunk = event.target.chunk;
- if (!chunk) return;
- this.transitionView.showMaps(chunk.getUniqueTransitions());
- }
-
- asyncSetTimelineChunkBackground(backgroundTodo) {
- const kIncrement = 100;
- let start = 0;
- let delay = 1;
- while (start < backgroundTodo.length) {
- let end = Math.min(start+kIncrement, backgroundTodo.length);
- setTimeout((from, to) => {
- for (let i = from; i < to; i++) {
- let [chunk, node] = backgroundTodo[i];
- this.setTimelineChunkBackground(chunk, node);
- }
- }, delay++, start, end);
- start = end;
- }
- }
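// Hedged sketch (illustrative, not from the original): the batching pattern
// used above, generalized. Slices of work are scheduled with increasing
// setTimeout delays so long renders don't block the UI thread.
function processInBatches(items, batchSize, fn) {
  let delay = 1;
  for (let start = 0; start < items.length; start += batchSize) {
    const from = start;
    const to = Math.min(start + batchSize, items.length);
    setTimeout(() => {
      for (let i = from; i < to; i++) fn(items[i]);
    }, delay++);
  }
}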
-
- setTimelineChunkBackground(chunk, node) {
- // Render the types of transitions as bar charts
- const kHeight = chunk.height;
- const kWidth = 1;
- this.backgroundCanvas.width = kWidth;
- this.backgroundCanvas.height = kHeight;
- let ctx = this.backgroundCanvas.getContext("2d");
- ctx.clearRect(0, 0, kWidth, kHeight);
- let y = 0;
- let total = chunk.size();
-    if (true) {
-      // Stacked bar: one segment per transition type.
-      chunk.getTransitionBreakdown().forEach(([type, count]) => {
-        ctx.fillStyle = transitionTypeToColor(type);
-        let height = count / total * kHeight;
-        ctx.fillRect(0, y, kWidth, height);
-        y += height;
-      });
-    } else {
-      // Alternative rendering (disabled): one 1px band per map.
-      chunk.items.forEach(map => {
-        ctx.fillStyle = transitionTypeToColor(map.getType());
-        let y = chunk.yOffset(map);
-        ctx.fillRect(0, y, kWidth, 1);
-      });
-    }
-
- let imageData = this.backgroundCanvas.toDataURL("image/webp", 0.2);
- node.style.backgroundImage = "url(" + imageData + ")";
- }
-
- updateOverviewWindow() {
- let indicator = $("timelineOverviewIndicator");
- let totalIndicatorWidth = $("timelineOverview").offsetWidth;
- let div = $("timeline");
- let timelineTotalWidth = $("timelineCanvas").offsetWidth;
- let factor = $("timelineOverview").offsetWidth / timelineTotalWidth;
- let width = div.offsetWidth * factor;
- let left = div.scrollLeft * factor;
- indicator.style.width = width + "px";
- indicator.style.left = left + "px";
- }
-
- drawOverview() {
- const height = 50;
- const kFactor = 2;
- let canvas = this.backgroundCanvas;
- canvas.height = height;
- canvas.width = window.innerWidth;
- let ctx = canvas.getContext("2d");
-
- let chunks = this.state.timeline.chunkSizes(canvas.width * kFactor);
- let max = chunks.max();
-
- ctx.clearRect(0, 0, canvas.width, height);
- ctx.strokeStyle = "black";
- ctx.fillStyle = "black";
- ctx.beginPath();
- ctx.moveTo(0,height);
- for (let i = 0; i < chunks.length; i++) {
- ctx.lineTo(i/kFactor, height - chunks[i]/max * height);
- }
- ctx.lineTo(chunks.length, height);
- ctx.stroke();
- ctx.closePath();
- ctx.fill();
- let imageData = canvas.toDataURL("image/webp", 0.2);
- $("timelineOverview").style.backgroundImage = "url(" + imageData + ")";
- }
-
- redraw() {
- let canvas= $("timelineCanvas");
- canvas.width = (this.chunks.length+1) * kChunkWidth;
- canvas.height = kChunkHeight;
- let ctx = canvas.getContext("2d");
- ctx.clearRect(0, 0, canvas.width, kChunkHeight);
- if (!this.state.map) return;
- this.drawEdges(ctx);
- }
-
- setMapStyle(map, ctx) {
- ctx.fillStyle = map.edge && map.edge.from ? "black" : "green";
- }
-
- setEdgeStyle(edge, ctx) {
- let color = edge.getColor();
- ctx.strokeStyle = color;
- ctx.fillStyle = color;
- }
-
- markMap(ctx, map) {
- let [x, y] = map.position(this.state.chunks);
- ctx.beginPath();
- this.setMapStyle(map, ctx);
- ctx.arc(x, y, 3, 0, 2 * Math.PI);
- ctx.fill();
- ctx.beginPath();
- ctx.fillStyle = "white";
- ctx.arc(x, y, 2, 0, 2 * Math.PI);
- ctx.fill();
- }
-
- markSelectedMap(ctx, map) {
- let [x, y] = map.position(this.state.chunks);
- ctx.beginPath();
- this.setMapStyle(map, ctx);
- ctx.arc(x, y, 6, 0, 2 * Math.PI);
- ctx.stroke();
- }
-
- drawEdges(ctx) {
- // Draw the trace of maps in reverse order to make sure the outgoing
- // transitions of previous maps aren't drawn over.
- const kMaxOutgoingEdges = 100;
- let nofEdges = 0;
- let stack = [];
- let current = this.state.map;
- while (current && nofEdges < kMaxOutgoingEdges) {
- nofEdges += current.children.length;
- stack.push(current);
- current = current.parent();
- }
- ctx.save();
- this.drawOutgoingEdges(ctx, this.state.map, 3);
- ctx.restore();
-
- let labelOffset = 15;
- let xPrev = 0;
- while (current = stack.pop()) {
- if (current.edge) {
- this.setEdgeStyle(current.edge, ctx);
- let [xTo, yTo] = this.drawEdge(ctx, current.edge, true, labelOffset);
- if (xTo == xPrev) {
- labelOffset += 8;
- } else {
- labelOffset = 15
- }
- xPrev = xTo;
- }
- this.markMap(ctx, current);
-      ctx.save();
-      // this.drawOutgoingEdges(ctx, current, 1);
-      ctx.restore();
- }
- // Mark selected map
- this.markSelectedMap(ctx, this.state.map);
- }
-
- drawEdge(ctx, edge, showLabel=true, labelOffset=20) {
- if (!edge.from || !edge.to) return [-1, -1];
- let [xFrom, yFrom] = edge.from.position(this.chunks);
- let [xTo, yTo] = edge.to.position(this.chunks);
- let sameChunk = xTo == xFrom;
- if (sameChunk) labelOffset += 8;
-
- ctx.beginPath();
- ctx.moveTo(xFrom, yFrom);
-    let offsetX = 20;
-    let midX = xFrom + (xTo - xFrom) / 2;
- let midY = (yFrom + yTo) / 2 - 100;
- if (!sameChunk) {
- ctx.quadraticCurveTo(midX, midY, xTo, yTo);
- } else {
- ctx.lineTo(xTo, yTo);
- }
- if (!showLabel) {
- ctx.stroke();
- } else {
- let centerX, centerY;
- if (!sameChunk) {
- centerX = (xFrom/2 + midX + xTo/2)/2;
- centerY = (yFrom/2 + midY + yTo/2)/2;
- } else {
- centerX = xTo;
- centerY = yTo;
- }
- ctx.moveTo(centerX, centerY);
- ctx.lineTo(centerX + offsetX, centerY - labelOffset);
- ctx.stroke();
- ctx.textAlign = "left";
- ctx.fillText(edge.toString(), centerX + offsetX + 2, centerY - labelOffset)
- }
- return [xTo, yTo];
- }
-
- drawOutgoingEdges(ctx, map, max=10, depth=0) {
- if (!map) return;
- if (depth >= max) return;
- ctx.globalAlpha = 0.5 - depth * (0.3/max);
- ctx.strokeStyle = "#666";
-
- const limit = Math.min(map.children.length, 100)
- for (let i = 0; i < limit; i++) {
- let edge = map.children[i];
- this.drawEdge(ctx, edge, true);
- this.drawOutgoingEdges(ctx, edge.to, max, depth+1);
- }
- }
-}
-
-
-class TransitionView {
- constructor(state, node) {
- this.state = state;
- this.container = node;
- this.currentNode = node;
- this.currentMap = undefined;
- }
-
- selectMap(map) {
- this.currentMap = map;
- this.state.map = map;
- }
-
- showMap(map) {
- if (this.currentMap === map) return;
- this.currentMap = map;
- this._showMaps([map]);
- }
-
- showMaps(list, name) {
- this.state.view.isLocked = true;
- this._showMaps(list);
- }
-
- _showMaps(list, name) {
-    // Hide the container to avoid intermediate relayouts while rebuilding.
- this.container.style.display = "none";
- removeAllChildren(this.container);
- list.forEach(map => this.addMapAndParentTransitions(map));
- this.container.style.display = ""
- }
-
- addMapAndParentTransitions(map) {
- if (map === void 0) return;
- this.currentNode = this.container;
- let parents = map.getParents();
- if (parents.length > 0) {
- this.addTransitionTo(parents.pop());
- parents.reverse().forEach(each => this.addTransitionTo(each));
- }
- let mapNode = this.addSubtransitions(map);
- // Mark and show the selected map.
- mapNode.classList.add("selected");
- if (this.selectedMap == map) {
- setTimeout(() => mapNode.scrollIntoView({
- behavior: "smooth", block: "nearest", inline: "nearest"
- }), 1);
- }
- }
-
- addMapNode(map) {
- let node = div("map");
- if (map.edge) node.classList.add(map.edge.getColor());
- node.map = map;
- node.addEventListener("click", () => this.selectMap(map));
- if (map.children.length > 1) {
- node.innerText = map.children.length;
- let showSubtree = div("showSubtransitions");
- showSubtree.addEventListener("click", (e) => this.toggleSubtree(e, node));
- node.appendChild(showSubtree);
- } else if (map.children.length == 0) {
- node.innerHTML = "&#x25CF;"
- }
- this.currentNode.appendChild(node);
- return node;
- }
-
- addSubtransitions(map) {
- let mapNode = this.addTransitionTo(map);
- // Draw outgoing linear transition line.
- let current = map;
- while (current.children.length == 1) {
- current = current.children[0].to;
- this.addTransitionTo(current);
- }
- return mapNode;
- }
-
- addTransitionEdge(map) {
- let classes = ["transitionEdge", map.edge.getColor()];
- let edge = div(classes);
- let labelNode = div("transitionLabel");
- labelNode.innerText = map.edge.toString();
- edge.appendChild(labelNode);
- return edge;
- }
-
- addTransitionTo(map) {
- // transition[ transitions[ transition[...], transition[...], ...]];
-
- let transition = div("transition");
- if (map.isDeprecated()) transition.classList.add("deprecated");
- if (map.edge) {
- transition.appendChild(this.addTransitionEdge(map));
- }
- let mapNode = this.addMapNode(map);
- transition.appendChild(mapNode);
-
- let subtree = div("transitions");
- transition.appendChild(subtree);
-
- this.currentNode.appendChild(transition);
- this.currentNode = subtree;
-
- return mapNode;
-
- }
-
- toggleSubtree(event, node) {
- let map = node.map;
- event.target.classList.toggle("opened");
- let transitionsNode = node.parentElement.querySelector(".transitions");
- let subtransitionNodes = transitionsNode.children;
- if (subtransitionNodes.length <= 1) {
-      // Add subtransitions, except the one that's already shown.
- let visibleTransitionMap = subtransitionNodes.length == 1 ?
- transitionsNode.querySelector(".map").map : void 0;
- map.children.forEach(edge => {
- if (edge.to != visibleTransitionMap) {
- this.currentNode = transitionsNode;
- this.addSubtransitions(edge.to);
- }
- });
- } else {
- // remove all but the first (currently selected) subtransition
- for (let i = subtransitionNodes.length-1; i > 0; i--) {
- transitionsNode.removeChild(subtransitionNodes[i]);
- }
- }
- }
-}
-
-class StatsView {
- constructor(state, node) {
- this.state = state;
- this.node = node;
- }
- get timeline() { return this.state.timeline }
- get transitionView() { return this.state.view.transitionView; }
- update() {
- removeAllChildren(this.node);
- this.updateGeneralStats();
- this.updateNamedTransitionsStats();
- }
- updateGeneralStats() {
- let pairs = [
- ["Total", null, e => true],
- ["Transitions", 'black', e => e.edge && e.edge.isTransition()],
- ["Fast to Slow", 'violet', e => e.edge && e.edge.isFastToSlow()],
- ["Slow to Fast", 'orange', e => e.edge && e.edge.isSlowToFast()],
- ["Initial Map", 'yellow', e => e.edge && e.edge.isInitial()],
- ["Replace Descriptors", 'red', e => e.edge && e.edge.isReplaceDescriptors()],
- ["Copy as Prototype", 'red', e => e.edge && e.edge.isCopyAsPrototype()],
- ["Optimize as Prototype", null, e => e.edge && e.edge.isOptimizeAsPrototype()],
- ["Deprecated", null, e => e.isDeprecated()],
- ["Bootstrapped", 'green', e => e.isBootstrapped()],
- ];
-
- let text = "";
- let tableNode = table("transitionType");
- tableNode.innerHTML = "<thead><tr><td>Color</td><td>Type</td><td>Count</td><td>Percent</td></tr></thead>";
- let name, filter;
- let total = this.timeline.size();
- pairs.forEach(([name, color, filter]) => {
- let row = tr();
- if (color !== null) {
- row.appendChild(td(div(['colorbox', color])));
- } else {
- row.appendChild(td(""));
- }
- row.onclick = (e) => {
- // lazily compute the stats
- let node = e.target.parentNode;
- if (node.maps == undefined) {
- node.maps = this.timeline.filterUniqueTransitions(filter);
- }
- this.transitionView.showMaps(node.maps);
- }
- row.appendChild(td(name));
- let count = this.timeline.count(filter);
- row.appendChild(td(count));
- let percent = Math.round(count / total * 1000) / 10;
- row.appendChild(td(percent.toFixed(1) + "%"));
- tableNode.appendChild(row);
- });
- this.node.appendChild(tableNode);
-  }
- updateNamedTransitionsStats() {
- let tableNode = table("transitionTable");
- let nameMapPairs = Array.from(this.timeline.transitions.entries());
-    tableNode.innerHTML = "<thead><tr><td>Property Name</td><td>#</td></tr></thead>";
- nameMapPairs
- .sort((a,b) => b[1].length - a[1].length)
- .forEach(([name, maps]) => {
- let row = tr();
- row.maps = maps;
- row.addEventListener("click",
- e => this.transitionView.showMaps(
- e.target.parentNode.maps.map(map => map.to)));
- row.appendChild(td(name));
- row.appendChild(td(maps.length));
- tableNode.appendChild(row);
- });
- this.node.appendChild(tableNode);
- }
-}
-
-// =========================================================================
-
-function transitionTypeToColor(type) {
- switch(type) {
- case "new": return "green";
- case "Normalize": return "violet";
- case "SlowToFast": return "orange";
- case "InitialMap": return "yellow";
- case "Transition": return "black";
- case "ReplaceDescriptors": return "red";
- }
- return "black";
-}
-
-// ShadowDom elements =========================================================
-
-</script>
-</head>
-<body onload="handleBodyLoad(event)" onkeypress="handleKeyDown(event)">
- <h1>V8 Map Explorer</h1>
- <section>
- <div id="fileReader" tabindex=1 >
- <span id="label">
- Drag and drop a v8.log file into this area, or click to choose from disk.
- </span>
- <input id="file" type="file" name="files">
- </div>
- <div id="loader">
- <div id="spinner"></div>
- </div>
- </section>
-
- <div id="content">
- <h2>Stats</h2>
- <section id="stats"></section>
-
- <h2>Timeline</h2>
- <div id="timeline">
- <div id="timelineLabel">Frequency</div>
- <div id="timelineChunks"></div>
- <canvas id="timelineCanvas"></canvas>
- </div>
- <div id="timelineOverview"
- onmousemove="handleTimelineIndicatorMove(event)" >
- <div id="timelineOverviewIndicator">
- <div class="leftMask"></div>
- <div class="rightMask"></div>
- </div>
- </div>
-
- <h2>Transitions</h2>
- <section id="transitionView"></section>
- <br/>
-
-
- <h2>Search Map by Address</h2>
- <section id="searchBar"></section>
- <input type="search" id="searchBarInput" placeholder="Search maps by address..">
- <button onclick="handleSearchBar()">Search</button>
- <ul id="mapIdList" title="Map Id List">
- </ul>
-
-
- <h2>Selected Map</h2>
- <section id="mapDetails"></section>
- </div>
-
- <section>
- <h2>Instructions</h2>
- <p>Visualize Map trees that have been gathered using <code>path/to/d8 $FILE --trace-maps</code>.</p>
-    <p>You can inspect the transition tree in DevTools by looking at <code>document.state.timeline.values</code>.</p>
- <h3>Keyboard Shortcuts</h3>
- <dl>
- <dt><kbd>SHIFT</kbd> + <kbd>Arrow Up</kbd></dt>
- <dd>Follow Map transition forward (first child)</dd>
-
- <dt><kbd>SHIFT</kbd> + <kbd>Arrow Down</kbd></dt>
- <dd>Follow Map transition backwards</dd>
-
- <dt><kbd>Arrow Up</kbd></dt>
-      <dd>Go to previous Map in chunk</dd>
-
- <dt><kbd>Arrow Down</kbd></dt>
- <dd>Go to next Map in chunk</dd>
-
- <dt><kbd>Arrow Left</kbd></dt>
- <dd>Go to previous chunk</dd>
-
- <dt><kbd>Arrow Right</kbd></dt>
- <dd>Go to next chunk</dd>
-
- <dt><kbd>+</kbd></dt>
- <dd>Timeline zoom in</dd>
-
- <dt><kbd>-</kbd></dt>
- <dd>Timeline zoom out</dd>
- </dl>
- </section>
-
- <div id="tooltip">
- <div id="tooltipContents"></div>
- </div>
-</body>
-</html>
diff --git a/deps/v8/tools/map-processor.mjs b/deps/v8/tools/map-processor.mjs
deleted file mode 100644
index 7c290abb8d..0000000000
--- a/deps/v8/tools/map-processor.mjs
+++ /dev/null
@@ -1,783 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-import { LogReader, parseString, parseVarArgs } from "./logreader.mjs";
-import { BaseArgumentsProcessor } from "./arguments.mjs";
-import { Profile } from "./profile.mjs";
-
-// ===========================================================================
-function define(prototype, name, fn) {
- Object.defineProperty(prototype, name, {value:fn, enumerable:false});
-}
-
-define(Array.prototype, "max", function(fn) {
- if (this.length === 0) return undefined;
- if (fn === undefined) fn = (each) => each;
- let max = fn(this[0]);
- for (let i = 1; i < this.length; i++) {
- max = Math.max(max, fn(this[i]));
- }
- return max;
-})
-define(Array.prototype, "first", function() { return this[0] });
-define(Array.prototype, "last", function() { return this[this.length - 1] });
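// Hedged usage examples (not in the original): define() installs these helpers
// as non-enumerable properties, so they don't leak into for-in loops.
// [3, 1, 4].max()           -> 4
// [3, 1, 4].max(x => x * 2) -> 8
// [3, 1, 4].first()         -> 3
// [3, 1, 4].last()          -> 4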
-
-
-/**
- * A thin wrapper around shell's 'read' function showing a file name on error.
- */
-export function readFile(fileName) {
- try {
- return read(fileName);
- } catch (e) {
- console.log(fileName + ': ' + (e.message || e));
- throw e;
- }
-}
-// ===========================================================================
-
-export class MapProcessor extends LogReader {
- constructor() {
- super();
- this.dispatchTable_ = {
- __proto__:null,
- 'code-creation': {
- parsers: [parseString, parseInt, parseInt, parseInt, parseInt,
- parseString, parseVarArgs],
- processor: this.processCodeCreation
- },
-      'code-move': {
-        parsers: [parseInt, parseInt],
-        processor: this.processCodeMove
-      },
-      'code-delete': {
-        parsers: [parseInt],
-        processor: this.processCodeDelete
-      },
-      'sfi-move': {
-        parsers: [parseInt, parseInt],
-        processor: this.processFunctionMove
-      },
- 'map-create': {
- parsers: [parseInt, parseString],
- processor: this.processMapCreate
- },
- 'map': {
- parsers: [parseString, parseInt, parseString, parseString, parseInt, parseInt,
- parseString, parseString, parseString
- ],
- processor: this.processMap
- },
- 'map-details': {
- parsers: [parseInt, parseString, parseString],
- processor: this.processMapDetails
- }
- };
- this.profile_ = new Profile();
- this.timeline_ = new Timeline();
- this.formatPCRegexp_ = /(.*):[0-9]+:[0-9]+$/;
- }
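// Hedged sketch (not the actual LogReader implementation): how a dispatch
// table like the one above is typically consumed. Each v8.log line is split
// on commas; the first field selects the entry, the per-column parsers run,
// and the processor receives the parsed values.
function dispatchLogLine(table, line) {
  const fields = line.split(',');
  const entry = table[fields[0]];
  if (entry === undefined) return; // unknown event: skip the line
  const args = entry.parsers.map((parse, i) => parse(fields[i + 1]));
  entry.processor(...args);
}
// Example (assumed log format): dispatchLogLine(table, "map-create,123,0x1234")
// would call processMapCreate(123, "0x1234").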
-
- printError(str) {
- console.error(str);
- throw str
- }
-
- processString(string) {
- let end = string.length;
- let current = 0;
- let next = 0;
- let line;
- let i = 0;
- let entry;
- try {
- while (current < end) {
- next = string.indexOf("\n", current);
- if (next === -1) break;
- i++;
- line = string.substring(current, next);
- current = next + 1;
- this.processLogLine(line);
- }
- } catch(e) {
- console.error("Error occurred during parsing, trying to continue: " + e);
- }
- return this.finalize();
- }
-
- processLogFile(fileName) {
- this.collectEntries = true
- this.lastLogFileName_ = fileName;
- let i = 1;
- let line;
- try {
- while (line = readline()) {
- this.processLogLine(line);
- i++;
- }
- } catch(e) {
- console.error("Error occurred during parsing line " + i + ", trying to continue: " + e);
- }
- return this.finalize();
- }
-
- finalize() {
- // TODO(cbruni): print stats;
- this.timeline_.finalize();
- return this.timeline_;
- }
-
- addEntry(entry) {
- this.entries.push(entry);
- }
-
- /**
- * Parser for dynamic code optimization state.
- */
- parseState(s) {
- switch (s) {
- case "":
- return Profile.CodeState.COMPILED;
- case "~":
- return Profile.CodeState.OPTIMIZABLE;
- case "*":
- return Profile.CodeState.OPTIMIZED;
- }
- throw new Error("unknown code state: " + s);
- }
-
- processCodeCreation(
- type, kind, timestamp, start, size, name, maybe_func) {
- if (maybe_func.length) {
- let funcAddr = parseInt(maybe_func[0]);
- let state = this.parseState(maybe_func[1]);
- this.profile_.addFuncCode(type, name, timestamp, start, size, funcAddr, state);
- } else {
- this.profile_.addCode(type, name, timestamp, start, size);
- }
- }
-
- processCodeMove(from, to) {
- this.profile_.moveCode(from, to);
- }
-
- processCodeDelete(start) {
- this.profile_.deleteCode(start);
- }
-
- processFunctionMove(from, to) {
- this.profile_.moveFunc(from, to);
- }
-
- formatPC(pc, line, column) {
- let entry = this.profile_.findEntry(pc);
- if (!entry) return "<unknown>"
- if (entry.type === "Builtin") {
- return entry.name;
- }
- let name = entry.func.getName();
- let array = this.formatPCRegexp_.exec(name);
- if (array === null) {
- entry = name;
- } else {
- entry = entry.getState() + array[1];
- }
- return entry + ":" + line + ":" + column;
- }
-
- processMap(type, time, from, to, pc, line, column, reason, name) {
- let time_ = parseInt(time);
- if (type === "Deprecate") return this.deprecateMap(type, time_, from);
- let from_ = this.getExistingMap(from, time_);
- let to_ = this.getExistingMap(to, time_);
-    let edge = new Edge(type, name, reason, time_, from_, to_);
- to_.filePosition = this.formatPC(pc, line, column);
- edge.finishSetup();
- }
-
- deprecateMap(type, time, id) {
- this.getExistingMap(id, time).deprecate();
- }
-
- processMapCreate(time, id) {
-    // map-create events might overwrite existing maps if the addresses get
-    // recycled. Hence we do not check for existing maps.
-    this.createMap(id, time);
- }
-
- processMapDetails(time, id, string) {
-    // TODO(cbruni): fix initial map logging.
- let map = this.getExistingMap(id, time);
- map.description = string;
- }
-
- createMap(id, time) {
- let map = new V8Map(id, time);
- this.timeline_.push(map);
- return map;
- }
-
- getExistingMap(id, time) {
- if (id === "0x000000000000") return undefined;
- let map = V8Map.get(id, time);
- if (map === undefined) {
- console.error("No map details provided: id=" + id);
- // Manually patch in a map to continue running.
- return this.createMap(id, time);
-    }
- return map;
- }
-}
-
-// ===========================================================================
-
-class V8Map {
- constructor(id, time = -1) {
- if (!id) throw "Invalid ID";
- this.id = id;
- this.time = time;
- if (!(time > 0)) throw "Invalid time";
- this.description = "";
- this.edge = void 0;
- this.children = [];
- this.depth = 0;
- this._isDeprecated = false;
- this.deprecationTargets = null;
- V8Map.set(id, this);
- this.leftId = 0;
- this.rightId = 0;
- this.filePosition = "";
- }
-
- finalizeRootMap(id) {
- let stack = [this];
- while (stack.length > 0) {
- let current = stack.pop();
- if (current.leftId !== 0) {
- console.error("Skipping potential parent loop between maps:", current)
- continue;
- }
- current.finalize(id)
- id += 1;
- current.children.forEach(edge => stack.push(edge.to))
- // TODO implement rightId
- }
- return id;
- }
-
- finalize(id) {
- // Initialize preorder tree traversal Ids for fast subtree inclusion checks
- if (id <= 0) throw "invalid id";
- let currentId = id;
-    this.leftId = currentId;
- }
-
-
- parent() {
- if (this.edge === void 0) return void 0;
- return this.edge.from;
- }
-
- isDeprecated() {
- return this._isDeprecated;
- }
-
- deprecate() {
- this._isDeprecated = true;
- }
-
- isRoot() {
- return this.edge === void 0 || this.edge.from === void 0;
- }
-
- contains(map) {
- return this.leftId < map.leftId && map.rightId < this.rightId;
- }
-
- addEdge(edge) {
- this.children.push(edge);
- }
-
- chunkIndex(chunks) {
- // Did anybody say O(n)?
- for (let i = 0; i < chunks.length; i++) {
- let chunk = chunks[i];
- if (chunk.isEmpty()) continue;
- if (chunk.last().time < this.time) continue;
- return i;
- }
- return -1;
- }
-
- position(chunks) {
- let index = this.chunkIndex(chunks);
- let xFrom = (index + 0.5) * kChunkWidth;
- let yFrom = kChunkHeight - chunks[index].yOffset(this);
- return [xFrom, yFrom];
- }
-
- transitions() {
- let transitions = Object.create(null);
- let current = this;
- while (current) {
- let edge = current.edge;
- if (edge && edge.isTransition()) {
- transitions[edge.name] = edge;
- }
- current = current.parent()
- }
- return transitions;
- }
-
- getType() {
- return this.edge === void 0 ? "new" : this.edge.type;
- }
-
- isBootstrapped() {
- return this.edge === void 0;
- }
-
- getParents() {
- let parents = [];
- let current = this.parent();
- while (current) {
- parents.push(current);
- current = current.parent();
- }
- return parents;
- }
-
-
-  static get(id, time = undefined) {
-    let maps = this.cache.get(id);
-    if (maps) {
-      for (let i = 0; i < maps.length; i++) {
-        // TODO: Implement time-based map search.
-        if (maps[i].time === time) {
-          return maps[i];
-        }
-      }
-      // Default: return the latest map.
-      return maps[maps.length - 1];
-    }
-  }
-
-  static set(id, map) {
-    if (this.cache.has(id)) {
-      this.cache.get(id).push(map);
-    } else {
-      this.cache.set(id, [map]);
-    }
-  }
-}
-
-V8Map.cache = new Map();
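// Hedged usage example (illustrative values): map addresses get recycled, so
// the cache keys each id to a list of maps, disambiguated by creation time.
const a = new V8Map("0x1234", 10);
const b = new V8Map("0x1234", 99);              // same address, reused later
console.assert(V8Map.get("0x1234", 99) === b);  // exact time match
console.assert(V8Map.get("0x1234") === b);      // no time given: latest wins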
-
-
-
-// ===========================================================================
-class Edge {
- constructor(type, name, reason, time, from, to) {
- this.type = type;
- this.name = name;
- this.reason = reason;
- this.time = time;
- this.from = from;
- this.to = to;
- }
-
- finishSetup() {
- let from = this.from
- if (from) from.addEdge(this);
- let to = this.to;
- if (to === undefined) return;
- to.edge = this;
- if (from === undefined ) return;
- if (to === from) throw "From and to must be distinct.";
- if (to.time < from.time) {
- console.error("invalid time order");
- }
- let newDepth = from.depth + 1;
- if (to.depth > 0 && to.depth != newDepth) {
- console.error("Depth has already been initialized");
- }
- to.depth = newDepth;
- }
-
- chunkIndex(chunks) {
- // Did anybody say O(n)?
- for (let i = 0; i < chunks.length; i++) {
- let chunk = chunks[i];
- if (chunk.isEmpty()) continue;
- if (chunk.last().time < this.time) continue;
- return i;
- }
- return -1;
- }
-
- parentEdge() {
- if (!this.from) return undefined;
- return this.from.edge;
- }
-
- chainLength() {
- let length = 0;
- let prev = this;
- while (prev) {
-      prev = prev.parentEdge();
- length++;
- }
- return length;
- }
-
- isTransition() {
- return this.type === "Transition"
- }
-
- isFastToSlow() {
- return this.type === "Normalize"
- }
-
- isSlowToFast() {
- return this.type === "SlowToFast"
- }
-
- isInitial() {
- return this.type === "InitialMap"
- }
-
- isBootstrapped() {
- return this.type === "new"
- }
-
- isReplaceDescriptors() {
- return this.type === "ReplaceDescriptors"
- }
-
- isCopyAsPrototype() {
- return this.reason === "CopyAsPrototype"
- }
-
- isOptimizeAsPrototype() {
- return this.reason === "OptimizeAsPrototype"
- }
-
- symbol() {
- if (this.isTransition()) return "+";
- if (this.isFastToSlow()) return "⊡";
- if (this.isSlowToFast()) return "⊛";
- if (this.isReplaceDescriptors()) {
- if (this.name) return "+";
- return "∥";
- }
- return "";
- }
-
- toString() {
- let s = this.symbol();
- if (this.isTransition()) return s + this.name;
- if (this.isFastToSlow()) return s + this.reason;
- if (this.isCopyAsPrototype()) return s + "Copy as Prototype";
- if (this.isOptimizeAsPrototype()) {
- return s + "Optimize as Prototype";
- }
- if (this.isReplaceDescriptors() && this.name) {
- return this.type + " " + this.symbol() + this.name;
- }
- return this.type + " " + (this.reason ? this.reason : "") + " " +
- (this.name ? this.name : "")
- }
-}
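// Hedged examples (illustrative names): how edges render in transition labels.
// new Edge("Transition", "x", "", 1, a, b).toString() -> "+x"
// new Edge("Normalize", "", "TooManyFastProperties", 1, a, b).toString()
//                                                     -> "⊡TooManyFastProperties"
// new Edge("SlowToFast", "", "", 1, a, b).symbol()    -> "⊛"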
-
-
-// ===========================================================================
-class Marker {
- constructor(time, name) {
- this.time = parseInt(time);
- this.name = name;
- }
-}
-
-// ===========================================================================
-class Timeline {
- constructor() {
- this.values = [];
- this.transitions = new Map();
- this.markers = [];
- this.startTime = 0;
- this.endTime = 0;
- }
-
- push(map) {
- let time = map.time;
- if (!this.isEmpty() && this.last().time > time) {
-      // Invalid insertion order; this can happen without --single-process,
-      // so find the insertion point first.
-      let insertionPoint = this.find(time);
-      this.values.splice(insertionPoint, 0, map);
- } else {
- this.values.push(map);
- }
- if (time > 0) {
- this.endTime = Math.max(this.endTime, time);
- if (this.startTime === 0) {
- this.startTime = time;
- } else {
- this.startTime = Math.min(this.startTime, time);
- }
- }
- }
-
- addMarker(time, message) {
- this.markers.push(new Marker(time, message));
- }
-
- finalize() {
- let id = 0;
- this.forEach(map => {
- if (map.isRoot()) id = map.finalizeRootMap(id + 1);
- if (map.edge && map.edge.name) {
- let edge = map.edge;
- let list = this.transitions.get(edge.name);
- if (list === undefined) {
- this.transitions.set(edge.name, [edge]);
- } else {
- list.push(edge);
- }
- }
- });
-    this.markers.sort((a, b) => a.time - b.time);
- }
-
- at(index) {
- return this.values[index]
- }
-
- isEmpty() {
- return this.size() === 0
- }
-
- size() {
- return this.values.length
- }
-
- first() {
- return this.values.first()
- }
-
- last() {
- return this.values.last()
- }
-
- duration() {
- return this.last().time - this.first().time
- }
-
- forEachChunkSize(count, fn) {
- const increment = this.duration() / count;
- let currentTime = this.first().time + increment;
- let index = 0;
- for (let i = 0; i < count; i++) {
- let nextIndex = this.find(currentTime, index);
- let nextTime = currentTime + increment;
- fn(index, nextIndex, currentTime, nextTime);
- index = nextIndex
- currentTime = nextTime;
- }
- }
-
- chunkSizes(count) {
- let chunks = [];
- this.forEachChunkSize(count, (start, end) => chunks.push(end - start));
- return chunks;
- }
-
- chunks(count) {
- let chunks = [];
- this.forEachChunkSize(count, (start, end, startTime, endTime) => {
- let items = this.values.slice(start, end);
- let markers = this.markersAt(startTime, endTime);
- chunks.push(new Chunk(chunks.length, startTime, endTime, items, markers));
- });
- return chunks;
- }
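// Hedged usage sketch (assumed values): chunks(n) slices the timeline into n
// equal time windows; chunkSizes(n) reports how many maps land in each.
// const timeline = new MapProcessor().processString(logText);
// timeline.chunkSizes(4);          // e.g. [12, 3, 0, 7]
// timeline.chunks(4)[2].isEmpty(); // true when no map fell in that window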
-
- range(start, end) {
- const first = this.find(start);
- if (first < 0) return [];
- const last = this.find(end, first);
- return this.values.slice(first, last);
- }
-
- find(time, offset = 0) {
- return this.basicFind(this.values, each => each.time - time, offset);
- }
-
- markersAt(startTime, endTime) {
- let start = this.basicFind(this.markers, each => each.time - startTime);
- let end = this.basicFind(this.markers, each => each.time - endTime, start);
- return this.markers.slice(start, end);
- }
-
- basicFind(array, cmp, offset = 0) {
- let min = offset;
- let max = array.length;
- while (min < max) {
- let mid = min + Math.floor((max - min) / 2);
- let result = cmp(array[mid]);
-      if (result > 0) {
-        max = mid;
- } else {
- min = mid + 1;
- }
- }
- return min;
- }
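// Hedged worked example (illustrative): basicFind is an upper-bound binary
// search; it returns the first index whose comparator result is positive.
// For values [5, 10, 20, 20, 30] and cmp = v => v - 20:
//   basicFind(values, cmp)        -> 4  (first entry strictly after 20)
//   basicFind(values, v => v - 1) -> 0  (everything is after 1)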
-
- count(filter) {
- return this.values.reduce((sum, each) => {
- return sum + (filter(each) === true ? 1 : 0);
- }, 0);
- }
-
- filter(predicate) {
- return this.values.filter(predicate);
- }
-
- filterUniqueTransitions(filter) {
- // Returns a list of Maps whose parent is not in the list.
- return this.values.filter(map => {
- if (filter(map) === false) return false;
- let parent = map.parent();
- if (parent === undefined) return true;
- return filter(parent) === false;
- });
- }
-
- depthHistogram() {
- return this.values.histogram(each => each.depth);
- }
-
- fanOutHistogram() {
- return this.values.histogram(each => each.children.length);
- }
-
- forEach(fn) {
- return this.values.forEach(fn)
- }
-}
-
-
-// ===========================================================================
-class Chunk {
- constructor(index, start, end, items, markers) {
- this.index = index;
- this.start = start;
- this.end = end;
- this.items = items;
-    this.markers = markers;
- this.height = 0;
- }
-
- isEmpty() {
- return this.items.length === 0;
- }
-
- last() {
- return this.at(this.size() - 1);
- }
-
- first() {
- return this.at(0);
- }
-
- at(index) {
- return this.items[index];
- }
-
- size() {
- return this.items.length;
- }
-
- yOffset(map) {
- // items[0] == oldest map, displayed at the top of the chunk
- // items[n-1] == youngest map, displayed at the bottom of the chunk
- return (1 - (this.indexOf(map) + 0.5) / this.size()) * this.height;
- }
-
- indexOf(map) {
- return this.items.indexOf(map);
- }
-
- has(map) {
- if (this.isEmpty()) return false;
- return this.first().time <= map.time && map.time <= this.last().time;
- }
-
- next(chunks) {
- return this.findChunk(chunks, 1);
- }
-
- prev(chunks) {
- return this.findChunk(chunks, -1);
- }
-
- findChunk(chunks, delta) {
- let i = this.index + delta;
- let chunk = chunks[i];
- while (chunk && chunk.size() === 0) {
- i += delta;
- chunk = chunks[i]
- }
- return chunk;
- }
-
- getTransitionBreakdown() {
- return BreakDown(this.items, map => map.getType())
- }
-
- getUniqueTransitions() {
- // Filter out all the maps that have parents within the same chunk.
- return this.items.filter(map => !map.parent() || !this.has(map.parent()));
- }
-}
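// Hedged worked example for yOffset above (offsets are measured from the
// chunk bottom, so the oldest map draws highest): with height 100 and 4 items,
//   indexOf == 0 (oldest)   -> (1 - 0.5/4) * 100 = 87.5
//   indexOf == 3 (youngest) -> (1 - 3.5/4) * 100 = 12.5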
-
-
-// ===========================================================================
-function BreakDown(list, map_fn) {
- if (map_fn === void 0) {
- map_fn = each => each;
- }
- let breakdown = {__proto__:null};
- list.forEach(each=> {
- let type = map_fn(each);
- let v = breakdown[type];
- breakdown[type] = (v | 0) + 1
- });
- return Object.entries(breakdown)
- .sort((a,b) => a[1] - b[1]);
-}
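// Hedged usage example: BreakDown tallies items by key and returns
// [key, count] pairs sorted by ascending count.
// BreakDown(["a", "b", "a", "c", "a"]) -> [["b", 1], ["c", 1], ["a", 3]]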
-
-
-// ===========================================================================
-export class ArgumentsProcessor extends BaseArgumentsProcessor {
- getArgsDispatch() {
- return {
- '--range': ['range', 'auto,auto',
- 'Specify the range limit as [start],[end]'
- ],
- '--source-map': ['sourceMap', null,
- 'Specify the source map that should be used for output'
- ]
- };
- }
-
- getDefaultResults() {
- return {
- logFileName: 'v8.log',
- range: 'auto,auto',
- };
- }
-}
diff --git a/deps/v8/tools/mb/mb_unittest.py b/deps/v8/tools/mb/mb_unittest.py
index 3a0b89b29d..765cacbc58 100755
--- a/deps/v8/tools/mb/mb_unittest.py
+++ b/deps/v8/tools/mb/mb_unittest.py
@@ -337,9 +337,11 @@ class UnitTest(unittest.TestCase):
self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_bot',
'//out/Debug'],
mbw=mbw, ret=0)
- self.assertEqual(
- mbw.files['/fake_src/out/Debug/args.gn'],
- 'import("//build/args/bots/fake_master/fake_args_bot.gn")\n')
+ # TODO(almuthanna): disable test temporarily to
+ # solve this issue https://crbug.com/v8/11102
+ # self.assertEqual(
+ # mbw.files['/fake_src/out/Debug/args.gn'],
+ # 'import("//build/args/bots/fake_master/fake_args_bot.gn")\n')
def test_gen_args_file_mixins(self):
mbw = self.fake_mbw()
diff --git a/deps/v8/tools/parse-processor-driver.mjs b/deps/v8/tools/parse-processor-driver.mjs
index 9c72d744ad..bec5b782ea 100644
--- a/deps/v8/tools/parse-processor-driver.mjs
+++ b/deps/v8/tools/parse-processor-driver.mjs
@@ -8,7 +8,7 @@ import {
} from "./parse-processor.mjs";
function processArguments(args) {
- var processor = new ArgumentsProcessor(args);
+ const processor = new ArgumentsProcessor(args);
if (processor.parse()) {
return processor.result();
} else {
@@ -22,17 +22,17 @@ function initSourceMapSupport() {
// Overwrite the load function to load scripts synchronously.
SourceMap.load = function(sourceMapURL) {
- var content = readFile(sourceMapURL);
- var sourceMapObject = (JSON.parse(content));
+ const content = readFile(sourceMapURL);
+ const sourceMapObject = (JSON.parse(content));
return new SourceMap(sourceMapURL, sourceMapObject);
};
}
-var params = processArguments(arguments);
-var sourceMap = null;
+const params = processArguments(arguments);
+let sourceMap = null;
if (params.sourceMap) {
initSourceMapSupport();
sourceMap = SourceMap.load(params.sourceMap);
}
-var parseProcessor = new ParseProcessor();
+const parseProcessor = new ParseProcessor();
parseProcessor.processLogFile(params.logFileName);
diff --git a/deps/v8/tools/parse-processor.mjs b/deps/v8/tools/parse-processor.mjs
index ed010d55a4..f78c4c0261 100644
--- a/deps/v8/tools/parse-processor.mjs
+++ b/deps/v8/tools/parse-processor.mjs
@@ -1,8 +1,6 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-"use strict";
-
import { LogReader, parseString } from "./logreader.mjs";
import { BaseArgumentsProcessor } from "./arguments.mjs";
@@ -339,7 +337,7 @@ class Script extends CompilationUnit {
calculateMetrics(printSummary) {
let log = (str) => this.summary += str + '\n';
- log("SCRIPT: " + this.id);
+ log(`SCRIPT: ${this.id}`);
let all = this.funktions;
if (all.length === 0) return;
@@ -354,7 +352,7 @@ class Script extends CompilationUnit {
let value = (funktions.length + "").padStart(6) +
(nofPercent + "%").padStart(5) +
BYTES(ownBytes, this.bytesTotal).padStart(10);
- log((" - " + name).padEnd(20) + value);
+ log((` - ${name}`).padEnd(20) + value);
this.metrics.set(name + "-bytes", ownBytes);
this.metrics.set(name + "-count", funktions.length);
this.metrics.set(name + "-count-percent", nofPercent);
@@ -362,7 +360,7 @@ class Script extends CompilationUnit {
Math.round(ownBytes / this.bytesTotal * 100));
};
- log(" - file: " + this.file);
+ log(` - file: ${this.file}`);
log(' - details: ' +
'isEval=' + this.isEval + ' deserialized=' + this.isDeserialized +
' streamed=' + this.isStreamingCompiled);
@@ -409,7 +407,7 @@ class Script extends CompilationUnit {
// [start+delta*2, acc(metric0, start, start+delta*2), ...],
// ...
// ]
- if (end <= start) throw 'Invalid ranges [' + start + ',' + end + ']';
+ if (end <= start) throw `Invalid ranges [${start},${end}]`;
const timespan = end - start;
const kSteps = Math.ceil(timespan / delta);
// To reduce the time spent iterating over the funktions of this script
@@ -607,8 +605,8 @@ class ExecutionCost {
}
toString() {
- return (' - ' + this.prefix + '-time:').padEnd(24) +
- (" executed=" + formatNumber(this.executedCost) + 'ms').padEnd(20) +
+ return (` - ${this.prefix}-time:`).padEnd(24) +
+ (` executed=${formatNumber(this.executedCost)}ms`).padEnd(20) +
" non-executed=" + formatNumber(this.nonExecutedCost) + 'ms';
}
@@ -623,11 +621,11 @@ class ExecutionCost {
class Funktion extends CompilationUnit {
constructor(name, start, end, script) {
super();
- if (start < 0) throw "invalid start position: " + start;
+ if (start < 0) throw `invalid start position: ${start}`;
if (script.isEval) {
if (end < start) throw 'invalid start end positions';
} else {
- if (end <= 0) throw 'invalid end position: ' + end;
+ if (end <= 0) throw `invalid end position: ${end}`;
if (end <= start) throw 'invalid start end positions';
}
@@ -722,7 +720,7 @@ class Funktion extends CompilationUnit {
}
toString(details = true) {
- let result = 'function' + (this.name ? ' ' + this.name : '') +
+ let result = `function${this.name ? ` ${this.name}` : ''}` +
`() range=${this.start}-${this.end}`;
if (details) result += ` script=${this.script ? this.script.id : 'X'}`;
return result;
@@ -841,7 +839,7 @@ export class ParseProcessor extends LogReader {
processLogFile(fileName) {
this.collectEntries = true
this.lastLogFileName_ = fileName;
- var line;
+ let line;
while (line = readline()) {
this.processLogLine(line);
}
@@ -886,7 +884,7 @@ export class ParseProcessor extends LogReader {
functionName) {
let handlerFn = this.functionEventDispatchTable_[eventName];
if (handlerFn === undefined) {
- console.error('Couldn\'t find handler for function event:' + eventName);
+      console.error(`Couldn't find handler for function event: ${eventName}`);
}
handlerFn(
scriptId, startPosition, endPosition, duration, timestamp,
@@ -965,7 +963,7 @@ export class ParseProcessor extends LogReader {
script.preparseTimestamp = toTimestamp(timestamp);
return;
default:
- console.error('Unhandled script event: ' + eventName);
+ console.error(`Unhandled script event: ${eventName}`);
}
}
diff --git a/deps/v8/tools/profile.mjs b/deps/v8/tools/profile.mjs
index 50864dce0f..b2e953f247 100644
--- a/deps/v8/tools/profile.mjs
+++ b/deps/v8/tools/profile.mjs
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import { CodeMap } from "./codemap.mjs";
+import { CodeMap, CodeEntry } from "./codemap.mjs";
import { ConsArray } from "./consarray.mjs";
// TODO: move to separate modules
@@ -42,7 +42,6 @@ export class SourcePosition {
}
export class Script {
-
constructor(id, name, source) {
this.id = id;
this.name = name;
@@ -56,13 +55,13 @@ export class Script {
let sourcePosition = this.lineToColumn.get(line)?.get(column);
if (sourcePosition === undefined) {
sourcePosition = new SourcePosition(this, line, column, )
- this.#addSourcePosition(line, column, sourcePosition);
+ this._addSourcePosition(line, column, sourcePosition);
}
sourcePosition.addEntry(entry);
return sourcePosition;
}
- #addSourcePosition(line, column, sourcePosition) {
+ _addSourcePosition(line, column, sourcePosition) {
let columnToSourcePosition;
if (this.lineToColumn.has(line)) {
columnToSourcePosition = this.lineToColumn.get(line);
@@ -81,455 +80,424 @@ export class Script {
*
* @constructor
*/
-export function Profile() {
- this.codeMap_ = new CodeMap();
- this.topDownTree_ = new CallTree();
- this.bottomUpTree_ = new CallTree();
- this.c_entries_ = {};
- this.ticks_ = [];
- this.scripts_ = [];
- this.urlToScript_ = new Map();
-};
-
-
-/**
- * Returns whether a function with the specified name must be skipped.
- * Should be overriden by subclasses.
- *
- * @param {string} name Function name.
- */
-Profile.prototype.skipThisFunction = function (name) {
- return false;
-};
-
-
-/**
- * Enum for profiler operations that involve looking up existing
- * code entries.
- *
- * @enum {number}
- */
-Profile.Operation = {
- MOVE: 0,
- DELETE: 1,
- TICK: 2
-};
-
-
-/**
- * Enum for code state regarding its dynamic optimization.
- *
- * @enum {number}
- */
-Profile.CodeState = {
- COMPILED: 0,
- OPTIMIZABLE: 1,
- OPTIMIZED: 2
-};
-
-
-/**
- * Called whenever the specified operation has failed finding a function
- * containing the specified address. Should be overriden by subclasses.
- * See the Profile.Operation enum for the list of
- * possible operations.
- *
- * @param {number} operation Operation.
- * @param {number} addr Address of the unknown code.
- * @param {number} opt_stackPos If an unknown address is encountered
- * during stack strace processing, specifies a position of the frame
- * containing the address.
- */
-Profile.prototype.handleUnknownCode = function (
- operation, addr, opt_stackPos) {
-};
+export class Profile {
+ codeMap_ = new CodeMap();
+ topDownTree_ = new CallTree();
+ bottomUpTree_ = new CallTree();
+ c_entries_ = {};
+ ticks_ = [];
+ scripts_ = [];
+ urlToScript_ = new Map();
+
+ /**
+ * Returns whether a function with the specified name must be skipped.
+   * Should be overridden by subclasses.
+ *
+ * @param {string} name Function name.
+ */
+ skipThisFunction(name) {
+ return false;
+ }
+ /**
+ * Enum for profiler operations that involve looking up existing
+ * code entries.
+ *
+ * @enum {number}
+ */
+ static Operation = {
+ MOVE: 0,
+ DELETE: 1,
+ TICK: 2
+ }
-/**
- * Registers a library.
- *
- * @param {string} name Code entry name.
- * @param {number} startAddr Starting address.
- * @param {number} endAddr Ending address.
- */
-Profile.prototype.addLibrary = function (
- name, startAddr, endAddr) {
- var entry = new CodeMap.CodeEntry(
- endAddr - startAddr, name, 'SHARED_LIB');
- this.codeMap_.addLibrary(startAddr, entry);
- return entry;
-};
+ /**
+ * Enum for code state regarding its dynamic optimization.
+ *
+ * @enum {number}
+ */
+ static CodeState = {
+ COMPILED: 0,
+ OPTIMIZABLE: 1,
+ OPTIMIZED: 2
+ }
+ /**
+ * Called whenever the specified operation has failed finding a function
+   * containing the specified address. Should be overridden by subclasses.
+ * See the Profile.Operation enum for the list of
+ * possible operations.
+ *
+ * @param {number} operation Operation.
+ * @param {number} addr Address of the unknown code.
+ * @param {number} opt_stackPos If an unknown address is encountered
+   * during stack trace processing, specifies a position of the frame
+ * containing the address.
+ */
+ handleUnknownCode(operation, addr, opt_stackPos) {}
+
+ /**
+ * Registers a library.
+ *
+ * @param {string} name Code entry name.
+ * @param {number} startAddr Starting address.
+ * @param {number} endAddr Ending address.
+ */
+ addLibrary(name, startAddr, endAddr) {
+ const entry = new CodeEntry(endAddr - startAddr, name, 'SHARED_LIB');
+ this.codeMap_.addLibrary(startAddr, entry);
+ return entry;
+ }
-/**
- * Registers statically compiled code entry.
- *
- * @param {string} name Code entry name.
- * @param {number} startAddr Starting address.
- * @param {number} endAddr Ending address.
- */
-Profile.prototype.addStaticCode = function (
- name, startAddr, endAddr) {
- var entry = new CodeMap.CodeEntry(
- endAddr - startAddr, name, 'CPP');
- this.codeMap_.addStaticCode(startAddr, entry);
- return entry;
-};
+ /**
+ * Registers statically compiled code entry.
+ *
+ * @param {string} name Code entry name.
+ * @param {number} startAddr Starting address.
+ * @param {number} endAddr Ending address.
+ */
+ addStaticCode(name, startAddr, endAddr) {
+ const entry = new CodeEntry(endAddr - startAddr, name, 'CPP');
+ this.codeMap_.addStaticCode(startAddr, entry);
+ return entry;
+ }
+ /**
+ * Registers dynamic (JIT-compiled) code entry.
+ *
+ * @param {string} type Code entry type.
+ * @param {string} name Code entry name.
+ * @param {number} start Starting address.
+ * @param {number} size Code entry size.
+ */
+ addCode(type, name, timestamp, start, size) {
+ const entry = new DynamicCodeEntry(size, type, name);
+ this.codeMap_.addCode(start, entry);
+ return entry;
+ }
-/**
- * Registers dynamic (JIT-compiled) code entry.
- *
- * @param {string} type Code entry type.
- * @param {string} name Code entry name.
- * @param {number} start Starting address.
- * @param {number} size Code entry size.
- */
-Profile.prototype.addCode = function (
- type, name, timestamp, start, size) {
- var entry = new Profile.DynamicCodeEntry(size, type, name);
- this.codeMap_.addCode(start, entry);
- return entry;
-};
+ /**
+ * Registers dynamic (JIT-compiled) code entry.
+ *
+ * @param {string} type Code entry type.
+ * @param {string} name Code entry name.
+ * @param {number} start Starting address.
+ * @param {number} size Code entry size.
+ * @param {number} funcAddr Shared function object address.
+ * @param {Profile.CodeState} state Optimization state.
+ */
+ addFuncCode(type, name, timestamp, start, size, funcAddr, state) {
+ // As code and functions are in the same address space,
+ // it is safe to put them in a single code map.
+ let func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
+ if (!func) {
+ func = new FunctionEntry(name);
+ this.codeMap_.addCode(funcAddr, func);
+ } else if (func.name !== name) {
+ // Function object has been overwritten with a new one.
+ func.name = name;
+ }
+ let entry = this.codeMap_.findDynamicEntryByStartAddress(start);
+ if (entry) {
+ if (entry.size === size && entry.func === func) {
+ // Entry state has changed.
+ entry.state = state;
+ } else {
+ this.codeMap_.deleteCode(start);
+ entry = null;
+ }
+ }
+ if (!entry) {
+ entry = new DynamicFuncCodeEntry(size, type, func, state);
+ this.codeMap_.addCode(start, entry);
+ }
+ return entry;
+ }
+ /**
+ * Reports about moving of a dynamic code entry.
+ *
+ * @param {number} from Current code entry address.
+ * @param {number} to New code entry address.
+ */
+ moveCode(from, to) {
+ try {
+ this.codeMap_.moveCode(from, to);
+ } catch (e) {
+ this.handleUnknownCode(Profile.Operation.MOVE, from);
+ }
+ }
-/**
- * Registers dynamic (JIT-compiled) code entry.
- *
- * @param {string} type Code entry type.
- * @param {string} name Code entry name.
- * @param {number} start Starting address.
- * @param {number} size Code entry size.
- * @param {number} funcAddr Shared function object address.
- * @param {Profile.CodeState} state Optimization state.
- */
-Profile.prototype.addFuncCode = function (
- type, name, timestamp, start, size, funcAddr, state) {
- // As code and functions are in the same address space,
- // it is safe to put them in a single code map.
- var func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
- if (!func) {
- func = new Profile.FunctionEntry(name);
- this.codeMap_.addCode(funcAddr, func);
- } else if (func.name !== name) {
- // Function object has been overwritten with a new one.
- func.name = name;
+ deoptCode( timestamp, code, inliningId, scriptOffset, bailoutType,
+ sourcePositionText, deoptReasonText) {
}
- var entry = this.codeMap_.findDynamicEntryByStartAddress(start);
- if (entry) {
- if (entry.size === size && entry.func === func) {
- // Entry state has changed.
- entry.state = state;
- } else {
+
+ /**
+ * Reports about deletion of a dynamic code entry.
+ *
+ * @param {number} start Starting address.
+ */
+ deleteCode(start) {
+ try {
this.codeMap_.deleteCode(start);
- entry = null;
+ } catch (e) {
+ this.handleUnknownCode(Profile.Operation.DELETE, start);
}
}
- if (!entry) {
- entry = new Profile.DynamicFuncCodeEntry(size, type, func, state);
- this.codeMap_.addCode(start, entry);
- }
- return entry;
-};
-
-/**
- * Reports about moving of a dynamic code entry.
- *
- * @param {number} from Current code entry address.
- * @param {number} to New code entry address.
- */
-Profile.prototype.moveCode = function (from, to) {
- try {
- this.codeMap_.moveCode(from, to);
- } catch (e) {
- this.handleUnknownCode(Profile.Operation.MOVE, from);
+ /**
+ * Adds source positions for given code.
+ */
+ addSourcePositions(start, script, startPos, endPos, sourcePositions,
+ inliningPositions, inlinedFunctions) {
+ // CLI does not need source code => ignore.
}
-};
-
-Profile.prototype.deoptCode = function (
- timestamp, code, inliningId, scriptOffset, bailoutType,
- sourcePositionText, deoptReasonText) {
-};
-/**
- * Reports about deletion of a dynamic code entry.
- *
- * @param {number} start Starting address.
- */
-Profile.prototype.deleteCode = function (start) {
- try {
- this.codeMap_.deleteCode(start);
- } catch (e) {
- this.handleUnknownCode(Profile.Operation.DELETE, start);
+ /**
+ * Adds script source code.
+ */
+ addScriptSource(id, url, source) {
+ const script = new Script(id, url, source);
+ this.scripts_[id] = script;
+ this.urlToScript_.set(url, script);
}
-};
-
-/**
- * Adds source positions for given code.
- */
-Profile.prototype.addSourcePositions = function (
- start, script, startPos, endPos, sourcePositions, inliningPositions,
- inlinedFunctions) {
- // CLI does not need source code => ignore.
-};
-
-/**
- * Adds script source code.
- */
-Profile.prototype.addScriptSource = function (id, url, source) {
- const script = new Script(id, url, source);
- this.scripts_[id] = script;
- this.urlToScript_.set(url, script);
-};
-
-
-/**
- * Adds script source code.
- */
-Profile.prototype.getScript = function (url) {
- return this.urlToScript_.get(url);
-};
-/**
- * Reports about moving of a dynamic code entry.
- *
- * @param {number} from Current code entry address.
- * @param {number} to New code entry address.
- */
-Profile.prototype.moveFunc = function (from, to) {
- if (this.codeMap_.findDynamicEntryByStartAddress(from)) {
- this.codeMap_.moveCode(from, to);
+ /**
+ * Adds script source code.
+ */
+ getScript(url) {
+ return this.urlToScript_.get(url);
}
-};
-
-
-/**
- * Retrieves a code entry by an address.
- *
- * @param {number} addr Entry address.
- */
-Profile.prototype.findEntry = function (addr) {
- return this.codeMap_.findEntry(addr);
-};
+ /**
+ * Reports about moving of a dynamic code entry.
+ *
+ * @param {number} from Current code entry address.
+ * @param {number} to New code entry address.
+ */
+ moveFunc(from, to) {
+ if (this.codeMap_.findDynamicEntryByStartAddress(from)) {
+ this.codeMap_.moveCode(from, to);
+ }
+ }
-/**
- * Records a tick event. Stack must contain a sequence of
- * addresses starting with the program counter value.
- *
- * @param {Array<number>} stack Stack sample.
- */
-Profile.prototype.recordTick = function (time_ns, vmState, stack) {
- var processedStack = this.resolveAndFilterFuncs_(stack);
- this.bottomUpTree_.addPath(processedStack);
- processedStack.reverse();
- this.topDownTree_.addPath(processedStack);
-};
+ /**
+ * Retrieves a code entry by an address.
+ *
+ * @param {number} addr Entry address.
+ */
+ findEntry(addr) {
+ return this.codeMap_.findEntry(addr);
+ }
+ /**
+ * Records a tick event. Stack must contain a sequence of
+ * addresses starting with the program counter value.
+ *
+ * @param {Array<number>} stack Stack sample.
+ */
+ recordTick(time_ns, vmState, stack) {
+ const processedStack = this.resolveAndFilterFuncs_(stack);
+ this.bottomUpTree_.addPath(processedStack);
+ processedStack.reverse();
+ this.topDownTree_.addPath(processedStack);
+ }
-/**
- * Translates addresses into function names and filters unneeded
- * functions.
- *
- * @param {Array<number>} stack Stack sample.
- */
-Profile.prototype.resolveAndFilterFuncs_ = function (stack) {
- var result = [];
- var last_seen_c_function = '';
- var look_for_first_c_function = false;
- for (var i = 0; i < stack.length; ++i) {
- var entry = this.codeMap_.findEntry(stack[i]);
- if (entry) {
- var name = entry.getName();
- if (i === 0 && (entry.type === 'CPP' || entry.type === 'SHARED_LIB')) {
- look_for_first_c_function = true;
- }
- if (look_for_first_c_function && entry.type === 'CPP') {
- last_seen_c_function = name;
- }
- if (!this.skipThisFunction(name)) {
- result.push(name);
+ /**
+ * Translates addresses into function names and filters unneeded
+ * functions.
+ *
+ * @param {Array<number>} stack Stack sample.
+ */
+ resolveAndFilterFuncs_(stack) {
+ const result = [];
+ let last_seen_c_function = '';
+ let look_for_first_c_function = false;
+ for (let i = 0; i < stack.length; ++i) {
+ const entry = this.codeMap_.findEntry(stack[i]);
+ if (entry) {
+ const name = entry.getName();
+ if (i === 0 && (entry.type === 'CPP' || entry.type === 'SHARED_LIB')) {
+ look_for_first_c_function = true;
+ }
+ if (look_for_first_c_function && entry.type === 'CPP') {
+ last_seen_c_function = name;
+ }
+ if (!this.skipThisFunction(name)) {
+ result.push(name);
+ }
+ } else {
+ this.handleUnknownCode(Profile.Operation.TICK, stack[i], i);
+ if (i === 0) result.push("UNKNOWN");
}
- } else {
- this.handleUnknownCode(Profile.Operation.TICK, stack[i], i);
- if (i === 0) result.push("UNKNOWN");
- }
- if (look_for_first_c_function &&
- i > 0 &&
- (!entry || entry.type !== 'CPP') &&
- last_seen_c_function !== '') {
- if (this.c_entries_[last_seen_c_function] === undefined) {
- this.c_entries_[last_seen_c_function] = 0;
+ if (look_for_first_c_function &&
+ i > 0 &&
+ (!entry || entry.type !== 'CPP') &&
+ last_seen_c_function !== '') {
+ if (this.c_entries_[last_seen_c_function] === undefined) {
+ this.c_entries_[last_seen_c_function] = 0;
+ }
+ this.c_entries_[last_seen_c_function]++;
+ look_for_first_c_function = false; // Found it, we're done.
}
- this.c_entries_[last_seen_c_function]++;
- look_for_first_c_function = false; // Found it, we're done.
}
+ return result;
}
- return result;
-};
-
-
-/**
- * Performs a BF traversal of the top down call graph.
- *
- * @param {function(CallTree.Node)} f Visitor function.
- */
-Profile.prototype.traverseTopDownTree = function (f) {
- this.topDownTree_.traverse(f);
-};
-
-
-/**
- * Performs a BF traversal of the bottom up call graph.
- *
- * @param {function(CallTree.Node)} f Visitor function.
- */
-Profile.prototype.traverseBottomUpTree = function (f) {
- this.bottomUpTree_.traverse(f);
-};
-
-
-/**
- * Calculates a top down profile for a node with the specified label.
- * If no name specified, returns the whole top down calls tree.
- *
- * @param {string} opt_label Node label.
- */
-Profile.prototype.getTopDownProfile = function (opt_label) {
- return this.getTreeProfile_(this.topDownTree_, opt_label);
-};
+ /**
+   * Performs a breadth-first traversal of the top down call graph.
+ *
+ * @param {function(CallTreeNode)} f Visitor function.
+ */
+ traverseTopDownTree(f) {
+ this.topDownTree_.traverse(f);
+ }
-/**
- * Calculates a bottom up profile for a node with the specified label.
- * If no name specified, returns the whole bottom up calls tree.
- *
- * @param {string} opt_label Node label.
- */
-Profile.prototype.getBottomUpProfile = function (opt_label) {
- return this.getTreeProfile_(this.bottomUpTree_, opt_label);
-};
+ /**
+   * Performs a breadth-first traversal of the bottom up call graph.
+ *
+ * @param {function(CallTreeNode)} f Visitor function.
+ */
+ traverseBottomUpTree(f) {
+ this.bottomUpTree_.traverse(f);
+ }
+ /**
+   * Calculates a top-down profile for the node with the specified label.
+   * If no label is specified, returns the whole top-down call tree.
+ *
+ * @param {string} opt_label Node label.
+ */
+ getTopDownProfile(opt_label) {
+ return this.getTreeProfile_(this.topDownTree_, opt_label);
+ }
-/**
- * Helper function for calculating a tree profile.
- *
- * @param {Profile.CallTree} tree Call tree.
- * @param {string} opt_label Node label.
- */
-Profile.prototype.getTreeProfile_ = function (tree, opt_label) {
- if (!opt_label) {
- tree.computeTotalWeights();
- return tree;
- } else {
- var subTree = tree.cloneSubtree(opt_label);
- subTree.computeTotalWeights();
- return subTree;
+ /**
+   * Calculates a bottom-up profile for the node with the specified label.
+   * If no label is specified, returns the whole bottom-up call tree.
+ *
+ * @param {string} opt_label Node label.
+ */
+ getBottomUpProfile(opt_label) {
+ return this.getTreeProfile_(this.bottomUpTree_, opt_label);
}
-};
+ /**
+ * Helper function for calculating a tree profile.
+ *
+   * @param {CallTree} tree Call tree.
+ * @param {string} opt_label Node label.
+ */
+ getTreeProfile_(tree, opt_label) {
+ if (!opt_label) {
+ tree.computeTotalWeights();
+ return tree;
+ } else {
+ const subTree = tree.cloneSubtree(opt_label);
+ subTree.computeTotalWeights();
+ return subTree;
+ }
+ }
-/**
- * Calculates a flat profile of callees starting from a node with
- * the specified label. If no name specified, starts from the root.
- *
- * @param {string} opt_label Starting node label.
- */
-Profile.prototype.getFlatProfile = function (opt_label) {
- var counters = new CallTree();
- var rootLabel = opt_label || CallTree.ROOT_NODE_LABEL;
- var precs = {};
- precs[rootLabel] = 0;
- var root = counters.findOrAddChild(rootLabel);
-
- this.topDownTree_.computeTotalWeights();
- this.topDownTree_.traverseInDepth(
- function onEnter(node) {
- if (!(node.label in precs)) {
- precs[node.label] = 0;
- }
- var nodeLabelIsRootLabel = node.label == rootLabel;
- if (nodeLabelIsRootLabel || precs[rootLabel] > 0) {
- if (precs[rootLabel] == 0) {
- root.selfWeight += node.selfWeight;
- root.totalWeight += node.totalWeight;
- } else {
- var rec = root.findOrAddChild(node.label);
- rec.selfWeight += node.selfWeight;
- if (nodeLabelIsRootLabel || precs[node.label] == 0) {
- rec.totalWeight += node.totalWeight;
+ /**
+ * Calculates a flat profile of callees starting from a node with
+   * the specified label. If no label is specified, starts from the root.
+ *
+ * @param {string} opt_label Starting node label.
+ */
+ getFlatProfile(opt_label) {
+ const counters = new CallTree();
+ const rootLabel = opt_label || CallTree.ROOT_NODE_LABEL;
+ const precs = {};
+ precs[rootLabel] = 0;
+ const root = counters.findOrAddChild(rootLabel);
+
+ this.topDownTree_.computeTotalWeights();
+ this.topDownTree_.traverseInDepth(
+ function onEnter(node) {
+ if (!(node.label in precs)) {
+ precs[node.label] = 0;
+ }
+ const nodeLabelIsRootLabel = node.label == rootLabel;
+ if (nodeLabelIsRootLabel || precs[rootLabel] > 0) {
+ if (precs[rootLabel] == 0) {
+ root.selfWeight += node.selfWeight;
+ root.totalWeight += node.totalWeight;
+ } else {
+ const rec = root.findOrAddChild(node.label);
+ rec.selfWeight += node.selfWeight;
+ if (nodeLabelIsRootLabel || precs[node.label] == 0) {
+ rec.totalWeight += node.totalWeight;
+ }
}
+ precs[node.label]++;
}
- precs[node.label]++;
- }
- },
- function onExit(node) {
- if (node.label == rootLabel || precs[rootLabel] > 0) {
- precs[node.label]--;
- }
- },
- null);
-
- if (!opt_label) {
- // If we have created a flat profile for the whole program, we don't
- // need an explicit root in it. Thus, replace the counters tree
- // root with the node corresponding to the whole program.
- counters.root_ = root;
- } else {
- // Propagate weights so percents can be calculated correctly.
- counters.getRoot().selfWeight = root.selfWeight;
- counters.getRoot().totalWeight = root.totalWeight;
+ },
+ function onExit(node) {
+ if (node.label == rootLabel || precs[rootLabel] > 0) {
+ precs[node.label]--;
+ }
+ },
+ null);
+
+ if (!opt_label) {
+ // If we have created a flat profile for the whole program, we don't
+ // need an explicit root in it. Thus, replace the counters tree
+ // root with the node corresponding to the whole program.
+ counters.root_ = root;
+ } else {
+ // Propagate weights so percents can be calculated correctly.
+ counters.getRoot().selfWeight = root.selfWeight;
+ counters.getRoot().totalWeight = root.totalWeight;
+ }
+ return counters;
}
- return counters;
-};
-
-Profile.CEntryNode = function (name, ticks) {
- this.name = name;
- this.ticks = ticks;
-}
-
-
-Profile.prototype.getCEntryProfile = function () {
- var result = [new Profile.CEntryNode("TOTAL", 0)];
- var total_ticks = 0;
- for (var f in this.c_entries_) {
- var ticks = this.c_entries_[f];
- total_ticks += ticks;
- result.push(new Profile.CEntryNode(f, ticks));
+ getCEntryProfile() {
+ const result = [new CEntryNode("TOTAL", 0)];
+ let total_ticks = 0;
+ for (let f in this.c_entries_) {
+ const ticks = this.c_entries_[f];
+ total_ticks += ticks;
+ result.push(new CEntryNode(f, ticks));
+ }
+ result[0].ticks = total_ticks; // Sorting will keep this at index 0.
+ result.sort((n1, n2) => n2.ticks - n1.ticks || (n2.name < n1.name ? -1 : 1));
+ return result;
}
- result[0].ticks = total_ticks; // Sorting will keep this at index 0.
- result.sort(function (n1, n2) {
- return n2.ticks - n1.ticks || (n2.name < n1.name ? -1 : 1)
- });
- return result;
-}
-/**
- * Cleans up function entries that are not referenced by code entries.
- */
-Profile.prototype.cleanUpFuncEntries = function () {
- var referencedFuncEntries = [];
- var entries = this.codeMap_.getAllDynamicEntriesWithAddresses();
- for (var i = 0, l = entries.length; i < l; ++i) {
- if (entries[i][1].constructor === Profile.FunctionEntry) {
- entries[i][1].used = false;
+ /**
+ * Cleans up function entries that are not referenced by code entries.
+ */
+ cleanUpFuncEntries() {
+ const referencedFuncEntries = [];
+ const entries = this.codeMap_.getAllDynamicEntriesWithAddresses();
+ for (let i = 0, l = entries.length; i < l; ++i) {
+ if (entries[i][1].constructor === FunctionEntry) {
+ entries[i][1].used = false;
+ }
}
- }
- for (var i = 0, l = entries.length; i < l; ++i) {
- if ("func" in entries[i][1]) {
- entries[i][1].func.used = true;
+ for (let i = 0, l = entries.length; i < l; ++i) {
+ if ("func" in entries[i][1]) {
+ entries[i][1].func.used = true;
+ }
}
- }
- for (var i = 0, l = entries.length; i < l; ++i) {
- if (entries[i][1].constructor === Profile.FunctionEntry &&
- !entries[i][1].used) {
- this.codeMap_.deleteCode(entries[i][0]);
+ for (let i = 0, l = entries.length; i < l; ++i) {
+ if (entries[i][1].constructor === FunctionEntry &&
+ !entries[i][1].used) {
+ this.codeMap_.deleteCode(entries[i][0]);
+ }
}
}
-};
+}
+
+class CEntryNode {
+ constructor(name, ticks) {
+ this.name = name;
+ this.ticks = ticks;
+ }
+}
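
A quick sanity check of the C-entry accounting above, as a minimal sketch: it assumes c_entries_ was populated by resolveAndFilterFuncs_ while ticks were processed (the names and counts here are hypothetical).

  const profile = new Profile();
  profile.c_entries_['v8::internal::Runtime_StackGuard'] = 3;
  profile.c_entries_['write'] = 1;
  const nodes = profile.getCEntryProfile();
  // nodes[0] is the synthetic 'TOTAL' CEntryNode with ticks === 4; the
  // remaining nodes follow in descending tick order.
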
/**
@@ -540,35 +508,30 @@ Profile.prototype.cleanUpFuncEntries = function () {
* @param {string} name Function name.
* @constructor
*/
-Profile.DynamicCodeEntry = function (size, type, name) {
- CodeMap.CodeEntry.call(this, size, name, type);
-};
-
-
-/**
- * Returns node name.
- */
-Profile.DynamicCodeEntry.prototype.getName = function () {
- return this.type + ': ' + this.name;
-};
-
-
-/**
- * Returns raw node name (without type decoration).
- */
-Profile.DynamicCodeEntry.prototype.getRawName = function () {
- return this.name;
-};
-
+class DynamicCodeEntry extends CodeEntry {
+ constructor(size, type, name) {
+ super(size, name, type);
+ }
+
+ getName() {
+ return this.type + ': ' + this.name;
+ }
-Profile.DynamicCodeEntry.prototype.isJSFunction = function () {
- return false;
-};
+ /**
+ * Returns raw node name (without type decoration).
+ */
+ getRawName() {
+ return this.name;
+ }
+ isJSFunction() {
+ return false;
+ }
-Profile.DynamicCodeEntry.prototype.toString = function () {
- return this.getName() + ': ' + this.size.toString(16);
-};
+ toString() {
+ return this.getName() + ': ' + this.size.toString(16);
+ }
+}
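
For example (size, type, and name are hypothetical):

  const entry = new DynamicCodeEntry(0x40, 'RegExp', 'a|b');
  entry.getName();     // 'RegExp: a|b'
  entry.getRawName();  // 'a|b'
  entry.toString();    // 'RegExp: a|b: 40' (the size is rendered in hex)
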
/**
@@ -576,51 +539,42 @@ Profile.DynamicCodeEntry.prototype.toString = function () {
*
* @param {number} size Code size.
* @param {string} type Code type.
- * @param {Profile.FunctionEntry} func Shared function entry.
+ * @param {FunctionEntry} func Shared function entry.
* @param {Profile.CodeState} state Code optimization state.
* @constructor
*/
-Profile.DynamicFuncCodeEntry = function (size, type, func, state) {
- CodeMap.CodeEntry.call(this, size, '', type);
- this.func = func;
- this.state = state;
-};
-
-Profile.DynamicFuncCodeEntry.STATE_PREFIX = ["", "~", "*"];
-
-/**
- * Returns state.
- */
-Profile.DynamicFuncCodeEntry.prototype.getState = function () {
- return Profile.DynamicFuncCodeEntry.STATE_PREFIX[this.state];
-};
-
-/**
- * Returns node name.
- */
-Profile.DynamicFuncCodeEntry.prototype.getName = function () {
- var name = this.func.getName();
- return this.type + ': ' + this.getState() + name;
-};
-
-
-/**
- * Returns raw node name (without type decoration).
- */
-Profile.DynamicFuncCodeEntry.prototype.getRawName = function () {
- return this.func.getName();
-};
+class DynamicFuncCodeEntry extends CodeEntry {
+ constructor(size, type, func, state) {
+ super(size, '', type);
+ this.func = func;
+ this.state = state;
+ }
+ static STATE_PREFIX = ["", "~", "*"];
+ getState() {
+ return DynamicFuncCodeEntry.STATE_PREFIX[this.state];
+ }
-Profile.DynamicFuncCodeEntry.prototype.isJSFunction = function () {
- return true;
-};
+ getName() {
+ const name = this.func.getName();
+ return this.type + ': ' + this.getState() + name;
+ }
+ /**
+ * Returns raw node name (without type decoration).
+ */
+ getRawName() {
+ return this.func.getName();
+ }
-Profile.DynamicFuncCodeEntry.prototype.toString = function () {
- return this.getName() + ': ' + this.size.toString(16);
-};
+ isJSFunction() {
+ return true;
+ }
+ toString() {
+ return this.getName() + ': ' + this.size.toString(16);
+ }
+}
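
A sketch of how the state prefix surfaces in names; the FunctionEntry class is defined just below, and the values here are hypothetical. In V8's tick-processor convention '*' marks optimized code and '~' unoptimized code.

  const func = new FunctionEntry('foo');
  const code = new DynamicFuncCodeEntry(0x80, 'LazyCompile', func, 2);
  code.getState();  // '*' (state 1 would yield '~')
  code.getName();   // 'LazyCompile: *foo'
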
/**
* Creates a shared function object entry.
@@ -628,304 +582,279 @@ Profile.DynamicFuncCodeEntry.prototype.toString = function () {
* @param {string} name Function name.
* @constructor
*/
-Profile.FunctionEntry = function (name) {
- CodeMap.CodeEntry.call(this, 0, name);
-};
-
-
-/**
- * Returns node name.
- */
-Profile.FunctionEntry.prototype.getName = function () {
- var name = this.name;
- if (name.length == 0) {
- name = '<anonymous>';
- } else if (name.charAt(0) == ' ') {
- // An anonymous function with location: " aaa.js:10".
- name = '<anonymous>' + name;
- }
- return name;
-};
+class FunctionEntry extends CodeEntry {
+ constructor(name) {
+ super(0, name);
+ }
-Profile.FunctionEntry.prototype.toString = CodeMap.CodeEntry.prototype.toString;
+ /**
+ * Returns node name.
+ */
+ getName() {
+ let name = this.name;
+ if (name.length == 0) {
+ name = '<anonymous>';
+ } else if (name.charAt(0) == ' ') {
+ // An anonymous function with location: " aaa.js:10".
+ name = `<anonymous>${name}`;
+ }
+ return name;
+ }
+}
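
The three naming cases, for example:

  new FunctionEntry('foo').getName();         // 'foo'
  new FunctionEntry('').getName();            // '<anonymous>'
  new FunctionEntry(' aaa.js:10').getName();  // '<anonymous> aaa.js:10'
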
/**
* Constructs a call graph.
*
* @constructor
*/
-function CallTree() {
- this.root_ = new CallTree.Node(
- CallTree.ROOT_NODE_LABEL);
-};
-
-
-/**
- * The label of the root node.
- */
-CallTree.ROOT_NODE_LABEL = '';
-
-
-/**
- * @private
- */
-CallTree.prototype.totalsComputed_ = false;
-
-
-/**
- * Returns the tree root.
- */
-CallTree.prototype.getRoot = function () {
- return this.root_;
-};
-
-
-/**
- * Adds the specified call path, constructing nodes as necessary.
- *
- * @param {Array<string>} path Call path.
- */
-CallTree.prototype.addPath = function (path) {
- if (path.length == 0) {
- return;
+class CallTree {
+ root_ = new CallTreeNode(CallTree.ROOT_NODE_LABEL);
+ totalsComputed_ = false;
+
+ /**
+ * The label of the root node.
+ */
+ static ROOT_NODE_LABEL = '';
+
+ /**
+ * Returns the tree root.
+ */
+ getRoot() {
+ return this.root_;
}
- var curr = this.root_;
- for (var i = 0; i < path.length; ++i) {
- curr = curr.findOrAddChild(path[i]);
- }
- curr.selfWeight++;
- this.totalsComputed_ = false;
-};
-
-/**
- * Finds an immediate child of the specified parent with the specified
- * label, creates a child node if necessary. If a parent node isn't
- * specified, uses tree root.
- *
- * @param {string} label Child node label.
- */
-CallTree.prototype.findOrAddChild = function (label) {
- return this.root_.findOrAddChild(label);
-};
-
-
-/**
- * Creates a subtree by cloning and merging all subtrees rooted at nodes
- * with a given label. E.g. cloning the following call tree on label 'A'
- * will give the following result:
- *
- * <A>--<B> <B>
- * / /
- * <root> == clone on 'A' ==> <root>--<A>
- * \ \
- * <C>--<A>--<D> <D>
- *
- * And <A>'s selfWeight will be the sum of selfWeights of <A>'s from the
- * source call tree.
- *
- * @param {string} label The label of the new root node.
- */
-CallTree.prototype.cloneSubtree = function (label) {
- var subTree = new CallTree();
- this.traverse(function (node, parent) {
- if (!parent && node.label != label) {
- return null;
+ /**
+ * Adds the specified call path, constructing nodes as necessary.
+ *
+ * @param {Array<string>} path Call path.
+ */
+ addPath(path) {
+ if (path.length == 0) {
+ return;
}
- var child = (parent ? parent : subTree).findOrAddChild(node.label);
- child.selfWeight += node.selfWeight;
- return child;
- });
- return subTree;
-};
-
-
-/**
- * Computes total weights in the call graph.
- */
-CallTree.prototype.computeTotalWeights = function () {
- if (this.totalsComputed_) {
- return;
+ let curr = this.root_;
+ for (let i = 0; i < path.length; ++i) {
+ curr = curr.findOrAddChild(path[i]);
+ }
+ curr.selfWeight++;
+ this.totalsComputed_ = false;
}
- this.root_.computeTotalWeight();
- this.totalsComputed_ = true;
-};
+ /**
+   * Finds an immediate child of the tree root with the specified label,
+   * creating a child node if necessary.
+ *
+ * @param {string} label Child node label.
+ */
+ findOrAddChild(label) {
+ return this.root_.findOrAddChild(label);
+ }
-/**
- * Traverses the call graph in preorder. This function can be used for
- * building optionally modified tree clones. This is the boilerplate code
- * for this scenario:
- *
- * callTree.traverse(function(node, parentClone) {
- * var nodeClone = cloneNode(node);
- * if (parentClone)
- * parentClone.addChild(nodeClone);
- * return nodeClone;
- * });
- *
- * @param {function(CallTree.Node, *)} f Visitor function.
- * The second parameter is the result of calling 'f' on the parent node.
- */
-CallTree.prototype.traverse = function (f) {
- var pairsToProcess = new ConsArray();
- pairsToProcess.concat([{ node: this.root_, param: null }]);
- while (!pairsToProcess.atEnd()) {
- var pair = pairsToProcess.next();
- var node = pair.node;
- var newParam = f(node, pair.param);
- var morePairsToProcess = [];
- node.forEachChild(function (child) {
- morePairsToProcess.push({ node: child, param: newParam });
+ /**
+ * Creates a subtree by cloning and merging all subtrees rooted at nodes
+ * with a given label. E.g. cloning the following call tree on label 'A'
+ * will give the following result:
+ *
+ * <A>--<B> <B>
+ * / /
+ * <root> == clone on 'A' ==> <root>--<A>
+ * \ \
+ * <C>--<A>--<D> <D>
+ *
+ * And <A>'s selfWeight will be the sum of selfWeights of <A>'s from the
+ * source call tree.
+ *
+ * @param {string} label The label of the new root node.
+ */
+ cloneSubtree(label) {
+ const subTree = new CallTree();
+ this.traverse((node, parent) => {
+ if (!parent && node.label != label) {
+ return null;
+ }
+ const child = (parent ? parent : subTree).findOrAddChild(node.label);
+ child.selfWeight += node.selfWeight;
+ return child;
});
- pairsToProcess.concat(morePairsToProcess);
+ return subTree;
}
-};
+ /**
+ * Computes total weights in the call graph.
+ */
+ computeTotalWeights() {
+ if (this.totalsComputed_) return;
+ this.root_.computeTotalWeight();
+ this.totalsComputed_ = true;
+ }
-/**
- * Performs an indepth call graph traversal.
- *
- * @param {function(CallTree.Node)} enter A function called
- * prior to visiting node's children.
- * @param {function(CallTree.Node)} exit A function called
- * after visiting node's children.
- */
-CallTree.prototype.traverseInDepth = function (enter, exit) {
- function traverse(node) {
- enter(node);
- node.forEachChild(traverse);
- exit(node);
+ /**
+ * Traverses the call graph in preorder. This function can be used for
+ * building optionally modified tree clones. This is the boilerplate code
+ * for this scenario:
+ *
+ * callTree.traverse(function(node, parentClone) {
+   *   const nodeClone = cloneNode(node);
+ * if (parentClone)
+ * parentClone.addChild(nodeClone);
+ * return nodeClone;
+ * });
+ *
+ * @param {function(CallTreeNode, *)} f Visitor function.
+ * The second parameter is the result of calling 'f' on the parent node.
+ */
+ traverse(f) {
+ const pairsToProcess = new ConsArray();
+ pairsToProcess.concat([{ node: this.root_, param: null }]);
+ while (!pairsToProcess.atEnd()) {
+ const pair = pairsToProcess.next();
+ const node = pair.node;
+ const newParam = f(node, pair.param);
+ const morePairsToProcess = [];
+ node.forEachChild((child) => {
+ morePairsToProcess.push({ node: child, param: newParam });
+ });
+ pairsToProcess.concat(morePairsToProcess);
+ }
}
- traverse(this.root_);
-};
+
+ /**
+   * Performs an in-depth (depth-first) call graph traversal.
+ *
+ * @param {function(CallTreeNode)} enter A function called
+ * prior to visiting node's children.
+ * @param {function(CallTreeNode)} exit A function called
+ * after visiting node's children.
+ */
+ traverseInDepth(enter, exit) {
+ function traverse(node) {
+ enter(node);
+ node.forEachChild(traverse);
+ exit(node);
+ }
+ traverse(this.root_);
+ }
+}
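
A minimal usage sketch for CallTree (the call-path labels are hypothetical):

  const tree = new CallTree();
  tree.addPath(['main', 'parse', 'lex']);
  tree.addPath(['main', 'compile']);
  tree.computeTotalWeights();
  tree.getRoot().totalWeight;                   // 2: one per recorded path
  tree.getRoot().findChild('main').selfWeight;  // 0: 'main' is never a leaf
  const sub = tree.cloneSubtree('main');        // subtree rooted at 'main'
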
/**
* Constructs a call graph node.
*
* @param {string} label Node label.
- * @param {CallTree.Node} opt_parent Node parent.
- */
-CallTree.Node = function (label, opt_parent) {
- this.label = label;
- this.parent = opt_parent;
- this.children = {};
-};
-
-
-/**
- * Node self weight (how many times this node was the last node in
- * a call path).
- * @type {number}
- */
-CallTree.Node.prototype.selfWeight = 0;
-
-
-/**
- * Node total weight (includes weights of all children).
- * @type {number}
+ * @param {CallTreeNode} opt_parent Node parent.
*/
-CallTree.Node.prototype.totalWeight = 0;
+class CallTreeNode {
+ /**
+ * Node self weight (how many times this node was the last node in
+ * a call path).
+ * @type {number}
+ */
+ selfWeight = 0;
+
+ /**
+ * Node total weight (includes weights of all children).
+ * @type {number}
+ */
+ totalWeight = 0;
+ children = {};
+
+ constructor(label, opt_parent) {
+ this.label = label;
+ this.parent = opt_parent;
+ }
-/**
- * Adds a child node.
- *
- * @param {string} label Child node label.
- */
-CallTree.Node.prototype.addChild = function (label) {
- var child = new CallTree.Node(label, this);
- this.children[label] = child;
- return child;
-};
-
+ /**
+ * Adds a child node.
+ *
+ * @param {string} label Child node label.
+ */
+ addChild(label) {
+ const child = new CallTreeNode(label, this);
+ this.children[label] = child;
+ return child;
+ }
-/**
- * Computes node's total weight.
- */
-CallTree.Node.prototype.computeTotalWeight =
- function () {
- var totalWeight = this.selfWeight;
+ /**
+ * Computes node's total weight.
+ */
+ computeTotalWeight() {
+ let totalWeight = this.selfWeight;
this.forEachChild(function (child) {
totalWeight += child.computeTotalWeight();
});
return this.totalWeight = totalWeight;
- };
-
-
-/**
- * Returns all node's children as an array.
- */
-CallTree.Node.prototype.exportChildren = function () {
- var result = [];
- this.forEachChild(function (node) { result.push(node); });
- return result;
-};
-
-
-/**
- * Finds an immediate child with the specified label.
- *
- * @param {string} label Child node label.
- */
-CallTree.Node.prototype.findChild = function (label) {
- return this.children[label] || null;
-};
-
-
-/**
- * Finds an immediate child with the specified label, creates a child
- * node if necessary.
- *
- * @param {string} label Child node label.
- */
-CallTree.Node.prototype.findOrAddChild = function (label) {
- return this.findChild(label) || this.addChild(label);
-};
+ }
+ /**
+ * Returns all node's children as an array.
+ */
+ exportChildren() {
+ const result = [];
+ this.forEachChild(function (node) { result.push(node); });
+ return result;
+ }
-/**
- * Calls the specified function for every child.
- *
- * @param {function(CallTree.Node)} f Visitor function.
- */
-CallTree.Node.prototype.forEachChild = function (f) {
- for (var c in this.children) {
- f(this.children[c]);
+ /**
+ * Finds an immediate child with the specified label.
+ *
+ * @param {string} label Child node label.
+ */
+ findChild(label) {
+ return this.children[label] || null;
}
-};
+ /**
+ * Finds an immediate child with the specified label, creates a child
+ * node if necessary.
+ *
+ * @param {string} label Child node label.
+ */
+ findOrAddChild(label) {
+ return this.findChild(label) || this.addChild(label);
+ }
-/**
- * Walks up from the current node up to the call tree root.
- *
- * @param {function(CallTree.Node)} f Visitor function.
- */
-CallTree.Node.prototype.walkUpToRoot = function (f) {
- for (var curr = this; curr != null; curr = curr.parent) {
- f(curr);
+ /**
+ * Calls the specified function for every child.
+ *
+ * @param {function(CallTreeNode)} f Visitor function.
+ */
+ forEachChild(f) {
+ for (let c in this.children) {
+ f(this.children[c]);
+ }
}
-};
+ /**
+   * Walks from the current node up to the call tree root.
+ *
+ * @param {function(CallTreeNode)} f Visitor function.
+ */
+ walkUpToRoot(f) {
+ for (let curr = this; curr != null; curr = curr.parent) {
+ f(curr);
+ }
+ }
-/**
- * Tries to find a node with the specified path.
- *
- * @param {Array<string>} labels The path.
- * @param {function(CallTree.Node)} opt_f Visitor function.
- */
-CallTree.Node.prototype.descendToChild = function (
- labels, opt_f) {
- for (var pos = 0, curr = this; pos < labels.length && curr != null; pos++) {
- var child = curr.findChild(labels[pos]);
- if (opt_f) {
- opt_f(child, pos);
+ /**
+ * Tries to find a node with the specified path.
+ *
+ * @param {Array<string>} labels The path.
+ * @param {function(CallTreeNode)} opt_f Visitor function.
+ */
+ descendToChild(labels, opt_f) {
+ let curr = this;
+ for (let pos = 0; pos < labels.length && curr != null; pos++) {
+ const child = curr.findChild(labels[pos]);
+ if (opt_f) {
+ opt_f(child, pos);
+ }
+ curr = child;
}
- curr = child;
+ return curr;
}
- return curr;
-};
+}
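
Continuing the CallTree sketch above, navigating the node API (labels are hypothetical):

  const leaf = tree.getRoot().descendToChild(['main', 'parse', 'lex']);
  const labels = [];
  leaf.walkUpToRoot(node => labels.push(node.label));
  // labels is ['lex', 'parse', 'main', ''] (the root's label is '').
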
export function JsonProfile() {
this.codeMap_ = new CodeMap();
@@ -937,7 +866,7 @@ export function JsonProfile() {
JsonProfile.prototype.addLibrary = function (
name, startAddr, endAddr) {
- var entry = new CodeMap.CodeEntry(
+ const entry = new CodeEntry(
endAddr - startAddr, name, 'SHARED_LIB');
this.codeMap_.addLibrary(startAddr, entry);
@@ -948,7 +877,7 @@ JsonProfile.prototype.addLibrary = function (
JsonProfile.prototype.addStaticCode = function (
name, startAddr, endAddr) {
- var entry = new CodeMap.CodeEntry(
+ const entry = new CodeEntry(
endAddr - startAddr, name, 'CPP');
this.codeMap_.addStaticCode(startAddr, entry);
@@ -967,7 +896,7 @@ JsonProfile.prototype.addCode = function (
codeId = staticEntry.entry.codeId;
}
- var entry = new CodeMap.CodeEntry(size, name, 'CODE');
+ const entry = new CodeEntry(size, name, 'CODE');
this.codeMap_.addCode(start, entry);
entry.codeId = codeId;
@@ -975,7 +904,7 @@ JsonProfile.prototype.addCode = function (
name: entry.name,
timestamp: timestamp,
type: entry.type,
- kind: kind
+ kind: kind,
};
return entry;
@@ -985,22 +914,22 @@ JsonProfile.prototype.addFuncCode = function (
kind, name, timestamp, start, size, funcAddr, state) {
// As code and functions are in the same address space,
// it is safe to put them in a single code map.
- var func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
+ let func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
if (!func) {
- var func = new CodeMap.CodeEntry(0, name, 'SFI');
+ func = new CodeEntry(0, name, 'SFI');
this.codeMap_.addCode(funcAddr, func);
func.funcId = this.functionEntries_.length;
- this.functionEntries_.push({ name: name, codes: [] });
+ this.functionEntries_.push({ name, codes: [] });
} else if (func.name !== name) {
// Function object has been overwritten with a new one.
func.name = name;
func.funcId = this.functionEntries_.length;
- this.functionEntries_.push({ name: name, codes: [] });
+ this.functionEntries_.push({ name, codes: [] });
}
// TODO(jarin): Insert the code object into the SFI's code list.
- var entry = this.codeMap_.findDynamicEntryByStartAddress(start);
+ let entry = this.codeMap_.findDynamicEntryByStartAddress(start);
if (entry) {
if (entry.size === size && entry.func === func) {
// Entry state has changed.
@@ -1011,7 +940,7 @@ JsonProfile.prototype.addFuncCode = function (
}
}
if (!entry) {
- entry = new CodeMap.CodeEntry(size, name, 'JS');
+ entry = new CodeEntry(size, name, 'JS');
this.codeMap_.addCode(start, entry);
entry.codeId = this.codeEntries_.length;
@@ -1031,7 +960,7 @@ JsonProfile.prototype.addFuncCode = function (
type: entry.type,
kind: kind,
func: func.funcId,
- tm: timestamp
+ tm: timestamp,
});
}
return entry;
@@ -1041,25 +970,25 @@ JsonProfile.prototype.moveCode = function (from, to) {
try {
this.codeMap_.moveCode(from, to);
} catch (e) {
- printErr("Move: unknown source " + from);
+ printErr(`Move: unknown source ${from}`);
}
};
JsonProfile.prototype.addSourcePositions = function (
start, script, startPos, endPos, sourcePositions, inliningPositions,
inlinedFunctions) {
- var entry = this.codeMap_.findDynamicEntryByStartAddress(start);
+ const entry = this.codeMap_.findDynamicEntryByStartAddress(start);
if (!entry) return;
- var codeId = entry.codeId;
+ const codeId = entry.codeId;
// Resolve the inlined functions list.
if (inlinedFunctions.length > 0) {
inlinedFunctions = inlinedFunctions.substring(1).split("S");
- for (var i = 0; i < inlinedFunctions.length; i++) {
- var funcAddr = parseInt(inlinedFunctions[i]);
- var func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
+ for (let i = 0; i < inlinedFunctions.length; i++) {
+ const funcAddr = parseInt(inlinedFunctions[i]);
+ const func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
if (!func || func.funcId === undefined) {
- printErr("Could not find function " + inlinedFunctions[i]);
+ printErr(`Could not find function ${inlinedFunctions[i]}`);
inlinedFunctions[i] = null;
} else {
inlinedFunctions[i] = func.funcId;
@@ -1083,7 +1012,6 @@ JsonProfile.prototype.addScriptSource = function (id, url, source) {
this.scripts_[id] = new Script(id, url, source);
};
-
JsonProfile.prototype.deoptCode = function (
timestamp, code, inliningId, scriptOffset, bailoutType,
sourcePositionText, deoptReasonText) {
@@ -1100,7 +1028,7 @@ JsonProfile.prototype.deoptCode = function (
scriptOffset: scriptOffset,
posText: sourcePositionText,
reason: deoptReasonText,
- bailoutType: bailoutType
+ bailoutType: bailoutType,
};
}
}
@@ -1110,7 +1038,7 @@ JsonProfile.prototype.deleteCode = function (start) {
try {
this.codeMap_.deleteCode(start);
} catch (e) {
- printErr("Delete: unknown address " + start);
+ printErr(`Delete: unknown address ${start}`);
}
};
@@ -1127,9 +1055,9 @@ JsonProfile.prototype.findEntry = function (addr) {
JsonProfile.prototype.recordTick = function (time_ns, vmState, stack) {
// TODO(jarin) Resolve the frame-less case (when top of stack is
// known code).
- var processedStack = [];
- for (var i = 0; i < stack.length; i++) {
- var resolved = this.codeMap_.findAddress(stack[i]);
+ const processedStack = [];
+ for (let i = 0; i < stack.length; i++) {
+ const resolved = this.codeMap_.findAddress(stack[i]);
if (resolved) {
processedStack.push(resolved.entry.codeId, resolved.offset);
} else {
@@ -1157,7 +1085,7 @@ JsonProfile.prototype.writeJson = function () {
write(',\n');
write(' "ticks": [\n');
- for (var i = 0; i < this.ticks_.length; i++) {
+ for (let i = 0; i < this.ticks_.length; i++) {
write(' ');
writeJson(this.ticks_[i]);
if (i < this.ticks_.length - 1) {
diff --git a/deps/v8/tools/profile_view.mjs b/deps/v8/tools/profile_view.mjs
index 9349cc6a7a..9ee687dafd 100644
--- a/deps/v8/tools/profile_view.mjs
+++ b/deps/v8/tools/profile_view.mjs
@@ -47,12 +47,12 @@ export function ViewBuilder(samplingRate) {
*/
ViewBuilder.prototype.buildView = function(
callTree, opt_bottomUpViewWeights) {
- var head;
- var samplingRate = this.samplingRate;
- var createViewNode = this.createViewNode;
+ let head;
+ const samplingRate = this.samplingRate;
+ const createViewNode = this.createViewNode;
callTree.traverse(function(node, viewParent) {
- var totalWeight = node.totalWeight * samplingRate;
- var selfWeight = node.selfWeight * samplingRate;
+ const totalWeight = node.totalWeight * samplingRate;
+ let selfWeight = node.selfWeight * samplingRate;
if (opt_bottomUpViewWeights === true) {
if (viewParent === head) {
selfWeight = totalWeight;
@@ -60,7 +60,7 @@ ViewBuilder.prototype.buildView = function(
selfWeight = 0;
}
}
- var viewNode = createViewNode(node.label, totalWeight, selfWeight, head);
+ const viewNode = createViewNode(node.label, totalWeight, selfWeight, head);
if (viewParent) {
viewParent.addChild(viewNode);
} else {
@@ -68,7 +68,7 @@ ViewBuilder.prototype.buildView = function(
}
return viewNode;
});
- var view = this.createView(head);
+ const view = this.createView(head);
return view;
};
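
A sketch of how buildView is typically driven; the sampling rate of 1 and the profile variable are hypothetical, and it assumes ProfileView.Node keeps the totalTime/selfTime it is constructed with.

  const builder = new ViewBuilder(1);
  const view = builder.buildView(profile.getBottomUpProfile(), true);
  view.traverse(node => console.log(node.totalTime, node.selfTime));
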
@@ -79,9 +79,7 @@ ViewBuilder.prototype.buildView = function(
* @param {ProfileView.Node} head View head node.
* @return {ProfileView} Profile view.
*/
-ViewBuilder.prototype.createView = function(head) {
- return new ProfileView(head);
-};
+ViewBuilder.prototype.createView = head => new ProfileView(head);
/**
@@ -96,11 +94,11 @@ ViewBuilder.prototype.createView = function(head) {
* @param {ProfileView.Node} head Profile view head.
* @return {ProfileView.Node} Profile view node.
*/
-ViewBuilder.prototype.createViewNode = function(
- funcName, totalTime, selfTime, head) {
- return new ProfileView.Node(
- funcName, totalTime, selfTime, head);
-};
+ViewBuilder.prototype.createViewNode = (
+  funcName, totalTime, selfTime, head) =>
+  new ProfileView.Node(funcName, totalTime, selfTime, head);
/**
@@ -135,10 +133,10 @@ ProfileView.prototype.sort = function(sortFunc) {
* @param {function(ProfileView.Node)} f Visitor function.
*/
ProfileView.prototype.traverse = function(f) {
- var nodesToTraverse = new ConsArray();
+ const nodesToTraverse = new ConsArray();
nodesToTraverse.concat([this.head]);
while (!nodesToTraverse.atEnd()) {
- var node = nodesToTraverse.next();
+ const node = nodesToTraverse.next();
f(node);
nodesToTraverse.concat(node.children);
}
diff --git a/deps/v8/tools/sourcemap.mjs b/deps/v8/tools/sourcemap.mjs
index 77af4133cf..8ddab13cb7 100644
--- a/deps/v8/tools/sourcemap.mjs
+++ b/deps/v8/tools/sourcemap.mjs
@@ -77,7 +77,7 @@ WebInspector.SourceMap = function(sourceMappingURL, payload)
if (!WebInspector.SourceMap.prototype._base64Map) {
const base64Digits = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
WebInspector.SourceMap.prototype._base64Map = {};
- for (var i = 0; i < base64Digits.length; ++i)
+ for (let i = 0; i < base64Digits.length; ++i)
WebInspector.SourceMap.prototype._base64Map[base64Digits.charAt(i)] = i;
}
@@ -107,7 +107,7 @@ WebInspector.SourceMap.load = function(sourceMapURL, compiledURL, callback)
function contentLoaded(error, statusCode, headers, content)
{
if (error || !content || statusCode >= 400) {
- console.error("Could not load content for " + sourceMapURL + " : " + (error || ("HTTP status code: " + statusCode)));
+ console.error(`Could not load content for ${sourceMapURL} : ${error || (`HTTP status code: ${statusCode}`)}`);
callback(null);
return;
}
@@ -115,8 +115,8 @@ WebInspector.SourceMap.load = function(sourceMapURL, compiledURL, callback)
if (content.slice(0, 3) === ")]}")
content = content.substring(content.indexOf('\n'));
try {
- var payload = /** @type {SourceMapV3} */ (JSON.parse(content));
- var baseURL = sourceMapURL.startsWith("data:") ? compiledURL : sourceMapURL;
+ const payload = /** @type {SourceMapV3} */ (JSON.parse(content));
+ const baseURL = sourceMapURL.startsWith("data:") ? compiledURL : sourceMapURL;
callback(new WebInspector.SourceMap(baseURL, payload));
} catch(e) {
console.error(e.message);
@@ -129,7 +129,7 @@ WebInspector.SourceMap.prototype = {
/**
* @return {Array.<string>}
*/
- sources: function()
+ sources()
{
return Object.keys(this._sources);
},
@@ -138,7 +138,7 @@ WebInspector.SourceMap.prototype = {
* @param {string} sourceURL
* @return {string|undefined}
*/
- sourceContent: function(sourceURL)
+ sourceContent(sourceURL)
{
return this._sourceContentByURL[sourceURL];
},
@@ -148,12 +148,12 @@ WebInspector.SourceMap.prototype = {
* @param {WebInspector.ResourceType} contentType
* @return {WebInspector.ContentProvider}
*/
- sourceContentProvider: function(sourceURL, contentType)
+ sourceContentProvider(sourceURL, contentType)
{
- var lastIndexOfDot = sourceURL.lastIndexOf(".");
- var extension = lastIndexOfDot !== -1 ? sourceURL.substr(lastIndexOfDot + 1) : "";
- var mimeType = WebInspector.ResourceType.mimeTypesForExtensions[extension.toLowerCase()];
- var sourceContent = this.sourceContent(sourceURL);
+ const lastIndexOfDot = sourceURL.lastIndexOf(".");
+ const extension = lastIndexOfDot !== -1 ? sourceURL.substr(lastIndexOfDot + 1) : "";
+ const mimeType = WebInspector.ResourceType.mimeTypesForExtensions[extension.toLowerCase()];
+ const sourceContent = this.sourceContent(sourceURL);
if (sourceContent)
return new WebInspector.StaticContentProvider(contentType, sourceContent, mimeType);
return new WebInspector.CompilerSourceMappingContentProvider(sourceURL, contentType, mimeType);
@@ -162,7 +162,7 @@ WebInspector.SourceMap.prototype = {
/**
* @param {SourceMapV3} mappingPayload
*/
- _parseMappingPayload: function(mappingPayload)
+ _parseMappingPayload(mappingPayload)
{
if (mappingPayload.sections)
this._parseSections(mappingPayload.sections);
@@ -173,10 +173,10 @@ WebInspector.SourceMap.prototype = {
/**
* @param {Array.<SourceMapV3.Section>} sections
*/
- _parseSections: function(sections)
+ _parseSections(sections)
{
- for (var i = 0; i < sections.length; ++i) {
- var section = sections[i];
+ for (let i = 0; i < sections.length; ++i) {
+ const section = sections[i];
this._parseMap(section.map, section.offset.line, section.offset.column);
}
},
@@ -186,14 +186,14 @@ WebInspector.SourceMap.prototype = {
* @param {number} columnNumber in compiled resource
* @return {?Array}
*/
- findEntry: function(lineNumber, columnNumber)
+ findEntry(lineNumber, columnNumber)
{
- var first = 0;
- var count = this._mappings.length;
+ let first = 0;
+ let count = this._mappings.length;
while (count > 1) {
- var step = count >> 1;
- var middle = first + step;
- var mapping = this._mappings[middle];
+ const step = count >> 1;
+ const middle = first + step;
+ const mapping = this._mappings[middle];
if (lineNumber < mapping[0] || (lineNumber === mapping[0] && columnNumber < mapping[1]))
count = step;
else {
@@ -201,7 +201,7 @@ WebInspector.SourceMap.prototype = {
count -= step;
}
}
- var entry = this._mappings[first];
+ const entry = this._mappings[first];
if (!first && entry && (lineNumber < entry[0] || (lineNumber === entry[0] && columnNumber < entry[1])))
return null;
return entry;
@@ -212,11 +212,11 @@ WebInspector.SourceMap.prototype = {
* @param {number} lineNumber in the originating resource
* @return {Array}
*/
- findEntryReversed: function(sourceURL, lineNumber)
+ findEntryReversed(sourceURL, lineNumber)
{
- var mappings = this._reverseMappingsBySourceURL[sourceURL];
+ const mappings = this._reverseMappingsBySourceURL[sourceURL];
for ( ; lineNumber < mappings.length; ++lineNumber) {
- var mapping = mappings[lineNumber];
+ const mapping = mappings[lineNumber];
if (mapping)
return mapping;
}
@@ -226,32 +226,32 @@ WebInspector.SourceMap.prototype = {
/**
* @override
*/
- _parseMap: function(map, lineNumber, columnNumber)
+ _parseMap(map, lineNumber, columnNumber)
{
- var sourceIndex = 0;
- var sourceLineNumber = 0;
- var sourceColumnNumber = 0;
- var nameIndex = 0;
-
- var sources = [];
- var originalToCanonicalURLMap = {};
- for (var i = 0; i < map.sources.length; ++i) {
- var originalSourceURL = map.sources[i];
- var sourceRoot = map.sourceRoot || "";
- if (sourceRoot && !sourceRoot.endsWith("/"))
- sourceRoot += "/";
- var href = sourceRoot + originalSourceURL;
- var url = WebInspector.ParsedURL.completeURL(this._sourceMappingURL, href) || href;
+ let sourceIndex = 0;
+ let sourceLineNumber = 0;
+ let sourceColumnNumber = 0;
+ let nameIndex = 0;
+
+ const sources = [];
+ const originalToCanonicalURLMap = {};
+ for (let i = 0; i < map.sources.length; ++i) {
+ const originalSourceURL = map.sources[i];
+ let sourceRoot = map.sourceRoot || "";
+ if (sourceRoot && !sourceRoot.endsWith("/")) sourceRoot += "/";
+ const href = sourceRoot + originalSourceURL;
+ const url = WebInspector.ParsedURL.completeURL(this._sourceMappingURL, href) || href;
originalToCanonicalURLMap[originalSourceURL] = url;
sources.push(url);
this._sources[url] = true;
- if (map.sourcesContent && map.sourcesContent[i])
+ if (map.sourcesContent && map.sourcesContent[i]) {
this._sourceContentByURL[url] = map.sourcesContent[i];
+ }
}
- var stringCharIterator = new WebInspector.SourceMap.StringCharIterator(map.mappings);
- var sourceURL = sources[sourceIndex];
+ const stringCharIterator = new WebInspector.SourceMap.StringCharIterator(map.mappings);
+ let sourceURL = sources[sourceIndex];
while (true) {
if (stringCharIterator.peek() === ",")
@@ -272,7 +272,7 @@ WebInspector.SourceMap.prototype = {
continue;
}
- var sourceIndexDelta = this._decodeVLQ(stringCharIterator);
+ const sourceIndexDelta = this._decodeVLQ(stringCharIterator);
if (sourceIndexDelta) {
sourceIndex += sourceIndexDelta;
sourceURL = sources[sourceIndex];
@@ -285,17 +285,18 @@ WebInspector.SourceMap.prototype = {
this._mappings.push([lineNumber, columnNumber, sourceURL, sourceLineNumber, sourceColumnNumber]);
}
- for (var i = 0; i < this._mappings.length; ++i) {
- var mapping = this._mappings[i];
- var url = mapping[2];
- if (!url)
- continue;
- if (!this._reverseMappingsBySourceURL[url])
+ for (let i = 0; i < this._mappings.length; ++i) {
+ const mapping = this._mappings[i];
+ const url = mapping[2];
+ if (!url) continue;
+ if (!this._reverseMappingsBySourceURL[url]) {
this._reverseMappingsBySourceURL[url] = [];
- var reverseMappings = this._reverseMappingsBySourceURL[url];
- var sourceLine = mapping[3];
- if (!reverseMappings[sourceLine])
+ }
+ const reverseMappings = this._reverseMappingsBySourceURL[url];
+ const sourceLine = mapping[3];
+ if (!reverseMappings[sourceLine]) {
reverseMappings[sourceLine] = [mapping[0], mapping[1]];
+ }
}
},
@@ -303,7 +304,7 @@ WebInspector.SourceMap.prototype = {
* @param {string} char
* @return {boolean}
*/
- _isSeparator: function(char)
+ _isSeparator(char)
{
return char === "," || char === ";";
},
@@ -312,19 +313,20 @@ WebInspector.SourceMap.prototype = {
* @param {WebInspector.SourceMap.StringCharIterator} stringCharIterator
* @return {number}
*/
- _decodeVLQ: function(stringCharIterator)
+ _decodeVLQ(stringCharIterator)
{
// Read unsigned value.
- var result = 0;
- var shift = 0;
+ let result = 0;
+ let shift = 0;
+ let digit;
do {
- var digit = this._base64Map[stringCharIterator.next()];
+ digit = this._base64Map[stringCharIterator.next()];
result += (digit & this._VLQ_BASE_MASK) << shift;
shift += this._VLQ_BASE_SHIFT;
} while (digit & this._VLQ_CONTINUATION_MASK);
// Fix the sign.
- var negative = result & 1;
+ const negative = result & 1;
// Use unsigned right shift, so that the 32nd bit is properly shifted
// to the 31st, and the 32nd becomes unset.
result >>>= 1;
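
A worked example of the base64 VLQ scheme this decoder implements: the segment "gB" yields +16.

  // 'g' -> 32 -> 0b100000: continuation bit set, low 5 bits contribute 0
  // 'B' ->  1 -> 0b000001: continuation clear, contributes 1 << 5 = 32
  // unsigned value = 32; the least-significant bit is the sign bit
  // (0 = positive), so the decoded value is 32 >>> 1 = 16
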
@@ -359,7 +361,7 @@ WebInspector.SourceMap.StringCharIterator.prototype = {
/**
* @return {string}
*/
- next: function()
+ next()
{
return this._string.charAt(this._position++);
},
@@ -367,7 +369,7 @@ WebInspector.SourceMap.StringCharIterator.prototype = {
/**
* @return {string}
*/
- peek: function()
+ peek()
{
return this._string.charAt(this._position);
},
@@ -375,7 +377,7 @@ WebInspector.SourceMap.StringCharIterator.prototype = {
/**
* @return {boolean}
*/
- hasNext: function()
+ hasNext()
{
return this._position < this._string.length;
}
diff --git a/deps/v8/tools/splaytree.mjs b/deps/v8/tools/splaytree.mjs
index 867274a787..eaba4e4b57 100644
--- a/deps/v8/tools/splaytree.mjs
+++ b/deps/v8/tools/splaytree.mjs
@@ -75,7 +75,7 @@ SplayTree.prototype.insert = function(key, value) {
if (this.root_.key == key) {
return;
}
- var node = new SplayTree.Node(key, value);
+ const node = new SplayTree.Node(key, value);
if (key > this.root_.key) {
node.left = this.root_;
node.right = this.root_.right;
@@ -99,17 +99,17 @@ SplayTree.prototype.insert = function(key, value) {
*/
SplayTree.prototype.remove = function(key) {
if (this.isEmpty()) {
- throw Error('Key not found: ' + key);
+ throw Error(`Key not found: ${key}`);
}
this.splay_(key);
if (this.root_.key != key) {
- throw Error('Key not found: ' + key);
+ throw Error(`Key not found: ${key}`);
}
- var removed = this.root_;
+ const removed = this.root_;
if (!this.root_.left) {
this.root_ = this.root_.right;
} else {
- var right = this.root_.right;
+ const { right } = this.root_;
this.root_ = this.root_.left;
// Splay to make sure that the new root has an empty right child.
this.splay_(key);
@@ -144,7 +144,7 @@ SplayTree.prototype.findMin = function() {
if (this.isEmpty()) {
return null;
}
- var current = this.root_;
+ let current = this.root_;
while (current.left) {
current = current.left;
}
@@ -159,7 +159,7 @@ SplayTree.prototype.findMax = function(opt_startNode) {
if (this.isEmpty()) {
return null;
}
- var current = opt_startNode || this.root_;
+ let current = opt_startNode || this.root_;
while (current.right) {
current = current.right;
}
@@ -195,7 +195,7 @@ SplayTree.prototype.findGreatestLessThan = function(key) {
* with keys.
*/
SplayTree.prototype.exportKeysAndValues = function() {
- var result = [];
+ const result = [];
this.traverse_(function(node) { result.push([node.key, node.value]); });
return result;
};
@@ -205,7 +205,7 @@ SplayTree.prototype.exportKeysAndValues = function() {
* @return {Array<*>} An array containing all the values of tree's nodes.
*/
SplayTree.prototype.exportValues = function() {
- var result = [];
+ const result = [];
this.traverse_(function(node) { result.push(node.value); });
return result;
};
@@ -230,9 +230,9 @@ SplayTree.prototype.splay_ = function(key) {
// the L tree of the algorithm. The left child of the dummy node
// will hold the R tree of the algorithm. Using a dummy node, left
// and right will always be nodes and we avoid special cases.
- var dummy, left, right;
+ let dummy, left, right;
dummy = left = right = new SplayTree.Node(null, null);
- var current = this.root_;
+ let current = this.root_;
while (true) {
if (key < current.key) {
if (!current.left) {
@@ -240,7 +240,7 @@ SplayTree.prototype.splay_ = function(key) {
}
if (key < current.left.key) {
// Rotate right.
- var tmp = current.left;
+ const tmp = current.left;
current.left = tmp.right;
tmp.right = current;
current = tmp;
@@ -258,7 +258,7 @@ SplayTree.prototype.splay_ = function(key) {
}
if (key > current.right.key) {
// Rotate left.
- var tmp = current.right;
+ const tmp = current.right;
current.right = tmp.left;
tmp.left = current;
current = tmp;
@@ -290,9 +290,9 @@ SplayTree.prototype.splay_ = function(key) {
* @private
*/
SplayTree.prototype.traverse_ = function(f) {
- var nodesToVisit = [this.root_];
+ const nodesToVisit = [this.root_];
while (nodesToVisit.length > 0) {
- var node = nodesToVisit.shift();
+ const node = nodesToVisit.shift();
if (node == null) {
continue;
}
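
A minimal usage sketch for the splay tree (keys and values are hypothetical):

  const tree = new SplayTree();
  tree.insert(10, 'a');
  tree.insert(20, 'b');
  tree.findGreatestLessThan(15).value;  // 'a' (node with key 10)
  tree.remove(10);
  tree.exportValues();                  // ['b']
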
diff --git a/deps/v8/tools/system-analyzer/app-model.mjs b/deps/v8/tools/system-analyzer/app-model.mjs
index 37fa5ae2f3..a0b176c170 100644
--- a/deps/v8/tools/system-analyzer/app-model.mjs
+++ b/deps/v8/tools/system-analyzer/app-model.mjs
@@ -3,100 +3,116 @@
// found in the LICENSE file.
class State {
- #timeSelection = { start: 0, end: Infinity };
- #map;
- #ic;
- #selectedMapLogEvents;
- #selectedIcLogEvents;
- #selectedSourcePositionLogEvents;
- #nofChunks;
- #chunks;
- #icTimeline;
- #mapTimeline;
- #minStartTime = Number.POSITIVE_INFINITY;
- #maxEndTime = Number.NEGATIVE_INFINITY;
+ _timeSelection = {start: 0, end: Infinity};
+ _map;
+ _ic;
+ _selectedMapLogEntries;
+ _selectedIcLogEntries;
+ _selectedDeoptLogEntries;
+ _selectedSourcePositions;
+ _nofChunks;
+ _chunks;
+ _icTimeline;
+ _mapTimeline;
+ _deoptTimeline;
+ _minStartTime = Number.POSITIVE_INFINITY;
+ _maxEndTime = Number.NEGATIVE_INFINITY;
get minStartTime() {
- return this.#minStartTime;
+ return this._minStartTime;
}
get maxEndTime() {
- return this.#maxEndTime;
+ return this._maxEndTime;
}
- #updateTimeRange(timeline) {
- this.#minStartTime = Math.min(this.#minStartTime, timeline.startTime);
- this.#maxEndTime = Math.max(this.#maxEndTime, timeline.endTime);
+
+ selectTimeRange(start, end) {
+ this.timeSelection.start = start;
+ this.timeSelection.end = end;
+ this._icTimeline.selectTimeRange(start, end);
+ this._mapTimeline.selectTimeRange(start, end);
+ this._deoptTimeline.selectTimeRange(start, end);
+ }
+
+ _updateTimeRange(timeline) {
+ this._minStartTime = Math.min(this._minStartTime, timeline.startTime);
+ this._maxEndTime = Math.max(this._maxEndTime, timeline.endTime);
+ timeline.startTime = this._minStartTime;
+ timeline.endTime = this._maxEndTime;
}
get mapTimeline() {
- return this.#mapTimeline;
+ return this._mapTimeline;
}
set mapTimeline(timeline) {
- this.#updateTimeRange(timeline);
- timeline.startTime = this.#minStartTime;
- timeline.endTime = this.#maxEndTime;
- this.#mapTimeline = timeline;
+ this._updateTimeRange(timeline);
+ this._mapTimeline = timeline;
+ }
+ get icTimeline() {
+ return this._icTimeline;
}
set icTimeline(timeline) {
- this.#updateTimeRange(timeline);
- timeline.startTime = this.#minStartTime;
- timeline.endTime = this.#maxEndTime;
- this.#icTimeline = timeline;
+ this._updateTimeRange(timeline);
+ this._icTimeline = timeline;
}
- get icTimeline() {
- return this.#icTimeline;
+ get deoptTimeline() {
+ return this._deoptTimeline;
+ }
+ set deoptTimeline(timeline) {
+ this._updateTimeRange(timeline);
+ this._deoptTimeline = timeline;
}
set chunks(value) {
- //TODO(zcankara) split up between maps and ics, and every timeline track
- this.#chunks = value;
+ // TODO(zcankara) split up between maps and ics, and every timeline track
+ this._chunks = value;
}
get chunks() {
- //TODO(zcankara) split up between maps and ics, and every timeline track
- return this.#chunks;
+ // TODO(zcankara) split up between maps and ics, and every timeline track
+ return this._chunks;
}
get nofChunks() {
- return this.#nofChunks;
+ return this._nofChunks;
}
set nofChunks(count) {
- this.#nofChunks = count;
+ this._nofChunks = count;
}
get map() {
- //TODO(zcankara) rename as selectedMapEvents, array of selected events
- return this.#map;
+ // TODO(zcankara) rename as selectedMapEvents, array of selected events
+ return this._map;
}
set map(value) {
- //TODO(zcankara) rename as selectedMapEvents, array of selected events
+ // TODO(zcankara) rename as selectedMapEvents, array of selected events
if (!value) return;
- this.#map = value;
+ this._map = value;
}
get ic() {
- //TODO(zcankara) rename selectedICEvents, array of selected events
- return this.#ic;
+ // TODO(zcankara) rename selectedICEvents, array of selected events
+ return this._ic;
}
set ic(value) {
- //TODO(zcankara) rename selectedIcEvents, array of selected events
+ // TODO(zcankara) rename selectedIcEvents, array of selected events
if (!value) return;
- this.#ic = value;
+ this._ic = value;
}
- get selectedMapLogEvents() {
- return this.#selectedMapLogEvents;
+ get selectedMapLogEntries() {
+ return this._selectedMapLogEntries;
}
- set selectedMapLogEvents(value) {
+ set selectedMapLogEntries(value) {
if (!value) return;
- this.#selectedMapLogEvents = value;
+ this._selectedMapLogEntries = value;
}
- get selectedSourcePositionLogEvents() {
- return this.#selectedSourcePositionLogEvents;
+ get selectedSourcePositions() {
+ return this._selectedSourcePositions;
}
- set selectedSourcePositionLogEvents(value) {
- this.#selectedSourcePositionLogEvents = value;
+ set selectedSourcePositions(value) {
+ this._selectedSourcePositions = value;
}
- get selectedIcLogEvents() {
- return this.#selectedIcLogEvents;
+ get selectedIcLogEntries() {
+ return this._selectedIcLogEntries;
}
- set selectedIcLogEvents(value) {
+ set selectedIcLogEntries(value) {
if (!value) return;
- this.#selectedIcLogEvents = value;
+ this._selectedIcLogEntries = value;
}
get timeSelection() {
- return this.#timeSelection;
+ return this._timeSelection;
}
get entries() {
if (!this.map) return {};
@@ -106,4 +122,4 @@ class State {
}
}
-export { State };
+export {State};
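
A sketch of the new selectTimeRange plumbing, assuming the three timelines have already been assigned (the Timeline instances are hypothetical and must expose the selectTimeRange method this code calls):

  const state = new State();
  state.mapTimeline = mapTimeline;
  state.icTimeline = icTimeline;
  state.deoptTimeline = deoptTimeline;
  state.selectTimeRange(1000, 2000);  // narrows every timeline to [1000, 2000]
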
diff --git a/deps/v8/tools/system-analyzer/events.mjs b/deps/v8/tools/system-analyzer/events.mjs
index 8e9a5a0b44..69529233b4 100644
--- a/deps/v8/tools/system-analyzer/events.mjs
+++ b/deps/v8/tools/system-analyzer/events.mjs
@@ -3,44 +3,49 @@
// found in the LICENSE file.
class SelectionEvent extends CustomEvent {
- static name = "showentries";
+ // TODO: turn into static class fields once Safari supports it.
+ static get name() {
+ return 'showentries';
+ }
constructor(entries) {
- super(SelectionEvent.name, { bubbles: true, composed: true });
+ super(SelectionEvent.name, {bubbles: true, composed: true});
if (!Array.isArray(entries) || entries.length == 0) {
- throw new Error("No valid entries selected!");
+ throw new Error('No valid entries selected!');
}
this.entries = entries;
}
}
class FocusEvent extends CustomEvent {
- static name = "showentrydetail";
+ static get name() {
+ return 'showentrydetail';
+ }
constructor(entry) {
- super(FocusEvent.name, { bubbles: true, composed: true });
+ super(FocusEvent.name, {bubbles: true, composed: true});
this.entry = entry;
}
}
class SelectTimeEvent extends CustomEvent {
- static name = 'timerangeselect';
+ static get name() {
+ return 'timerangeselect';
+ }
constructor(start, end) {
- super(SelectTimeEvent.name, { bubbles: true, composed: true });
+ super(SelectTimeEvent.name, {bubbles: true, composed: true});
this.start = start;
this.end = end;
}
}
class SynchronizeSelectionEvent extends CustomEvent {
- static name = 'syncselection';
+ static get name() {
+ return 'syncselection';
+ }
constructor(start, end) {
- super(SynchronizeSelectionEvent.name, { bubbles: true, composed: true });
+ super(SynchronizeSelectionEvent.name, {bubbles: true, composed: true});
this.start = start;
this.end = end;
}
}
-
-export {
- SelectionEvent, FocusEvent, SelectTimeEvent,
- SynchronizeSelectionEvent
-};
+export {SelectionEvent, FocusEvent, SelectTimeEvent, SynchronizeSelectionEvent};
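
A dispatch-and-listen sketch for these events (the element and handler are hypothetical). Because they are constructed with bubbles and composed set to true, they propagate out of shadow DOM trees to ancestor listeners:

  panelElement.dispatchEvent(new SelectTimeEvent(100, 200));
  document.addEventListener(
      SelectTimeEvent.name, e => console.log(e.start, e.end));
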
diff --git a/deps/v8/tools/system-analyzer/helper.mjs b/deps/v8/tools/system-analyzer/helper.mjs
index 782b3f3456..854a51fcf3 100644
--- a/deps/v8/tools/system-analyzer/helper.mjs
+++ b/deps/v8/tools/system-analyzer/helper.mjs
@@ -22,26 +22,6 @@ function formatSeconds(millis) {
return (millis * kMillis2Seconds).toFixed(2) + 's';
}
-function defineCustomElement(path, generator) {
- let name = path.substring(path.lastIndexOf("/") + 1, path.length);
- path = path + '-template.html';
- fetch(path)
- .then(stream => stream.text())
- .then(
- templateText => customElements.define(name, generator(templateText)));
-}
-
-// DOM Helpers
-function removeAllChildren(node) {
- let range = document.createRange();
- range.selectNodeContents(node);
- range.deleteContents();
-}
-
-function $(id) {
- return document.querySelector(id)
-}
-
class CSSColor {
static getColor(name) {
const style = getComputedStyle(document.body);
@@ -101,7 +81,6 @@ class CSSColor {
static get violet() {
return CSSColor.getColor('violet');
}
-
}
function typeToColor(type) {
@@ -136,26 +115,77 @@ function typeToColor(type) {
return CSSColor.secondaryColor;
}
+class DOM {
+ static div(classes) {
+ const node = document.createElement('div');
+ if (classes !== void 0) {
+ if (typeof classes === 'string') {
+ node.classList.add(classes);
+ } else {
+ classes.forEach(cls => node.classList.add(cls));
+ }
+ }
+ return node;
+ }
+ static table(className) {
+ const node = document.createElement('table');
+ if (className) node.classList.add(className);
+ return node;
+ }
-function div(classes) {
- let node = document.createElement('div');
- if (classes !== void 0) {
- if (typeof classes === 'string') {
- node.classList.add(classes);
- } else {
- classes.forEach(cls => node.classList.add(cls));
+ static td(textOrNode, className) {
+ const node = document.createElement('td');
+ if (typeof textOrNode === 'object') {
+ node.appendChild(textOrNode);
+ } else if (textOrNode) {
+ node.innerText = textOrNode;
}
+ if (className) node.classList.add(className);
+ return node;
+ }
+
+ static tr(className) {
+ const node = document.createElement('tr');
+ if (className) node.classList.add(className);
+ return node;
+ }
+
+ static text(string) {
+ return document.createTextNode(string);
+ }
+
+ static removeAllChildren(node) {
+ let range = document.createRange();
+ range.selectNodeContents(node);
+ range.deleteContents();
+ }
+
+ static defineCustomElement(path, generator) {
+ let name = path.substring(path.lastIndexOf('/') + 1, path.length);
+ path = path + '-template.html';
+ fetch(path)
+ .then(stream => stream.text())
+ .then(
+ templateText =>
+ customElements.define(name, generator(templateText)));
}
- return node;
+}
+
+function $(id) {
+ return document.querySelector(id)
}
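
For example, assembling a table row with the new DOM helpers (class names and text are hypothetical):

  const row = DOM.tr('entry');
  row.appendChild(DOM.td('1234', 'value'));          // text cell
  row.appendChild(DOM.td(DOM.div(['tag', 'hot'])));  // nested-node cell
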
class V8CustomElement extends HTMLElement {
+ _updateTimeoutId;
+ _updateCallback = this._update.bind(this);
+
constructor(templateText) {
super();
- const shadowRoot = this.attachShadow({ mode: 'open' });
+ const shadowRoot = this.attachShadow({mode: 'open'});
shadowRoot.innerHTML = templateText;
}
+
$(id) {
return this.shadowRoot.querySelector(id);
}
@@ -164,32 +194,54 @@ class V8CustomElement extends HTMLElement {
return this.shadowRoot.querySelectorAll(query);
}
- div(classes) { return div(classes) }
+ update() {
+ // Use timeout tasks to asynchronously update the UI without blocking.
+ clearTimeout(this._updateTimeoutId);
+ const kDelayMs = 5;
+ this._updateTimeoutId = setTimeout(this._updateCallback, kDelayMs);
+ }
+
+ _update() {
+ throw Error('Subclass responsibility');
+ }
+}
- table(className) {
- let node = document.createElement('table')
- if (className) node.classList.add(className)
- return node;
+class LazyTable {
+ constructor(table, rowData, rowElementCreator) {
+ this._table = table;
+ this._rowData = rowData;
+ this._rowElementCreator = rowElementCreator;
+ const tbody = table.querySelector('tbody');
+ table.replaceChild(document.createElement('tbody'), tbody);
+ table.querySelector('tfoot td').onclick = (e) => this._addMoreRows();
+ this._addMoreRows();
}
- td(textOrNode) {
- let node = document.createElement('td');
- if (typeof textOrNode === 'object') {
- node.appendChild(textOrNode);
- } else {
- node.innerText = textOrNode;
- }
- return node;
+ _nextRowDataSlice() {
+ return this._rowData.splice(0, 100);
}
- tr() {
- return document.createElement('tr');
+ _addMoreRows() {
+ const fragment = new DocumentFragment();
+ for (let row of this._nextRowDataSlice()) {
+ const tr = this._rowElementCreator(row);
+ fragment.appendChild(tr);
+ }
+ this._table.querySelector('tbody').appendChild(fragment);
}
+}
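
Usage sketch: the table element must already contain a tbody and a tfoot with a cell that, when clicked, appends the next 100-row slice (the selector and row builder are hypothetical):

  new LazyTable($('#log-table'), logEntries,
      entry => DOM.tr());  // a real builder would fill the row with DOM.td cells
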
- removeAllChildren(node) { return removeAllChildren(node); }
+function delay(time) {
+ return new Promise(resolver => setTimeout(resolver, time));
}
export {
- defineCustomElement, V8CustomElement, removeAllChildren,
- $, div, typeToColor, CSSColor
+ DOM,
+ $,
+ V8CustomElement,
+ formatBytes,
+ typeToColor,
+ CSSColor,
+ delay,
+ LazyTable,
};
diff --git a/deps/v8/tools/system-analyzer/ic-model.mjs b/deps/v8/tools/system-analyzer/ic-model.mjs
index 8340e9b756..2bb40b6853 100644
--- a/deps/v8/tools/system-analyzer/ic-model.mjs
+++ b/deps/v8/tools/system-analyzer/ic-model.mjs
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import Processor from "./processor.mjs";
+import {IcLogEntry} from './log/ic.mjs';
// For compatibility with console scripts:
print = console.log;
@@ -23,11 +23,11 @@ export class Group {
}
createSubGroups() {
+ // TODO: use Map
this.groups = {};
- for (let i = 0; i < Processor.kProperties.length; i++) {
- let subProperty = Processor.kProperties[i];
- if (this.property == subProperty) continue;
- this.groups[subProperty] = Group.groupBy(this.entries, subProperty);
+ for (const propertyName of IcLogEntry.propertyNames) {
+ if (this.property == propertyName) continue;
+ this.groups[propertyName] = Group.groupBy(this.entries, propertyName);
}
}
@@ -51,8 +51,7 @@ export class Group {
group.percentage = Math.round(group.count / length * 100 * 100) / 100;
result.push(group);
}
- result.sort((a, b) => { return b.count - a.count });
+ result.sort((a, b) => {return b.count - a.count});
return result;
}
-
}
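
`Group.groupBy` tallies how often each value of a property occurs and annotates every group with a count and a percentage before sorting by size. A simplified sketch of that aggregation, assuming plain objects in place of `IcLogEntry` instances:

    function groupBy(entries, property) {
      const counts = new Map();
      for (const entry of entries) {
        const key = entry[property];
        counts.set(key, (counts.get(key) ?? 0) + 1);
      }
      const result = [];
      for (const [key, count] of counts) {
        // Two-decimal percentage, matching the formula above.
        const percentage = Math.round(count / entries.length * 100 * 100) / 100;
        result.push({key, count, percentage});
      }
      result.sort((a, b) => b.count - a.count);  // Largest groups first.
      return result;
    }

    console.log(groupBy(
        [{state: '0'}, {state: '1'}, {state: '0'}], 'state'));
    // [{key: '0', count: 2, percentage: 66.67}, {key: '1', count: 1, ...}]
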
diff --git a/deps/v8/tools/system-analyzer/ic-panel-template.html b/deps/v8/tools/system-analyzer/ic-panel-template.html
index 53c5eb77fc..ee08901fb0 100644
--- a/deps/v8/tools/system-analyzer/ic-panel-template.html
+++ b/deps/v8/tools/system-analyzer/ic-panel-template.html
@@ -25,37 +25,49 @@ found in the LICENSE file. -->
padding: 0.5em 0 0.2em 0;
}
- .entry-details {}
-
- .entry-details TD {}
-
- .details {
- width: 0.1em;
- }
-
- .details span {
- padding: 0 0.4em 0 0.4em;
- background-color: var(--on-surface-color);
- color: var(--surface-color);
- border-radius: 25px;
+ .toggle {
+ width: 1em;
text-align: center;
cursor: -webkit-zoom-in;
+ color: rgba(var(--border-color), 1);
+ }
+ .toggle::before {
+ content: "▶";
+ }
+ .open .toggle::before {
+ content: "▼";
+ }
+
+ .panel {
+ position: relative;
+ min-height: 200px;
}
#legend {
- padding-right: 20px;
+ position: absolute;
+ right: 10px;
+ top: 10px;
+ background-color: var(--surface-color);
+ border-radius: 5px;
+ border: 3px solid rgba(var(--border-color), 0.2);
+ padding: 0 10px 0 10px;
}
- dl {
- float: right;
- border-style: solid;
- border-width: 1px;
- padding: 20px;
+ #legend dt {
+ font-family: monospace;
+ }
+ #legend h3 {
+ margin-top: 10px;
+ }
+ .scroller {
+ max-height: 800px;
+ overflow-y: scroll;
}
</style>
<div class="panel">
- <h2>IC Panel</h2>
+ <h2>IC Panel <span id="count"></span></h2>
<div id="legend">
+ <h3>Legend</h3>
<dl>
<dt>0</dt>
<dd>uninitialized</dd>
@@ -73,22 +85,14 @@ found in the LICENSE file. -->
<dd>generic</dd>
</dl>
</div>
- <h3>Data</h3>
- <p>Trace Count: <span id="count">0</span></p>
- <h3>Result</h3>
<p>
- Group-Key:
+ Group by IC-property:
<select id="group-key"></select>
</p>
- <p>
- Filter by Time
- <input type="search" id="filter-time-start" placeholder="start"></input> :
- <input type="search" id="filter-time-end" placeholder="end"></input>
- <button id="filterICTimeBtn">Filter</button>
- <p>
- <table id="table" width="100%">
- <tbody id="table-body">
- </tbody>
- </table>
- </p>
+ <div class="panelBody">
+ <table id="table" width="100%">
+ <tbody id="table-body">
+ </tbody>
+ </table>
+ </div>
</div>
diff --git a/deps/v8/tools/system-analyzer/ic-panel.mjs b/deps/v8/tools/system-analyzer/ic-panel.mjs
index a8f68c31f5..d81d06d0d6 100644
--- a/deps/v8/tools/system-analyzer/ic-panel.mjs
+++ b/deps/v8/tools/system-analyzer/ic-panel.mjs
@@ -2,223 +2,190 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import { Group } from './ic-model.mjs';
-import Processor from "./processor.mjs";
-import { MapLogEvent } from "./log/map.mjs";
-import { FocusEvent, SelectTimeEvent, SelectionEvent } from './events.mjs';
-import { defineCustomElement, V8CustomElement } from './helper.mjs';
-
-defineCustomElement('ic-panel', (templateText) =>
- class ICPanel extends V8CustomElement {
- #selectedLogEvents;
- #timeline;
- constructor() {
- super(templateText);
- this.initGroupKeySelect();
- this.groupKey.addEventListener(
- 'change', e => this.updateTable(e));
- this.$('#filterICTimeBtn').addEventListener(
- 'click', e => this.handleICTimeFilter(e));
- }
- set timeline(value) {
- console.assert(value !== undefined, "timeline undefined!");
- this.#timeline = value;
- this.selectedLogEvents = this.#timeline.all;
- this.updateCount();
- }
- get groupKey() {
- return this.$('#group-key');
- }
-
- get table() {
- return this.$('#table');
- }
-
- get tableBody() {
- return this.$('#table-body');
- }
-
- get count() {
- return this.$('#count');
- }
-
- get spanSelectAll() {
- return this.querySelectorAll("span");
- }
-
- set selectedLogEvents(value) {
- this.#selectedLogEvents = value;
- this.updateCount();
- this.updateTable();
- }
-
- updateCount() {
- this.count.innerHTML = this.selectedLogEvents.length;
- }
-
- get selectedLogEvents() {
- return this.#selectedLogEvents;
- }
-
- updateTable(event) {
- let select = this.groupKey;
- let key = select.options[select.selectedIndex].text;
- let tableBody = this.tableBody;
- this.removeAllChildren(tableBody);
- let groups = Group.groupBy(this.selectedLogEvents, key, true);
- this.render(groups, tableBody);
- }
-
- escapeHtml(unsafe) {
- if (!unsafe) return "";
- return unsafe.toString()
- .replace(/&/g, "&amp;")
- .replace(/</g, "&lt;")
- .replace(/>/g, "&gt;")
- .replace(/"/g, "&quot;")
- .replace(/'/g, "&#039;");
- }
- processValue(unsafe) {
- if (!unsafe) return "";
- if (!unsafe.startsWith("http")) return this.escapeHtml(unsafe);
- let a = document.createElement("a");
- a.href = unsafe;
- a.textContent = unsafe;
- return a;
- }
-
- td(tr, content, className) {
- let node = document.createElement("td");
- if (typeof content == "object") {
- node.appendChild(content);
- } else {
- node.innerHTML = content;
- }
- node.className = className;
- tr.appendChild(node);
- return node
- }
-
- handleMapClick(e) {
- const entry = e.target.parentNode.entry;
- const id = entry.key;
- const selectedMapLogEvents =
- this.searchIcLogEventToMapLogEvent(id, entry.entries);
- this.dispatchEvent(new SelectionEvent(selectedMapLogEvents));
- }
-
- searchIcLogEventToMapLogEvent(id, icLogEvents) {
- // searches for mapLogEvents using the id, time
- const selectedMapLogEventsSet = new Set();
- for (const icLogEvent of icLogEvents) {
- const time = icLogEvent.time;
- const selectedMap = MapLogEvent.get(id, time);
- selectedMapLogEventsSet.add(selectedMap);
- }
- return Array.from(selectedMapLogEventsSet);
- }
-
- //TODO(zcankara) Handle in the processor for events with source positions.
- handleFilePositionClick(e) {
- const entry = e.target.parentNode.entry;
- this.dispatchEvent(new FocusEvent(entry.filePosition));
- }
-
- render(entries, parent) {
- let fragment = document.createDocumentFragment();
- let max = Math.min(1000, entries.length)
- for (let i = 0; i < max; i++) {
- let entry = entries[i];
- let tr = document.createElement("tr");
- tr.entry = entry;
- //TODO(zcankara) Create one bound method and use it everywhere
- if (entry.property === "map") {
- tr.addEventListener('click', e => this.handleMapClick(e));
- tr.classList.add('clickable');
- } else if (entry.property == "filePosition") {
- tr.classList.add('clickable');
- tr.addEventListener('click',
- e => this.handleFilePositionClick(e));
+import {FocusEvent, SelectionEvent, SelectTimeEvent} from './events.mjs';
+import {delay, DOM, V8CustomElement} from './helper.mjs';
+import {Group} from './ic-model.mjs';
+import {IcLogEntry} from './log/ic.mjs';
+import {MapLogEntry} from './log/map.mjs';
+
+DOM.defineCustomElement(
+ 'ic-panel', (templateText) => class ICPanel extends V8CustomElement {
+ _selectedLogEntries;
+ _timeline;
+ constructor() {
+ super(templateText);
+ this.initGroupKeySelect();
+ this.groupKey.addEventListener('change', e => this.updateTable(e));
+ }
+ set timeline(value) {
+ console.assert(value !== undefined, 'timeline undefined!');
+ this._timeline = value;
+ this.selectedLogEntries = this._timeline.all;
+ this.update();
+ }
+ get groupKey() {
+ return this.$('#group-key');
+ }
+
+ get table() {
+ return this.$('#table');
+ }
+
+ get tableBody() {
+ return this.$('#table-body');
+ }
+
+ get count() {
+ return this.$('#count');
+ }
+
+ get spanSelectAll() {
+ return this.querySelectorAll('span');
+ }
+
+ set selectedLogEntries(value) {
+ this._selectedLogEntries = value;
+ this.update();
+ }
+
+ _update() {
+ this._updateCount();
+ this._updateTable();
+ }
+
+ _updateCount() {
+ this.count.innerHTML = `length=${this._selectedLogEntries.length}`;
+ }
+
+ _updateTable(event) {
+ let select = this.groupKey;
+ let key = select.options[select.selectedIndex].text;
+ DOM.removeAllChildren(this.tableBody);
+ let groups = Group.groupBy(this._selectedLogEntries, key, true);
+ this._render(groups, this.tableBody);
+ }
+
+ escapeHtml(unsafe) {
+ if (!unsafe) return '';
+ return unsafe.toString()
+ .replace(/&/g, '&amp;')
+ .replace(/</g, '&lt;')
+ .replace(/>/g, '&gt;')
+ .replace(/"/g, '&quot;')
+ .replace(/'/g, '&#039;');
+ }
+
+ handleMapClick(e) {
+ const group = e.target.parentNode.entry;
+ const id = group.key;
+ const selectedMapLogEntries =
+ this.searchIcLogEntryToMapLogEntry(id, group.entries);
+ this.dispatchEvent(new SelectionEvent(selectedMapLogEntries));
+ }
+
+ searchIcLogEntryToMapLogEntry(id, icLogEntries) {
+ // Searches for mapLogEntries using the id and time.
+ const selectedMapLogEntriesSet = new Set();
+ for (const icLogEntry of icLogEntries) {
+ const selectedMap = MapLogEntry.get(id, icLogEntry.time);
+ selectedMapLogEntriesSet.add(selectedMap);
+ }
+ return Array.from(selectedMapLogEntriesSet);
+ }
+
+ // TODO(zcankara) Handle in the processor for events with source
+ // positions.
+ handleFilePositionClick(e) {
+ const tr = e.target.parentNode;
+ const sourcePosition = tr.group.entries[0].sourcePosition;
+ this.dispatchEvent(new FocusEvent(sourcePosition));
+ }
+
+ _render(groups, parent) {
+ const fragment = document.createDocumentFragment();
+ const max = Math.min(1000, groups.length)
+ const detailsClickHandler = this.handleDetailsClick.bind(this);
+ const mapClickHandler = this.handleMapClick.bind(this);
+ const fileClickHandler = this.handleFilePositionClick.bind(this);
+ for (let i = 0; i < max; i++) {
+ const group = groups[i];
+ const tr = DOM.tr();
+ tr.group = group;
+ const details = tr.appendChild(DOM.td('', 'toggle'));
+ details.onclick = detailsClickHandler;
+ tr.appendChild(DOM.td(group.percentage + '%', 'percentage'));
+ tr.appendChild(DOM.td(group.count, 'count'));
+ const valueTd = tr.appendChild(DOM.td(group.key, 'key'));
+ if (group.property === 'map') {
+ valueTd.onclick = mapClickHandler;
+ valueTd.classList.add('clickable');
+ } else if (group.property == 'filePosition') {
+ valueTd.classList.add('clickable');
+ valueTd.onclick = fileClickHandler;
+ }
+ fragment.appendChild(tr);
+ }
+ const omitted = groups.length - max;
+ if (omitted > 0) {
+ const tr = DOM.tr();
+ const tdNode = tr.appendChild(DOM.td(`Omitted ${omitted} entries.`));
+ tdNode.colSpan = 4;
+ fragment.appendChild(tr);
+ }
+ parent.appendChild(fragment);
+ }
+
+ handleDetailsClick(event) {
+ const tr = event.target.parentNode;
+ const group = tr.group;
+ // Create subgroups in-place if they don't exist yet.
+ if (group.groups === undefined) {
+ group.createSubGroups();
+ this.renderDrilldown(group, tr);
}
- let details = this.td(tr, '<span>&#8505;</a>', 'details');
- //TODO(zcankara) don't keep the whole function context alive
- details.onclick = _ => this.toggleDetails(details);
- this.td(tr, entry.percentage + "%", 'percentage');
- this.td(tr, entry.count, 'count');
- this.td(tr, this.processValue(entry.key), 'key');
- fragment.appendChild(tr);
- }
- let omitted = entries.length - max;
- if (omitted > 0) {
- let tr = document.createElement("tr");
- let tdNode = this.td(tr, 'Omitted ' + omitted + " entries.");
- tdNode.colSpan = 4;
- fragment.appendChild(tr);
- }
- parent.appendChild(fragment);
- }
-
-
- renderDrilldown(entry, previousSibling) {
- let tr = document.createElement('tr');
- tr.className = "entry-details";
- tr.style.display = "none";
- // indent by one td.
- tr.appendChild(document.createElement("td"));
- let td = document.createElement("td");
- td.colSpan = 3;
- for (let key in entry.groups) {
- td.appendChild(this.renderDrilldownGroup(entry, key));
- }
- tr.appendChild(td);
- // Append the new TR after previousSibling.
- previousSibling.parentNode.insertBefore(tr, previousSibling.nextSibling)
- }
-
- renderDrilldownGroup(entry, key) {
- let max = 20;
- let group = entry.groups[key];
- let div = document.createElement("div")
- div.className = 'drilldown-group-title'
- div.textContent = key + ' [top ' + max + ' out of ' + group.length + ']';
- let table = document.createElement("table");
- this.render(group.slice(0, max), table, false)
- div.appendChild(table);
- return div;
- }
-
- toggleDetails(node) {
- let tr = node.parentNode;
- let entry = tr.entry;
- // Create subgroup in-place if the don't exist yet.
- if (entry.groups === undefined) {
- entry.createSubGroups();
- this.renderDrilldown(entry, tr);
- }
- let details = tr.nextSibling;
- let display = details.style.display;
- if (display != "none") {
- display = "none";
- } else {
- display = "table-row"
- };
- details.style.display = display;
- }
-
- initGroupKeySelect() {
- let select = this.groupKey;
- select.options.length = 0;
- for (let i in Processor.kProperties) {
- let option = document.createElement("option");
- option.text = Processor.kProperties[i];
- select.add(option);
- }
- }
-
- handleICTimeFilter(e) {
- this.dispatchEvent(new SelectTimeEvent(
- parseInt(this.$('#filter-time-start').value),
- parseInt(this.$('#filter-time-end').value)));
- }
-
- });
+ let detailsTr = tr.nextSibling;
+ if (tr.classList.contains('open')) {
+ tr.classList.remove('open');
+ detailsTr.style.display = 'none';
+ } else {
+ tr.classList.add('open');
+ detailsTr.style.display = 'table-row';
+ }
+ }
+
+ renderDrilldown(group, previousSibling) {
+ let tr = DOM.tr('entry-details');
+ tr.style.display = 'none';
+ // indent by one td.
+ tr.appendChild(DOM.td());
+ let td = DOM.td();
+ td.colSpan = 3;
+ for (let key in group.groups) {
+ this.renderDrilldownGroup(td, group.groups[key], key);
+ }
+ tr.appendChild(td);
+ // Append the new TR after previousSibling.
+ previousSibling.parentNode.insertBefore(tr, previousSibling.nextSibling)
+ }
+
+ renderDrilldownGroup(td, children, key) {
+ const max = 20;
+ const div = DOM.div('drilldown-group-title');
+ div.textContent =
+ `Grouped by ${key} [top ${max} out of ${children.length}]`;
+ td.appendChild(div);
+ const table = DOM.table();
+ this._render(children.slice(0, max), table, false)
+ td.appendChild(table);
+ }
+
+ initGroupKeySelect() {
+ const select = this.groupKey;
+ select.options.length = 0;
+ for (const propertyName of IcLogEntry.propertyNames) {
+ const option = document.createElement('option');
+ option.text = propertyName;
+ select.add(option);
+ }
+ }
+ });
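
The rewritten `_render` binds each click handler once per call rather than allocating a closure per row, hangs per-row state off the `<tr>` element itself, caps output at 1000 rows, and lands everything in one `DocumentFragment` append. A browser-only sketch of that structure (`rows` and `label` are hypothetical; the real code uses the `DOM.tr`/`DOM.td` helpers from helper.mjs):

    function renderRows(tbody, rows, onRowClick) {
      const fragment = document.createDocumentFragment();
      const max = Math.min(1000, rows.length);
      for (let i = 0; i < max; i++) {
        const tr = document.createElement('tr');
        tr.textContent = rows[i].label;
        tr.row = rows[i];         // Per-row state lives on the element.
        tr.onclick = onRowClick;  // One shared handler for every row.
        fragment.appendChild(tr);
      }
      const omitted = rows.length - max;
      if (omitted > 0) {
        const tr = document.createElement('tr');
        tr.textContent = `Omitted ${omitted} entries.`;
        fragment.appendChild(tr);
      }
      tbody.appendChild(fragment);  // A single DOM mutation.
    }

Note also that the drilldown arrow needs no extra markup: the template's `.toggle::before` and `.open .toggle::before` rules render ▶ or ▼ purely from the `open` class that `handleDetailsClick` flips.
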
diff --git a/deps/v8/tools/system-analyzer/index.css b/deps/v8/tools/system-analyzer/index.css
index c3defb5b8c..5b55182f68 100644
--- a/deps/v8/tools/system-analyzer/index.css
+++ b/deps/v8/tools/system-analyzer/index.css
@@ -18,6 +18,7 @@
--blue: #6e77dc;
--orange: #dc9b6e;
--violet: #d26edc;
+ --border-color: 128, 128, 128;
}
[data-theme="light"] {
@@ -43,28 +44,60 @@
}
body {
- font-family: "Roboto", sans-serif;
+ font-family: sans-serif;
font-size: 14px;
color: var(--on-background-color);
- margin-left: 2.5%;
- margin-right: 2.5%;
+ margin: 10px 10px 0 10px;
background-color: var(--background-color);
- letter-spacing: 0.5px;
}
-h2,
-h4 {
- box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.2);
- transition: 0.3s;
- color: var(--on-surface-color);
- padding: 10px 20px;
- text-align: center;
- text-decoration: none;
+
+section {
+ margin-bottom: 10px;
+}
+
+::-webkit-scrollbar, ::-webkit-scrollbar-track, ::-webkit-scrollbar-corner {
+ background-color: rgba(0, 0, 0, 0.0);
+}
+::-webkit-scrollbar, ::-webkit-scrollbar-track {
+ width: 10px;
+ height: 10px;
+}
+::-webkit-scrollbar-thumb {
+ background-color: rgba(128, 128, 128, 0.5);
+ border-radius: 8px;
+ cursor: pointer;
+}
+::-webkit-scrollbar-thumb:hover {
+ background-color: rgba(128, 128, 128, 0.8);
+}
+
+kbd {
+ color: var(--on-primary-color);
+ background-color: var(--primary-color);
+ border-radius: 3px;
+ border: 1px solid var(--on-primary-color);
display: inline-block;
+ font-size: .9em;
+ font-weight: bold;
+ padding: 0px 4px 2px 4px;
+ white-space: nowrap;
}
+
+a {
+ color: var(--primary-color);
+ text-decoration: none;
+}
+a:hover {
+ color: var(--secondary-color);
+}
+a:link {
+ color: var(--secondary-color);
+}
+
dl {
display: grid;
grid-template-columns: min-content auto;
- grid-gap: 10px;
+ grid-gap: 5px;
}
dt {
text-align: right;
@@ -73,15 +106,25 @@ dt {
dd {
margin: 0;
}
+
.panel {
- box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.2);
- transition: 0.3s;
background-color: var(--surface-color);
color: var(--on-surface-color);
padding: 10px 10px 10px 10px;
- margin: auto;
- overflow-x: scroll;
+ border-radius: 10px;
+ border: 3px solid rgba(var(--border-color), 0.2);
+}
+
+.panelBody {
+ max-height: 800px;
+ overflow-y: scroll;
+ margin: 0 -10px -10px 0;
+}
+
+.panel > h2 {
+ margin-top: 5px;
}
+
button {
cursor: pointer;
}
@@ -90,11 +133,21 @@ select,
button {
background-color: var(--surface-color);
color: var(--on-surface-color);
+ border: 2px solid rgba(var(--border-color), 0.4);
+ border-radius: 5px;
+ padding: 2px;
+}
+input:hover,
+select:hover,
+button:hover {
+ border: 2px solid rgba(var(--border-color), 0.6);
}
+
.colorbox {
width: 10px;
height: 10px;
border: 1px var(--background-color) solid;
+ border-radius: 50%;
}
.primary {
@@ -146,10 +199,4 @@ button {
background-color: var(--primary-color);
color: var(--on-primary-color);
cursor: pointer;
-}
-
-a:link {
- color: var(--secondary-color);
- background-color: transparent;
- text-decoration: none;
-}
+}
\ No newline at end of file
diff --git a/deps/v8/tools/system-analyzer/index.html b/deps/v8/tools/system-analyzer/index.html
index c910446114..a861300f91 100644
--- a/deps/v8/tools/system-analyzer/index.html
+++ b/deps/v8/tools/system-analyzer/index.html
@@ -2,34 +2,29 @@
<!-- Copyright 2020 the V8 project authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file. -->
-
<html lang="en">
-
<head>
<meta charset="UTF-8">
<title>Indicium</title>
- <link href='https://fonts.googleapis.com/css?family=Roboto' rel='stylesheet'>
<!-- <link rel="icon" type="image/png" href="/images/favicon.png"/> -->
- <script type="module" src="index.mjs"></script>
- <link rel="stylesheet" type="text/css" href="./index.css">
- <style>
- #instructions {
- padding: 10px 10px 60px 10px;
- margin: auto;
- }
- kbd {
- background-color: var(--primary-color);
- color: var(--on-primary-color);
- border-radius: 3px;
- border: 1px solid var(--on-primary-color);
- display: inline-block;
- font-size: .9em;
- font-weight: bold;
- padding: 0px 4px 2px 4px;
- white-space: nowrap;
- }
+ <link rel="modulepreload" href="./log-file-reader.mjs" >
+ <link rel="modulepreload" href="./helper.mjs" >
+ <link rel="preload" href="./log-file-reader-template.html" as="fetch" crossorigin="anonymous">
+ <script type="module">
+ // Force instantiating the log-reader before anything else.
+ import "./log-file-reader.mjs";
+ // Delay loading of the main App
+ (async function() {
+ let module = await import('./index.mjs');
+ globalThis.app = new module.App("#log-file-reader", "#map-panel", "#map-stats-panel",
+ "#timeline-panel", "#ic-panel", "#map-track", "#ic-track", "#deopt-track",
+ "#source-panel");
+ })();
+ </script>
+ <link rel="stylesheet" type="text/css" href="./index.css">
+ <style>
.theme-switch-wrapper {
display: inline-block;
align-items: center;
@@ -37,9 +32,9 @@ found in the LICENSE file. -->
.theme-switch {
display: inline-block;
- height: 34px;
+ height: 16px;
position: relative;
- width: 60px;
+ width: 38px;
}
.theme-switch input {
@@ -54,150 +49,138 @@ found in the LICENSE file. -->
position: absolute;
right: 0;
top: 0;
- transition: .4s;
+ border-radius: 34px;
}
.slider:before {
background-color: var(--surface-color);
- bottom: 4px;
+ position: absolute;
+ height: 10px;
+ width: 10px;
+ bottom: 3px;
content: "";
- height: 26px;
left: 4px;
- position: absolute;
- transition: .4s;
- width: 26px;
- }
-
- input:checked+.slider {
- background-color: var(--primary-color);
+ border-radius: 50%;
}
input:checked+.slider:before {
- transform: translateX(26px);
- }
-
- .slider.round {
- border-radius: 34px;
- }
-
- .slider.round:before {
- border-radius: 50%;
+ transform: translateX(20px);
}
#container.initial {
display: none;
}
- #container.loaded {
- display: grid;
- align-content: center;
- grid-template-columns: repeat(auto-fit, minmax(400px, 800px));
- grid-template-rows: repeat(auto-fit, minmax(400px, 800px));
- grid-auto-flow: dense;
- }
-
- #container.loaded>#timeline-panel {
- grid-column: span 2;
- overflow: scroll;
+ #timeline-panel {
+ width: 100%;
}
- a {
- color: var(--primary-color);
+ .panels{
+ display: grid;
+ align-content: center;
+ grid-template-columns: repeat(auto-fill, minmax(500px, 1fr));
+ grid-auto-flow: row dense;
+ grid-gap: 10px;
+ margin-top: 10px;
}
- a:hover {
- color: var(--secondary-color);
+ dt::after {
+ content: ":";
}
</style>
- <script type="module">
- import { App } from './index.mjs';
-
- globalThis.app = new App("#log-file-reader", "#map-panel",
- "#timeline-panel", "#ic-panel", "#map-track", "#ic-track",
- "#source-panel");
- </script>
</head>
<body>
- <div id="content">
- <section id="file-reader">
- <log-file-reader id="log-file-reader"></log-file-reader>
- </section>
- <div class="theme-switch-wrapper">
- <label class="theme-switch" for="checkbox">
- <input type="checkbox" id="checkbox" />
- <div class="slider round"></div>
- </label>
- </div>
- <div id="container" class="initial">
- <timeline-panel id="timeline-panel">
- <timeline-track id="map-track"></timeline-track>
- <timeline-track id="ic-track"></timeline-track>
- </timeline-panel>
+ <section id="file-reader">
+ <log-file-reader id="log-file-reader"></log-file-reader>
+ </section>
+
+ <section id="container" class="initial">
+ <timeline-panel id="timeline-panel">
+ <timeline-track id="map-track"></timeline-track>
+ <timeline-track id="ic-track"></timeline-track>
+ <timeline-track id="deopt-track"></timeline-track>
+ </timeline-panel>
+ <div class="panels">
<map-panel id="map-panel"></map-panel>
+ <stats-panel id="map-stats-panel"></stats-panel>
<ic-panel id="ic-panel" onchange="app.handleSelectIc(event)"></ic-panel>
<source-panel id="source-panel"></source-panel>
</div>
- </div>
- <div id="instructions">
- <h2>Instructions</h2>
- <p>
- Unified web interface to analyse runtime information stored in the v8 log.
- </p>
- For generating log file from
- <a href="https://v8.dev/docs/build" target="_blank">d8</a>:
- <p>
- Log Options:
- </p>
- <dl>
- <dt>--trace-maps:</dt>
- <dd>Log<a href="https://v8.dev/blog/fast-properties" target="_blank">
- Maps</a></dd>
- <dt>--trace_ic:</dt>
- <dd>Log
- <a href="https://mathiasbynens.be/notes/shapes-ics" target="_blank">
- ICs</a></dd>
- <dt>--log-source-code:</dt>
- <dd>Log source code</dd>
- </dl>
- Usage:
- <ul>
- <li><code>/path/do/d8 --trace-maps --trace_ic --log-source-code $FILE
- </code></li>
- </ul>
- For generating a log file from Chrome:
- <ul>
- <li><code>/path/to/chrome --user-data-dir=/var/tmp/chr1 --no-sandbox
- --js-flags='--trace-ic --trace-maps --log-source-code’
- $WEBSITE_URL</code></li>
- </ul>
- <h3>Keyboard Shortcuts for Navigation</h3>
- <dl>
- <dt><kbd>SHIFT</kbd> + <kbd>Arrow Up</kbd></dt>
- <dd>Follow Map transition forward (first child)</dd>
-
- <dt><kbd>SHIFT</kbd> + <kbd>Arrow Down</kbd></dt>
- <dd>Follow Map transition backwards</dd>
-
- <dt><kbd>Arrow Up</kbd></dt>
- <dd>Go to previous Map chunk</dd>
-
- <dt><kbd>Arrow Down</kbd></dt>
- <dd>Go to next Map in chunk</dd>
-
- <dt><kbd>Arrow Left</kbd></dt>
- <dd>Go to previous chunk</dd>
-
- <dt><kbd>Arrow Right</kbd></dt>
- <dd>Go to next chunk</dd>
-
- <dt><kbd>+</kbd></dt>
- <dd>Timeline zoom in</dd>
-
- <dt><kbd>-</kbd></dt>
- <dd>Timeline zoom out</dd>
- </dl>
+ </section>
+
+ <div class="panels">
+ <section id="settings" class="panel">
+ <h2>Settings</h2>
+ <span>Theme:</span>
+ <div class="theme-switch-wrapper">
+ <label class="theme-switch" for="theme-switch-input">
+ <input type="checkbox" id="theme-switch-input" />
+ <div class="slider"></div>
+ </label>
+ </div>
+ </section>
+
+ <section id="instructions" class="panel">
+ <h2>Instructions</h2>
+ <p>
+ Unified web interface to analyse runtime information stored in the v8 log.
+ </p>
+ For generating a v8.log file from <a href="https://v8.dev/docs/build">d8</a>:
+ <ul>
+ <li>
+ <code>/path/to/d8 --trace-maps --trace_ic --log-source-code $FILE</code>
+ </li>
+ </ul>
+ For generating a v8.log file from Chrome:
+ <ul>
+ <li>
+ <code>/path/to/chrome --user-data-dir=/var/tmp/chr$RANDOM --no-sandbox
+ --js-flags='--trace-ic --trace-maps --log-source-code'
+ $WEBSITE_URL</code>
+ </li>
+ </ul>
+
+ <h3>Log Options:</h3>
+ <dl class="d8-options">
+ <dt><code>--trace-maps</code></dt>
+ <dd>Log<a href="https://v8.dev/blog/fast-properties" target="_blank">
+ Maps</a></dd>
+ <dt><code>--trace-ic</code></dt>
+ <dd>Log
+ <a href="https://mathiasbynens.be/notes/shapes-ics" target="_blank">
+ ICs</a></dd>
+ <dt><code>--log-source-code</code></dt>
+ <dd>Log source code</dd>
+ </dl>
+
+ <h3>Keyboard Shortcuts for Navigation</h3>
+ <dl>
+ <dt><kbd>SHIFT</kbd> + <kbd>Arrow Up</kbd></dt>
+ <dd>Follow Map transition forward (first child)</dd>
+
+ <dt><kbd>SHIFT</kbd> + <kbd>Arrow Down</kbd></dt>
+ <dd>Follow Map transition backwards</dd>
+
+ <dt><kbd>Arrow Up</kbd></dt>
+ <dd>Go to previous Map chunk</dd>
+
+ <dt><kbd>Arrow Down</kbd></dt>
+ <dd>Go to next Map in chunk</dd>
+
+ <dt><kbd>Arrow Left</kbd></dt>
+ <dd>Go to previous chunk</dd>
+
+ <dt><kbd>Arrow Right</kbd></dt>
+ <dd>Go to next chunk</dd>
+
+ <dt><kbd>+</kbd></dt>
+ <dd>Timeline zoom in</dd>
+
+ <dt><kbd>-</kbd></dt>
+ <dd>Timeline zoom out</dd>
+ </dl>
+ </section>
</div>
</body>
-
</html>
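
The reworked page registers only the log-file-reader eagerly (helped along by the `modulepreload` hints) and defers everything else behind a dynamic `import()`, so the drop target is interactive before the heavy panels load. The shape of that bootstrap, with the selector list abbreviated:

    // Register the one element users interact with first.
    import './log-file-reader.mjs';

    // The rest of the app streams in behind it without blocking first paint.
    (async () => {
      const module = await import('./index.mjs');
      globalThis.app = new module.App('#log-file-reader', '#map-panel' /* ... */);
    })();
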
diff --git a/deps/v8/tools/system-analyzer/index.mjs b/deps/v8/tools/system-analyzer/index.mjs
index 80e5b79948..dfc858e5d6 100644
--- a/deps/v8/tools/system-analyzer/index.mjs
+++ b/deps/v8/tools/system-analyzer/index.mjs
@@ -2,167 +2,178 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+import {SourcePosition} from '../profile.mjs';
-import { SelectionEvent, FocusEvent, SelectTimeEvent } from "./events.mjs";
-import { State } from "./app-model.mjs";
-import { MapLogEvent } from "./log/map.mjs";
-import { IcLogEvent } from "./log/ic.mjs";
-import Processor from "./processor.mjs";
-import { SourcePosition } from "../profile.mjs";
-import { $ } from "./helper.mjs";
-import "./ic-panel.mjs";
-import "./timeline-panel.mjs";
-import "./map-panel.mjs";
-import "./log-file-reader.mjs";
-import "./source-panel.mjs";
-
+import {State} from './app-model.mjs';
+import {FocusEvent, SelectionEvent, SelectTimeEvent} from './events.mjs';
+import {$} from './helper.mjs';
+import {IcLogEntry} from './log/ic.mjs';
+import {MapLogEntry} from './log/map.mjs';
+import {Processor} from './processor.mjs';
class App {
- #state;
- #view;
- #navigation;
- constructor(fileReaderId, mapPanelId, timelinePanelId,
- icPanelId, mapTrackId, icTrackId, sourcePanelId) {
- this.#view = {
+ _state;
+ _view;
+ _navigation;
+ _startupPromise;
+ constructor(
+ fileReaderId, mapPanelId, mapStatsPanelId, timelinePanelId, icPanelId,
+ mapTrackId, icTrackId, deoptTrackId, sourcePanelId) {
+ this._view = {
+ __proto__: null,
logFileReader: $(fileReaderId),
icPanel: $(icPanelId),
mapPanel: $(mapPanelId),
+ mapStatsPanel: $(mapStatsPanelId),
timelinePanel: $(timelinePanelId),
mapTrack: $(mapTrackId),
icTrack: $(icTrackId),
+ deoptTrack: $(deoptTrackId),
sourcePanel: $(sourcePanelId)
};
- this.#state = new State();
- this.#navigation = new Navigation(this.#state, this.#view);
- document.addEventListener('keydown',
- e => this.#navigation.handleKeyDown(e));
this.toggleSwitch = $('.theme-switch input[type="checkbox"]');
- this.toggleSwitch.addEventListener("change", (e) => this.switchTheme(e));
- this.#view.logFileReader.addEventListener("fileuploadstart", (e) =>
- this.handleFileUpload(e)
- );
- this.#view.logFileReader.addEventListener("fileuploadend", (e) =>
- this.handleDataUpload(e)
- );
- Object.entries(this.#view).forEach(([_, panel]) => {
- panel.addEventListener(SelectionEvent.name,
- e => this.handleShowEntries(e));
- panel.addEventListener(FocusEvent.name,
- e => this.handleShowEntryDetail(e));
- panel.addEventListener(SelectTimeEvent.name,
- e => this.handleTimeRangeSelect(e));
- });
+ this.toggleSwitch.addEventListener('change', (e) => this.switchTheme(e));
+ this._view.logFileReader.addEventListener(
+ 'fileuploadstart', (e) => this.handleFileUploadStart(e));
+ this._view.logFileReader.addEventListener(
+ 'fileuploadend', (e) => this.handleFileUploadEnd(e));
+ this._startupPromise = this.runAsyncInitialize();
+ }
+
+ async runAsyncInitialize() {
+ await Promise.all([
+ import('./ic-panel.mjs'),
+ import('./timeline-panel.mjs'),
+ import('./stats-panel.mjs'),
+ import('./map-panel.mjs'),
+ import('./source-panel.mjs'),
+ ]);
+ document.addEventListener(
+ 'keydown', e => this._navigation?.handleKeyDown(e));
+ document.addEventListener(
+ SelectionEvent.name, e => this.handleShowEntries(e));
+ document.addEventListener(
+ FocusEvent.name, e => this.handleShowEntryDetail(e));
+ document.addEventListener(
+ SelectTimeEvent.name, e => this.handleTimeRangeSelect(e));
}
+
handleShowEntries(e) {
- if (e.entries[0] instanceof MapLogEvent) {
+ if (e.entries[0] instanceof MapLogEntry) {
this.showMapEntries(e.entries);
- } else if (e.entries[0] instanceof IcLogEvent) {
+ } else if (e.entries[0] instanceof IcLogEntry) {
this.showIcEntries(e.entries);
} else if (e.entries[0] instanceof SourcePosition) {
this.showSourcePositionEntries(e.entries);
} else {
- throw new Error("Unknown selection type!");
+ throw new Error('Unknown selection type!');
}
+ e.stopPropagation();
}
showMapEntries(entries) {
- this.#state.selectedMapLogEvents = entries;
- this.#view.mapPanel.selectedMapLogEvents = this.#state.selectedMapLogEvents;
+ this._state.selectedMapLogEntries = entries;
+ this._view.mapPanel.selectedMapLogEntries = entries;
+ this._view.mapStatsPanel.selectedLogEntries = entries;
}
showIcEntries(entries) {
- this.#state.selectedIcLogEvents = entries;
- this.#view.icPanel.selectedLogEvents = this.#state.selectedIcLogEvents;
+ this._state.selectedIcLogEntries = entries;
+ this._view.icPanel.selectedLogEntries = entries;
+ }
+ showDeoptEntries(entries) {
+ this._state.selectedDeoptLogEntries = entries;
}
showSourcePositionEntries(entries) {
- //TODO(zcankara) Handle multiple source position selection events
- this.#view.sourcePanel.selectedSourcePositions = entries;
+ // TODO: Handle multiple source position selection events
+ this._view.sourcePanel.selectedSourcePositions = entries
}
handleTimeRangeSelect(e) {
this.selectTimeRange(e.start, e.end);
+ e.stopPropagation();
+ }
+
+ selectTimeRange(start, end) {
+ this._state.selectTimeRange(start, end);
+ this.showMapEntries(this._state.mapTimeline.selection);
+ this.showIcEntries(this._state.icTimeline.selection);
+ this.showDeoptEntries(this._state.deoptTimeline.selection);
+ this._view.timelinePanel.timeSelection = {start, end};
}
+
handleShowEntryDetail(e) {
- if (e.entry instanceof MapLogEvent) {
- this.selectMapLogEvent(e.entry);
- } else if (e.entry instanceof IcLogEvent) {
- this.selectICLogEvent(e.entry);
+ if (e.entry instanceof MapLogEntry) {
+ this.selectMapLogEntry(e.entry);
+ } else if (e.entry instanceof IcLogEntry) {
+ this.selectICLogEntry(e.entry);
} else if (e.entry instanceof SourcePosition) {
- this.selectSourcePositionEvent(e.entry);
+ this.selectSourcePosition(e.entry);
} else {
- throw new Error("Unknown selection type!");
+ throw new Error('Unknown selection type!');
}
+ e.stopPropagation();
}
- selectTimeRange(start, end) {
- this.#state.timeSelection.start = start;
- this.#state.timeSelection.end = end;
- this.#state.icTimeline.selectTimeRange(start, end);
- this.#state.mapTimeline.selectTimeRange(start, end);
- this.#view.mapPanel.selectedMapLogEvents =
- this.#state.mapTimeline.selection;
- this.#view.icPanel.selectedLogEvents = this.#state.icTimeline.selection;
- }
- selectMapLogEvent(entry) {
- this.#state.map = entry;
- this.#view.mapTrack.selectedEntry = entry;
- this.#view.mapPanel.map = entry;
+ selectMapLogEntry(entry) {
+ this._state.map = entry;
+ this._view.mapTrack.selectedEntry = entry;
+ this._view.mapPanel.map = entry;
}
- selectICLogEvent(entry) {
- this.#state.ic = entry;
- this.#view.icPanel.selectedLogEvents = [entry];
+ selectICLogEntry(entry) {
+ this._state.ic = entry;
+ this._view.icPanel.selectedLogEntries = [entry];
}
- selectSourcePositionEvent(sourcePositions) {
+ selectSourcePosition(sourcePositions) {
if (!sourcePositions.script) return;
- this.#view.sourcePanel.selectedSourcePositions = [sourcePositions];
+ this._view.sourcePanel.selectedSourcePositions = [sourcePositions];
}
- handleFileUpload(e) {
+ handleFileUploadStart(e) {
this.restartApp();
- $("#container").className = "initial";
+ $('#container').className = 'initial';
}
+
restartApp() {
- this.#state = new State();
- this.#navigation = new Navigation(this.#state, this.#view);
- }
- // Event log processing
- handleLoadTextProcessor(text) {
- let logProcessor = new Processor();
- logProcessor.processString(text);
- return logProcessor;
+ this._state = new State();
+ this._navigation = new Navigation(this._state, this._view);
}
- // call when a new file uploaded
- handleDataUpload(e) {
- if (!e.detail) return;
- $("#container").className = "loaded";
- // instantiate the app logic
- let fileData = e.detail;
- const processor = this.handleLoadTextProcessor(fileData.chunk);
- const mapTimeline = processor.mapTimeline;
- const icTimeline = processor.icTimeline;
- //TODO(zcankara) Make sure only one instance of src event map ic id match
- // Load map log events timeline.
- this.#state.mapTimeline = mapTimeline;
- // Transitions must be set before timeline for stats panel.
- this.#view.mapPanel.transitions = this.#state.mapTimeline.transitions;
- this.#view.mapTrack.data = mapTimeline;
- this.#state.chunks = this.#view.mapTrack.chunks;
- this.#view.mapPanel.timeline = mapTimeline;
- // Load ic log events timeline.
- this.#state.icTimeline = icTimeline;
- this.#view.icPanel.timeline = icTimeline;
- this.#view.icTrack.data = icTimeline;
- this.#view.sourcePanel.data = processor.scripts
- this.fileLoaded = true;
+ async handleFileUploadEnd(e) {
+ await this._startupPromise;
+ try {
+ const processor = new Processor(e.detail);
+ const mapTimeline = processor.mapTimeline;
+ const icTimeline = processor.icTimeline;
+ const deoptTimeline = processor.deoptTimeline;
+ this._state.mapTimeline = mapTimeline;
+ this._state.icTimeline = icTimeline;
+ this._state.deoptTimeline = deoptTimeline;
+ // Transitions must be set before timeline for stats panel.
+ this._view.mapPanel.timeline = mapTimeline;
+ this._view.mapTrack.data = mapTimeline;
+ this._view.mapStatsPanel.transitions =
+ this._state.mapTimeline.transitions;
+ this._view.mapStatsPanel.timeline = mapTimeline;
+ this._view.icPanel.timeline = icTimeline;
+ this._view.icTrack.data = icTimeline;
+ this._view.deoptTrack.data = deoptTimeline;
+ this._view.sourcePanel.data = processor.scripts
+ } catch (e) {
+ this._view.logFileReader.error = 'Log file contains errors!'
+ throw (e);
+ } finally {
+ $('#container').className = 'loaded';
+ this.fileLoaded = true;
+ }
}
refreshTimelineTrackView() {
- this.#view.mapTrack.data = this.#state.mapTimeline;
- this.#view.icTrack.data = this.#state.icTimeline;
+ this._view.mapTrack.data = this._state.mapTimeline;
+ this._view.icTrack.data = this._state.icTimeline;
+ this._view.deoptTrack.data = this._state.deoptTimeline;
}
switchTheme(event) {
- document.documentElement.dataset.theme = event.target.checked
- ? "light"
- : "dark";
+ document.documentElement.dataset.theme =
+ event.target.checked ? 'light' : 'dark';
if (this.fileLoaded) {
this.refreshTimelineTrackView();
}
@@ -170,10 +181,10 @@ class App {
}
class Navigation {
- #view;
+ _view;
constructor(state, view) {
this.state = state;
- this.#view = view;
+ this._view = view;
}
get map() {
return this.state.map
@@ -182,37 +193,37 @@ class Navigation {
this.state.map = value
}
get chunks() {
- return this.state.chunks
+ return this.state.mapTimeline.chunks;
}
increaseTimelineResolution() {
- this.#view.timelinePanel.nofChunks *= 1.5;
+ this._view.timelinePanel.nofChunks *= 1.5;
this.state.nofChunks *= 1.5;
}
decreaseTimelineResolution() {
- this.#view.timelinePanel.nofChunks /= 1.5;
+ this._view.timelinePanel.nofChunks /= 1.5;
this.state.nofChunks /= 1.5;
}
selectNextEdge() {
if (!this.map) return;
if (this.map.children.length != 1) return;
this.map = this.map.children[0].to;
- this.#view.mapTrack.selectedEntry = this.map;
+ this._view.mapTrack.selectedEntry = this.map;
this.updateUrl();
- this.#view.mapPanel.map = this.map;
+ this._view.mapPanel.map = this.map;
}
selectPrevEdge() {
if (!this.map) return;
if (!this.map.parent()) return;
this.map = this.map.parent();
- this.#view.mapTrack.selectedEntry = this.map;
+ this._view.mapTrack.selectedEntry = this.map;
this.updateUrl();
- this.#view.mapPanel.map = this.map;
+ this._view.mapPanel.map = this.map;
}
selectDefaultMap() {
this.map = this.chunks[0].at(0);
- this.#view.mapTrack.selectedEntry = this.map;
+ this._view.mapTrack.selectedEntry = this.map;
this.updateUrl();
- this.#view.mapPanel.map = this.map;
+ this._view.mapPanel.map = this.map;
}
moveInChunks(next) {
if (!this.map) return this.selectDefaultMap();
@@ -227,9 +238,9 @@ class Navigation {
if (!chunk) return;
index = Math.min(index, chunk.size() - 1);
this.map = chunk.at(index);
- this.#view.mapTrack.selectedEntry = this.map;
+ this._view.mapTrack.selectedEntry = this.map;
this.updateUrl();
- this.#view.mapPanel.map = this.map;
+ this._view.mapPanel.map = this.map;
}
moveInChunk(delta) {
if (!this.map) return this.selectDefaultMap();
@@ -245,9 +256,9 @@ class Navigation {
map = chunk.at(index);
}
this.map = map;
- this.#view.mapTrack.selectedEntry = this.map;
+ this._view.mapTrack.selectedEntry = this.map;
this.updateUrl();
- this.#view.mapPanel.map = this.map;
+ this._view.mapPanel.map = this.map;
}
updateUrl() {
let entries = this.state.entries;
@@ -256,7 +267,7 @@ class Navigation {
}
handleKeyDown(event) {
switch (event.key) {
- case "ArrowUp":
+ case 'ArrowUp':
event.preventDefault();
if (event.shiftKey) {
this.selectPrevEdge();
@@ -264,7 +275,7 @@ class Navigation {
this.moveInChunk(-1);
}
return false;
- case "ArrowDown":
+ case 'ArrowDown':
event.preventDefault();
if (event.shiftKey) {
this.selectNextEdge();
@@ -272,20 +283,20 @@ class Navigation {
this.moveInChunk(1);
}
return false;
- case "ArrowLeft":
+ case 'ArrowLeft':
this.moveInChunks(false);
break;
- case "ArrowRight":
+ case 'ArrowRight':
this.moveInChunks(true);
break;
- case "+":
+ case '+':
this.increaseTimelineResolution();
break;
- case "-":
+ case '-':
this.decreaseTimelineResolution();
break;
}
}
}
-export { App };
+export {App};
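
Because the panel modules now load lazily, `App` keeps the promise from `runAsyncInitialize()` and awaits it at the top of `handleFileUploadEnd`, so a log dropped before the imports finish is still processed. A condensed sketch of that gating idea, with a stand-in for the panel imports:

    class App {
      constructor() {
        // Start async setup immediately, but keep the promise around.
        this._startupPromise = this._initialize();
      }

      async _initialize() {
        await Promise.resolve();  // Stand-in for the dynamic panel imports.
      }

      async handleFileUploadEnd(detail) {
        await this._startupPromise;  // Uploads that race setup wait here.
        console.log('processing', detail);
      }
    }

    new App().handleFileUploadEnd('log contents');

The same refactoring moves the `SelectionEvent`/`FocusEvent`/`SelectTimeEvent` listeners from individual panels to `document`, and each handler now calls `e.stopPropagation()` once the event has been consumed.
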
diff --git a/deps/v8/tools/system-analyzer/log-file-reader-template.html b/deps/v8/tools/system-analyzer/log-file-reader-template.html
index e20ce1ea20..e54d45990a 100644
--- a/deps/v8/tools/system-analyzer/log-file-reader-template.html
+++ b/deps/v8/tools/system-analyzer/log-file-reader-template.html
@@ -7,26 +7,26 @@ found in the LICENSE file. -->
</head>
<style>
#fileReader {
- width: 100%;
height: 100px;
line-height: 100px;
text-align: center;
- border-radius: 5px;
cursor: pointer;
transition: all 0.5s ease-in-out;
- border: 2px solid var(--primary-color);
background-color: var(--surface-color);
}
+
+ #fileReader:hover {
+ background-color: var(--primary-color);
+ color: var(--on-primary-color);
+ }
- #fileReader.done {
+ .done #fileReader{
height: 20px;
line-height: 20px;
- box-shadow: 0 10px 20px rgba(0, 0, 0, 0.19), 0 6px 6px rgba(0, 0, 0, 0.23);
}
- #fileReader:hover {
- background-color: var(--primary-color);
- color: var(--on-primary-color);
+ .fail #fileReader {
+ background-color: var(--error-color);
}
.loading #fileReader {
@@ -50,7 +50,6 @@ found in the LICENSE file. -->
height: 100%;
background-color: var(--file-reader-background-color);
}
-
#spinner {
position: absolute;
width: 100px;
@@ -74,9 +73,8 @@ found in the LICENSE file. -->
}
}
</style>
-
-<section id="fileReaderSection">
- <div id="fileReader" tabindex=1>
+<div id="root">
+ <div id="fileReader" class="panel" tabindex=1>
<span id="label">
Drag and drop a v8.log file into this area, or click to choose from disk.
</span>
@@ -85,4 +83,4 @@ found in the LICENSE file. -->
<div id="loader">
<div id="spinner"></div>
</div>
-</section>
+</div>
diff --git a/deps/v8/tools/system-analyzer/log-file-reader.mjs b/deps/v8/tools/system-analyzer/log-file-reader.mjs
index 4b5238d89d..c46d792d00 100644
--- a/deps/v8/tools/system-analyzer/log-file-reader.mjs
+++ b/deps/v8/tools/system-analyzer/log-file-reader.mjs
@@ -1,80 +1,84 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import { defineCustomElement, V8CustomElement } from './helper.mjs';
+import {DOM, V8CustomElement} from './helper.mjs';
-defineCustomElement('log-file-reader', (templateText) =>
- class LogFileReader extends V8CustomElement {
- constructor() {
- super(templateText);
- this.addEventListener('click', e => this.handleClick(e));
- this.addEventListener('dragover', e => this.handleDragOver(e));
- this.addEventListener('drop', e => this.handleChange(e));
- this.$('#file').addEventListener('change', e => this.handleChange(e));
- this.$('#fileReader').addEventListener('keydown',
- e => this.handleKeyEvent(e));
- }
+DOM.defineCustomElement('log-file-reader',
+ (templateText) =>
+ class LogFileReader extends V8CustomElement {
+ constructor() {
+ super(templateText);
+ this.addEventListener('click', e => this.handleClick(e));
+ this.addEventListener('dragover', e => this.handleDragOver(e));
+ this.addEventListener('drop', e => this.handleChange(e));
+ this.$('#file').addEventListener('change', e => this.handleChange(e));
+ this.$('#fileReader')
+ .addEventListener('keydown', e => this.handleKeyEvent(e));
+ }
- get section() {
- return this.$('#fileReaderSection');
- }
+ set error(message) {
+ this._updateLabel(message);
+ this.root.className = 'fail';
+ }
- updateLabel(text) {
- this.$('#label').innerText = text;
- }
+ _updateLabel(text) {
+ this.$('#label').innerText = text;
+ }
- handleKeyEvent(event) {
- if (event.key == "Enter") this.handleClick(event);
- }
+ handleKeyEvent(event) {
+ if (event.key == 'Enter') this.handleClick(event);
+ }
- handleClick(event) {
- this.$('#file').click();
- }
+ handleClick(event) {
+ this.$('#file').click();
+ }
- handleChange(event) {
- // Used for drop and file change.
- event.preventDefault();
- this.dispatchEvent(new CustomEvent(
- 'fileuploadstart', { bubbles: true, composed: true }));
- var host = event.dataTransfer ? event.dataTransfer : event.target;
- this.readFile(host.files[0]);
- }
+ handleChange(event) {
+ // Used for drop and file change.
+ event.preventDefault();
+ this.dispatchEvent(
+ new CustomEvent('fileuploadstart', {bubbles: true, composed: true}));
+ const host = event.dataTransfer ? event.dataTransfer : event.target;
+ this.readFile(host.files[0]);
+ }
- handleDragOver(event) {
- event.preventDefault();
- }
+ handleDragOver(event) {
+ event.preventDefault();
+ }
- connectedCallback() {
- this.$('#fileReader').focus();
- }
+ connectedCallback() {
+ this.fileReader.focus();
+ }
+
+ get fileReader() {
+ return this.$('#fileReader');
+ }
- readFile(file) {
- if (!file) {
- this.updateLabel('Failed to load file.');
- return;
- }
- this.$('#fileReader').blur();
- this.section.className = 'loading';
- const reader = new FileReader();
- reader.onload = (e) => {
- try {
- let dataModel = Object.create(null);
- dataModel.file = file;
- dataModel.chunk = e.target.result;
- this.updateLabel('Finished loading \'' + file.name + '\'.');
- this.dispatchEvent(new CustomEvent(
- 'fileuploadend', {
- bubbles: true, composed: true,
- detail: dataModel
- }));
- this.section.className = 'success';
- this.$('#fileReader').classList.add('done');
- } catch (err) {
- console.error(err);
- this.section.className = 'failure';
- }
- };
- // Delay the loading a bit to allow for CSS animations to happen.
- setTimeout(() => reader.readAsText(file), 0);
+ get root() {
+ return this.$('#root');
+ }
+
+ readFile(file) {
+ if (!file) {
+ this.error = 'Failed to load file.';
+ return;
}
- });
+ this.fileReader.blur();
+ this.root.className = 'loading';
+ const reader = new FileReader();
+ reader.onload = (e) => this.handleFileLoad(e, file);
+ // Delay the loading a bit to allow for CSS animations to happen.
+ setTimeout(() => reader.readAsText(file), 0);
+ }
+
+ handleFileLoad(e, file) {
+ const chunk = e.target.result;
+ this._updateLabel(`Finished loading '${file.name}'.`);
+ this.dispatchEvent(new CustomEvent('fileuploadend', {
+ bubbles: true,
+ composed: true,
+ detail: chunk,
+ }));
+ this.root.className = 'done';
+ }
+});
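
The reader's events reach those document-level listeners only because every `CustomEvent` here is dispatched with `bubbles: true, composed: true`; `composed` is what lets the event escape the component's shadow root. A browser-only sketch with a hypothetical host element:

    function readAndAnnounce(element, file) {
      const reader = new FileReader();
      reader.onload = (e) => {
        element.dispatchEvent(new CustomEvent('fileuploadend', {
          bubbles: true,    // Travel up the DOM tree.
          composed: true,   // Cross shadow-DOM boundaries.
          detail: e.target.result,
        }));
      };
      // Defer slightly so CSS state changes can animate first.
      setTimeout(() => reader.readAsText(file), 0);
    }
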
diff --git a/deps/v8/tools/system-analyzer/log/deopt.mjs b/deps/v8/tools/system-analyzer/log/deopt.mjs
new file mode 100644
index 0000000000..f3ff1a71a2
--- /dev/null
+++ b/deps/v8/tools/system-analyzer/log/deopt.mjs
@@ -0,0 +1,10 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+import {LogEntry} from './log.mjs';
+
+export class DeoptLogEntry extends LogEntry {
+ constructor(type, time) {
+ super(type, time);
+ }
+}
diff --git a/deps/v8/tools/system-analyzer/log/ic.mjs b/deps/v8/tools/system-analyzer/log/ic.mjs
index 5001e60c36..b6c7ec5553 100644
--- a/deps/v8/tools/system-analyzer/log/ic.mjs
+++ b/deps/v8/tools/system-analyzer/log/ic.mjs
@@ -1,12 +1,12 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import { Event } from './log.mjs';
+import {LogEntry} from './log.mjs';
-class IcLogEvent extends Event {
+export class IcLogEntry extends LogEntry {
constructor(
- type, fn_file, time, line, column, key, oldState, newState, map, reason,
- script, additional) {
+ type, fn_file, time, line, column, key, oldState, newState, map, reason,
+ script, modifier, additional) {
super(type, time);
this.category = 'other';
if (this.type.indexOf('Store') !== -1) {
@@ -27,9 +27,9 @@ class IcLogEvent extends Event {
this.reason = reason;
this.additional = additional;
this.script = script;
+ this.modifier = modifier;
}
-
parseMapProperties(parts, offset) {
let next = parts[++offset];
if (!next.startsWith('dict')) return offset;
@@ -55,6 +55,11 @@ class IcLogEvent extends Event {
this.file = parts[offset];
return offset;
}
-}
-export { IcLogEvent };
+ static get propertyNames() {
+ return [
+ 'type', 'category', 'functionName', 'filePosition', 'state', 'key', 'map',
+ 'reason', 'file'
+ ];
+ }
+}
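
`propertyNames` gives the panel a single reflective list that drives both the group-key `<select>` and `createSubGroups`, so adding a groupable property means touching one getter. A trimmed sketch:

    class IcLogEntry {
      // Single source of truth for every groupable property.
      static get propertyNames() {
        return ['type', 'category', 'state', 'key', 'map'];
      }
    }

    for (const name of IcLogEntry.propertyNames) {
      console.log('group-key option:', name);
    }
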
diff --git a/deps/v8/tools/system-analyzer/log/log.mjs b/deps/v8/tools/system-analyzer/log/log.mjs
index 2945f0e76b..69195d7853 100644
--- a/deps/v8/tools/system-analyzer/log/log.mjs
+++ b/deps/v8/tools/system-analyzer/log/log.mjs
@@ -2,21 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-class Event {
- #time;
- #type;
+export class LogEntry {
+ _time;
+ _type;
constructor(type, time) {
- //TODO(zcankara) remove type and add empty getters to override
- this.#time = time;
- this.#type = type;
+ // TODO(zcankara) remove type and add empty getters to override
+ this._time = time;
+ this._type = type;
}
get time() {
- return this.#time;
+ return this._time;
}
get type() {
- return this.#type;
+ return this._type;
}
-}
-
-export { Event };
+ // Returns an Array of all possible _type values.
+ static get allTypes() {
+ throw new Error('Not implemented.');
+ }
+}
\ No newline at end of file
diff --git a/deps/v8/tools/system-analyzer/log/map.mjs b/deps/v8/tools/system-analyzer/log/map.mjs
index 38c8a9a63a..4df6fb847c 100644
--- a/deps/v8/tools/system-analyzer/log/map.mjs
+++ b/deps/v8/tools/system-analyzer/log/map.mjs
@@ -2,20 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import { typeToColor } from '../helper.mjs';
-import { Event } from './log.mjs';
+import {LogEntry} from './log.mjs';
// ===========================================================================
// Map Log Events
-const kChunkHeight = 250;
+const kChunkHeight = 200;
const kChunkWidth = 10;
function define(prototype, name, fn) {
- Object.defineProperty(prototype, name, { value: fn, enumerable: false });
+ Object.defineProperty(prototype, name, {value: fn, enumerable: false});
}
-define(Array.prototype, 'max', function (fn) {
+define(Array.prototype, 'max', function(fn) {
if (this.length === 0) return undefined;
if (fn === undefined) fn = (each) => each;
let max = fn(this[0]);
@@ -24,22 +23,21 @@ define(Array.prototype, 'max', function (fn) {
}
return max;
})
-define(Array.prototype, 'first', function () {
+define(Array.prototype, 'first', function() {
return this[0]
});
-define(Array.prototype, 'last', function () {
+define(Array.prototype, 'last', function() {
return this[this.length - 1]
});
// ===========================================================================
// Map Log Events
-class MapLogEvent extends Event {
+class MapLogEntry extends LogEntry {
edge = void 0;
children = [];
depth = 0;
- // TODO(zcankara): Change this to private class field.
- #isDeprecated = false;
+ _isDeprecated = false;
deprecatedTargets = null;
leftId = 0;
rightId = 0;
@@ -49,7 +47,7 @@ class MapLogEvent extends Event {
constructor(id, time) {
if (!time) throw new Error('Invalid time');
super(id, time);
- MapLogEvent.set(id, this);
+ MapLogEntry.set(id, this);
this.id = id;
}
@@ -58,7 +56,7 @@ class MapLogEvent extends Event {
while (stack.length > 0) {
let current = stack.pop();
if (current.leftId !== 0) {
- console.error('Skipping potential parent loop between maps:', current)
+ console.warn('Skipping potential parent loop between maps:', current)
continue;
}
current.finalize(id)
@@ -82,11 +80,11 @@ class MapLogEvent extends Event {
}
isDeprecated() {
- return this.#isDeprecated;
+ return this._isDeprecated;
}
deprecate() {
- this.#isDeprecated = true;
+ this._isDeprecated = true;
}
isRoot() {
@@ -172,7 +170,7 @@ class MapLogEvent extends Event {
}
}
-MapLogEvent.cache = new Map();
+MapLogEntry.cache = new Map();
// ===========================================================================
class Edge {
@@ -185,24 +183,20 @@ class Edge {
this.to = to;
}
- getColor() {
- return typeToColor(this.type);
- }
-
finishSetup() {
- let from = this.from;
+ const from = this.from;
if (from) from.addEdge(this);
- let to = this.to;
+ const to = this.to;
if (to === undefined) return;
to.edge = this;
if (from === undefined) return;
if (to === from) throw 'From and to must be distinct.';
if (to.time < from.time) {
- console.error('invalid time order');
+ console.warn('invalid time order');
}
let newDepth = from.depth + 1;
if (to.depth > 0 && to.depth != newDepth) {
- console.error('Depth has already been initialized');
+ console.warn('Depth has already been initialized');
}
to.depth = newDepth;
}
@@ -288,9 +282,8 @@ class Edge {
return this.type + ' ' + this.symbol() + this.name;
}
return this.type + ' ' + (this.reason ? this.reason : '') + ' ' +
- (this.name ? this.name : '')
+ (this.name ? this.name : '')
}
}
-
-export { MapLogEvent, Edge, kChunkWidth, kChunkHeight };
+export {MapLogEntry, Edge, kChunkWidth, kChunkHeight};
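
The `define` helper above attaches `max`, `first` and `last` to `Array.prototype` with `enumerable: false`, so the additions never show up in `for...in` loops or `Object.keys` over arrays. A self-contained sketch of the technique:

    function define(prototype, name, fn) {
      // Non-enumerable: `for (const k in [])` never sees the helper.
      Object.defineProperty(prototype, name, {value: fn, enumerable: false});
    }

    define(Array.prototype, 'last', function() {
      return this[this.length - 1];
    });

    console.log([1, 2, 3].last());                   // 3
    for (const key in [1, 2, 3]) console.log(key);   // Only '0', '1', '2'.
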
diff --git a/deps/v8/tools/system-analyzer/map-panel-template.html b/deps/v8/tools/system-analyzer/map-panel-template.html
index 6363a6d7c3..12d6ec5a14 100644
--- a/deps/v8/tools/system-analyzer/map-panel-template.html
+++ b/deps/v8/tools/system-analyzer/map-panel-template.html
@@ -10,7 +10,6 @@ found in the LICENSE file. -->
width: 200px;
}
</style>
-<stats-panel id="stats-panel"></stats-panel>
<div class="panel">
<h2>Map Panel</h2>
<map-transitions id="map-transitions"></map-transitions>
diff --git a/deps/v8/tools/system-analyzer/map-panel.mjs b/deps/v8/tools/system-analyzer/map-panel.mjs
index c78bce91a2..1516038a2d 100644
--- a/deps/v8/tools/system-analyzer/map-panel.mjs
+++ b/deps/v8/tools/system-analyzer/map-panel.mjs
@@ -1,90 +1,72 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import "./stats-panel.mjs";
-import "./map-panel/map-details.mjs";
-import "./map-panel/map-transitions.mjs";
-import { FocusEvent } from './events.mjs';
-import { MapLogEvent } from "./log/map.mjs";
-import { defineCustomElement, V8CustomElement } from './helper.mjs';
+import './stats-panel.mjs';
+import './map-panel/map-details.mjs';
+import './map-panel/map-transitions.mjs';
-defineCustomElement('map-panel', (templateText) =>
- class MapPanel extends V8CustomElement {
- #map;
- constructor() {
- super(templateText);
- this.searchBarBtn.addEventListener(
- 'click', e => this.handleSearchBar(e));
- this.addEventListener(
- FocusEvent.name, e => this.handleUpdateMapDetails(e));
- }
+import {FocusEvent} from './events.mjs';
+import {DOM, V8CustomElement} from './helper.mjs';
+import {MapLogEntry} from './log/map.mjs';
- handleUpdateMapDetails(e) {
- if (e.entry instanceof MapLogEvent) {
- this.mapDetailsPanel.mapDetails = e.entry;
- }
- }
+DOM.defineCustomElement('map-panel',
+ (templateText) =>
+ class MapPanel extends V8CustomElement {
+ _map;
+ constructor() {
+ super(templateText);
+ this.searchBarBtn.addEventListener('click', e => this.handleSearchBar(e));
+ this.addEventListener(FocusEvent.name, e => this.handleUpdateMapDetails(e));
+ }
- get statsPanel() {
- return this.$('#stats-panel');
+ handleUpdateMapDetails(e) {
+ if (e.entry instanceof MapLogEntry) {
+ this.mapDetailsPanel.map = e.entry;
}
+ }
- get mapTransitionsPanel() {
- return this.$('#map-transitions');
- }
+ get mapTransitionsPanel() {
+ return this.$('#map-transitions');
+ }
- get mapDetailsPanel() {
- return this.$('#map-details');
- }
+ get mapDetailsPanel() {
+ return this.$('#map-details');
+ }
- get searchBarBtn() {
- return this.$('#searchBarBtn');
- }
+ get searchBarBtn() {
+ return this.$('#searchBarBtn');
+ }
- get searchBar() {
- return this.$('#searchBar');
- }
+ get searchBar() {
+ return this.$('#searchBar');
+ }
- get mapDetails() {
- return this.mapDetailsPanel.mapDetails;
- }
+ set timeline(timeline) {
+ this._timeline = timeline;
+ }
- // send a timeline to the stats-panel
- set timeline(value) {
- console.assert(value !== undefined, "timeline undefined!");
- this.statsPanel.timeline = value;
- this.statsPanel.update();
- }
- get transitions() {
- return this.statsPanel.transitions;
- }
- set transitions(value) {
- this.statsPanel.transitions = value;
- }
-
- set map(value) {
- this.#map = value;
- this.mapTransitionsPanel.map = this.#map;
- }
+ set map(value) {
+ this._map = value;
+ this.mapTransitionsPanel.map = this._map;
+ }
- handleSearchBar(e) {
- let searchBar = this.$('#searchBarInput');
- let searchBarInput = searchBar.value;
- //access the map from model cache
- let selectedMap = MapLogEvent.get(parseInt(searchBarInput));
- if (selectedMap) {
- searchBar.className = "success";
- } else {
- searchBar.className = "failure";
- }
- this.dispatchEvent(new FocusEvent(selectedMap));
- }
-
- set selectedMapLogEvents(list) {
- this.mapTransitionsPanel.selectedMapLogEvents = list;
- }
- get selectedMapLogEvents() {
- return this.mapTransitionsPanel.selectedMapLogEvents;
+ handleSearchBar(e) {
+ let searchBar = this.$('#searchBarInput');
+ let searchBarInput = searchBar.value;
+ // Access the map from the model cache.
+ let selectedMap = MapLogEntry.get(parseInt(searchBarInput));
+ if (selectedMap) {
+ searchBar.className = 'success';
+ } else {
+ searchBar.className = 'failure';
}
+ this.dispatchEvent(new FocusEvent(selectedMap));
+ }
- });
+ set selectedMapLogEntries(list) {
+ this.mapTransitionsPanel.selectedMapLogEntries = list;
+ }
+ get selectedMapLogEntries() {
+ return this.mapTransitionsPanel.selectedMapLogEntries;
+ }
+});
diff --git a/deps/v8/tools/system-analyzer/map-panel/map-details.mjs b/deps/v8/tools/system-analyzer/map-panel/map-details.mjs
index d471609dba..bcf8f9c9aa 100644
--- a/deps/v8/tools/system-analyzer/map-panel/map-details.mjs
+++ b/deps/v8/tools/system-analyzer/map-panel/map-details.mjs
@@ -1,48 +1,47 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import { V8CustomElement, defineCustomElement } from "../helper.mjs";
-import { FocusEvent } from "../events.mjs";
+import {FocusEvent} from '../events.mjs';
+import {DOM, V8CustomElement} from '../helper.mjs';
+
+DOM.defineCustomElement(
+ './map-panel/map-details',
+ (templateText) => class MapDetails extends V8CustomElement {
+ _map;
-defineCustomElement(
- "./map-panel/map-details",
- (templateText) =>
- class MapDetails extends V8CustomElement {
constructor() {
super(templateText);
- this.#filePositionNode.addEventListener("click", e =>
- this.handleFilePositionClick(e)
- );
- this.selectedMap = undefined;
+ this._filePositionNode.onclick = e => this._handleFilePositionClick(e);
}
- get mapDetails() {
- return this.$("#mapDetails");
+
+ get _mapDetails() {
+ return this.$('#mapDetails');
}
- get #filePositionNode() {
- return this.$("#filePositionNode");
+ get _filePositionNode() {
+ return this.$('#filePositionNode');
}
- setSelectedMap(value) {
- this.selectedMap = value;
+ set map(map) {
+ if (this._map === map) return;
+ this._map = map;
+ this.update();
}
- set mapDetails(map) {
- let details = "";
- let clickableDetails = "";
- if (map) {
- clickableDetails += "ID: " + map.id;
- clickableDetails += "\nSource location: " + map.filePosition;
- details += "\n" + map.description;
- this.setSelectedMap(map);
+ _update() {
+ let details = '';
+ let clickableDetails = '';
+ if (this._map) {
+ clickableDetails = `ID: ${this._map.id}`;
+ clickableDetails += `\nSource location: ${this._map.filePosition}`;
+ details = this._map.description;
}
- this.#filePositionNode.innerText = clickableDetails;
- this.#filePositionNode.classList.add("clickable");
- this.mapDetails.innerText = details;
+ this._filePositionNode.innerText = clickableDetails;
+ this._filePositionNode.classList.add('clickable');
+ this._mapDetails.innerText = details;
}
- handleFilePositionClick() {
- this.dispatchEvent(new FocusEvent(this.selectedMap.sourcePosition));
+ _handleFilePositionClick(event) {
+ this.dispatchEvent(new FocusEvent(this._map.sourcePosition));
}
- }
-);
+ });
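
The set map / _update() split above is the pattern used by every panel in
this change: setters only record state and call this.update(), while all DOM
work happens in _update(). V8CustomElement.update() is defined in helper.mjs
(not shown here); presumably it schedules _update() asynchronously so that
several setter calls coalesce into one render, roughly along these lines:

  // Assumed behavior of V8CustomElement.update(); the real implementation
  // in helper.mjs may differ.
  class V8CustomElementSketch extends HTMLElement {
    update() {
      if (this._updateScheduled) return;
      this._updateScheduled = true;
      requestAnimationFrame(() => {
        this._updateScheduled = false;
        this._update();
      });
    }
    _update() {}  // overridden by MapDetails, MapTransitions, ...
  }
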
diff --git a/deps/v8/tools/system-analyzer/map-panel/map-transitions-template.html b/deps/v8/tools/system-analyzer/map-panel/map-transitions-template.html
index 99fb251b19..c4cab2bf46 100644
--- a/deps/v8/tools/system-analyzer/map-panel/map-transitions-template.html
+++ b/deps/v8/tools/system-analyzer/map-panel/map-transitions-template.html
@@ -41,7 +41,7 @@ found in the LICENSE file. -->
}
.map.selected {
- border-color: var(--map-background-color);
+ border-color: var(--on-surface-color);
}
.transitions {
diff --git a/deps/v8/tools/system-analyzer/map-panel/map-transitions.mjs b/deps/v8/tools/system-analyzer/map-panel/map-transitions.mjs
index d508b88694..60462a1db2 100644
--- a/deps/v8/tools/system-analyzer/map-panel/map-transitions.mjs
+++ b/deps/v8/tools/system-analyzer/map-panel/map-transitions.mjs
@@ -1,194 +1,184 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import { V8CustomElement, defineCustomElement } from "../helper.mjs";
-import { FocusEvent } from "../events.mjs";
-
-defineCustomElement(
- "./map-panel/map-transitions",
- (templateText) =>
- class MapTransitions extends V8CustomElement {
- #map;
- #selectedMapLogEvents;
- constructor() {
- super(templateText);
- this.transitionView.addEventListener("mousemove", (e) =>
- this.handleTransitionViewChange(e)
- );
- this.currentNode = this.transitionView;
- this.currentMap = undefined;
- }
-
- get transitionView() {
- return this.$("#transitionView");
- }
-
- get tooltip() {
- return this.$("#tooltip");
- }
-
- get tooltipContents() {
- return this.$("#tooltipContents");
- }
-
- set map(value) {
- this.#map = value;
- this.showMap();
- }
-
- handleTransitionViewChange(e) {
- this.tooltip.style.left = e.pageX + "px";
- this.tooltip.style.top = e.pageY + "px";
- let map = e.target.map;
- if (map) {
- this.tooltipContents.innerText = map.description;
- }
- }
-
- selectMap(map) {
- this.currentMap = map;
- this.showMap();
- this.dispatchEvent(new FocusEvent(map));
- }
-
- dblClickSelectMap(map) {
- this.dispatchEvent(new FocusEvent(map));
- }
-
- showMap() {
- // Called when a map selected
- if (this.currentMap === this.#map) return;
- this.currentMap = this.#map;
- this.selectedMapLogEvents = [this.#map];
- this.dispatchEvent(new FocusEvent(this.#map));
- }
-
- showMaps() {
- // Timeline dbl click to show map transitions of selected maps
- this.transitionView.style.display = "none";
- this.removeAllChildren(this.transitionView);
- this.selectedMapLogEvents.forEach((map) =>
- this.addMapAndParentTransitions(map));
- this.transitionView.style.display = "";
- }
-
- set selectedMapLogEvents(list) {
- this.#selectedMapLogEvents = list;
- this.showMaps();
- }
-
- get selectedMapLogEvents() {
- return this.#selectedMapLogEvents;
- }
-
- addMapAndParentTransitions(map) {
- if (map === void 0) return;
- this.currentNode = this.transitionView;
- let parents = map.getParents();
- if (parents.length > 0) {
- this.addTransitionTo(parents.pop());
- parents.reverse().forEach((each) => this.addTransitionTo(each));
- }
- let mapNode = this.addSubtransitions(map);
- // Mark and show the selected map.
- mapNode.classList.add("selected");
- if (this.selectedMap == map) {
- setTimeout(
- () =>
- mapNode.scrollIntoView({
- behavior: "smooth",
- block: "nearest",
- inline: "nearest",
- }),
- 1
- );
- }
- }
-
- addMapNode(map) {
- let node = this.div("map");
- if (map.edge) node.style.backgroundColor = map.edge.getColor();
- node.map = map;
- node.addEventListener("click", () => this.selectMap(map));
- node.addEventListener("dblclick", () => this.dblClickSelectMap(map));
- if (map.children.length > 1) {
- node.innerText = map.children.length;
- let showSubtree = this.div("showSubtransitions");
- showSubtree.addEventListener("click", (e) =>
- this.toggleSubtree(e, node)
- );
- node.appendChild(showSubtree);
- } else if (map.children.length == 0) {
- node.innerHTML = "&#x25CF;";
- }
- this.currentNode.appendChild(node);
- return node;
- }
-
- addSubtransitions(map) {
- let mapNode = this.addTransitionTo(map);
- // Draw outgoing linear transition line.
- let current = map;
- while (current.children.length == 1) {
- current = current.children[0].to;
- this.addTransitionTo(current);
- }
- return mapNode;
- }
-
- addTransitionEdge(map) {
- let classes = ["transitionEdge"];
- let edge = this.div(classes);
- edge.style.backgroundColor = map.edge.getColor();
- let labelNode = this.div("transitionLabel");
- labelNode.innerText = map.edge.toString();
- edge.appendChild(labelNode);
- return edge;
- }
-
- addTransitionTo(map) {
- // transition[ transitions[ transition[...], transition[...], ...]];
-
- let transition = this.div("transition");
- if (map.isDeprecated()) transition.classList.add("deprecated");
- if (map.edge) {
- transition.appendChild(this.addTransitionEdge(map));
- }
- let mapNode = this.addMapNode(map);
- transition.appendChild(mapNode);
-
- let subtree = this.div("transitions");
- transition.appendChild(subtree);
-
- this.currentNode.appendChild(transition);
- this.currentNode = subtree;
-
- return mapNode;
- }
-
- toggleSubtree(event, node) {
- let map = node.map;
- event.target.classList.toggle("opened");
- let transitionsNode = node.parentElement.querySelector(".transitions");
- let subtransitionNodes = transitionsNode.children;
- if (subtransitionNodes.length <= 1) {
- // Add subtransitions excepth the one that's already shown.
- let visibleTransitionMap =
- subtransitionNodes.length == 1
- ? transitionsNode.querySelector(".map").map
- : void 0;
- map.children.forEach((edge) => {
- if (edge.to != visibleTransitionMap) {
- this.currentNode = transitionsNode;
- this.addSubtransitions(edge.to);
- }
- });
- } else {
- // remove all but the first (currently selected) subtransition
- for (let i = subtransitionNodes.length - 1; i > 0; i--) {
- transitionsNode.removeChild(subtransitionNodes[i]);
- }
+import {FocusEvent, SelectionEvent} from '../events.mjs';
+import {DOM, typeToColor, V8CustomElement} from '../helper.mjs';
+
+DOM.defineCustomElement('./map-panel/map-transitions',
+ (templateText) =>
+ class MapTransitions extends V8CustomElement {
+ _map;
+ _selectedMapLogEntries;
+ _displayedMapsInTree;
+
+ constructor() {
+ super(templateText);
+ this.transitionView.addEventListener(
+ 'mousemove', (e) => this.handleTransitionViewChange(e));
+ this.currentNode = this.transitionView;
+ this.currentMap = undefined;
+ }
+
+ get transitionView() {
+ return this.$('#transitionView');
+ }
+
+ get tooltip() {
+ return this.$('#tooltip');
+ }
+
+ get tooltipContents() {
+ return this.$('#tooltipContents');
+ }
+
+ set map(value) {
+ this._map = value;
+ this.showMap();
+ }
+
+ handleTransitionViewChange(e) {
+ this.tooltip.style.left = e.pageX + 'px';
+ this.tooltip.style.top = e.pageY + 'px';
+ const map = e.target.map;
+ if (map) {
+ this.tooltipContents.innerText = map.description;
+ }
+ }
+
+ _selectMap(map) {
+ this.dispatchEvent(new SelectionEvent([map]));
+ }
+
+ showMap() {
+ if (this.currentMap === this._map) return;
+ this.currentMap = this._map;
+ this.selectedMapLogEntries = [this._map];
+ this.update();
+ }
+
+ _update() {
+ this.transitionView.style.display = 'none';
+ DOM.removeAllChildren(this.transitionView);
+ this._displayedMapsInTree = new Set();
+ // Limit view to 200 maps for performance reasons.
+ this.selectedMapLogEntries.slice(0, 200).forEach(
+ (map) => this.addMapAndParentTransitions(map));
+ this._displayedMapsInTree = undefined;
+ this.transitionView.style.display = '';
+ }
+
+ set selectedMapLogEntries(list) {
+ this._selectedMapLogEntries = list;
+ this.update();
+ }
+
+ get selectedMapLogEntries() {
+ return this._selectedMapLogEntries;
+ }
+
+ addMapAndParentTransitions(map) {
+ if (map === void 0) return;
+ if (this._displayedMapsInTree.has(map)) return;
+ this._displayedMapsInTree.add(map);
+ this.currentNode = this.transitionView;
+ let parents = map.getParents();
+ if (parents.length > 0) {
+ this.addTransitionTo(parents.pop());
+ parents.reverse().forEach((each) => this.addTransitionTo(each));
+ }
+ let mapNode = this.addSubtransitions(map);
+ // Mark and show the selected map.
+ mapNode.classList.add('selected');
+ if (this.selectedMap == map) {
+ setTimeout(
+ () => mapNode.scrollIntoView({
+ behavior: 'smooth',
+ block: 'nearest',
+ inline: 'nearest',
+ }),
+ 1);
+ }
+ }
+
+ addSubtransitions(map) {
+ let mapNode = this.addTransitionTo(map);
+ // Draw outgoing linear transition line.
+ let current = map;
+ while (current.children.length == 1) {
+ current = current.children[0].to;
+ this.addTransitionTo(current);
+ }
+ return mapNode;
+ }
+
+ addTransitionEdge(map) {
+ let classes = ['transitionEdge'];
+ let edge = DOM.div(classes);
+ edge.style.backgroundColor = typeToColor(map.edge);
+ let labelNode = DOM.div('transitionLabel');
+ labelNode.innerText = map.edge.toString();
+ edge.appendChild(labelNode);
+ return edge;
+ }
+
+ addTransitionTo(map) {
+ // transition[ transitions[ transition[...], transition[...], ...]];
+ this._displayedMapsInTree?.add(map);
+ let transition = DOM.div('transition');
+ if (map.isDeprecated()) transition.classList.add('deprecated');
+ if (map.edge) {
+ transition.appendChild(this.addTransitionEdge(map));
+ }
+ let mapNode = this.addMapNode(map);
+ transition.appendChild(mapNode);
+
+ let subtree = DOM.div('transitions');
+ transition.appendChild(subtree);
+
+ this.currentNode.appendChild(transition);
+ this.currentNode = subtree;
+
+ return mapNode;
+ }
+
+ addMapNode(map) {
+ let node = DOM.div('map');
+ if (map.edge) node.style.backgroundColor = typeToColor(map.edge);
+ node.map = map;
+ node.addEventListener('click', () => this._selectMap(map));
+ if (map.children.length > 1) {
+ node.innerText = map.children.length;
+ let showSubtree = DOM.div('showSubtransitions');
+ showSubtree.addEventListener('click', (e) => this.toggleSubtree(e, node));
+ node.appendChild(showSubtree);
+ } else if (map.children.length == 0) {
+ node.innerHTML = '&#x25CF;';
+ }
+ this.currentNode.appendChild(node);
+ return node;
+ }
+
+ toggleSubtree(event, node) {
+ let map = node.map;
+ event.target.classList.toggle('opened');
+ let transitionsNode = node.parentElement.querySelector('.transitions');
+ let subtransitionNodes = transitionsNode.children;
+ if (subtransitionNodes.length <= 1) {
+      // Add subtransitions except the one that's already shown.
+ let visibleTransitionMap = subtransitionNodes.length == 1 ?
+ transitionsNode.querySelector('.map').map :
+ void 0;
+ map.children.forEach((edge) => {
+ if (edge.to != visibleTransitionMap) {
+ this.currentNode = transitionsNode;
+ this.addSubtransitions(edge.to);
}
+ });
+ } else {
+      // Remove all but the first (currently selected) subtransition.
+ for (let i = subtransitionNodes.length - 1; i > 0; i--) {
+ transitionsNode.removeChild(subtransitionNodes[i]);
}
}
-);
+ }
+});
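
_update() above protects the transition tree in two ways: the selection is
capped at 200 maps, and _displayedMapsInTree deduplicates maps whose parent
chains overlap, so shared ancestors are only drawn once. Reduced to a
standalone sketch:

  // Restatement of the capping/deduplication logic in _update() and
  // addMapAndParentTransitions().
  function renderTransitionTrees(selectedMaps, renderTree, limit = 200) {
    const displayed = new Set();
    for (const map of selectedMaps.slice(0, limit)) {
      if (displayed.has(map)) continue;  // ancestor chain already drawn
      renderTree(map, displayed);  // renderTree records every map it draws
    }
  }
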
diff --git a/deps/v8/tools/system-analyzer/processor.mjs b/deps/v8/tools/system-analyzer/processor.mjs
index 0634174aef..49448bb9da 100644
--- a/deps/v8/tools/system-analyzer/processor.mjs
+++ b/deps/v8/tools/system-analyzer/processor.mjs
@@ -2,23 +2,25 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import { MapLogEvent, Edge } from "./log/map.mjs";
-import { IcLogEvent } from "./log/ic.mjs";
-import { Timeline } from "./timeline.mjs";
-import { LogReader, parseString, parseVarArgs } from "../logreader.mjs";
-import { Profile } from "../profile.mjs";
+import {LogReader, parseString, parseVarArgs} from '../logreader.mjs';
+import {Profile} from '../profile.mjs';
-// ===========================================================================
+import {DeoptLogEntry} from './log/deopt.mjs';
+import {IcLogEntry} from './log/ic.mjs';
+import {Edge, MapLogEntry} from './log/map.mjs';
+import {Timeline} from './timeline.mjs';
+// ===========================================================================
-class Processor extends LogReader {
- #profile = new Profile();
- #mapTimeline = new Timeline();
- #icTimeline = new Timeline();
- #formatPCRegexp = /(.*):[0-9]+:[0-9]+$/;
+export class Processor extends LogReader {
+ _profile = new Profile();
+ _mapTimeline = new Timeline();
+ _icTimeline = new Timeline();
+ _deoptTimeline = new Timeline();
+ _formatPCRegexp = /(.*):[0-9]+:[0-9]+$/;
MAJOR_VERSION = 7;
MINOR_VERSION = 6;
- constructor() {
+ constructor(logString) {
super();
this.propertyICParser = [
parseInt, parseInt, parseInt, parseInt, parseString, parseString,
@@ -33,9 +35,17 @@ class Processor extends LogReader {
],
processor: this.processCodeCreation
},
+ 'code-deopt': {
+ parsers: [
+ parseInt, parseInt, parseInt, parseInt, parseInt, parseString,
+ parseString, parseString
+ ],
+ processor: this.processCodeDeopt
+ },
'v8-version': {
parsers: [
- parseInt, parseInt,
+ parseInt,
+ parseInt,
],
processor: this.processV8Version
},
@@ -44,12 +54,12 @@ class Processor extends LogReader {
processor: this.processScriptSource
},
'code-move':
- { parsers: [parseInt, parseInt], processor: this.processCodeMove },
- 'code-delete': { parsers: [parseInt], processor: this.processCodeDelete },
+ {parsers: [parseInt, parseInt], processor: this.processCodeMove},
+ 'code-delete': {parsers: [parseInt], processor: this.processCodeDelete},
'sfi-move':
- { parsers: [parseInt, parseInt], processor: this.processFunctionMove },
+ {parsers: [parseInt, parseInt], processor: this.processFunctionMove},
'map-create':
- { parsers: [parseInt, parseString], processor: this.processMapCreate },
+ {parsers: [parseInt, parseString], processor: this.processMapCreate},
'map': {
parsers: [
parseString, parseInt, parseString, parseString, parseInt, parseInt,
@@ -90,6 +100,7 @@ class Processor extends LogReader {
processor: this.processPropertyIC.bind(this, 'StoreInArrayLiteralIC')
},
};
+ if (logString) this.processString(logString);
}
printError(str) {
@@ -114,7 +125,7 @@ class Processor extends LogReader {
this.processLogLine(line);
}
} catch (e) {
- console.error('Error occurred during parsing, trying to continue: ' + e);
+ console.error(`Error occurred during parsing, trying to continue: ${e}`);
}
this.finalize();
}
@@ -131,23 +142,23 @@ class Processor extends LogReader {
}
} catch (e) {
console.error(
- 'Error occurred during parsing line ' + i +
- ', trying to continue: ' + e);
+          `Error occurred during parsing line ${i}, trying to continue: ${e}`);
}
this.finalize();
}
finalize() {
// TODO(cbruni): print stats;
- this.#mapTimeline.transitions = new Map();
+ this._mapTimeline.transitions = new Map();
let id = 0;
- this.#mapTimeline.forEach(map => {
+ this._mapTimeline.forEach(map => {
if (map.isRoot()) id = map.finalizeRootMap(id + 1);
if (map.edge && map.edge.name) {
- let edge = map.edge;
- let list = this.#mapTimeline.transitions.get(edge.name);
+ const edge = map.edge;
+ const list = this._mapTimeline.transitions.get(edge.name);
if (list === undefined) {
- this.#mapTimeline.transitions.set(edge.name, [edge]);
+ this._mapTimeline.transitions.set(edge.name, [edge]);
} else {
list.push(edge);
}
@@ -167,45 +178,50 @@ class Processor extends LogReader {
case '*':
return Profile.CodeState.OPTIMIZED;
}
- throw new Error('unknown code state: ' + s);
+ throw new Error(`unknown code state: ${s}`);
}
processCodeCreation(type, kind, timestamp, start, size, name, maybe_func) {
if (maybe_func.length) {
- let funcAddr = parseInt(maybe_func[0]);
- let state = this.parseState(maybe_func[1]);
- this.#profile.addFuncCode(
- type, name, timestamp, start, size, funcAddr, state);
+ const funcAddr = parseInt(maybe_func[0]);
+ const state = this.parseState(maybe_func[1]);
+ this._profile.addFuncCode(
+ type, name, timestamp, start, size, funcAddr, state);
} else {
- this.#profile.addCode(type, name, timestamp, start, size);
+ this._profile.addCode(type, name, timestamp, start, size);
}
}
+ processCodeDeopt(
+ timestamp, codeSize, instructionStart, inliningId, scriptOffset,
+ deoptKind, deoptLocation, deoptReason) {
+ this._deoptTimeline.push(new DeoptLogEntry(deoptKind, timestamp));
+ }
processV8Version(majorVersion, minorVersion) {
- if (
- (majorVersion == this.MAJOR_VERSION && minorVersion <= this.MINOR_VERSION)
- || (majorVersion < this.MAJOR_VERSION)) {
+ if ((majorVersion == this.MAJOR_VERSION &&
+ minorVersion <= this.MINOR_VERSION) ||
+ (majorVersion < this.MAJOR_VERSION)) {
window.alert(
- `Unsupported version ${majorVersion}.${minorVersion}. \n` +
- `Please use the matching tool for given the V8 version.`);
+ `Unsupported version ${majorVersion}.${minorVersion}. \n` +
+          `Please use the matching tool for the given V8 version.`);
}
}
processScriptSource(scriptId, url, source) {
- this.#profile.addScriptSource(scriptId, url, source);
+ this._profile.addScriptSource(scriptId, url, source);
}
processCodeMove(from, to) {
- this.#profile.moveCode(from, to);
+ this._profile.moveCode(from, to);
}
processCodeDelete(start) {
- this.#profile.deleteCode(start);
+ this._profile.deleteCode(start);
}
processFunctionMove(from, to) {
- this.#profile.moveFunc(from, to);
+ this._profile.moveFunc(from, to);
}
formatName(entry) {
@@ -218,34 +234,34 @@ class Processor extends LogReader {
}
processPropertyIC(
- type, pc, time, line, column, old_state, new_state, map, key, modifier,
- slow_reason) {
+ type, pc, time, line, column, old_state, new_state, map, key, modifier,
+ slow_reason) {
let fnName = this.functionName(pc);
let parts = fnName.split(' ');
- let fileName = parts[1];
+ let fileName = parts[parts.length - 1];
let script = this.getScript(fileName);
// TODO: Use SourcePosition here directly
- let entry = new IcLogEvent(
- type, fnName, time, line, column, key, old_state, new_state, map,
- slow_reason, script);
+ let entry = new IcLogEntry(
+ type, fnName, time, line, column, key, old_state, new_state, map,
+ slow_reason, script, modifier);
if (script) {
entry.sourcePosition = script.addSourcePosition(line, column, entry);
}
- this.#icTimeline.push(entry);
+ this._icTimeline.push(entry);
}
functionName(pc) {
- let entry = this.#profile.findEntry(pc);
+ let entry = this._profile.findEntry(pc);
return this.formatName(entry);
}
formatPC(pc, line, column) {
- let entry = this.#profile.findEntry(pc);
+ let entry = this._profile.findEntry(pc);
if (!entry) return '<unknown>'
- if (entry.type === 'Builtin') {
- return entry.name;
- }
+ if (entry.type === 'Builtin') {
+ return entry.name;
+ }
let name = entry.func.getName();
- let array = this.#formatPCRegexp.exec(name);
+ let array = this._formatPCRegexp.exec(name);
if (array === null) {
entry = name;
} else {
@@ -255,22 +271,27 @@ class Processor extends LogReader {
}
processFileName(filePositionLine) {
- if (!(/\s/.test(filePositionLine))) return;
+ if (!filePositionLine.includes(' ')) return;
+    // Try to handle URLs with file positions: https://foo.bar.com/:17:330
filePositionLine = filePositionLine.split(' ');
- let file = filePositionLine[1].split(':')[0];
- return file;
+ let parts = filePositionLine[1].split(':');
+ if (parts[0].length <= 5) return parts[0] + ':' + parts[1];
+ return parts[1];
}
processMap(type, time, from, to, pc, line, column, reason, name) {
let time_ = parseInt(time);
if (type === 'Deprecate') return this.deprecateMap(type, time_, from);
- let from_ = this.getExistingMap(from, time_);
- let to_ = this.getExistingMap(to, time_);
+ let from_ = this.getExistingMapEntry(from, time_);
+ let to_ = this.getExistingMapEntry(to, time_);
// TODO: use SourcePosition directly.
let edge = new Edge(type, name, reason, time, from_, to_);
to_.filePosition = this.formatPC(pc, line, column);
let fileName = this.processFileName(to_.filePosition);
- to_.script = this.getScript(fileName);
+ // TODO: avoid undefined source positions.
+ if (fileName !== undefined) {
+ to_.script = this.getScript(fileName);
+ }
if (to_.script) {
to_.sourcePosition = to_.script.addSourcePosition(line, column, to_)
}
@@ -278,40 +299,40 @@ class Processor extends LogReader {
}
deprecateMap(type, time, id) {
- this.getExistingMap(id, time).deprecate();
+ this.getExistingMapEntry(id, time).deprecate();
}
processMapCreate(time, id) {
// map-create events might override existing maps if the addresses get
// recycled. Hence we do not check for existing maps.
- let map = this.createMap(id, time);
+ let map = this.createMapEntry(id, time);
}
processMapDetails(time, id, string) {
// TODO(cbruni): fix initial map logging.
- let map = this.getExistingMap(id, time);
+ let map = this.getExistingMapEntry(id, time);
map.description = string;
}
- createMap(id, time) {
- let map = new MapLogEvent(id, time);
- this.#mapTimeline.push(map);
+ createMapEntry(id, time) {
+ let map = new MapLogEntry(id, time);
+ this._mapTimeline.push(map);
return map;
}
- getExistingMap(id, time) {
+ getExistingMapEntry(id, time) {
if (id === '0x000000000000') return undefined;
- let map = MapLogEvent.get(id, time);
+ let map = MapLogEntry.get(id, time);
if (map === undefined) {
- console.error('No map details provided: id=' + id);
+ console.error(`No map details provided: id=${id}`);
// Manually patch in a map to continue running.
- return this.createMap(id, time);
+ return this.createMapEntry(id, time);
};
return map;
}
getScript(url) {
- const script = this.#profile.getScript(url);
+ const script = this._profile.getScript(url);
// TODO create placeholder script for empty urls.
if (script === undefined) {
console.error(`Could not find script for url: '${url}'`)
@@ -320,28 +341,18 @@ class Processor extends LogReader {
}
get icTimeline() {
- return this.#icTimeline;
+ return this._icTimeline;
}
get mapTimeline() {
- return this.#mapTimeline;
+ return this._mapTimeline;
+ }
+
+ get deoptTimeline() {
+ return this._deoptTimeline;
}
get scripts() {
- return this.#profile.scripts_.filter(script => script !== undefined);
+ return this._profile.scripts_.filter(script => script !== undefined);
}
}
-
-Processor.kProperties = [
- 'type',
- 'category',
- 'functionName',
- 'filePosition',
- 'state',
- 'key',
- 'map',
- 'reason',
- 'file'
-];
-
-export { Processor as default };
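
The dispatch table built in the Processor constructor maps each log line tag
('code-creation', 'code-deopt', 'map', ...) to a {parsers, processor} pair;
LogReader (from ../logreader.mjs, outside this diff) splits a line on commas,
applies the parsers positionally, and invokes the processor. Ignoring
variable-length parsers such as parseVarArgs, the dispatch amounts to:

  // Simplified sketch of LogReader's per-line dispatch; the real reader
  // also handles parseVarArgs and quoted strings.
  function processLogLine(dispatchTable, line) {
    const [tag, ...fields] = line.split(',');
    const entry = dispatchTable[tag];
    if (entry === undefined) return;  // unknown tags are skipped
    const args = fields.map((field, i) => entry.parsers[i](field));
    entry.processor(...args);
  }
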
diff --git a/deps/v8/tools/system-analyzer/source-panel-template.html b/deps/v8/tools/system-analyzer/source-panel-template.html
index 102d30ea28..01b777042f 100644
--- a/deps/v8/tools/system-analyzer/source-panel-template.html
+++ b/deps/v8/tools/system-analyzer/source-panel-template.html
@@ -19,7 +19,7 @@ found in the LICENSE file. -->
}
pre.scriptNode span::before {
- content: counter(sourceLineCounter) " ";
+ content: counter(sourceLineCounter) ": ";
display: inline-block;
width: 4em;
padding-left: auto;
@@ -27,26 +27,28 @@ found in the LICENSE file. -->
text-align: right;
}
-mark {
- width: 1ch;
- height: 1lh;
- border-radius: 0.1lh;
- border: 0.5px var(--background-color) solid;
- cursor: pointer;
-}
+ mark {
+ width: 1ch;
+ border-radius: 2px;
+ border: 0.5px var(--background-color) solid;
+ cursor: pointer;
+ background-color: var(--primary-color);
+ color: var(--on-primary-color);
+ }
+
+ .marked {
+ background-color: var(--secondary-color);
+ }
-.marked {
- background-color: var(--primary-color);
- color: var(--on-primary-color);
-}
+ #script-dropdown {
+ width: 100%;
+ margin-bottom: 10px;
+ }
</style>
<div class="panel">
<h2>Source Panel</h2>
- <div class="script-dropdown">
- <label for="scripts-label">Scripts:</label>
- <select id="script-dropdown"></select>
- </div>
- <div id="script">
+ <select id="script-dropdown"></select>
+ <div id="script" class="panelBody">
<pre class="scripNode"></pre>
</div>
</div>
diff --git a/deps/v8/tools/system-analyzer/source-panel.mjs b/deps/v8/tools/system-analyzer/source-panel.mjs
index a10f2bccd2..a4dc07fb45 100644
--- a/deps/v8/tools/system-analyzer/source-panel.mjs
+++ b/deps/v8/tools/system-analyzer/source-panel.mjs
@@ -1,127 +1,173 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import { V8CustomElement, defineCustomElement } from "./helper.mjs";
-import { SelectionEvent, FocusEvent } from "./events.mjs";
-import { MapLogEvent } from "./log/map.mjs";
-import { IcLogEvent } from "./log/ic.mjs";
-
-defineCustomElement(
- "source-panel",
- (templateText) =>
- class SourcePanel extends V8CustomElement {
- #selectedSourcePositions;
- #scripts = [];
- #script;
- constructor() {
- super(templateText);
- this.scriptDropdown.addEventListener(
- 'change', e => this.handleSelectScript(e));
- }
- get script() {
- return this.$('#script');
- }
- get scriptNode() {
- return this.$('.scriptNode');
- }
- set script(script) {
- this.#script = script;
- this.renderSourcePanel();
- }
- set selectedSourcePositions(sourcePositions) {
- this.#selectedSourcePositions = sourcePositions;
- }
- get selectedSourcePositions() {
- return this.#selectedSourcePositions;
- }
- set data(value) {
- this.#scripts = value;
- this.initializeScriptDropdown();
- this.script = this.#scripts[0];
- }
- get scriptDropdown() {
- return this.$("#script-dropdown");
- }
- initializeScriptDropdown() {
- this.#scripts.sort((a, b) => a.name.localeCompare(b.name));
- let select = this.scriptDropdown;
- select.options.length = 0;
- for (const script of this.#scripts) {
- const option = document.createElement("option");
- option.text = `${script.name} (id=${script.id})`;
- option.script = script;
- select.add(option);
- }
- }
+import {FocusEvent, SelectionEvent} from './events.mjs';
+import {delay, DOM, formatBytes, V8CustomElement} from './helper.mjs';
+import {IcLogEntry} from './log/ic.mjs';
+import {MapLogEntry} from './log/map.mjs';
+
+DOM.defineCustomElement('source-panel',
+ (templateText) =>
+ class SourcePanel extends V8CustomElement {
+ _selectedSourcePositions = [];
+ _sourcePositionsToMarkNodes;
+ _scripts = [];
+ _script;
+ constructor() {
+ super(templateText);
+ this.scriptDropdown.addEventListener(
+ 'change', e => this._handleSelectScript(e));
+ }
- renderSourcePanel() {
- const builder = new LineBuilder(this, this.#script);
- const scriptNode = builder.createScriptNode();
- const oldScriptNode = this.script.childNodes[1];
- this.script.replaceChild(scriptNode, oldScriptNode);
- }
+ get script() {
+ return this.$('#script');
+ }
- handleSelectScript(e) {
- const option = this.scriptDropdown.options[this.scriptDropdown.selectedIndex];
- this.script = option.script;
- }
+ get scriptNode() {
+ return this.$('.scriptNode');
+ }
- handleSourcePositionClick(e) {
- let icLogEvents = [];
- let mapLogEvents = [];
- for (const entry of e.target.sourcePosition.entries) {
- if (entry instanceof MapLogEvent) {
- mapLogEvents.push(entry);
- } else if (entry instanceof IcLogEvent) {
- icLogEvents.push(entry);
- }
- }
- if (icLogEvents.length > 0 ) {
- this.dispatchEvent(new SelectionEvent(icLogEvents));
- this.dispatchEvent(new FocusEvent(icLogEvents[0]));
- }
- if (mapLogEvents.length > 0) {
- this.dispatchEvent(new SelectionEvent(mapLogEvents));
- this.dispatchEvent(new FocusEvent(mapLogEvents[0]));
- }
- }
+ set script(script) {
+ if (this._script === script) return;
+ this._script = script;
+ this._renderSourcePanel();
+ this._updateScriptDropdownSelection();
+ }
+
+ set selectedSourcePositions(sourcePositions) {
+ this._selectedSourcePositions = sourcePositions;
+ // TODO: highlight multiple scripts
+ this.script = sourcePositions[0]?.script;
+ this._focusSelectedMarkers();
+ }
+
+ set data(scripts) {
+ this._scripts = scripts;
+ this._initializeScriptDropdown();
+ }
+
+ get scriptDropdown() {
+ return this.$('#script-dropdown');
+ }
+
+ _initializeScriptDropdown() {
+ this._scripts.sort((a, b) => a.name.localeCompare(b.name));
+ let select = this.scriptDropdown;
+ select.options.length = 0;
+ for (const script of this._scripts) {
+ const option = document.createElement('option');
+ const size = formatBytes(script.source.length);
+ option.text = `${script.name} (id=${script.id} size=${size})`;
+ option.script = script;
+ select.add(option);
+ }
+ }
+ _updateScriptDropdownSelection() {
+ this.scriptDropdown.selectedIndex =
+ this._script ? this._scripts.indexOf(this._script) : -1;
+ }
+
+ async _renderSourcePanel() {
+ let scriptNode;
+ if (this._script) {
+ await delay(1);
+ const builder =
+ new LineBuilder(this, this._script, this._selectedSourcePositions);
+ scriptNode = builder.createScriptNode();
+ this._sourcePositionsToMarkNodes = builder.sourcePositionToMarkers;
+ } else {
+ scriptNode = document.createElement('pre');
+        this._sourcePositionsToMarkNodes = new Map();
+ }
+ const oldScriptNode = this.script.childNodes[1];
+ this.script.replaceChild(scriptNode, oldScriptNode);
+ }
+ async _focusSelectedMarkers() {
+ await delay(100);
+      // Unmark all previously marked nodes.
+ for (let markNode of this._sourcePositionsToMarkNodes.values()) {
+ markNode.className = '';
+ }
+ for (let sourcePosition of this._selectedSourcePositions) {
+ this._sourcePositionsToMarkNodes.get(sourcePosition).className = 'marked';
}
-);
+ const sourcePosition = this._selectedSourcePositions[0];
+ if (!sourcePosition) return;
+ const markNode = this._sourcePositionsToMarkNodes.get(sourcePosition);
+ markNode.scrollIntoView(
+ {behavior: 'smooth', block: 'nearest', inline: 'center'});
+ }
+
+ _handleSelectScript(e) {
+ const option =
+ this.scriptDropdown.options[this.scriptDropdown.selectedIndex];
+ this.script = option.script;
+ this.selectLogEntries(this._script.entries());
+ }
+ handleSourcePositionClick(e) {
+      this.selectLogEntries(e.target.sourcePosition.entries);
+ }
+
+ selectLogEntries(logEntries) {
+ let icLogEntries = [];
+ let mapLogEntries = [];
+ for (const entry of logEntries) {
+ if (entry instanceof MapLogEntry) {
+ mapLogEntries.push(entry);
+ } else if (entry instanceof IcLogEntry) {
+ icLogEntries.push(entry);
+ }
+ }
+ if (icLogEntries.length > 0) {
+ this.dispatchEvent(new SelectionEvent(icLogEntries));
+ }
+ if (mapLogEntries.length > 0) {
+ this.dispatchEvent(new SelectionEvent(mapLogEntries));
+ }
+ }
+});
class SourcePositionIterator {
- #entries;
- #index = 0;
+ _entries;
+ _index = 0;
constructor(sourcePositions) {
- this.#entries = sourcePositions;
+ this._entries = sourcePositions;
+ }
+
+ * forLine(lineIndex) {
+ this._findStart(lineIndex);
+ while (!this._done() && this._current().line === lineIndex) {
+ yield this._current();
+ this._next();
+ }
}
- *forLine(lineIndex) {
- while(!this.#done() && this.#current().line === lineIndex) {
- yield this.#current();
- this.#next();
+ _findStart(lineIndex) {
+ while (!this._done() && this._current().line < lineIndex) {
+ this._next();
}
}
- #current() {
- return this.#entries[this.#index];
+ _current() {
+ return this._entries[this._index];
}
- #done() {
- return this.#index + 1 >= this.#entries.length;
+ _done() {
+ return this._index + 1 >= this._entries.length;
}
- #next() {
- this.#index++;
+ _next() {
+ this._index++;
}
}
-function * lineIterator(source) {
+function* lineIterator(source) {
let current = 0;
let line = 1;
- while(current < source.length) {
- const next = source.indexOf("\n", current);
+ while (current < source.length) {
+ const next = source.indexOf('\n', current);
if (next === -1) break;
yield [line, source.substring(current, next)];
line++;
@@ -131,59 +177,61 @@ function * lineIterator(source) {
}
class LineBuilder {
- #script
- #clickHandler
- #sourcePositions
-
- constructor(panel, script) {
- this.#script = script;
- this.#clickHandler = panel.handleSourcePositionClick.bind(panel);
+ _script;
+ _clickHandler;
+ _sourcePositions;
+ _selection;
+ _sourcePositionToMarkers = new Map();
+
+ constructor(panel, script, highlightPositions) {
+ this._script = script;
+ this._selection = new Set(highlightPositions);
+ this._clickHandler = panel.handleSourcePositionClick.bind(panel);
// TODO: sort on script finalization.
script.sourcePositions.sort((a, b) => {
if (a.line === b.line) return a.column - b.column;
return a.line - b.line;
- })
- this.#sourcePositions
- = new SourcePositionIterator(script.sourcePositions);
+ });
+ this._sourcePositions = new SourcePositionIterator(script.sourcePositions);
+ }
+ get sourcePositionToMarkers() {
+ return this._sourcePositionToMarkers;
}
createScriptNode() {
- const scriptNode = document.createElement("pre");
+ const scriptNode = document.createElement('pre');
scriptNode.classList.add('scriptNode');
- for (let [lineIndex, line] of lineIterator(this.#script.source)) {
- scriptNode.appendChild(this.#createLineNode(lineIndex, line));
+ for (let [lineIndex, line] of lineIterator(this._script.source)) {
+ scriptNode.appendChild(this._createLineNode(lineIndex, line));
}
return scriptNode;
}
- #createLineNode(lineIndex, line) {
- const lineNode = document.createElement("span");
- let columnIndex = 0;
- for (const sourcePosition of this.#sourcePositions.forLine(lineIndex)) {
+ _createLineNode(lineIndex, line) {
+ const lineNode = document.createElement('span');
+ let columnIndex = 0;
+ for (const sourcePosition of this._sourcePositions.forLine(lineIndex)) {
const nextColumnIndex = sourcePosition.column - 1;
- lineNode.appendChild(
- document.createTextNode(
- line.substring(columnIndex, nextColumnIndex)));
+ lineNode.appendChild(document.createTextNode(
+ line.substring(columnIndex, nextColumnIndex)));
columnIndex = nextColumnIndex;
lineNode.appendChild(
- this.#createMarkerNode(line[columnIndex], sourcePosition));
+ this._createMarkerNode(line[columnIndex], sourcePosition));
columnIndex++;
}
lineNode.appendChild(
- document.createTextNode(line.substring(columnIndex) + "\n"));
+ document.createTextNode(line.substring(columnIndex) + '\n'));
return lineNode;
}
- #createMarkerNode(text, sourcePosition) {
- const marker = document.createElement("mark");
- marker.classList.add('marked');
+ _createMarkerNode(text, sourcePosition) {
+ const marker = document.createElement('mark');
+ this._sourcePositionToMarkers.set(sourcePosition, marker);
marker.textContent = text;
marker.sourcePosition = sourcePosition;
- marker.onclick = this.#clickHandler;
+ marker.onclick = this._clickHandler;
return marker;
}
-
-
} \ No newline at end of file
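
LineBuilder relies on script.sourcePositions being sorted by (line, column);
SourcePositionIterator.forLine() then lets createScriptNode() consume all
positions in a single linear pass while walking the source line by line.
Given a script object with .source and sorted .sourcePositions, the
cooperation looks like this:

  // Illustration of how the two iterators above cooperate.
  const positions = new SourcePositionIterator(script.sourcePositions);
  for (const [lineIndex, line] of lineIterator(script.source)) {
    for (const sourcePosition of positions.forLine(lineIndex)) {
      // emit a <mark> for sourcePosition at its column within `line`
    }
  }
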
diff --git a/deps/v8/tools/system-analyzer/stats-panel-template.html b/deps/v8/tools/system-analyzer/stats-panel-template.html
index 7aa149a588..fb91fad1cf 100644
--- a/deps/v8/tools/system-analyzer/stats-panel-template.html
+++ b/deps/v8/tools/system-analyzer/stats-panel-template.html
@@ -14,43 +14,60 @@ found in the LICENSE file. -->
margin: auto;
}
- #stats table {
+ table {
flex: 1;
- padding-right: 50px;
max-height: 250px;
display: inline-block;
overflow-y: scroll;
+ border-collapse: collapse;
+ }
+ table td {
+ padding: 2px;
}
- #stats table td {
- cursor: pointer;
+ table thead td {
+ border-bottom: 1px var(--on-surface-color) dotted;
}
- #stats .transitionTable {
- overflow-y: scroll;
+ table tbody td {
+ cursor: pointer;
}
- #stats .transitionTable tr {
+ #nameTable tr {
max-width: 200px;
}
- #stats .transitionType {
+ #nameTable tr td:nth-child(1) {
text-align: right;
- max-width: 380px;
}
- #stats .transitionType tr td:nth-child(2) {
- text-align: left;
+ #typeTable {
+ text-align: right;
+ max-width: 380px;
}
- #stats table thead td {
- border-bottom: 1px var(--on-surface-color) dotted;
+ #typeTable tr td:nth-child(2) {
+ text-align: left;
}
</style>
<div class="panel">
- <h2>Stats Panel</h2>
- <h3>Stats</h3>
+ <h2>Map Stats</h2>
<section id="stats">
+ <table id="typeTable" class="statsTable">
+ <thead>
+ <tr><td></td><td>Type</td><td>Count</td><td>Percent</td></tr>
+ </thead>
+ <tbody></tbody>
+ </table>
+ <table id="nameTable">
+ <thead>
+        <tr><td>Count</td><td>Property Name</td></tr>
+ </thead>
+ <tbody></tbody>
+ <tfoot>
+ <tr><td colspan="2" class="clickable">Show more...</td></tr>
+      </tfoot>
+ </table>
</section>
</div>
diff --git a/deps/v8/tools/system-analyzer/stats-panel.mjs b/deps/v8/tools/system-analyzer/stats-panel.mjs
index 9e637015bc..dd0ac78489 100644
--- a/deps/v8/tools/system-analyzer/stats-panel.mjs
+++ b/deps/v8/tools/system-analyzer/stats-panel.mjs
@@ -1,43 +1,40 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import { V8CustomElement, defineCustomElement } from "./helper.mjs";
-import { SelectionEvent } from "./events.mjs";
+import {SelectionEvent} from './events.mjs';
+import {delay, DOM, LazyTable, V8CustomElement} from './helper.mjs';
-defineCustomElement(
- "stats-panel",
- (templateText) =>
- class StatsPanel extends V8CustomElement {
- #timeline;
- #transitions;
+DOM.defineCustomElement(
+ 'stats-panel', (templateText) => class StatsPanel extends V8CustomElement {
+ _timeline;
+ _transitions;
+ _selectedLogEntries;
constructor() {
super(templateText);
}
get stats() {
- return this.$("#stats");
+ return this.$('#stats');
}
- set timeline(value) {
- //TODO(zcankara) Trigger update
- this.#timeline = value;
+ set timeline(timeline) {
+ this._timeline = timeline;
+      this.selectedLogEntries = timeline.all;
}
- get timeline() {
- return this.#timeline;
+ set selectedLogEntries(entries) {
+ this._selectedLogEntries = entries;
+ this.update();
}
set transitions(value) {
- this.#transitions = value;
+ this._transitions = value;
}
- get transitions() {
- return this.#transitions;
- }
-
- filterUniqueTransitions(filter) {
+ _filterUniqueTransitions(filter) {
// Returns a list of Maps whose parent is not in the list.
- return this.timeline.filter((map) => {
+ return this._selectedLogEntries.filter((map) => {
if (filter(map) === false) return false;
let parent = map.parent();
if (parent === undefined) return true;
@@ -45,95 +42,88 @@ defineCustomElement(
});
}
- update() {
- this.removeAllChildren(this.stats);
- this.updateGeneralStats();
- this.updateNamedTransitionsStats();
+ _update() {
+ this._updateGeneralStats();
+ this._updateNamedTransitionsStats();
}
- updateGeneralStats() {
- console.assert(this.#timeline !== undefined, "Timeline not set yet!");
+ _updateGeneralStats() {
+ console.assert(this._timeline !== undefined, 'Timeline not set yet!');
let pairs = [
- ["Total", null, (e) => true],
- ["Transitions", "primary", (e) => e.edge && e.edge.isTransition()],
- ["Fast to Slow", "violet", (e) => e.edge && e.edge.isFastToSlow()],
- ["Slow to Fast", "orange", (e) => e.edge && e.edge.isSlowToFast()],
- ["Initial Map", "yellow", (e) => e.edge && e.edge.isInitial()],
+ ['Transitions', 'primary', (e) => e.edge && e.edge.isTransition()],
+ ['Fast to Slow', 'violet', (e) => e.edge && e.edge.isFastToSlow()],
+ ['Slow to Fast', 'orange', (e) => e.edge && e.edge.isSlowToFast()],
+ ['Initial Map', 'yellow', (e) => e.edge && e.edge.isInitial()],
[
- "Replace Descriptors",
- "red",
+ 'Replace Descriptors',
+ 'red',
(e) => e.edge && e.edge.isReplaceDescriptors(),
],
[
- "Copy as Prototype",
- "red",
+ 'Copy as Prototype',
+ 'red',
(e) => e.edge && e.edge.isCopyAsPrototype(),
],
[
- "Optimize as Prototype",
+ 'Optimize as Prototype',
null,
(e) => e.edge && e.edge.isOptimizeAsPrototype(),
],
- ["Deprecated", null, (e) => e.isDeprecated()],
- ["Bootstrapped", "green", (e) => e.isBootstrapped()],
+ ['Deprecated', null, (e) => e.isDeprecated()],
+ ['Bootstrapped', 'green', (e) => e.isBootstrapped()],
+ ['Total', null, (e) => true],
];
- let text = "";
- let tableNode = this.table("transitionType");
- tableNode.innerHTML =
- "<thead><tr><td>Color</td><td>Type</td><td>Count</td>" +
- "<td>Percent</td></tr></thead>";
- let name, filter;
- let total = this.timeline.size();
+ let tbody = document.createElement('tbody');
+ let total = this._selectedLogEntries.length;
pairs.forEach(([name, color, filter]) => {
- let row = this.tr();
+ let row = DOM.tr();
if (color !== null) {
- row.appendChild(this.td(this.div(["colorbox", color])));
+ row.appendChild(DOM.td(DOM.div(['colorbox', color])));
} else {
- row.appendChild(this.td(""));
+ row.appendChild(DOM.td(''));
}
row.classList.add('clickable');
row.onclick = (e) => {
// lazily compute the stats
let node = e.target.parentNode;
if (node.maps == undefined) {
- node.maps = this.filterUniqueTransitions(filter);
+ node.maps = this._filterUniqueTransitions(filter);
}
this.dispatchEvent(new SelectionEvent(node.maps));
};
- row.appendChild(this.td(name));
- let count = this.timeline.count(filter);
- row.appendChild(this.td(count));
+ row.appendChild(DOM.td(name));
+ let count = this._count(filter);
+ row.appendChild(DOM.td(count));
let percent = Math.round((count / total) * 1000) / 10;
- row.appendChild(this.td(percent.toFixed(1) + "%"));
- tableNode.appendChild(row);
+ row.appendChild(DOM.td(percent.toFixed(1) + '%'));
+ tbody.appendChild(row);
});
- this.stats.appendChild(tableNode);
+ this.$('#typeTable').replaceChild(tbody, this.$('#typeTable tbody'));
+ }
+
+ _count(filter) {
+ let count = 0;
+ for (const map of this._selectedLogEntries) {
+ if (filter(map)) count++;
+ }
+ return count;
}
- updateNamedTransitionsStats() {
- let tableNode = this.table("transitionTable");
- let nameMapPairs = Array.from(this.transitions.entries());
- tableNode.innerHTML =
- "<thead><tr><td>Propery Name</td><td>#</td></tr></thead>";
- nameMapPairs
- .sort((a, b) => b[1].length - a[1].length)
- .forEach(([name, maps]) => {
- let row = this.tr();
- row.maps = maps;
- row.classList.add('clickable');
- row.addEventListener("click", (e) =>
- this.dispatchEvent(
- new SelectionEvent(
- e.target.parentNode.maps.map((map) => map.to)
- )
- )
- );
- row.appendChild(this.td(name));
- row.appendChild(this.td(maps.length));
- tableNode.appendChild(row);
- });
- this.stats.appendChild(tableNode);
+ _updateNamedTransitionsStats() {
+ let rowData = Array.from(this._transitions.entries());
+ rowData.sort((a, b) => b[1].length - a[1].length);
+ new LazyTable(this.$('#nameTable'), rowData, ([name, maps]) => {
+ let row = DOM.tr();
+ row.maps = maps;
+ row.classList.add('clickable');
+ row.addEventListener(
+ 'click',
+ (e) => this.dispatchEvent(new SelectionEvent(
+ e.target.parentNode.maps.map((map) => map.to))));
+ row.appendChild(DOM.td(maps.length));
+ row.appendChild(DOM.td(name));
+ return row;
+ });
}
- }
-);
+ });
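
_updateNamedTransitionsStats() hands its sorted row data to LazyTable,
imported from helper.mjs but not defined in this diff. Judging from the new
"Show more..." footer in the stats-panel template, it presumably appends rows
in batches on demand; a sketch under that assumption:

  // Hypothetical LazyTable; the real one lives in helper.mjs.
  class LazyTable {
    constructor(table, rowData, rowElementFactory, batchSize = 20) {
      this._data = rowData;
      this._factory = rowElementFactory;
      this._batchSize = batchSize;
      this._tbody = table.querySelector('tbody');
      this._next = 0;
      table.querySelector('tfoot td').onclick = () => this._addBatch();
      this._addBatch();
    }
    _addBatch() {
      const end = Math.min(this._next + this._batchSize, this._data.length);
      while (this._next < end) {
        this._tbody.appendChild(this._factory(this._data[this._next++]));
      }
    }
  }
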
diff --git a/deps/v8/tools/system-analyzer/timeline-panel-template.html b/deps/v8/tools/system-analyzer/timeline-panel-template.html
index dd86994716..2641c71441 100644
--- a/deps/v8/tools/system-analyzer/timeline-panel-template.html
+++ b/deps/v8/tools/system-analyzer/timeline-panel-template.html
@@ -5,54 +5,9 @@ found in the LICENSE file. -->
<head>
<link href="./index.css" rel="stylesheet">
</head>
-<style>
- #timelineOverview {
- width: 100%;
- height: 50px;
- position: relative;
- margin-top: -50px;
- margin-bottom: 10px;
- background-size: 100% 100%;
- border: 1px var(--primary-color) solid;
- border-width: 1px 0 1px 0;
- overflow: hidden;
- }
-
- #timelineOverviewIndicator {
- height: 100%;
- position: absolute;
- box-shadow: 0px 2px 20px -5px var(--primary-color) inset;
- top: 0px;
- cursor: ew-resize;
- }
-
- #timelineOverviewIndicator .leftMask,
- #timelineOverviewIndicator .rightMask {
- background-color: rgba(240, 230, 230, 0.3);
- width: 10000px;
- height: 100%;
- position: absolute;
- top: 0px;
- }
-
- #timelineOverviewIndicator .leftMask {
- right: 100%;
- }
-
- #timelineOverviewIndicator .rightMask {
- left: 100%;
- }
-</style>
<div class="panel">
<h2>Timeline Panel</h2>
- <h3>Timeline</h3>
<div>
<slot></slot>
</div>
- <div id="timelineOverview">
- <div id="timelineOverviewIndicator">
- <div class="leftMask"></div>
- <div class="rightMask"></div>
- </div>
- </div>
</div>
diff --git a/deps/v8/tools/system-analyzer/timeline-panel.mjs b/deps/v8/tools/system-analyzer/timeline-panel.mjs
index afe05c24bd..a61d2efc90 100644
--- a/deps/v8/tools/system-analyzer/timeline-panel.mjs
+++ b/deps/v8/tools/system-analyzer/timeline-panel.mjs
@@ -2,92 +2,54 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import { defineCustomElement, V8CustomElement } from './helper.mjs';
-import { SynchronizeSelectionEvent } from './events.mjs';
import './timeline/timeline-track.mjs';
-defineCustomElement('timeline-panel', (templateText) =>
- class TimelinePanel extends V8CustomElement {
- #timeSelection = { start: 0, end: Infinity };
- constructor() {
- super(templateText);
- this.timelineOverview.addEventListener(
- 'mousemove', e => this.handleTimelineIndicatorMove(e));
- this.addEventListener(
- 'scrolltrack', e => this.handleTrackScroll(e));
- this.addEventListener(
- SynchronizeSelectionEvent.name, e => this.handleMouseMoveSelection(e));
- this.backgroundCanvas = document.createElement('canvas');
- this.isLocked = false;
- }
-
- get timelineOverview() {
- return this.$('#timelineOverview');
- }
-
- get timelineOverviewIndicator() {
- return this.$('#timelineOverviewIndicator');
- }
-
- //TODO(zcankara) Remove dependency to timelineCanvas here
- get timelineCanvas() {
- return this.timelineTracks[0].timelineCanvas;
- }
- //TODO(zcankara) Remove dependency to timeline here
- get timeline() {
- return this.timelineTracks[0].timeline;
- }
- set nofChunks(count) {
- for (const track of this.timelineTracks) {
- track.nofChunks = count;
+import {SynchronizeSelectionEvent} from './events.mjs';
+import {DOM, V8CustomElement} from './helper.mjs';
+
+DOM.defineCustomElement(
+ 'timeline-panel',
+ (templateText) => class TimelinePanel extends V8CustomElement {
+ constructor() {
+ super(templateText);
+ this.addEventListener('scrolltrack', e => this.handleTrackScroll(e));
+ this.addEventListener(
+ SynchronizeSelectionEvent.name,
+            e => this.handleSelectionSynchronization(e));
}
- }
- get nofChunks() {
- return this.timelineTracks[0].nofChunks;
- }
- get timelineTracks() {
- return this.$("slot").assignedNodes().filter(
- track => track.nodeType === Node.ELEMENT_NODE);
- }
- handleTrackScroll(event) {
- //TODO(zcankara) add forEachTrack helper method
- for (const track of this.timelineTracks) {
- track.scrollLeft = event.detail;
+
+ set nofChunks(count) {
+ for (const track of this.timelineTracks) {
+ track.nofChunks = count;
+ }
}
- }
- handleMouseMoveSelection(event) {
- this.selectionMouseMove(event.start, event.end);
- }
+ get nofChunks() {
+ return this.timelineTracks[0].nofChunks;
+ }
- selectionMouseMove(start, end) {
- for (const track of this.timelineTracks) {
- track.startTime = start;
- track.endTime = end;
+ get timelineTracks() {
+ return this.$('slot').assignedNodes().filter(
+ node => node.nodeType === Node.ELEMENT_NODE);
}
- }
- handleTimelineIndicatorMove(event) {
- if (event.buttons == 0) return;
- let timelineTotalWidth = this.timelineCanvas.offsetWidth;
- let factor = this.timelineOverview.offsetWidth / timelineTotalWidth;
- for (const track of this.timelineTracks) {
- track.timelineIndicatorMove(event.movementX / factor);
+ handleTrackScroll(event) {
+ // TODO(zcankara) add forEachTrack helper method
+ for (const track of this.timelineTracks) {
+ track.scrollLeft = event.detail;
+ }
}
- this.updateOverviewWindow();
- }
- updateOverviewWindow() {
- let indicator = this.timelineOverviewIndicator;
- let totalIndicatorWidth =
- this.timelineOverview.offsetWidth;
- let div = this.timeline;
- let timelineTotalWidth = this.timelineCanvas.offsetWidth;
- let factor = totalIndicatorWidth / timelineTotalWidth;
- let width = div.offsetWidth * factor;
- let left = div.scrollLeft * factor;
- indicator.style.width = width + 'px';
- indicator.style.left = left + 'px';
- }
+    handleSelectionSynchronization(event) {
+ this.timeSelection = {start: event.start, end: event.end};
+ }
- });
+ set timeSelection(timeSelection) {
+ if (timeSelection.start > timeSelection.end) {
+ throw new Error('Invalid time range');
+ }
+ for (const track of this.timelineTracks) {
+ track.timeSelection = timeSelection;
+ }
+ }
+ });
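
SynchronizeSelectionEvent comes from events.mjs and is not shown in this
hunk; from its use here and in timeline-track.mjs it evidently carries the
selected time range directly on the event object. A sketch consistent with
that usage (the event name below is hypothetical):

  // Assumed shape of SynchronizeSelectionEvent; the real class is in
  // events.mjs and must expose .start and .end to the panel above.
  class SynchronizeSelectionEvent extends CustomEvent {
    static get name() {
      return 'syncselection';  // hypothetical event name
    }
    constructor(start, end) {
      super(SynchronizeSelectionEvent.name, {bubbles: true, composed: true});
      this.start = start;
      this.end = end;
    }
  }
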
diff --git a/deps/v8/tools/system-analyzer/timeline.mjs b/deps/v8/tools/system-analyzer/timeline.mjs
index 16e12cc4aa..996f108b6a 100644
--- a/deps/v8/tools/system-analyzer/timeline.mjs
+++ b/deps/v8/tools/system-analyzer/timeline.mjs
@@ -3,34 +3,49 @@
// found in the LICENSE file.
class Timeline {
- #values;
- #selection;
- #uniqueTypes;
- constructor() {
- this.#values = [];
+  // Class of the entries stored in this timeline:
+  _model;
+  // Array of _model instances:
+  _values;
+  // Current selection, subset of _values:
+ _selection;
+ _uniqueTypes;
+
+ constructor(model) {
+ this._model = model;
+ this._values = [];
this.startTime = 0;
this.endTime = 0;
}
+
+ get model() {
+ return this._model;
+ }
+
get all() {
- return this.#values;
+ return this._values;
}
+
get selection() {
- return this.#selection;
+ return this._selection;
}
+
set selection(value) {
- this.#selection = value;
+ this._selection = value;
}
+
selectTimeRange(start, end) {
- this.#selection = this.filter(
- e => e.time >= start && e.time <= end);
+ this._selection = this.filter(e => e.time >= start && e.time <= end);
}
+
getChunks(windowSizeMs) {
- //TODO(zcankara) Fill this one
+ // TODO(zcankara) Fill this one
return this.chunkSizes(windowSizeMs);
}
+
get values() {
- //TODO(zcankara) Not to break something delete later
- return this.#values;
+ // TODO(zcankara) Not to break something delete later
+ return this._values;
}
count(filter) {
@@ -49,9 +64,9 @@ class Timeline {
// Invalid insertion order, might happen without --single-process,
// finding insertion point.
let insertionPoint = this.find(time);
- this.#values.splice(insertionPoint, event);
+      this._values.splice(insertionPoint, 0, event);
} else {
- this.#values.push(event);
+ this._values.push(event);
}
if (time > 0) {
this.endTime = Math.max(this.endTime, time);
@@ -64,7 +79,7 @@ class Timeline {
}
at(index) {
- return this.#values[index];
+ return this._values[index];
}
isEmpty() {
@@ -72,39 +87,25 @@ class Timeline {
}
size() {
- return this.#values.length;
+ return this._values.length;
+ }
+
+ get length() {
+ return this._values.length;
}
first() {
- return this.#values[0];
+ return this._values[0];
}
last() {
- return this.#values[this.#values.length - 1];
+ return this._values[this._values.length - 1];
}
duration() {
return this.last().time - this.first().time;
}
- groupByTypes() {
- this.#uniqueTypes = new Map();
- for (const entry of this.all) {
- if (!this.#uniqueTypes.has(entry.type)) {
- this.#uniqueTypes.set(entry.type, [entry]);
- } else {
- this.#uniqueTypes.get(entry.type).push(entry);
- }
- }
- }
-
- get uniqueTypes() {
- if (this.#uniqueTypes === undefined) {
- this.groupByTypes();
- }
- return this.#uniqueTypes;
- }
-
forEachChunkSize(count, fn) {
const increment = this.duration() / count;
let currentTime = this.first().time + increment;
@@ -127,7 +128,7 @@ class Timeline {
chunks(count) {
let chunks = [];
this.forEachChunkSize(count, (start, end, startTime, endTime) => {
- let items = this.#values.slice(start, end);
+ let items = this._values.slice(start, end);
chunks.push(new Chunk(chunks.length, startTime, endTime, items));
});
return chunks;
@@ -137,14 +138,14 @@ class Timeline {
const first = this.find(start);
if (first < 0) return [];
const last = this.find(end, first);
- return this.#values.slice(first, last);
+ return this._values.slice(first, last);
}
find(time, offset = 0) {
- return this.#find(this.#values, each => each.time - time, offset);
+ return this._find(this._values, each => each.time - time, offset);
}
- #find(array, cmp, offset = 0) {
+ _find(array, cmp, offset = 0) {
let min = offset;
let max = array.length;
while (min < max) {
@@ -159,16 +160,28 @@ class Timeline {
return min;
}
+ initializeTypes() {
+ const types = new Map();
+ for (const entry of this.all) {
+      types.get(entry.type)?.push(entry) ?? types.set(entry.type, [entry]);
+ }
+ return this._uniqueTypes = types;
+ }
+
+ get uniqueTypes() {
+ return this._uniqueTypes ?? this.initializeTypes();
+ }
+
depthHistogram() {
- return this.#values.histogram(each => each.depth);
+ return this._values.histogram(each => each.depth);
}
fanOutHistogram() {
- return this.#values.histogram(each => each.children.length);
+ return this._values.histogram(each => each.children.length);
}
forEach(fn) {
- return this.#values.forEach(fn);
+ return this._values.forEach(fn);
}
}
@@ -239,7 +252,7 @@ class Chunk {
if (event_fn === void 0) {
event_fn = each => each;
}
- let breakdown = { __proto__: null };
+ let breakdown = {__proto__: null};
this.items.forEach(each => {
const type = event_fn(each);
const v = breakdown[type];
@@ -251,7 +264,6 @@ class Chunk {
filter() {
return this.items.filter(map => !map.parent() || !this.has(map.parent()));
}
-
}
-export { Timeline, Chunk };
+export {Timeline, Chunk};
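
find(time) delegates to _find, a lower-bound binary search over the
time-sorted _values array (the loop body is elided by the hunk context
above). It returns the first index whose entry time is >= the requested
time, which is also the insertion point push() uses for out-of-order events.
Equivalent standalone form:

  // Lower-bound binary search, equivalent to Timeline._find with
  // cmp = each => each.time - time.
  function lowerBound(array, time, offset = 0) {
    let min = offset;
    let max = array.length;
    while (min < max) {
      const mid = (min + max) >>> 1;
      if (array[mid].time < time) {
        min = mid + 1;
      } else {
        max = mid;
      }
    }
    return min;  // first index with array[index].time >= time
  }
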
diff --git a/deps/v8/tools/system-analyzer/timeline/timeline-track-template.html b/deps/v8/tools/system-analyzer/timeline/timeline-track-template.html
index 93f30747be..e14b927e4b 100644
--- a/deps/v8/tools/system-analyzer/timeline/timeline-track-template.html
+++ b/deps/v8/tools/system-analyzer/timeline/timeline-track-template.html
@@ -8,11 +8,10 @@ found in the LICENSE file. -->
<style>
#timeline {
position: relative;
- height: 300px;
+ height: calc(200px + 12px);
overflow-y: hidden;
overflow-x: scroll;
user-select: none;
- background-color: var(--timeline-background-color);
}
#timelineLabel {
@@ -20,20 +19,20 @@ found in the LICENSE file. -->
transform-origin: left bottom 0;
position: absolute;
left: 0;
- width: 250px;
+ width: 200px;
text-align: center;
font-size: 10px;
opacity: 0.5;
}
#timelineChunks {
- height: 250px;
+ height: 200px;
position: absolute;
margin-right: 100px;
}
#timelineCanvas {
- height: 250px;
+ height: 200px;
position: relative;
overflow: visible;
pointer-events: none;
@@ -41,40 +40,54 @@ found in the LICENSE file. -->
.chunk {
width: 6px;
- border: 0px var(--timeline-background-color) solid;
- border-width: 0 2px 0 2px;
position: absolute;
background-size: 100% 100%;
image-rendering: pixelated;
bottom: 0px;
+ background-color: var(--on-surface-color);
+ cursor: pointer;
+ }
+ .chunk:hover {
+ border-radius: 2px 2px 0 0;
+ margin: 0 0 -2px -2px;
+ border: 2px var(--primary-color) solid;
}
.timestamp {
- height: 250px;
+ height: 200px;
width: 100px;
- border-left: 1px var(--surface-color) dashed;
+ border-left: 1px var(--on-surface-color) dashed;
padding-left: 4px;
position: absolute;
pointer-events: none;
font-size: 10px;
- opacity: 0.5;
}
#legend {
position: relative;
float: right;
- text-align: center;
width: 100%;
max-width: 280px;
padding-left: 20px;
padding-top: 10px;
+ border-collapse: collapse;
}
th,
td {
width: 200px;
- text-align: center;
- padding: 5px;
+ text-align: left;
+ padding-bottom: 3px;
+ }
+
+ /* right align numbers */
+ #legend td:nth-of-type(4n+3),
+ #legend td:nth-of-type(4n+4) {
+ text-align: right;
+ }
+
+ .legendTypeColumn {
+ width: 100%;
}
.timeline {
@@ -90,35 +103,36 @@ found in the LICENSE file. -->
z-index: 3;
cursor: col-resize;
}
+ #timeline .leftHandle {
+ border-left: 1px solid var(--on-surface-color);
+ }
+ #timeline .rightHandle {
+ border-right: 1px solid var(--on-surface-color);
+ }
#timeline .selection {
background-color: rgba(133, 68, 163, 0.5);
height: 100%;
position: absolute;
- z-index: 2;
}
</style>
-<div class="timeline">
- <div id="legend">
- <table>
- <thead>
- <tr>
- <td>Color</td>
- <td>Type</td>
- <td>Count</td>
- <td>Percent</td>
- </tr>
- </thead>
- <tbody id="legendContent">
- </tbody>
- </table>
- </div>
- <div id="timeline">
- <div class="leftHandle"></div>
- <div class="selection"></div>
- <div class="rightHandle"></div>
- <div id="timelineLabel">Frequency</div>
- <div id="timelineChunks"></div>
- <canvas id="timelineCanvas"></canvas>
- </div>
-</div>
+<table id="legend" class="typeStatsTable">
+ <thead>
+ <tr>
+ <td></td>
+ <td>Type</td>
+ <td>Count</td>
+ <td>Percent</td>
+ </tr>
+ </thead>
+ <tbody id="legendContent">
+ </tbody>
+</table>
+<div id="timeline">
+ <div class="leftHandle"></div>
+ <div class="selection"></div>
+ <div class="rightHandle"></div>
+ <div id="timelineLabel">Frequency</div>
+ <div id="timelineChunks"></div>
+ <canvas id="timelineCanvas"></canvas>
+</div>
\ No newline at end of file
diff --git a/deps/v8/tools/system-analyzer/timeline/timeline-track.mjs b/deps/v8/tools/system-analyzer/timeline/timeline-track.mjs
index 4905b782f2..a37bcce2c5 100644
--- a/deps/v8/tools/system-analyzer/timeline/timeline-track.mjs
+++ b/deps/v8/tools/system-analyzer/timeline/timeline-track.mjs
@@ -2,503 +2,517 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {
- defineCustomElement, V8CustomElement,
- typeToColor, CSSColor
-} from '../helper.mjs';
-import { kChunkWidth, kChunkHeight } from "../log/map.mjs";
-import {
- SelectionEvent, FocusEvent, SelectTimeEvent,
- SynchronizeSelectionEvent
-} from '../events.mjs';
-
-defineCustomElement('./timeline/timeline-track', (templateText) =>
- class TimelineTrack extends V8CustomElement {
- static SELECTION_OFFSET = 20;
- #timeline;
- #nofChunks = 400;
- #chunks;
- #selectedEntry;
- #timeToPixel;
- #timeSelection = { start: 0, end: Infinity };
- #isSelected = false;
- #timeStartOffset;
- #mouseDownTime;
- constructor() {
- super(templateText);
- this.timeline.addEventListener("scroll",
- e => this.handleTimelineScroll(e));
- this.timeline.addEventListener("mousedown",
- e => this.handleTimeSelectionMouseDown(e));
- this.timeline.addEventListener("mouseup",
- e => this.handleTimeSelectionMouseUp(e));
- this.timeline.addEventListener("mousemove",
- e => this.handleTimeSelectionMouseMove(e));
- this.backgroundCanvas = document.createElement('canvas');
- this.isLocked = false;
- }
+import {FocusEvent, SelectionEvent, SelectTimeEvent, SynchronizeSelectionEvent} from '../events.mjs';
+import {CSSColor, delay, DOM, V8CustomElement} from '../helper.mjs';
+import {kChunkHeight, kChunkWidth} from '../log/map.mjs';
+
+const kColors = [
+ CSSColor.green,
+ CSSColor.violet,
+ CSSColor.orange,
+ CSSColor.yellow,
+ CSSColor.primaryColor,
+ CSSColor.red,
+ CSSColor.blue,
+ CSSColor.yellow,
+ CSSColor.secondaryColor,
+];
+
+DOM.defineCustomElement('./timeline/timeline-track',
+ (templateText) =>
+ class TimelineTrack extends V8CustomElement {
+ // TODO turn into static field once Safari supports it.
+ static get SELECTION_OFFSET() {
+ return 10
+ };
+ _timeline;
+ _nofChunks = 400;
+ _chunks;
+ _selectedEntry;
+ _timeToPixel;
+ _timeSelection = {start: -1, end: Infinity};
+ _timeStartOffset;
+ _selectionOriginTime;
+ _typeToColor;
+ constructor() {
+ super(templateText);
+ this.timeline.addEventListener('scroll', e => this.handleTimelineScroll(e));
+ this.timeline.addEventListener(
+ 'mousedown', e => this.handleTimeSelectionMouseDown(e));
+ this.timeline.addEventListener(
+ 'mouseup', e => this.handleTimeSelectionMouseUp(e));
+ this.timeline.addEventListener(
+ 'mousemove', e => this.handleTimeSelectionMouseMove(e));
+ this.backgroundCanvas = document.createElement('canvas');
+ this.isLocked = false;
+ }
- handleTimeSelectionMouseDown(e) {
- if (e.target.className === "chunk") return;
- this.#isSelected = true;
- this.#mouseDownTime = this.positionToTime(e.clientX);
- }
- handleTimeSelectionMouseMove(e) {
- if (!this.#isSelected) return;
- let mouseMoveTime = this.positionToTime(e.clientX);
- let startTime = this.#mouseDownTime;
- let endTime = mouseMoveTime;
- if (this.isOnLeftHandle(e.clientX)) {
- startTime = mouseMoveTime;
- endTime = this.positionToTime(this.rightHandlePosX);
- } else if (this.isOnRightHandle(e.clientX)) {
- startTime = this.positionToTime(this.leftHandlePosX);
- endTime = mouseMoveTime;
- }
- this.dispatchEvent(new SynchronizeSelectionEvent(
- Math.min(startTime, endTime),
- Math.max(startTime, endTime)));
- }
- handleTimeSelectionMouseUp(e) {
- this.#isSelected = false;
- this.dispatchEvent(new SelectTimeEvent(this.#timeSelection.start,
- this.#timeSelection.end));
+ handleTimeSelectionMouseDown(e) {
+ let xPosition = e.clientX
+ // Update origin time in case we click on a handle.
+ if (this.isOnLeftHandle(xPosition)) {
+ xPosition = this.rightHandlePosX;
}
- isOnLeftHandle(posX) {
- return (Math.abs(this.leftHandlePosX - posX)
- <= TimelineTrack.SELECTION_OFFSET);
- }
- isOnRightHandle(posX) {
- return (Math.abs(this.rightHandlePosX - posX)
- <= TimelineTrack.SELECTION_OFFSET);
+ else if (this.isOnRightHandle(xPosition)) {
+ xPosition = this.leftHandlePosX;
}
+ this._selectionOriginTime = this.positionToTime(xPosition);
+ }
+ isOnLeftHandle(posX) {
+ return (
+ Math.abs(this.leftHandlePosX - posX) <= TimelineTrack.SELECTION_OFFSET);
+ }
- set startTime(value) {
- console.assert(
- value <= this.#timeSelection.end,
- "Selection start time greater than end time!");
- this.#timeSelection.start = value;
- this.updateSelection();
- }
- set endTime(value) {
- console.assert(
- value > this.#timeSelection.start,
- "Selection end time smaller than start time!");
- this.#timeSelection.end = value;
- this.updateSelection();
- }
+ isOnRightHandle(posX) {
+ return (
+ Math.abs(this.rightHandlePosX - posX) <=
+ TimelineTrack.SELECTION_OFFSET);
+ }
- updateSelection() {
- let startTimePos = this.timeToPosition(this.#timeSelection.start);
- let endTimePos = this.timeToPosition(this.#timeSelection.end);
- this.leftHandle.style.left = startTimePos + "px";
- this.selection.style.left = startTimePos + "px";
- this.rightHandle.style.left = endTimePos + "px";
- this.selection.style.width =
- Math.abs(this.rightHandlePosX - this.leftHandlePosX) + "px";
- }
+ handleTimeSelectionMouseMove(e) {
+ if (!this._isSelecting) return;
+ const currentTime = this.positionToTime(e.clientX);
+ this.dispatchEvent(new SynchronizeSelectionEvent(
+ Math.min(this._selectionOriginTime, currentTime),
+ Math.max(this._selectionOriginTime, currentTime)));
+ }
- get leftHandlePosX() {
- let leftHandlePosX = this.leftHandle.getBoundingClientRect().x;
- return leftHandlePosX;
- }
- get rightHandlePosX() {
- let rightHandlePosX = this.rightHandle.getBoundingClientRect().x;
- return rightHandlePosX;
- }
+ handleTimeSelectionMouseUp(e) {
+ this._selectionOriginTime = -1;
+ const delta = this._timeSelection.end - this._timeSelection.start;
+ if (delta <= 1 || isNaN(delta)) return;
+ this.dispatchEvent(new SelectTimeEvent(
+ this._timeSelection.start, this._timeSelection.end));
+ }
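
The rewritten handlers above collapse the old #isSelected flag and #mouseDownTime into a single origin time, with a negative value doubling as the "not selecting" sentinel. A minimal sketch of that state machine, with illustrative names rather than the component's actual API:

    // Drag-selection state: the origin is a time value, and a negative
    // origin doubles as the "not selecting" flag.
    class SelectionState {
      constructor() { this._originTime = -1; }
      get isSelecting() { return this._originTime >= 0; }
      begin(time) { this._originTime = time; }
      // Returns the normalized [start, end] range, or null when idle.
      update(time) {
        if (!this.isSelecting) return null;
        return [Math.min(this._originTime, time),
                Math.max(this._originTime, time)];
      }
      end() { this._originTime = -1; }
    }
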
- // Maps the clicked x position to the x position on timeline canvas
- positionOnTimeline(posX) {
- let rect = this.timeline.getBoundingClientRect();
- let posClickedX = posX - rect.left + this.timeline.scrollLeft;
- return posClickedX;
- }
+ set timeSelection(selection) {
+ this._timeSelection.start = selection.start;
+ this._timeSelection.end = selection.end;
+ this.updateSelection();
+ }
- positionToTime(posX) {
- let posTimelineX = this.positionOnTimeline(posX) + this.#timeStartOffset;
- return posTimelineX / this.#timeToPixel;
- }
+ get _isSelecting() {
+ return this._selectionOriginTime >= 0;
+ }
- timeToPosition(time) {
- let posX = time * this.#timeToPixel;
- posX -= this.#timeStartOffset
- return posX;
- }
+ updateSelection() {
+ const startPosition = this.timeToPosition(this._timeSelection.start);
+ const endPosition = this.timeToPosition(this._timeSelection.end);
+ const delta = endPosition - startPosition;
+ this.leftHandle.style.left = startPosition + 'px';
+ this.selection.style.left = startPosition + 'px';
+ this.rightHandle.style.left = endPosition + 'px';
+ this.selection.style.width = delta + 'px';
+ }
- get leftHandle() {
- return this.$('.leftHandle');
- }
- get rightHandle() {
- return this.$('.rightHandle');
- }
- get selection() {
- return this.$('.selection');
- }
+ get leftHandlePosX() {
+ return this.leftHandle.getBoundingClientRect().x;
+ }
- get timelineCanvas() {
- return this.$('#timelineCanvas');
- }
+ get rightHandlePosX() {
+ return this.rightHandle.getBoundingClientRect().x;
+ }
- get timelineChunks() {
- return this.$('#timelineChunks');
- }
+ // Maps the clicked x position to the x position on timeline canvas
+ positionOnTimeline(posX) {
+ let rect = this.timeline.getBoundingClientRect();
+ let posClickedX = posX - rect.left + this.timeline.scrollLeft;
+ return posClickedX;
+ }
- get timeline() {
- return this.$('#timeline');
- }
+ positionToTime(posX) {
+ let posTimelineX = this.positionOnTimeline(posX) + this._timeStartOffset;
+ return posTimelineX / this._timeToPixel;
+ }
- get timelineLegend() {
- return this.$('#legend');
- }
+ timeToPosition(time) {
+ let posX = time * this._timeToPixel;
+ posX -= this._timeStartOffset
+ return posX;
+ }
- get timelineLegendContent() {
- return this.$('#legendContent');
- }
- set data(value) {
- this.#timeline = value;
- this.updateChunks();
- this.updateTimeline();
- this.renderLegend();
- }
+ get leftHandle() {
+ return this.$('.leftHandle');
+ }
- get data() {
- return this.#timeline;
- }
+ get rightHandle() {
+ return this.$('.rightHandle');
+ }
- set nofChunks(count) {
- this.#nofChunks = count;
- this.updateChunks();
- this.updateTimeline();
- }
- get nofChunks() {
- return this.#nofChunks;
- }
- updateChunks() {
- this.#chunks = this.data.chunks(this.nofChunks);
- }
- get chunks() {
- return this.#chunks;
- }
- set selectedEntry(value) {
- this.#selectedEntry = value;
- if (value.edge) this.redraw();
- }
- get selectedEntry() {
- return this.#selectedEntry;
- }
+ get selection() {
+ return this.$('.selection');
+ }
+
+ get timelineCanvas() {
+ return this.$('#timelineCanvas');
+ }
+
+ get timelineChunks() {
+ return this.$('#timelineChunks');
+ }
+
+ get timeline() {
+ return this.$('#timeline');
+ }
+
+ get timelineLegend() {
+ return this.$('#legend');
+ }
+
+ get timelineLegendContent() {
+ return this.$('#legendContent');
+ }
+
+ set data(value) {
+ this._timeline = value;
+ this._resetTypeToColorCache();
+ this.update();
+ }
+
+ _update() {
+ this._updateChunks();
+ this._updateTimeline();
+ this._renderLegend();
+ }
- set scrollLeft(offset) {
- this.timeline.scrollLeft = offset;
+ _resetTypeToColorCache() {
+ this._typeToColor = new Map();
+ let lastIndex = 0;
+ for (const type of this.data.uniqueTypes.keys()) {
+ this._typeToColor.set(type, kColors[lastIndex++]);
}
+ }
+
+ get data() {
+ return this._timeline;
+ }
+
+ set nofChunks(count) {
+ this._nofChunks = count;
+ this.update();
+ }
+
+ get nofChunks() {
+ return this._nofChunks;
+ }
- renderLegend() {
- let timelineLegend = this.timelineLegend;
- let timelineLegendContent = this.timelineLegendContent;
- this.removeAllChildren(timelineLegendContent);
- let row = this.tr();
- row.entries = this.data.all;
- row.classList.add('clickable');
+ _updateChunks() {
+ this._chunks = this.data.chunks(this.nofChunks);
+ }
+
+ get chunks() {
+ return this._chunks;
+ }
+
+ set selectedEntry(value) {
+ this._selectedEntry = value;
+ if (value.edge) this.redraw();
+ }
+
+ get selectedEntry() {
+ return this._selectedEntry;
+ }
+
+ set scrollLeft(offset) {
+ this.timeline.scrollLeft = offset;
+ }
+
+ typeToColor(type) {
+ return this._typeToColor.get(type);
+ }
+
+ _renderLegend() {
+ let timelineLegendContent = this.timelineLegendContent;
+ DOM.removeAllChildren(timelineLegendContent);
+ this._timeline.uniqueTypes.forEach((entries, type) => {
+ let row = DOM.tr('clickable');
+ row.entries = entries;
row.addEventListener('dblclick', e => this.handleEntryTypeDblClick(e));
- row.appendChild(this.td(""));
- let td = this.td("All");
+ let color = this.typeToColor(type);
+ if (color !== null) {
+ let div = DOM.div('colorbox');
+ div.style.backgroundColor = color;
+ row.appendChild(DOM.td(div));
+ } else {
+ row.appendChild(DOM.td());
+ }
+ let td = DOM.td(type);
row.appendChild(td);
- row.appendChild(this.td(this.data.all.length));
- row.appendChild(this.td("100%"));
+ row.appendChild(DOM.td(entries.length));
+ let percent = (entries.length / this.data.all.length) * 100;
+ row.appendChild(DOM.td(percent.toFixed(1) + '%'));
timelineLegendContent.appendChild(row);
- let colorIterator = 0;
- this.#timeline.uniqueTypes.forEach((entries, type) => {
- let row = this.tr();
- row.entries = entries;
- row.classList.add('clickable');
- row.addEventListener('dblclick', e => this.handleEntryTypeDblClick(e));
- let color = typeToColor(type);
- if (color !== null) {
- let div = this.div(["colorbox"]);
- div.style.backgroundColor = color;
- row.appendChild(this.td(div));
- } else {
- row.appendChild(this.td(""));
- }
- let td = this.td(type);
- row.appendChild(td);
- row.appendChild(this.td(entries.length));
- let percent = (entries.length / this.data.all.length) * 100;
- row.appendChild(this.td(percent.toFixed(1) + "%"));
- timelineLegendContent.appendChild(row);
- colorIterator += 1;
- });
- timelineLegend.appendChild(timelineLegendContent);
- }
+ });
+ // Add Total row.
+ let row = DOM.tr();
+ row.appendChild(DOM.td(''));
+ row.appendChild(DOM.td('All'));
+ row.appendChild(DOM.td(this.data.all.length));
+ row.appendChild(DOM.td('100%'));
+ timelineLegendContent.appendChild(row);
+ this.timelineLegend.appendChild(timelineLegendContent);
+ }
- handleEntryTypeDblClick(e) {
- this.dispatchEvent(new SelectionEvent(e.target.parentNode.entries));
- }
+ handleEntryTypeDblClick(e) {
+ this.dispatchEvent(new SelectionEvent(e.target.parentNode.entries));
+ }
- timelineIndicatorMove(offset) {
- this.timeline.scrollLeft += offset;
- }
+ timelineIndicatorMove(offset) {
+ this.timeline.scrollLeft += offset;
+ }
- handleTimelineScroll(e) {
- let horizontal = e.currentTarget.scrollLeft;
- this.dispatchEvent(new CustomEvent(
- 'scrolltrack', {
- bubbles: true, composed: true,
- detail: horizontal
- }));
- }
+ handleTimelineScroll(e) {
+ let horizontal = e.currentTarget.scrollLeft;
+ this.dispatchEvent(new CustomEvent(
+ 'scrolltrack', {bubbles: true, composed: true, detail: horizontal}));
+ }
- asyncSetTimelineChunkBackground(backgroundTodo) {
- const kIncrement = 100;
- let start = 0;
- let delay = 1;
- while (start < backgroundTodo.length) {
- let end = Math.min(start + kIncrement, backgroundTodo.length);
- setTimeout((from, to) => {
- for (let i = from; i < to; i++) {
- let [chunk, node] = backgroundTodo[i];
- this.setTimelineChunkBackground(chunk, node);
- }
- }, delay++, start, end);
- start = end;
+ async setChunkBackgrounds(backgroundTodo) {
+ const kMaxDuration = 50;
+ let lastTime = 0;
+ for (let [chunk, node] of backgroundTodo) {
+ const current = performance.now();
+ if (current - lastTime > kMaxDuration) {
+ await delay(25);
+ lastTime = current;
}
+ this.setChunkBackground(chunk, node);
}
+ }
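
setChunkBackgrounds() above replaces the old setTimeout batching with an async loop that yields cooperatively once a time budget is spent. The same pattern in isolation, assuming delay() is a promise-based sleep like the helper imported from helper.mjs:

    // Cooperative batching: do the work in a plain loop, but yield to the
    // event loop whenever a time budget is exceeded so rendering and input
    // stay responsive. `delay` is assumed to behave like the helper.mjs one.
    const delay = ms => new Promise(resolve => setTimeout(resolve, ms));

    async function processInBatches(items, workFn, budgetMs = 50) {
      let batchStart = performance.now();
      for (const item of items) {
        if (performance.now() - batchStart > budgetMs) {
          await delay(25);                // hand control back to the browser
          batchStart = performance.now();
        }
        workFn(item);
      }
    }
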
- setTimelineChunkBackground(chunk, node) {
- // Render the types of transitions as bar charts
- const kHeight = chunk.height;
- const kWidth = 1;
- this.backgroundCanvas.width = kWidth;
- this.backgroundCanvas.height = kHeight;
- let ctx = this.backgroundCanvas.getContext('2d');
- ctx.clearRect(0, 0, kWidth, kHeight);
- let y = 0;
- let total = chunk.size();
- let type, count;
- if (true) {
- chunk.getBreakdown(map => map.type).forEach(([type, count]) => {
- ctx.fillStyle = typeToColor(type);
- let height = count / total * kHeight;
- ctx.fillRect(0, y, kWidth, y + height);
- y += height;
- });
- } else {
- chunk.items.forEach(map => {
- ctx.fillStyle = typeToColor(map.type);
- let y = chunk.yOffset(map);
- ctx.fillRect(0, y, kWidth, y + 1);
- });
- }
-
- let imageData = this.backgroundCanvas.toDataURL('image/webp', 0.2);
- node.style.backgroundImage = 'url(' + imageData + ')';
+ setChunkBackground(chunk, node) {
+ // Render the types of transitions as bar charts
+ const kHeight = chunk.height;
+ const kWidth = 1;
+ this.backgroundCanvas.width = kWidth;
+ this.backgroundCanvas.height = kHeight;
+ let ctx = this.backgroundCanvas.getContext('2d');
+ ctx.clearRect(0, 0, kWidth, kHeight);
+ let y = 0;
+ let total = chunk.size();
+ let type, count;
+ if (true) {
+ chunk.getBreakdown(map => map.type).forEach(([type, count]) => {
+ ctx.fillStyle = this.typeToColor(type);
+ let height = count / total * kHeight;
+ ctx.fillRect(0, y, kWidth, y + height);
+ y += height;
+ });
+ } else {
+ chunk.items.forEach(map => {
+ ctx.fillStyle = this.typeToColor(map.type);
+ let y = chunk.yOffset(map);
+ ctx.fillRect(0, y, kWidth, y + 1);
+ });
}
- updateTimeline() {
- let chunksNode = this.timelineChunks;
- this.removeAllChildren(chunksNode);
- let chunks = this.chunks;
- let max = chunks.max(each => each.size());
- let start = this.data.startTime;
- let end = this.data.endTime;
- let duration = end - start;
- this.#timeToPixel = chunks.length * kChunkWidth / duration;
- this.#timeStartOffset = start * this.#timeToPixel;
- let addTimestamp = (time, name) => {
- let timeNode = this.div('timestamp');
- timeNode.innerText = name;
- timeNode.style.left = ((time - start) * this.#timeToPixel) + 'px';
- chunksNode.appendChild(timeNode);
- };
- let backgroundTodo = [];
- for (let i = 0; i < chunks.length; i++) {
- let chunk = chunks[i];
- let height = (chunk.size() / max * kChunkHeight);
- chunk.height = height;
- if (chunk.isEmpty()) continue;
- let node = this.div();
- node.className = 'chunk';
- node.style.left =
- ((chunks[i].start - start) * this.#timeToPixel) + 'px';
- node.style.height = height + 'px';
- node.chunk = chunk;
- node.addEventListener('mousemove', e => this.handleChunkMouseMove(e));
- node.addEventListener('click', e => this.handleChunkClick(e));
- node.addEventListener('dblclick', e => this.handleChunkDoubleClick(e));
- backgroundTodo.push([chunk, node])
- chunksNode.appendChild(node);
- }
- this.asyncSetTimelineChunkBackground(backgroundTodo)
-
- // Put a time marker roughly every 20 chunks.
- let expected = duration / chunks.length * 20;
- let interval = (10 ** Math.floor(Math.log10(expected)));
- let correction = Math.log10(expected / interval);
- correction = (correction < 0.33) ? 1 : (correction < 0.75) ? 2.5 : 5;
- interval *= correction;
-
- let time = start;
- while (time < end) {
- addTimestamp(time, ((time - start) / 1000) + ' ms');
- time += interval;
- }
- this.redraw();
- }
+ let imageData = this.backgroundCanvas.toDataURL('image/webp', 0.2);
+ node.style.backgroundImage = `url(${imageData})`;
+ }
- handleChunkMouseMove(event) {
- if (this.isLocked) return false;
- let chunk = event.target.chunk;
- if (!chunk) return;
- // topmost map (at chunk.height) == map #0.
- let relativeIndex =
+ _updateTimeline() {
+ let chunksNode = this.timelineChunks;
+ DOM.removeAllChildren(chunksNode);
+ let chunks = this.chunks;
+ let max = chunks.max(each => each.size());
+ let start = this.data.startTime;
+ let end = this.data.endTime;
+ let duration = end - start;
+ this._timeToPixel = chunks.length * kChunkWidth / duration;
+ this._timeStartOffset = start * this._timeToPixel;
+ let addTimestamp = (time, name) => {
+ let timeNode = DOM.div('timestamp');
+ timeNode.innerText = name;
+ timeNode.style.left = ((time - start) * this._timeToPixel) + 'px';
+ chunksNode.appendChild(timeNode);
+ };
+ let backgroundTodo = [];
+ for (let i = 0; i < chunks.length; i++) {
+ let chunk = chunks[i];
+ let height = (chunk.size() / max * kChunkHeight);
+ chunk.height = height;
+ if (chunk.isEmpty()) continue;
+ let node = DOM.div();
+ node.className = 'chunk';
+ node.style.left = ((chunks[i].start - start) * this._timeToPixel) + 'px';
+ node.style.height = height + 'px';
+ node.chunk = chunk;
+ node.addEventListener('mousemove', e => this.handleChunkMouseMove(e));
+ node.addEventListener('click', e => this.handleChunkClick(e));
+ node.addEventListener('dblclick', e => this.handleChunkDoubleClick(e));
+ backgroundTodo.push([chunk, node])
+ chunksNode.appendChild(node);
+ }
+ this.setChunkBackgrounds(backgroundTodo);
+
+ // Put a time marker roughly every 20 chunks.
+ let expected = duration / chunks.length * 20;
+ let interval = (10 ** Math.floor(Math.log10(expected)));
+ let correction = Math.log10(expected / interval);
+ correction = (correction < 0.33) ? 1 : (correction < 0.75) ? 2.5 : 5;
+ interval *= correction;
+
+ let time = start;
+ while (time < end) {
+ addTimestamp(time, ((time - start) / 1000) + ' ms');
+ time += interval;
+ }
+ this.redraw();
+ }
+
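
The time-marker placement above rounds the raw spacing (the duration of roughly 20 chunks) to a "nice" interval: a power of ten scaled by 1, 2.5, or 5. As a standalone sketch with worked values:

    // Round a raw spacing to a "nice" interval: a power of ten scaled by
    // 1, 2.5 or 5, mirroring the marker logic in _updateTimeline() above.
    function niceInterval(raw) {
      let interval = 10 ** Math.floor(Math.log10(raw));
      let correction = Math.log10(raw / interval);  // fractional part, in [0, 1)
      correction = (correction < 0.33) ? 1 : (correction < 0.75) ? 2.5 : 5;
      return interval * correction;
    }

    // niceInterval(17) => 10, niceInterval(40) => 25, niceInterval(70) => 50
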
+ handleChunkMouseMove(event) {
+ if (this.isLocked) return false;
+ if (this._isSelecting) return false;
+ let chunk = event.target.chunk;
+ if (!chunk) return;
+ // topmost map (at chunk.height) == map #0.
+ let relativeIndex =
Math.round(event.layerY / event.target.offsetHeight * chunk.size());
- let map = chunk.at(relativeIndex);
- this.dispatchEvent(new FocusEvent(map));
- }
+ let map = chunk.at(relativeIndex);
+ this.dispatchEvent(new FocusEvent(map));
+ }
- handleChunkClick(event) {
- this.isLocked = !this.isLocked;
- }
+ handleChunkClick(event) {
+ this.isLocked = !this.isLocked;
+ }
- handleChunkDoubleClick(event) {
- this.isLocked = true;
- let chunk = event.target.chunk;
- if (!chunk) return;
- let maps = chunk.items;
- this.dispatchEvent(new SelectionEvent(maps));
- }
+ handleChunkDoubleClick(event) {
+ let chunk = event.target.chunk;
+ if (!chunk) return;
+ this.dispatchEvent(new SelectTimeEvent(chunk.start, chunk.end));
+ }
- redraw() {
- let canvas = this.timelineCanvas;
- canvas.width = (this.chunks.length + 1) * kChunkWidth;
- canvas.height = kChunkHeight;
- let ctx = canvas.getContext('2d');
- ctx.clearRect(0, 0, canvas.width, kChunkHeight);
- if (!this.selectedEntry || !this.selectedEntry.edge) return;
- this.drawEdges(ctx);
- }
- setMapStyle(map, ctx) {
- ctx.fillStyle = map.edge && map.edge.from ?
- CSSColor.onBackgroundColor : CSSColor.onPrimaryColor;
- }
+ redraw() {
+ let canvas = this.timelineCanvas;
+ canvas.width = (this.chunks.length + 1) * kChunkWidth;
+ canvas.height = kChunkHeight;
+ let ctx = canvas.getContext('2d');
+ ctx.clearRect(0, 0, canvas.width, kChunkHeight);
+ if (!this.selectedEntry || !this.selectedEntry.edge) return;
+ this.drawEdges(ctx);
+ }
+ setMapStyle(map, ctx) {
+ ctx.fillStyle = map.edge && map.edge.from ? CSSColor.onBackgroundColor :
+ CSSColor.onPrimaryColor;
+ }
- setEdgeStyle(edge, ctx) {
- let color = typeToColor(edge.type);
- ctx.strokeStyle = color;
- ctx.fillStyle = color;
- }
+ setEdgeStyle(edge, ctx) {
+ let color = this.typeToColor(edge.type);
+ ctx.strokeStyle = color;
+ ctx.fillStyle = color;
+ }
- markMap(ctx, map) {
- let [x, y] = map.position(this.chunks);
- ctx.beginPath();
- this.setMapStyle(map, ctx);
- ctx.arc(x, y, 3, 0, 2 * Math.PI);
- ctx.fill();
- ctx.beginPath();
- ctx.fillStyle = CSSColor.onBackgroundColor;
- ctx.arc(x, y, 2, 0, 2 * Math.PI);
- ctx.fill();
- }
+ markMap(ctx, map) {
+ let [x, y] = map.position(this.chunks);
+ ctx.beginPath();
+ this.setMapStyle(map, ctx);
+ ctx.arc(x, y, 3, 0, 2 * Math.PI);
+ ctx.fill();
+ ctx.beginPath();
+ ctx.fillStyle = CSSColor.onBackgroundColor;
+ ctx.arc(x, y, 2, 0, 2 * Math.PI);
+ ctx.fill();
+ }
- markSelectedMap(ctx, map) {
- let [x, y] = map.position(this.chunks);
- ctx.beginPath();
- this.setMapStyle(map, ctx);
- ctx.arc(x, y, 6, 0, 2 * Math.PI);
- ctx.strokeStyle = CSSColor.onBackgroundColor;
- ctx.stroke();
- }
+ markSelectedMap(ctx, map) {
+ let [x, y] = map.position(this.chunks);
+ ctx.beginPath();
+ this.setMapStyle(map, ctx);
+ ctx.arc(x, y, 6, 0, 2 * Math.PI);
+ ctx.strokeStyle = CSSColor.onBackgroundColor;
+ ctx.stroke();
+ }
- drawEdges(ctx) {
- // Draw the trace of maps in reverse order to make sure the outgoing
- // transitions of previous maps aren't drawn over.
- const kMaxOutgoingEdges = 100;
- let nofEdges = 0;
- let stack = [];
- let current = this.selectedEntry;
- while (current && nofEdges < kMaxOutgoingEdges) {
- nofEdges += current.children.length;
- stack.push(current);
- current = current.parent();
+ drawEdges(ctx) {
+ // Draw the trace of maps in reverse order to make sure the outgoing
+ // transitions of previous maps aren't drawn over.
+ const kMaxOutgoingEdges = 100;
+ let nofEdges = 0;
+ let stack = [];
+ let current = this.selectedEntry;
+ while (current && nofEdges < kMaxOutgoingEdges) {
+ nofEdges += current.children.length;
+ stack.push(current);
+ current = current.parent();
+ }
+ ctx.save();
+ this.drawOutgoingEdges(ctx, this.selectedEntry, 3);
+ ctx.restore();
+
+ let labelOffset = 15;
+ let xPrev = 0;
+ while (current = stack.pop()) {
+ if (current.edge) {
+ this.setEdgeStyle(current.edge, ctx);
+ let [xTo, yTo] = this.drawEdge(ctx, current.edge, true, labelOffset);
+ if (xTo == xPrev) {
+ labelOffset += 8;
+ } else {
+ labelOffset = 15
+ }
+ xPrev = xTo;
}
+ this.markMap(ctx, current);
+ current = current.parent();
ctx.save();
- this.drawOutgoingEdges(ctx, this.selectedEntry, 3);
+ // this.drawOutgoingEdges(ctx, current, 1);
ctx.restore();
-
- let labelOffset = 15;
- let xPrev = 0;
- while (current = stack.pop()) {
- if (current.edge) {
- this.setEdgeStyle(current.edge, ctx);
- let [xTo, yTo] = this.drawEdge(ctx, current.edge, true, labelOffset);
- if (xTo == xPrev) {
- labelOffset += 8;
- } else {
- labelOffset = 15
- }
- xPrev = xTo;
- }
- this.markMap(ctx, current);
- current = current.parent();
- ctx.save();
- // this.drawOutgoingEdges(ctx, current, 1);
- ctx.restore();
- }
- // Mark selected map
- this.markSelectedMap(ctx, this.selectedEntry);
}
+ // Mark selected map
+ this.markSelectedMap(ctx, this.selectedEntry);
+ }
- drawEdge(ctx, edge, showLabel = true, labelOffset = 20) {
- if (!edge.from || !edge.to) return [-1, -1];
- let [xFrom, yFrom] = edge.from.position(this.chunks);
- let [xTo, yTo] = edge.to.position(this.chunks);
- let sameChunk = xTo == xFrom;
- if (sameChunk) labelOffset += 8;
-
- ctx.beginPath();
- ctx.moveTo(xFrom, yFrom);
- let offsetX = 20;
- let offsetY = 20;
- let midX = xFrom + (xTo - xFrom) / 2;
- let midY = (yFrom + yTo) / 2 - 100;
+ drawEdge(ctx, edge, showLabel = true, labelOffset = 20) {
+ if (!edge.from || !edge.to) return [-1, -1];
+ let [xFrom, yFrom] = edge.from.position(this.chunks);
+ let [xTo, yTo] = edge.to.position(this.chunks);
+ let sameChunk = xTo == xFrom;
+ if (sameChunk) labelOffset += 8;
+
+ ctx.beginPath();
+ ctx.moveTo(xFrom, yFrom);
+ let offsetX = 20;
+ let offsetY = 20;
+ let midX = xFrom + (xTo - xFrom) / 2;
+ let midY = (yFrom + yTo) / 2 - 100;
+ if (!sameChunk) {
+ ctx.quadraticCurveTo(midX, midY, xTo, yTo);
+ } else {
+ ctx.lineTo(xTo, yTo);
+ }
+ if (!showLabel) {
+ ctx.stroke();
+ } else {
+ let centerX, centerY;
if (!sameChunk) {
- ctx.quadraticCurveTo(midX, midY, xTo, yTo);
+ centerX = (xFrom / 2 + midX + xTo / 2) / 2;
+ centerY = (yFrom / 2 + midY + yTo / 2) / 2;
} else {
- ctx.lineTo(xTo, yTo);
+ centerX = xTo;
+ centerY = yTo;
}
- if (!showLabel) {
- ctx.stroke();
- } else {
- let centerX, centerY;
- if (!sameChunk) {
- centerX = (xFrom / 2 + midX + xTo / 2) / 2;
- centerY = (yFrom / 2 + midY + yTo / 2) / 2;
- } else {
- centerX = xTo;
- centerY = yTo;
- }
- ctx.moveTo(centerX, centerY);
- ctx.lineTo(centerX + offsetX, centerY - labelOffset);
- ctx.stroke();
- ctx.textAlign = 'left';
- ctx.fillStyle = typeToColor(edge.type);
- ctx.fillText(
+ ctx.moveTo(centerX, centerY);
+ ctx.lineTo(centerX + offsetX, centerY - labelOffset);
+ ctx.stroke();
+ ctx.textAlign = 'left';
+ ctx.fillStyle = this.typeToColor(edge.type);
+ ctx.fillText(
edge.toString(), centerX + offsetX + 2, centerY - labelOffset);
- }
- return [xTo, yTo];
}
+ return [xTo, yTo];
+ }
- drawOutgoingEdges(ctx, map, max = 10, depth = 0) {
- if (!map) return;
- if (depth >= max) return;
- ctx.globalAlpha = 0.5 - depth * (0.3 / max);
- ctx.strokeStyle = CSSColor.timelineBackgroundColor;
- const limit = Math.min(map.children.length, 100)
- for (let i = 0; i < limit; i++) {
- let edge = map.children[i];
- this.drawEdge(ctx, edge, true);
- this.drawOutgoingEdges(ctx, edge.to, max, depth + 1);
- }
+ drawOutgoingEdges(ctx, map, max = 10, depth = 0) {
+ if (!map) return;
+ if (depth >= max) return;
+ ctx.globalAlpha = 0.5 - depth * (0.3 / max);
+ ctx.strokeStyle = CSSColor.timelineBackgroundColor;
+ const limit = Math.min(map.children.length, 100)
+ for (let i = 0; i < limit; i++) {
+ let edge = map.children[i];
+ this.drawEdge(ctx, edge, true);
+ this.drawOutgoingEdges(ctx, edge.to, max, depth + 1);
}
}
-);
+});
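
With typeToColor() now an instance method backed by a per-track cache instead of a shared helper, each track assigns palette entries to types in first-seen order. A sketch of that assignment with a placeholder palette; note that once the palette is exhausted, further types map to undefined:

    // Sketch of the per-track color assignment done in
    // _resetTypeToColorCache(): each unique type takes the next palette
    // entry in first-seen order. Palette values here are placeholders.
    const kPalette = ['green', 'violet', 'orange', 'yellow', 'red', 'blue'];

    function buildTypeToColor(uniqueTypes /* Map of type => entries */) {
      const typeToColor = new Map();
      let index = 0;
      for (const type of uniqueTypes.keys()) {
        // With more types than palette entries this stores undefined;
        // callers must tolerate a missing color.
        typeToColor.set(type, kPalette[index++]);
      }
      return typeToColor;
    }
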
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index d3674a4f8b..54a9e61b16 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -351,9 +351,6 @@ class BaseTestRunner(object):
help="Path to a file for storing json results.")
parser.add_option('--slow-tests-cutoff', type="int", default=100,
help='Collect N slowest tests')
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite", default="v8tests",
- help="The testsuite name in the JUnit output file")
parser.add_option("--exit-after-n-failures", type="int", default=100,
help="Exit after the first N failures instead of "
"running all tests. Pass 0 to disable this feature.")
@@ -760,9 +757,6 @@ class BaseTestRunner(object):
def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
- if options.junitout:
- procs.append(progress.JUnitTestProgressIndicator(options.junitout,
- options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(self.framework_name))
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
deleted file mode 100644
index 52f31ec422..0000000000
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import xml.etree.ElementTree as xml
-
-
-class JUnitTestOutput:
- def __init__(self, test_suite_name):
- self.root = xml.Element("testsuite")
- self.root.attrib["name"] = test_suite_name
-
- def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
- testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = test_name
- testCaseElement.attrib["cmd"] = test_cmd
- testCaseElement.attrib["time"] = str(round(test_duration, 3))
- if len(test_failure):
- failureElement = xml.Element("failure")
- failureElement.text = test_failure
- testCaseElement.append(failureElement)
- self.root.append(testCaseElement)
-
- def FinishAndWrite(self, f):
- xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index 0f8e20536c..4236c1678a 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -12,7 +12,7 @@ ALL_VARIANT_FLAGS = {
# Alias of exhaustive variants, but triggering new test framework features.
"infra_staging": [[]],
"interpreted_regexp": [["--regexp-interpret-all"]],
- "experimental_regexp": [["--enable-experimental-regexp-engine"]],
+ "experimental_regexp": [["--default-to-experimental-regexp-engine"]],
"jitless": [["--jitless"]],
"minor_mc": [["--minor-mc"]],
"nci": [["--turbo-nci"]],
@@ -28,11 +28,13 @@ ALL_VARIANT_FLAGS = {
# For WebAssembly, we test "Liftoff-only" in the nooptimization variant and
# "TurboFan-only" in the stress variant. The WebAssembly configuration is
# independent of JS optimizations, so we can combine those configs.
- "nooptimization": [["--no-opt", "--liftoff", "--no-wasm-tier-up"]],
+ "nooptimization": [["--no-opt", "--liftoff", "--no-wasm-tier-up",
+ "--wasm-generic-wrapper"]],
"slow_path": [["--force-slow-path"]],
"stress": [["--stress-opt", "--no-liftoff", "--stress-lazy-source-positions"]],
"stress_concurrent_allocation": [["--stress-concurrent-allocation"]],
"stress_js_bg_compile_wasm_code_gc": [["--stress-background-compile",
+ "--finalize-streaming-on-background",
"--stress-wasm-code-gc"]],
"stress_incremental_marking": [["--stress-incremental-marking"]],
"stress_snapshot": [["--stress-snapshot"]],
@@ -59,10 +61,10 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = {
"stress_js_bg_compile_wasm_code_gc": ["--no-stress-background-compile"],
"stress": ["--no-stress-opt", "--always-opt", "--no-always-opt", "--liftoff", "--max-inlined-bytecode-size=*",
"--max-inlined-bytecode-size-cumulative=*", "--stress-inline"],
- "turboprop": ["--turbo-inlining", "--interrupt-budget=*", "--no-turboprop"],
+ "turboprop": ["--interrupt-budget=*", "--no-turboprop"],
"code_serializer": ["--cache=after-execute", "--cache=full-code-cache", "--cache=none"],
"no_local_heaps": ["--concurrent-inlining", "--turboprop"],
- "experimental_regexp": ["--no-enable-experimental-regexp-engine"],
+ "experimental_regexp": ["--no-enable-experimental-regexp-engine", "--no-default-to-experimental-regexp-engine"],
}
# Flags that lead to a contradiction under certain build variables.
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index 72ca01a421..e037f99679 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -49,13 +49,21 @@ RESOURCES_PATTERN = re.compile(r"//\s+Resources:(.*)")
LOAD_PATTERN = re.compile(
r"(?:load|readbuffer|read)\((?:'|\")([^'\"]+)(?:'|\")\)")
# Pattern to auto-detect files to push on Android for statements like:
-# import "path/to/file.js"
-MODULE_RESOURCES_PATTERN_1 = re.compile(
- r"(?:import|export)(?:\(| )(?:'|\")([^'\"]+)(?:'|\")")
-# Pattern to auto-detect files to push on Android for statements like:
# import foobar from "path/to/file.js"
-MODULE_RESOURCES_PATTERN_2 = re.compile(
- r"(?:import|export).*from (?:'|\")([^'\"]+)(?:'|\")")
+# import {foo, bar} from "path/to/file.js"
+# export {"foo" as "bar"} from "path/to/file.js"
+MODULE_FROM_RESOURCES_PATTERN = re.compile(
+ r"(?:import|export).*?from\s*\(?['\"]([^'\"]+)['\"]",
+ re.MULTILINE | re.DOTALL)
+# Pattern to detect files to push on Android for statements like:
+# import "path/to/file.js"
+# import("module.mjs").catch()...
+MODULE_IMPORT_RESOURCES_PATTERN = re.compile(
+ r"import\s*\(?['\"]([^'\"]+)['\"]",
+ re.MULTILINE | re.DOTALL)
+# Pattern to detect and strip test262 frontmatter from tests to prevent false
+# positives for the MODULE_*_RESOURCES_PATTERN patterns above.
+TEST262_FRONTMATTER_PATTERN = re.compile(r"/\*---.*?---\*/", re.DOTALL)
TIMEOUT_LONG = "long"
@@ -402,19 +410,26 @@ class D8TestCase(TestCase):
result = []
def add_path(path):
result.append(os.path.abspath(path.replace('/', os.path.sep)))
+ def add_import_path(import_path):
+ add_path(os.path.normpath(
+ os.path.join(os.path.dirname(file), import_path)))
+ def strip_test262_frontmatter(input):
+ return TEST262_FRONTMATTER_PATTERN.sub('', input)
for match in RESOURCES_PATTERN.finditer(source):
# There are several resources per line. Relative to base dir.
for path in match.group(1).strip().split():
add_path(path)
+ # Strip test262 frontmatter before looking for load() and import/export
+ # statements.
+ source = strip_test262_frontmatter(source)
for match in LOAD_PATTERN.finditer(source):
# Files in load statements are relative to base dir.
add_path(match.group(1))
- for match in MODULE_RESOURCES_PATTERN_1.finditer(source):
- # Imported files are relative to the file importing them.
- add_path(os.path.join(os.path.dirname(file), match.group(1)))
- for match in MODULE_RESOURCES_PATTERN_2.finditer(source):
- # Imported files are relative to the file importing them.
- add_path(os.path.join(os.path.dirname(file), match.group(1)))
+ # Imported files are relative to the file importing them.
+ for match in MODULE_FROM_RESOURCES_PATTERN.finditer(source):
+ add_import_path(match.group(1))
+ for match in MODULE_IMPORT_RESOURCES_PATTERN.finditer(source):
+ add_import_path(match.group(1))
return result
def _get_resources(self):
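
For illustration, the new detection rules translate roughly into JavaScript regexes as follows (the patch itself is Python, and the names here are hypothetical; the `s` flag plays the role of re.DOTALL):

    // Rough JavaScript re-creation of the new module-resource detection,
    // for illustration only.
    const MODULE_FROM_RE = /(?:import|export).*?from\s*\(?['"]([^'"]+)['"]/gs;
    const MODULE_IMPORT_RE = /import\s*\(?['"]([^'"]+)['"]/g;
    const FRONTMATTER_RE = /\/\*---.*?---\*\//s;

    function moduleResources(source) {
      // Strip test262 frontmatter first so paths mentioned there are ignored.
      const stripped = source.replace(FRONTMATTER_RE, '');
      const paths = [];
      for (const re of [MODULE_FROM_RE, MODULE_IMPORT_RE]) {
        for (const match of stripped.matchAll(re)) paths.push(match[1]);
      }
      return paths;
    }

    // moduleResources(`import {f} from "lib/a.mjs"; import("b.mjs")`)
    // => ['lib/a.mjs', 'b.mjs']
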
diff --git a/deps/v8/tools/testrunner/outproc/base.py b/deps/v8/tools/testrunner/outproc/base.py
index 847b2242ff..9646b96c06 100644
--- a/deps/v8/tools/testrunner/outproc/base.py
+++ b/deps/v8/tools/testrunner/outproc/base.py
@@ -137,6 +137,9 @@ class ExpectedOutProc(OutProc):
self._regenerate_expected_files = regenerate_expected_files
def _is_failure_output(self, output):
+ if output.exit_code != 0:
+ return True
+
with open(self._expected_filename, 'r') as f:
expected_lines = f.readlines()
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index 634ef7c2f2..9ff943a5c2 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -15,7 +15,6 @@ import time
from . import base
from . import util
-from ..local import junit_output
def print_failure_header(test):
@@ -349,45 +348,6 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
print(("\r" + (" " * last_length) + "\r"), end='')
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self._requirement = base.DROP_PASS_STDOUT
-
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def _on_result_for(self, test, result):
- # TODO(majeski): Support for dummy/grouped results
- fail_text = ""
- output = result.output
- if result.has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % result.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=result.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def finished(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
-
-
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, framework_name):
super(JsonTestProgressIndicator, self).__init__()
diff --git a/deps/v8/tools/tickprocessor-driver.mjs b/deps/v8/tools/tickprocessor-driver.mjs
index a8cce2f708..e7020e388d 100644
--- a/deps/v8/tools/tickprocessor-driver.mjs
+++ b/deps/v8/tools/tickprocessor-driver.mjs
@@ -34,7 +34,7 @@ import {
// Tick Processor's code flow.
function processArguments(args) {
- var processor = new ArgumentsProcessor(args);
+ const processor = new ArgumentsProcessor(args);
if (processor.parse()) {
return processor.result();
} else {
@@ -48,25 +48,25 @@ function initSourceMapSupport() {
// Overwrite the load function to load scripts synchronously.
SourceMap.load = function(sourceMapURL) {
- var content = readFile(sourceMapURL);
- var sourceMapObject = (JSON.parse(content));
+ const content = readFile(sourceMapURL);
+ const sourceMapObject = (JSON.parse(content));
return new SourceMap(sourceMapURL, sourceMapObject);
};
}
-var entriesProviders = {
+const entriesProviders = {
'unix': UnixCppEntriesProvider,
'windows': WindowsCppEntriesProvider,
'mac': MacCppEntriesProvider
};
-var params = processArguments(arguments);
-var sourceMap = null;
+const params = processArguments(arguments);
+let sourceMap = null;
if (params.sourceMap) {
initSourceMapSupport();
sourceMap = SourceMap.load(params.sourceMap);
}
-var tickProcessor = new TickProcessor(
+const tickProcessor = new TickProcessor(
new (entriesProviders[params.platform])(params.nm, params.objdump, params.targetRootFS,
params.apkEmbeddedLibrary),
params.separateIc,
diff --git a/deps/v8/tools/tickprocessor.mjs b/deps/v8/tools/tickprocessor.mjs
index b5aff3b23f..5b746d943a 100644
--- a/deps/v8/tools/tickprocessor.mjs
+++ b/deps/v8/tools/tickprocessor.mjs
@@ -36,31 +36,30 @@ export function inherits(childCtor, parentCtor) {
};
-function V8Profile(separateIc, separateBytecodes, separateBuiltins,
- separateStubs) {
- Profile.call(this);
- var regexps = [];
- if (!separateIc) regexps.push(V8Profile.IC_RE);
- if (!separateBytecodes) regexps.push(V8Profile.BYTECODES_RE);
- if (!separateBuiltins) regexps.push(V8Profile.BUILTINS_RE);
- if (!separateStubs) regexps.push(V8Profile.STUBS_RE);
- if (regexps.length > 0) {
- this.skipThisFunction = function(name) {
- for (var i=0; i<regexps.length; i++) {
- if (regexps[i].test(name)) return true;
- }
- return false;
- };
+class V8Profile extends Profile {
+ static IC_RE =
+ /^(LoadGlobalIC: )|(Handler: )|(?:CallIC|LoadIC|StoreIC)|(?:Builtin: (?:Keyed)?(?:Load|Store)IC_)/;
+ static BYTECODES_RE = /^(BytecodeHandler: )/;
+ static BUILTINS_RE = /^(Builtin: )/;
+ static STUBS_RE = /^(Stub: )/;
+
+ constructor(separateIc, separateBytecodes, separateBuiltins, separateStubs) {
+ super();
+ const regexps = [];
+ if (!separateIc) regexps.push(V8Profile.IC_RE);
+ if (!separateBytecodes) regexps.push(V8Profile.BYTECODES_RE);
+ if (!separateBuiltins) regexps.push(V8Profile.BUILTINS_RE);
+ if (!separateStubs) regexps.push(V8Profile.STUBS_RE);
+ if (regexps.length > 0) {
+ this.skipThisFunction = function(name) {
+ for (let i=0; i<regexps.length; i++) {
+ if (regexps[i].test(name)) return true;
+ }
+ return false;
+ };
+ }
}
-};
-inherits(V8Profile, Profile);
-
-
-V8Profile.IC_RE =
- /^(LoadGlobalIC: )|(Handler: )|(?:CallIC|LoadIC|StoreIC)|(?:Builtin: (?:Keyed)?(?:Load|Store)IC_)/;
-V8Profile.BYTECODES_RE = /^(BytecodeHandler: )/
-V8Profile.BUILTINS_RE = /^(Builtin: )/
-V8Profile.STUBS_RE = /^(Stub: )/
+}
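
The hunk above is a mechanical modernization: the constructor-function-plus-inherits() pattern, with class-level constants attached after the fact, becomes an ES class whose static fields hold the same regexps. In miniature, assuming a Profile base class and the inherits() helper defined earlier in this file:

    // Before: a constructor function chained to Profile via inherits(),
    // with class-level constants attached afterwards.
    function OldProfile() {
      Profile.call(this);
    }
    inherits(OldProfile, Profile);
    OldProfile.IC_RE = /^(LoadGlobalIC: )/;

    // After: an ES class; static fields hold the same constants and
    // super() replaces the explicit Profile.call(this).
    class NewProfile extends Profile {
      static IC_RE = /^(LoadGlobalIC: )/;
      constructor() {
        super();
      }
    }
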
/**
@@ -85,7 +84,7 @@ function parseState(s) {
case "~": return Profile.CodeState.OPTIMIZABLE;
case "*": return Profile.CodeState.OPTIMIZED;
}
- throw new Error("unknown code state: " + s);
+ throw new Error(`unknown code state: ${s}`);
}
@@ -166,29 +165,29 @@ export function TickProcessor(
this.stateFilter_ = stateFilter;
this.runtimeTimerFilter_ = runtimeTimerFilter;
this.sourceMap = sourceMap;
- var ticks = this.ticks_ =
+ const ticks = this.ticks_ =
{ total: 0, unaccounted: 0, excluded: 0, gc: 0 };
distortion = parseInt(distortion);
// Convert picoseconds to nanoseconds.
this.distortion_per_entry = isNaN(distortion) ? 0 : (distortion / 1000);
this.distortion = 0;
- var rangelimits = range ? range.split(",") : [];
- var range_start = parseInt(rangelimits[0]);
- var range_end = parseInt(rangelimits[1]);
+ const rangelimits = range ? range.split(",") : [];
+ const range_start = parseInt(rangelimits[0]);
+ const range_end = parseInt(rangelimits[1]);
// Convert milliseconds to nanoseconds.
this.range_start = isNaN(range_start) ? -Infinity : (range_start * 1000);
this.range_end = isNaN(range_end) ? Infinity : (range_end * 1000)
V8Profile.prototype.handleUnknownCode = function(
operation, addr, opt_stackPos) {
- var op = Profile.Operation;
+ const op = Profile.Operation;
switch (operation) {
case op.MOVE:
- printErr('Code move event for unknown code: 0x' + addr.toString(16));
+ printErr(`Code move event for unknown code: 0x${addr.toString(16)}`);
break;
case op.DELETE:
- printErr('Code delete event for unknown code: 0x' + addr.toString(16));
+ printErr(`Code delete event for unknown code: 0x${addr.toString(16)}`);
break;
case op.TICK:
// Only unknown PCs (the first frame) are reported as unaccounted,
@@ -273,7 +272,7 @@ TickProcessor.prototype.isJsCode = function(name) {
TickProcessor.prototype.processLogFile = function(fileName) {
this.lastLogFileName_ = fileName;
- var line;
+ let line;
while (line = readline()) {
this.processLogLine(line);
}
@@ -283,18 +282,18 @@ TickProcessor.prototype.processLogFile = function(fileName) {
TickProcessor.prototype.processLogFileInTest = function(fileName) {
// Hack file name to avoid dealing with platform specifics.
this.lastLogFileName_ = 'v8.log';
- var contents = readFile(fileName);
+ const contents = readFile(fileName);
this.processLogChunk(contents);
};
TickProcessor.prototype.processSharedLibrary = function(
name, startAddr, endAddr, aslrSlide) {
- var entry = this.profile_.addLibrary(name, startAddr, endAddr, aslrSlide);
+ const entry = this.profile_.addLibrary(name, startAddr, endAddr, aslrSlide);
this.setCodeType(entry.getName(), 'SHARED_LIB');
- var self = this;
- var libFuncs = this.cppEntriesProvider_.parseVmSymbols(
+ const self = this;
+ const libFuncs = this.cppEntriesProvider_.parseVmSymbols(
name, startAddr, endAddr, aslrSlide, function(fName, fStart, fEnd) {
self.profile_.addStaticCode(fName, fStart, fEnd);
self.setCodeType(fName, 'CPP');
@@ -305,8 +304,8 @@ TickProcessor.prototype.processSharedLibrary = function(
TickProcessor.prototype.processCodeCreation = function(
type, kind, timestamp, start, size, name, maybe_func) {
if (maybe_func.length) {
- var funcAddr = parseInt(maybe_func[0]);
- var state = parseState(maybe_func[1]);
+ const funcAddr = parseInt(maybe_func[0]);
+ const state = parseState(maybe_func[1]);
this.profile_.addFuncCode(type, name, timestamp, start, size, funcAddr, state);
} else {
this.profile_.addCode(type, name, timestamp, start, size);
@@ -386,7 +385,7 @@ TickProcessor.prototype.processTick = function(pc,
} else if (tos_or_external_callback) {
// Find out, if top of stack was pointing inside a JS function
// meaning that we have encountered a frameless invocation.
- var funcEntry = this.profile_.findEntry(tos_or_external_callback);
+ const funcEntry = this.profile_.findEntry(tos_or_external_callback);
if (!funcEntry || !funcEntry.isJSFunction || !funcEntry.isJSFunction()) {
tos_or_external_callback = 0;
}
@@ -412,14 +411,14 @@ TickProcessor.prototype.processHeapSampleBegin = function(space, state, ticks) {
TickProcessor.prototype.processHeapSampleEnd = function(space, state) {
if (space != 'Heap' || !this.currentProducerProfile_) return;
- print('Generation ' + this.generation_ + ':');
- var tree = this.currentProducerProfile_;
+ print(`Generation ${this.generation_}:`);
+ const tree = this.currentProducerProfile_;
tree.computeTotalWeights();
- var producersView = this.viewBuilder_.buildView(tree);
+ const producersView = this.viewBuilder_.buildView(tree);
// Sort by total time, desc, then by name, desc.
- producersView.sort(function(rec1, rec2) {
- return rec2.totalTime - rec1.totalTime ||
- (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
+ producersView.sort((rec1, rec2) =>
+ rec2.totalTime - rec1.totalTime ||
+ (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1) );
this.printHeavyProfile(producersView.head.children);
this.currentProducerProfile_ = null;
@@ -433,46 +432,46 @@ TickProcessor.prototype.printStatistics = function() {
return;
}
- print('Statistical profiling result from ' + this.lastLogFileName_ +
+ print(`Statistical profiling result from ${this.lastLogFileName_}` +
', (' + this.ticks_.total +
' ticks, ' + this.ticks_.unaccounted + ' unaccounted, ' +
this.ticks_.excluded + ' excluded).');
if (this.ticks_.total == 0) return;
- var flatProfile = this.profile_.getFlatProfile();
- var flatView = this.viewBuilder_.buildView(flatProfile);
+ const flatProfile = this.profile_.getFlatProfile();
+ const flatView = this.viewBuilder_.buildView(flatProfile);
// Sort by self time, desc, then by name, desc.
- flatView.sort(function(rec1, rec2) {
- return rec2.selfTime - rec1.selfTime ||
- (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
- var totalTicks = this.ticks_.total;
+ flatView.sort((rec1, rec2) =>
+ rec2.selfTime - rec1.selfTime ||
+ (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1) );
+ let totalTicks = this.ticks_.total;
if (this.ignoreUnknown_) {
totalTicks -= this.ticks_.unaccounted;
}
- var printAllTicks = !this.onlySummary_;
+ const printAllTicks = !this.onlySummary_;
// Count library ticks
- var flatViewNodes = flatView.head.children;
- var self = this;
+ const flatViewNodes = flatView.head.children;
+ const self = this;
- var libraryTicks = 0;
+ let libraryTicks = 0;
if(printAllTicks) this.printHeader('Shared libraries');
this.printEntries(flatViewNodes, totalTicks, null,
- function(name) { return self.isSharedLibrary(name); },
+ name => self.isSharedLibrary(name),
function(rec) { libraryTicks += rec.selfTime; }, printAllTicks);
- var nonLibraryTicks = totalTicks - libraryTicks;
+ const nonLibraryTicks = totalTicks - libraryTicks;
- var jsTicks = 0;
+ let jsTicks = 0;
if(printAllTicks) this.printHeader('JavaScript');
this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
- function(name) { return self.isJsCode(name); },
+ name => self.isJsCode(name),
function(rec) { jsTicks += rec.selfTime; }, printAllTicks);
- var cppTicks = 0;
+ let cppTicks = 0;
if(printAllTicks) this.printHeader('C++');
this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
- function(name) { return self.isCppCode(name); },
+ name => self.isCppCode(name),
function(rec) { cppTicks += rec.selfTime; }, printAllTicks);
this.printHeader('Summary');
@@ -488,22 +487,22 @@ TickProcessor.prototype.printStatistics = function() {
if(printAllTicks) {
print('\n [C++ entry points]:');
print(' ticks cpp total name');
- var c_entry_functions = this.profile_.getCEntryProfile();
- var total_c_entry = c_entry_functions[0].ticks;
- for (var i = 1; i < c_entry_functions.length; i++) {
+ const c_entry_functions = this.profile_.getCEntryProfile();
+ const total_c_entry = c_entry_functions[0].ticks;
+ for (let i = 1; i < c_entry_functions.length; i++) {
const c = c_entry_functions[i];
this.printLine(c.name, c.ticks, total_c_entry, totalTicks);
}
this.printHeavyProfHeader();
- var heavyProfile = this.profile_.getBottomUpProfile();
- var heavyView = this.viewBuilder_.buildView(heavyProfile);
+ const heavyProfile = this.profile_.getBottomUpProfile();
+ const heavyView = this.viewBuilder_.buildView(heavyProfile);
// To show the same percentages as in the flat profile.
heavyView.head.totalTime = totalTicks;
// Sort by total time, desc, then by name, desc.
- heavyView.sort(function(rec1, rec2) {
- return rec2.totalTime - rec1.totalTime ||
- (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
+ heavyView.sort((rec1, rec2) =>
+ rec2.totalTime - rec1.totalTime ||
+ (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1) );
this.printHeavyProfile(heavyView.head.children);
}
};
@@ -512,7 +511,7 @@ TickProcessor.prototype.printStatistics = function() {
function padLeft(s, len) {
s = s.toString();
if (s.length < len) {
- var padLength = len - s.length;
+ const padLength = len - s.length;
if (!(padLength in padLeft)) {
padLeft[padLength] = new Array(padLength + 1).join(' ');
}
@@ -523,18 +522,18 @@ function padLeft(s, len) {
TickProcessor.prototype.printHeader = function(headerTitle) {
- print('\n [' + headerTitle + ']:');
+ print(`\n [${headerTitle}]:`);
print(' ticks total nonlib name');
};
TickProcessor.prototype.printLine = function(
entry, ticks, totalTicks, nonLibTicks) {
- var pct = ticks * 100 / totalTicks;
- var nonLibPct = nonLibTicks != null
+ const pct = ticks * 100 / totalTicks;
+ const nonLibPct = nonLibTicks != null
? padLeft((ticks * 100 / nonLibTicks).toFixed(1), 5) + '% '
: ' ';
- print(' ' + padLeft(ticks, 5) + ' ' +
+ print(` ${padLeft(ticks, 5)} ` +
padLeft(pct.toFixed(1), 5) + '% ' +
nonLibPct +
entry);
@@ -554,8 +553,8 @@ TickProcessor.prototype.printHeavyProfHeader = function() {
TickProcessor.prototype.processProfile = function(
profile, filterP, func) {
- for (var i = 0, n = profile.length; i < n; ++i) {
- var rec = profile[i];
+ for (let i = 0, n = profile.length; i < n; ++i) {
+ const rec = profile[i];
if (!filterP(rec.internalFuncName)) {
continue;
}
@@ -564,8 +563,8 @@ TickProcessor.prototype.processProfile = function(
};
TickProcessor.prototype.getLineAndColumn = function(name) {
- var re = /:([0-9]+):([0-9]+)$/;
- var array = re.exec(name);
+ const re = /:([0-9]+):([0-9]+)$/;
+ const array = re.exec(name);
if (!array) {
return null;
}
@@ -581,28 +580,28 @@ TickProcessor.prototype.formatFunctionName = function(funcName) {
if (!this.hasSourceMap()) {
return funcName;
}
- var lc = this.getLineAndColumn(funcName);
+ const lc = this.getLineAndColumn(funcName);
if (lc == null) {
return funcName;
}
// in source maps lines and columns are zero based
- var lineNumber = lc.line - 1;
- var column = lc.column - 1;
- var entry = this.sourceMap.findEntry(lineNumber, column);
- var sourceFile = entry[2];
- var sourceLine = entry[3] + 1;
- var sourceColumn = entry[4] + 1;
+ const lineNumber = lc.line - 1;
+ const column = lc.column - 1;
+ const entry = this.sourceMap.findEntry(lineNumber, column);
+ const sourceFile = entry[2];
+ const sourceLine = entry[3] + 1;
+ const sourceColumn = entry[4] + 1;
return sourceFile + ':' + sourceLine + ':' + sourceColumn + ' -> ' + funcName;
};
TickProcessor.prototype.printEntries = function(
profile, totalTicks, nonLibTicks, filterP, callback, printAllTicks) {
- var that = this;
+ const that = this;
this.processProfile(profile, filterP, function (rec) {
if (rec.selfTime == 0) return;
callback(rec);
- var funcName = that.formatFunctionName(rec.internalFuncName);
+ const funcName = that.formatFunctionName(rec.internalFuncName);
if(printAllTicks) {
that.printLine(funcName, rec.selfTime, totalTicks, nonLibTicks);
}
@@ -611,14 +610,14 @@ TickProcessor.prototype.printEntries = function(
TickProcessor.prototype.printHeavyProfile = function(profile, opt_indent) {
- var self = this;
- var indent = opt_indent || 0;
- var indentStr = padLeft('', indent);
- this.processProfile(profile, function() { return true; }, function (rec) {
+ const self = this;
+ const indent = opt_indent || 0;
+ const indentStr = padLeft('', indent);
+ this.processProfile(profile, () => true, function (rec) {
// Cut off too infrequent callers.
if (rec.parentTotalPercent < TickProcessor.CALL_PROFILE_CUTOFF_PCT) return;
- var funcName = self.formatFunctionName(rec.internalFuncName);
- print(' ' + padLeft(rec.totalTime, 5) + ' ' +
+ const funcName = self.formatFunctionName(rec.internalFuncName);
+ print(` ${padLeft(rec.totalTime, 5)} ` +
padLeft(rec.parentTotalPercent.toFixed(1), 5) + '% ' +
indentStr + funcName);
// Limit backtrace depth.
@@ -641,8 +640,8 @@ CppEntriesProvider.prototype.parseVmSymbols = function(
libName, libStart, libEnd, libASLRSlide, processorFunc) {
this.loadSymbols(libName);
- var lastUnknownSize;
- var lastAdded;
+ let lastUnknownSize;
+ let lastAdded;
function inRange(funcInfo, start, end) {
return funcInfo.start >= start && funcInfo.end <= end;
@@ -682,7 +681,7 @@ CppEntriesProvider.prototype.parseVmSymbols = function(
}
while (true) {
- var funcInfo = this.parseNextLine();
+ const funcInfo = this.parseNextLine();
if (funcInfo === null) {
continue;
} else if (funcInfo === false) {
@@ -707,9 +706,7 @@ CppEntriesProvider.prototype.loadSymbols = function(libName) {
};
-CppEntriesProvider.prototype.parseNextLine = function() {
- return false;
-};
+CppEntriesProvider.prototype.parseNextLine = () => false;
export function UnixCppEntriesProvider(nmExec, objdumpExec, targetRootFS, apkEmbeddedLibrary) {
@@ -760,17 +757,17 @@ UnixCppEntriesProvider.prototype.parseNextLine = function() {
if (this.symbols.length == 0) {
return false;
}
- var lineEndPos = this.symbols[0].indexOf('\n', this.parsePos);
+ const lineEndPos = this.symbols[0].indexOf('\n', this.parsePos);
if (lineEndPos == -1) {
this.symbols.shift();
this.parsePos = 0;
return this.parseNextLine();
}
- var line = this.symbols[0].substring(this.parsePos, lineEndPos);
+ const line = this.symbols[0].substring(this.parsePos, lineEndPos);
this.parsePos = lineEndPos + 1;
- var fields = line.match(this.FUNC_RE);
- var funcInfo = null;
+ const fields = line.match(this.FUNC_RE);
+ let funcInfo = null;
if (fields) {
funcInfo = { name: fields[3], start: parseInt(fields[1], 16) + this.fileOffsetMinusVma };
if (fields[2]) {
@@ -830,9 +827,9 @@ WindowsCppEntriesProvider.EXE_IMAGE_BASE = 0x00400000;
WindowsCppEntriesProvider.prototype.loadSymbols = function(libName) {
libName = this.targetRootFS + libName;
- var fileNameFields = libName.match(WindowsCppEntriesProvider.FILENAME_RE);
+ const fileNameFields = libName.match(WindowsCppEntriesProvider.FILENAME_RE);
if (!fileNameFields) return;
- var mapFileName = fileNameFields[1] + '.map';
+ const mapFileName = fileNameFields[1] + '.map';
this.moduleType_ = fileNameFields[2].toLowerCase();
try {
this.symbols = read(mapFileName);
@@ -844,26 +841,26 @@ WindowsCppEntriesProvider.prototype.loadSymbols = function(libName) {
WindowsCppEntriesProvider.prototype.parseNextLine = function() {
- var lineEndPos = this.symbols.indexOf('\r\n', this.parsePos);
+ const lineEndPos = this.symbols.indexOf('\r\n', this.parsePos);
if (lineEndPos == -1) {
return false;
}
- var line = this.symbols.substring(this.parsePos, lineEndPos);
+ const line = this.symbols.substring(this.parsePos, lineEndPos);
this.parsePos = lineEndPos + 2;
// Image base entry is above all other symbols, so we can just
// terminate parsing.
- var imageBaseFields = line.match(WindowsCppEntriesProvider.IMAGE_BASE_RE);
+ const imageBaseFields = line.match(WindowsCppEntriesProvider.IMAGE_BASE_RE);
if (imageBaseFields) {
- var imageBase = parseInt(imageBaseFields[1], 16);
+ const imageBase = parseInt(imageBaseFields[1], 16);
if ((this.moduleType_ == 'exe') !=
(imageBase == WindowsCppEntriesProvider.EXE_IMAGE_BASE)) {
return false;
}
}
- var fields = line.match(WindowsCppEntriesProvider.FUNC_RE);
+ const fields = line.match(WindowsCppEntriesProvider.FUNC_RE);
return fields ?
{ name: this.unmangleName(fields[1]), start: parseInt(fields[2], 16) } :
null;
@@ -881,8 +878,8 @@ WindowsCppEntriesProvider.prototype.parseNextLine = function() {
WindowsCppEntriesProvider.prototype.unmangleName = function(name) {
// Empty or non-mangled name.
if (name.length < 1 || name.charAt(0) != '?') return name;
- var nameEndPos = name.indexOf('@@');
- var components = name.substring(1, nameEndPos).split('@');
+ const nameEndPos = name.indexOf('@@');
+ const components = name.substring(1, nameEndPos).split('@');
components.reverse();
return components.join('::');
};
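unmangleName is deliberately minimal: it recovers only the namespace-qualified name from an MSVC-decorated symbol by taking everything between the leading '?' and the first '@@', splitting on '@', and reversing the components. For example (the symbol is made up; argument types and calling convention are simply discarded):

const name = '?Frobnicate@Widget@v8@@QAEXXZ'; // hypothetical MSVC-mangled symbol
const components = name.substring(1, name.indexOf('@@')).split('@');
print(components.reverse().join('::')); // v8::Widget::Frobnicate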
diff --git a/deps/v8/tools/v8_presubmit.py b/deps/v8/tools/v8_presubmit.py
index 6fbc3ad2ed..db008aabf1 100755
--- a/deps/v8/tools/v8_presubmit.py
+++ b/deps/v8/tools/v8_presubmit.py
@@ -131,6 +131,39 @@ def TorqueLintWorker(command):
print('Error running format-torque.py')
process.kill()
+def JSLintWorker(command):
+ def format_file(command):
+ process = None
+ try:
+ file_name = command[-1]
+ with open(file_name, "r") as file_handle:
+ contents = file_handle.read()
+
+ process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ output, err = process.communicate()
+ rc = process.returncode
+ if rc != 0:
+ sys.stdout.write("error code " + str(rc) + " running clang-format.\n")
+ return rc
+
+ if output != contents:
+ return 1
+
+ return 0
+ except KeyboardInterrupt:
+ if process:
+ process.kill()
+ except Exception:
+ print('Error running clang-format. Please make sure you have depot_tools' +
+ ' in your $PATH. Lint check skipped.')
+ if process:
+ process.kill()
+
+ rc = format_file(command)
+ if rc == 1:
+ # The file is not properly formatted; format it in place.
+ file_name = command[-1]
+ sys.stdout.write("Formatting %s.\n" % (file_name))
+ rc = format_file(command[:-1] + ["-i", file_name])
+ return rc
+
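JSLintWorker runs the formatter twice: a dry run whose output is compared against the file on disk, then, only if they differ, an in-place run with -i. The same check-then-fix flow in a Node script would look roughly like this (a sketch; it shells out to a clang-format binary directly rather than the depot_tools clang_format.py wrapper used above):

// Sketch: dry-run clang-format, then rewrite in place only when needed.
import { execFileSync } from 'child_process';
import { readFileSync } from 'fs';

function lintAndFormat(file) {
  const contents = readFileSync(file, 'utf8');
  const formatted = execFileSync('clang-format', [file], { encoding: 'utf8' });
  if (formatted === contents) return 0; // already clean
  execFileSync('clang-format', ['-i', file]); // fix it in place
  return 1; // signal that a reformat was required
}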
class FileContentsCache(object):
def __init__(self, sums_file_name):
@@ -392,6 +425,33 @@ class TorqueLintProcessor(CacheableSourceFileProcessor):
return None, arguments
+class JSLintProcessor(CacheableSourceFileProcessor):
+ """
+ Check .{m}js files to verify they follow the JS style guide.
+ """
+ def __init__(self, use_cache=True):
+ super(JSLintProcessor, self).__init__(
+ use_cache=use_cache, cache_file_path='.jslint-cache',
+ file_type='JavaScript')
+
+ def IsRelevant(self, name):
+ return name.endswith('.js') or name.endswith('.mjs')
+
+ def GetPathsToSearch(self):
+ return ['tools/system-analyzer']
+
+ def GetProcessorWorker(self):
+ return JSLintWorker
+
+ def GetProcessorScript(self):
+ for path in [TOOLS_PATH] + os.environ["PATH"].split(os.pathsep):
+ path = path.strip('"')
+ clang_format = os.path.join(path, 'clang_format.py')
+ if os.path.isfile(clang_format):
+ return clang_format, []
+
+ return None, []
+
COPYRIGHT_HEADER_PATTERN = re.compile(
r'Copyright [\d-]*20[0-2][0-9] the V8 project authors. All rights reserved.')
@@ -708,6 +768,9 @@ def Main():
print("Running Torque formatting check...")
success &= TorqueLintProcessor(use_cache=use_linter_cache).RunOnPath(
workspace)
+ print("Running JavaScript formatting check...")
+ success &= JSLintProcessor(use_cache=use_linter_cache).RunOnPath(
+ workspace)
print("Running copyright header, trailing whitespaces and " \
"two empty lines between declarations check...")
success &= SourceProcessor().RunOnPath(workspace)
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index d8e81c4909..0dd31d4ad2 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -62,17 +62,17 @@ INSTANCE_TYPES = {
98: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
99: "INTERCEPTOR_INFO_TYPE",
100: "INTERPRETER_DATA_TYPE",
- 101: "PROMISE_CAPABILITY_TYPE",
- 102: "PROMISE_REACTION_TYPE",
- 103: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
- 104: "PROTOTYPE_INFO_TYPE",
- 105: "SCRIPT_TYPE",
- 106: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
- 107: "STACK_FRAME_INFO_TYPE",
- 108: "STACK_TRACE_FRAME_TYPE",
- 109: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
- 110: "TUPLE2_TYPE",
- 111: "WASM_CAPI_FUNCTION_DATA_TYPE",
+ 101: "MODULE_REQUEST_TYPE",
+ 102: "PROMISE_CAPABILITY_TYPE",
+ 103: "PROMISE_REACTION_TYPE",
+ 104: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
+ 105: "PROTOTYPE_INFO_TYPE",
+ 106: "SCRIPT_TYPE",
+ 107: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
+ 108: "STACK_FRAME_INFO_TYPE",
+ 109: "STACK_TRACE_FRAME_TYPE",
+ 110: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
+ 111: "TUPLE2_TYPE",
112: "WASM_EXCEPTION_TAG_TYPE",
113: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
114: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
@@ -113,42 +113,44 @@ INSTANCE_TYPES = {
149: "SMALL_ORDERED_HASH_MAP_TYPE",
150: "SMALL_ORDERED_HASH_SET_TYPE",
151: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
- 152: "SOURCE_TEXT_MODULE_TYPE",
- 153: "SYNTHETIC_MODULE_TYPE",
- 154: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
- 155: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
- 156: "WEAK_FIXED_ARRAY_TYPE",
- 157: "TRANSITION_ARRAY_TYPE",
- 158: "CELL_TYPE",
- 159: "CODE_TYPE",
- 160: "CODE_DATA_CONTAINER_TYPE",
- 161: "COVERAGE_INFO_TYPE",
- 162: "DESCRIPTOR_ARRAY_TYPE",
- 163: "EMBEDDER_DATA_ARRAY_TYPE",
- 164: "FEEDBACK_METADATA_TYPE",
- 165: "FEEDBACK_VECTOR_TYPE",
- 166: "FILLER_TYPE",
- 167: "FREE_SPACE_TYPE",
- 168: "INTERNAL_CLASS_TYPE",
- 169: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
- 170: "MAP_TYPE",
- 171: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
- 172: "PREPARSE_DATA_TYPE",
- 173: "PROPERTY_ARRAY_TYPE",
- 174: "PROPERTY_CELL_TYPE",
- 175: "SHARED_FUNCTION_INFO_TYPE",
- 176: "SMI_BOX_TYPE",
- 177: "SMI_PAIR_TYPE",
- 178: "SORT_STATE_TYPE",
- 179: "WASM_ARRAY_TYPE",
- 180: "WASM_STRUCT_TYPE",
- 181: "WEAK_ARRAY_LIST_TYPE",
- 182: "WEAK_CELL_TYPE",
- 183: "JS_PROXY_TYPE",
+ 152: "DESCRIPTOR_ARRAY_TYPE",
+ 153: "STRONG_DESCRIPTOR_ARRAY_TYPE",
+ 154: "SOURCE_TEXT_MODULE_TYPE",
+ 155: "SYNTHETIC_MODULE_TYPE",
+ 156: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+ 157: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+ 158: "WEAK_FIXED_ARRAY_TYPE",
+ 159: "TRANSITION_ARRAY_TYPE",
+ 160: "CELL_TYPE",
+ 161: "CODE_TYPE",
+ 162: "CODE_DATA_CONTAINER_TYPE",
+ 163: "COVERAGE_INFO_TYPE",
+ 164: "EMBEDDER_DATA_ARRAY_TYPE",
+ 165: "FEEDBACK_METADATA_TYPE",
+ 166: "FEEDBACK_VECTOR_TYPE",
+ 167: "FILLER_TYPE",
+ 168: "FREE_SPACE_TYPE",
+ 169: "INTERNAL_CLASS_TYPE",
+ 170: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
+ 171: "MAP_TYPE",
+ 172: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
+ 173: "PREPARSE_DATA_TYPE",
+ 174: "PROPERTY_ARRAY_TYPE",
+ 175: "PROPERTY_CELL_TYPE",
+ 176: "SHARED_FUNCTION_INFO_TYPE",
+ 177: "SMI_BOX_TYPE",
+ 178: "SMI_PAIR_TYPE",
+ 179: "SORT_STATE_TYPE",
+ 180: "WASM_ARRAY_TYPE",
+ 181: "WASM_CAPI_FUNCTION_DATA_TYPE",
+ 182: "WASM_STRUCT_TYPE",
+ 183: "WEAK_ARRAY_LIST_TYPE",
+ 184: "WEAK_CELL_TYPE",
+ 185: "JS_PROXY_TYPE",
1057: "JS_OBJECT_TYPE",
- 184: "JS_GLOBAL_OBJECT_TYPE",
- 185: "JS_GLOBAL_PROXY_TYPE",
- 186: "JS_MODULE_NAMESPACE_TYPE",
+ 186: "JS_GLOBAL_OBJECT_TYPE",
+ 187: "JS_GLOBAL_PROXY_TYPE",
+ 188: "JS_MODULE_NAMESPACE_TYPE",
1040: "JS_SPECIAL_API_OBJECT_TYPE",
1041: "JS_PRIMITIVE_WRAPPER_TYPE",
1042: "JS_MAP_KEY_ITERATOR_TYPE",
@@ -205,16 +207,16 @@ INSTANCE_TYPES = {
# List of known V8 maps.
KNOWN_MAPS = {
- ("read_only_space", 0x02115): (170, "MetaMap"),
+ ("read_only_space", 0x02115): (171, "MetaMap"),
("read_only_space", 0x0213d): (67, "NullMap"),
- ("read_only_space", 0x02165): (162, "DescriptorArrayMap"),
- ("read_only_space", 0x0218d): (156, "WeakFixedArrayMap"),
+ ("read_only_space", 0x02165): (153, "StrongDescriptorArrayMap"),
+ ("read_only_space", 0x0218d): (158, "WeakFixedArrayMap"),
("read_only_space", 0x021cd): (96, "EnumCacheMap"),
("read_only_space", 0x02201): (117, "FixedArrayMap"),
("read_only_space", 0x0224d): (8, "OneByteInternalizedStringMap"),
- ("read_only_space", 0x02299): (167, "FreeSpaceMap"),
- ("read_only_space", 0x022c1): (166, "OnePointerFillerMap"),
- ("read_only_space", 0x022e9): (166, "TwoPointerFillerMap"),
+ ("read_only_space", 0x02299): (168, "FreeSpaceMap"),
+ ("read_only_space", 0x022c1): (167, "OnePointerFillerMap"),
+ ("read_only_space", 0x022e9): (167, "TwoPointerFillerMap"),
("read_only_space", 0x02311): (67, "UninitializedMap"),
("read_only_space", 0x02389): (67, "UndefinedMap"),
("read_only_space", 0x023cd): (66, "HeapNumberMap"),
@@ -226,14 +228,14 @@ KNOWN_MAPS = {
("read_only_space", 0x0257d): (64, "SymbolMap"),
("read_only_space", 0x025a5): (40, "OneByteStringMap"),
("read_only_space", 0x025cd): (129, "ScopeInfoMap"),
- ("read_only_space", 0x025f5): (175, "SharedFunctionInfoMap"),
- ("read_only_space", 0x0261d): (159, "CodeMap"),
- ("read_only_space", 0x02645): (158, "CellMap"),
- ("read_only_space", 0x0266d): (174, "GlobalPropertyCellMap"),
+ ("read_only_space", 0x025f5): (176, "SharedFunctionInfoMap"),
+ ("read_only_space", 0x0261d): (161, "CodeMap"),
+ ("read_only_space", 0x02645): (160, "CellMap"),
+ ("read_only_space", 0x0266d): (175, "GlobalPropertyCellMap"),
("read_only_space", 0x02695): (70, "ForeignMap"),
- ("read_only_space", 0x026bd): (157, "TransitionArrayMap"),
+ ("read_only_space", 0x026bd): (159, "TransitionArrayMap"),
("read_only_space", 0x026e5): (45, "ThinOneByteStringMap"),
- ("read_only_space", 0x0270d): (165, "FeedbackVectorMap"),
+ ("read_only_space", 0x0270d): (166, "FeedbackVectorMap"),
("read_only_space", 0x0273d): (67, "ArgumentsMarkerMap"),
("read_only_space", 0x0279d): (67, "ExceptionMap"),
("read_only_space", 0x027f9): (67, "TerminationExceptionMap"),
@@ -241,13 +243,13 @@ KNOWN_MAPS = {
("read_only_space", 0x028c1): (67, "StaleRegisterMap"),
("read_only_space", 0x02921): (130, "ScriptContextTableMap"),
("read_only_space", 0x02949): (127, "ClosureFeedbackCellArrayMap"),
- ("read_only_space", 0x02971): (164, "FeedbackMetadataArrayMap"),
+ ("read_only_space", 0x02971): (165, "FeedbackMetadataArrayMap"),
("read_only_space", 0x02999): (117, "ArrayListMap"),
("read_only_space", 0x029c1): (65, "BigIntMap"),
("read_only_space", 0x029e9): (128, "ObjectBoilerplateDescriptionMap"),
("read_only_space", 0x02a11): (132, "BytecodeArrayMap"),
- ("read_only_space", 0x02a39): (160, "CodeDataContainerMap"),
- ("read_only_space", 0x02a61): (161, "CoverageInfoMap"),
+ ("read_only_space", 0x02a39): (162, "CodeDataContainerMap"),
+ ("read_only_space", 0x02a61): (163, "CoverageInfoMap"),
("read_only_space", 0x02a89): (133, "FixedDoubleArrayMap"),
("read_only_space", 0x02ab1): (120, "GlobalDictionaryMap"),
("read_only_space", 0x02ad9): (97, "ManyClosuresCellMap"),
@@ -259,8 +261,8 @@ KNOWN_MAPS = {
("read_only_space", 0x02bc9): (123, "OrderedHashMapMap"),
("read_only_space", 0x02bf1): (124, "OrderedHashSetMap"),
("read_only_space", 0x02c19): (125, "OrderedNameDictionaryMap"),
- ("read_only_space", 0x02c41): (172, "PreparseDataMap"),
- ("read_only_space", 0x02c69): (173, "PropertyArrayMap"),
+ ("read_only_space", 0x02c41): (173, "PreparseDataMap"),
+ ("read_only_space", 0x02c69): (174, "PropertyArrayMap"),
("read_only_space", 0x02c91): (93, "SideEffectCallHandlerInfoMap"),
("read_only_space", 0x02cb9): (93, "SideEffectFreeCallHandlerInfoMap"),
("read_only_space", 0x02ce1): (93, "NextCallSideEffectFreeCallHandlerInfoMap"),
@@ -268,15 +270,15 @@ KNOWN_MAPS = {
("read_only_space", 0x02d31): (149, "SmallOrderedHashMapMap"),
("read_only_space", 0x02d59): (150, "SmallOrderedHashSetMap"),
("read_only_space", 0x02d81): (151, "SmallOrderedNameDictionaryMap"),
- ("read_only_space", 0x02da9): (152, "SourceTextModuleMap"),
- ("read_only_space", 0x02dd1): (153, "SyntheticModuleMap"),
- ("read_only_space", 0x02df9): (155, "UncompiledDataWithoutPreparseDataMap"),
- ("read_only_space", 0x02e21): (154, "UncompiledDataWithPreparseDataMap"),
+ ("read_only_space", 0x02da9): (154, "SourceTextModuleMap"),
+ ("read_only_space", 0x02dd1): (155, "SyntheticModuleMap"),
+ ("read_only_space", 0x02df9): (157, "UncompiledDataWithoutPreparseDataMap"),
+ ("read_only_space", 0x02e21): (156, "UncompiledDataWithPreparseDataMap"),
("read_only_space", 0x02e49): (71, "WasmTypeInfoMap"),
- ("read_only_space", 0x02e71): (181, "WeakArrayListMap"),
+ ("read_only_space", 0x02e71): (183, "WeakArrayListMap"),
("read_only_space", 0x02e99): (119, "EphemeronHashTableMap"),
- ("read_only_space", 0x02ec1): (163, "EmbedderDataArrayMap"),
- ("read_only_space", 0x02ee9): (182, "WeakCellMap"),
+ ("read_only_space", 0x02ec1): (164, "EmbedderDataArrayMap"),
+ ("read_only_space", 0x02ee9): (184, "WeakCellMap"),
("read_only_space", 0x02f11): (32, "StringMap"),
("read_only_space", 0x02f39): (41, "ConsOneByteStringMap"),
("read_only_space", 0x02f61): (33, "ConsStringMap"),
@@ -295,72 +297,74 @@ KNOWN_MAPS = {
("read_only_space", 0x03169): (67, "SelfReferenceMarkerMap"),
("read_only_space", 0x03191): (67, "BasicBlockCountersMarkerMap"),
("read_only_space", 0x031d5): (87, "ArrayBoilerplateDescriptionMap"),
- ("read_only_space", 0x032a5): (99, "InterceptorInfoMap"),
- ("read_only_space", 0x05399): (72, "PromiseFulfillReactionJobTaskMap"),
- ("read_only_space", 0x053c1): (73, "PromiseRejectReactionJobTaskMap"),
- ("read_only_space", 0x053e9): (74, "CallableTaskMap"),
- ("read_only_space", 0x05411): (75, "CallbackTaskMap"),
- ("read_only_space", 0x05439): (76, "PromiseResolveThenableJobTaskMap"),
- ("read_only_space", 0x05461): (79, "FunctionTemplateInfoMap"),
- ("read_only_space", 0x05489): (80, "ObjectTemplateInfoMap"),
- ("read_only_space", 0x054b1): (81, "AccessCheckInfoMap"),
- ("read_only_space", 0x054d9): (82, "AccessorInfoMap"),
- ("read_only_space", 0x05501): (83, "AccessorPairMap"),
- ("read_only_space", 0x05529): (84, "AliasedArgumentsEntryMap"),
- ("read_only_space", 0x05551): (85, "AllocationMementoMap"),
- ("read_only_space", 0x05579): (88, "AsmWasmDataMap"),
- ("read_only_space", 0x055a1): (89, "AsyncGeneratorRequestMap"),
- ("read_only_space", 0x055c9): (90, "BreakPointMap"),
- ("read_only_space", 0x055f1): (91, "BreakPointInfoMap"),
- ("read_only_space", 0x05619): (92, "CachedTemplateObjectMap"),
- ("read_only_space", 0x05641): (94, "ClassPositionsMap"),
- ("read_only_space", 0x05669): (95, "DebugInfoMap"),
- ("read_only_space", 0x05691): (98, "FunctionTemplateRareDataMap"),
- ("read_only_space", 0x056b9): (100, "InterpreterDataMap"),
- ("read_only_space", 0x056e1): (101, "PromiseCapabilityMap"),
- ("read_only_space", 0x05709): (102, "PromiseReactionMap"),
- ("read_only_space", 0x05731): (103, "PropertyDescriptorObjectMap"),
- ("read_only_space", 0x05759): (104, "PrototypeInfoMap"),
- ("read_only_space", 0x05781): (105, "ScriptMap"),
- ("read_only_space", 0x057a9): (106, "SourceTextModuleInfoEntryMap"),
- ("read_only_space", 0x057d1): (107, "StackFrameInfoMap"),
- ("read_only_space", 0x057f9): (108, "StackTraceFrameMap"),
- ("read_only_space", 0x05821): (109, "TemplateObjectDescriptionMap"),
- ("read_only_space", 0x05849): (110, "Tuple2Map"),
- ("read_only_space", 0x05871): (111, "WasmCapiFunctionDataMap"),
- ("read_only_space", 0x05899): (112, "WasmExceptionTagMap"),
- ("read_only_space", 0x058c1): (113, "WasmExportedFunctionDataMap"),
- ("read_only_space", 0x058e9): (114, "WasmIndirectFunctionTableMap"),
- ("read_only_space", 0x05911): (115, "WasmJSFunctionDataMap"),
- ("read_only_space", 0x05939): (116, "WasmValueMap"),
- ("read_only_space", 0x05961): (135, "SloppyArgumentsElementsMap"),
- ("read_only_space", 0x05989): (171, "OnHeapBasicBlockProfilerDataMap"),
- ("read_only_space", 0x059b1): (168, "InternalClassMap"),
- ("read_only_space", 0x059d9): (177, "SmiPairMap"),
- ("read_only_space", 0x05a01): (176, "SmiBoxMap"),
- ("read_only_space", 0x05a29): (146, "ExportedSubClassBaseMap"),
- ("read_only_space", 0x05a51): (147, "ExportedSubClassMap"),
- ("read_only_space", 0x05a79): (68, "AbstractInternalClassSubclass1Map"),
- ("read_only_space", 0x05aa1): (69, "AbstractInternalClassSubclass2Map"),
- ("read_only_space", 0x05ac9): (134, "InternalClassWithSmiElementsMap"),
- ("read_only_space", 0x05af1): (169, "InternalClassWithStructElementsMap"),
- ("read_only_space", 0x05b19): (148, "ExportedSubClass2Map"),
- ("read_only_space", 0x05b41): (178, "SortStateMap"),
- ("read_only_space", 0x05b69): (86, "AllocationSiteWithWeakNextMap"),
- ("read_only_space", 0x05b91): (86, "AllocationSiteWithoutWeakNextMap"),
- ("read_only_space", 0x05bb9): (77, "LoadHandler1Map"),
- ("read_only_space", 0x05be1): (77, "LoadHandler2Map"),
- ("read_only_space", 0x05c09): (77, "LoadHandler3Map"),
- ("read_only_space", 0x05c31): (78, "StoreHandler0Map"),
- ("read_only_space", 0x05c59): (78, "StoreHandler1Map"),
- ("read_only_space", 0x05c81): (78, "StoreHandler2Map"),
- ("read_only_space", 0x05ca9): (78, "StoreHandler3Map"),
+ ("read_only_space", 0x032bd): (99, "InterceptorInfoMap"),
+ ("read_only_space", 0x053c9): (72, "PromiseFulfillReactionJobTaskMap"),
+ ("read_only_space", 0x053f1): (73, "PromiseRejectReactionJobTaskMap"),
+ ("read_only_space", 0x05419): (74, "CallableTaskMap"),
+ ("read_only_space", 0x05441): (75, "CallbackTaskMap"),
+ ("read_only_space", 0x05469): (76, "PromiseResolveThenableJobTaskMap"),
+ ("read_only_space", 0x05491): (79, "FunctionTemplateInfoMap"),
+ ("read_only_space", 0x054b9): (80, "ObjectTemplateInfoMap"),
+ ("read_only_space", 0x054e1): (81, "AccessCheckInfoMap"),
+ ("read_only_space", 0x05509): (82, "AccessorInfoMap"),
+ ("read_only_space", 0x05531): (83, "AccessorPairMap"),
+ ("read_only_space", 0x05559): (84, "AliasedArgumentsEntryMap"),
+ ("read_only_space", 0x05581): (85, "AllocationMementoMap"),
+ ("read_only_space", 0x055a9): (88, "AsmWasmDataMap"),
+ ("read_only_space", 0x055d1): (89, "AsyncGeneratorRequestMap"),
+ ("read_only_space", 0x055f9): (90, "BreakPointMap"),
+ ("read_only_space", 0x05621): (91, "BreakPointInfoMap"),
+ ("read_only_space", 0x05649): (92, "CachedTemplateObjectMap"),
+ ("read_only_space", 0x05671): (94, "ClassPositionsMap"),
+ ("read_only_space", 0x05699): (95, "DebugInfoMap"),
+ ("read_only_space", 0x056c1): (98, "FunctionTemplateRareDataMap"),
+ ("read_only_space", 0x056e9): (100, "InterpreterDataMap"),
+ ("read_only_space", 0x05711): (101, "ModuleRequestMap"),
+ ("read_only_space", 0x05739): (102, "PromiseCapabilityMap"),
+ ("read_only_space", 0x05761): (103, "PromiseReactionMap"),
+ ("read_only_space", 0x05789): (104, "PropertyDescriptorObjectMap"),
+ ("read_only_space", 0x057b1): (105, "PrototypeInfoMap"),
+ ("read_only_space", 0x057d9): (106, "ScriptMap"),
+ ("read_only_space", 0x05801): (107, "SourceTextModuleInfoEntryMap"),
+ ("read_only_space", 0x05829): (108, "StackFrameInfoMap"),
+ ("read_only_space", 0x05851): (109, "StackTraceFrameMap"),
+ ("read_only_space", 0x05879): (110, "TemplateObjectDescriptionMap"),
+ ("read_only_space", 0x058a1): (111, "Tuple2Map"),
+ ("read_only_space", 0x058c9): (112, "WasmExceptionTagMap"),
+ ("read_only_space", 0x058f1): (113, "WasmExportedFunctionDataMap"),
+ ("read_only_space", 0x05919): (114, "WasmIndirectFunctionTableMap"),
+ ("read_only_space", 0x05941): (115, "WasmJSFunctionDataMap"),
+ ("read_only_space", 0x05969): (116, "WasmValueMap"),
+ ("read_only_space", 0x05991): (135, "SloppyArgumentsElementsMap"),
+ ("read_only_space", 0x059b9): (152, "DescriptorArrayMap"),
+ ("read_only_space", 0x059e1): (172, "OnHeapBasicBlockProfilerDataMap"),
+ ("read_only_space", 0x05a09): (181, "WasmCapiFunctionDataMap"),
+ ("read_only_space", 0x05a31): (169, "InternalClassMap"),
+ ("read_only_space", 0x05a59): (178, "SmiPairMap"),
+ ("read_only_space", 0x05a81): (177, "SmiBoxMap"),
+ ("read_only_space", 0x05aa9): (146, "ExportedSubClassBaseMap"),
+ ("read_only_space", 0x05ad1): (147, "ExportedSubClassMap"),
+ ("read_only_space", 0x05af9): (68, "AbstractInternalClassSubclass1Map"),
+ ("read_only_space", 0x05b21): (69, "AbstractInternalClassSubclass2Map"),
+ ("read_only_space", 0x05b49): (134, "InternalClassWithSmiElementsMap"),
+ ("read_only_space", 0x05b71): (170, "InternalClassWithStructElementsMap"),
+ ("read_only_space", 0x05b99): (148, "ExportedSubClass2Map"),
+ ("read_only_space", 0x05bc1): (179, "SortStateMap"),
+ ("read_only_space", 0x05be9): (86, "AllocationSiteWithWeakNextMap"),
+ ("read_only_space", 0x05c11): (86, "AllocationSiteWithoutWeakNextMap"),
+ ("read_only_space", 0x05c39): (77, "LoadHandler1Map"),
+ ("read_only_space", 0x05c61): (77, "LoadHandler2Map"),
+ ("read_only_space", 0x05c89): (77, "LoadHandler3Map"),
+ ("read_only_space", 0x05cb1): (78, "StoreHandler0Map"),
+ ("read_only_space", 0x05cd9): (78, "StoreHandler1Map"),
+ ("read_only_space", 0x05d01): (78, "StoreHandler2Map"),
+ ("read_only_space", 0x05d29): (78, "StoreHandler3Map"),
("map_space", 0x02115): (1057, "ExternalMap"),
("map_space", 0x0213d): (1072, "JSMessageObjectMap"),
- ("map_space", 0x02165): (180, "WasmRttEqrefMap"),
- ("map_space", 0x0218d): (180, "WasmRttExternrefMap"),
- ("map_space", 0x021b5): (180, "WasmRttFuncrefMap"),
- ("map_space", 0x021dd): (180, "WasmRttI31refMap"),
+ ("map_space", 0x02165): (182, "WasmRttEqrefMap"),
+ ("map_space", 0x0218d): (182, "WasmRttExternrefMap"),
+ ("map_space", 0x021b5): (182, "WasmRttFuncrefMap"),
+ ("map_space", 0x021dd): (182, "WasmRttI31refMap"),
}
# List of known V8 objects.
@@ -395,20 +399,21 @@ KNOWN_OBJECTS = {
("read_only_space", 0x0325d): "EmptyFeedbackMetadata",
("read_only_space", 0x03269): "EmptyPropertyCell",
("read_only_space", 0x0327d): "EmptyPropertyDictionary",
- ("read_only_space", 0x032cd): "NoOpInterceptorInfo",
- ("read_only_space", 0x032f5): "EmptyWeakArrayList",
- ("read_only_space", 0x03301): "InfinityValue",
- ("read_only_space", 0x0330d): "MinusZeroValue",
- ("read_only_space", 0x03319): "MinusInfinityValue",
- ("read_only_space", 0x03325): "SelfReferenceMarker",
- ("read_only_space", 0x03365): "BasicBlockCountersMarker",
- ("read_only_space", 0x033a9): "OffHeapTrampolineRelocationInfo",
- ("read_only_space", 0x033b5): "TrampolineTrivialCodeDataContainer",
- ("read_only_space", 0x033c1): "TrampolinePromiseRejectionCodeDataContainer",
- ("read_only_space", 0x033cd): "GlobalThisBindingScopeInfo",
- ("read_only_space", 0x03405): "EmptyFunctionScopeInfo",
- ("read_only_space", 0x0342d): "NativeScopeInfo",
- ("read_only_space", 0x03449): "HashSeed",
+ ("read_only_space", 0x032a5): "EmptyOrderedPropertyDictionary",
+ ("read_only_space", 0x032e5): "NoOpInterceptorInfo",
+ ("read_only_space", 0x0330d): "EmptyWeakArrayList",
+ ("read_only_space", 0x03319): "InfinityValue",
+ ("read_only_space", 0x03325): "MinusZeroValue",
+ ("read_only_space", 0x03331): "MinusInfinityValue",
+ ("read_only_space", 0x0333d): "SelfReferenceMarker",
+ ("read_only_space", 0x0337d): "BasicBlockCountersMarker",
+ ("read_only_space", 0x033c1): "OffHeapTrampolineRelocationInfo",
+ ("read_only_space", 0x033cd): "TrampolineTrivialCodeDataContainer",
+ ("read_only_space", 0x033d9): "TrampolinePromiseRejectionCodeDataContainer",
+ ("read_only_space", 0x033e5): "GlobalThisBindingScopeInfo",
+ ("read_only_space", 0x0341d): "EmptyFunctionScopeInfo",
+ ("read_only_space", 0x03445): "NativeScopeInfo",
+ ("read_only_space", 0x03461): "HashSeed",
("old_space", 0x02115): "ArgumentsIteratorAccessor",
("old_space", 0x02159): "ArrayLengthAccessor",
("old_space", 0x0219d): "BoundFunctionLengthAccessor",
@@ -444,27 +449,27 @@ KNOWN_OBJECTS = {
("old_space", 0x02a61): "StringSplitCache",
("old_space", 0x02e69): "RegExpMultipleCache",
("old_space", 0x03271): "BuiltinsConstantsTable",
- ("old_space", 0x0364d): "AsyncFunctionAwaitRejectSharedFun",
- ("old_space", 0x03675): "AsyncFunctionAwaitResolveSharedFun",
- ("old_space", 0x0369d): "AsyncGeneratorAwaitRejectSharedFun",
- ("old_space", 0x036c5): "AsyncGeneratorAwaitResolveSharedFun",
- ("old_space", 0x036ed): "AsyncGeneratorYieldResolveSharedFun",
- ("old_space", 0x03715): "AsyncGeneratorReturnResolveSharedFun",
- ("old_space", 0x0373d): "AsyncGeneratorReturnClosedRejectSharedFun",
- ("old_space", 0x03765): "AsyncGeneratorReturnClosedResolveSharedFun",
- ("old_space", 0x0378d): "AsyncIteratorValueUnwrapSharedFun",
- ("old_space", 0x037b5): "PromiseAllResolveElementSharedFun",
- ("old_space", 0x037dd): "PromiseAllSettledResolveElementSharedFun",
- ("old_space", 0x03805): "PromiseAllSettledRejectElementSharedFun",
- ("old_space", 0x0382d): "PromiseAnyRejectElementSharedFun",
- ("old_space", 0x03855): "PromiseCapabilityDefaultRejectSharedFun",
- ("old_space", 0x0387d): "PromiseCapabilityDefaultResolveSharedFun",
- ("old_space", 0x038a5): "PromiseCatchFinallySharedFun",
- ("old_space", 0x038cd): "PromiseGetCapabilitiesExecutorSharedFun",
- ("old_space", 0x038f5): "PromiseThenFinallySharedFun",
- ("old_space", 0x0391d): "PromiseThrowerFinallySharedFun",
- ("old_space", 0x03945): "PromiseValueThunkFinallySharedFun",
- ("old_space", 0x0396d): "ProxyRevokeSharedFun",
+ ("old_space", 0x03651): "AsyncFunctionAwaitRejectSharedFun",
+ ("old_space", 0x03679): "AsyncFunctionAwaitResolveSharedFun",
+ ("old_space", 0x036a1): "AsyncGeneratorAwaitRejectSharedFun",
+ ("old_space", 0x036c9): "AsyncGeneratorAwaitResolveSharedFun",
+ ("old_space", 0x036f1): "AsyncGeneratorYieldResolveSharedFun",
+ ("old_space", 0x03719): "AsyncGeneratorReturnResolveSharedFun",
+ ("old_space", 0x03741): "AsyncGeneratorReturnClosedRejectSharedFun",
+ ("old_space", 0x03769): "AsyncGeneratorReturnClosedResolveSharedFun",
+ ("old_space", 0x03791): "AsyncIteratorValueUnwrapSharedFun",
+ ("old_space", 0x037b9): "PromiseAllResolveElementSharedFun",
+ ("old_space", 0x037e1): "PromiseAllSettledResolveElementSharedFun",
+ ("old_space", 0x03809): "PromiseAllSettledRejectElementSharedFun",
+ ("old_space", 0x03831): "PromiseAnyRejectElementSharedFun",
+ ("old_space", 0x03859): "PromiseCapabilityDefaultRejectSharedFun",
+ ("old_space", 0x03881): "PromiseCapabilityDefaultResolveSharedFun",
+ ("old_space", 0x038a9): "PromiseCatchFinallySharedFun",
+ ("old_space", 0x038d1): "PromiseGetCapabilitiesExecutorSharedFun",
+ ("old_space", 0x038f9): "PromiseThenFinallySharedFun",
+ ("old_space", 0x03921): "PromiseThrowerFinallySharedFun",
+ ("old_space", 0x03949): "PromiseValueThunkFinallySharedFun",
+ ("old_space", 0x03971): "ProxyRevokeSharedFun",
}
# Lower 32 bits of first page addresses for various heap spaces.
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 6e2bc1cf76..d6024b5b7d 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -7,10 +7,11 @@ A Smi balks into a war and says:
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
The autoroller bought a round of Himbeerbrause. Suddenly.....
-The bartender starts to shake the bottles......................
-I can't add trailing whitespaces, so I'm adding this line......
+The bartender starts to shake the bottles........................
+I can't add trailing whitespaces, so I'm adding this line.......
I'm starting to think that just adding trailing whitespaces might not be bad.
Because whitespaces are not that funny.....
Today's answer to life the universe and everything is 12950!
Today's answer to life the universe and everything is 6728!
+Today's answer to life the universe and everything is 6728!!